├── .gitignore ├── CRAN-SUBMISSION ├── DESCRIPTION ├── LICENSE ├── NAMESPACE ├── NEWS.md ├── R ├── add_stats.R ├── count_data.r ├── custom_stat.r ├── custom_stats.r ├── data.R ├── describe_data.r ├── globals.r ├── helper_functions.r ├── in-progress │ ├── tidy_stats.marginaleffects.R │ └── tidy_stats.metafor.r ├── read_stats.R ├── tidy_stats.BFBayesFactor.r ├── tidy_stats.Hmisc.r ├── tidy_stats.R ├── tidy_stats.afex.r ├── tidy_stats.brmsfit.r ├── tidy_stats.effectsize.r ├── tidy_stats.effsize.r ├── tidy_stats.emmeans.R ├── tidy_stats.irr.r ├── tidy_stats.lavaan.r ├── tidy_stats.lmerMod.r ├── tidy_stats.nlme.r ├── tidy_stats.psych.r ├── tidy_stats.tidystats.r ├── tidy_stats_to_data_frame.R ├── write_stats.R └── zzz.r ├── README.Rmd ├── README.md ├── _pkgdown.yml ├── cran-comments.md ├── data └── quote_source.rda ├── docs ├── .nojekyll ├── 404.html ├── CNAME ├── LICENSE-text.html ├── articles │ ├── custom-statistics.html │ ├── index.html │ ├── introduction-to-tidystats.html │ ├── reusing-statistics.html │ └── supported-functions.html ├── authors.html ├── deps │ ├── bootstrap-5.3.1 │ │ ├── bootstrap.bundle.min.js │ │ ├── bootstrap.bundle.min.js.map │ │ └── bootstrap.min.css │ ├── bootstrap-toc-1.0.1 │ │ └── bootstrap-toc.min.js │ ├── clipboard.js-2.0.11 │ │ └── clipboard.min.js │ ├── data-deps.txt │ ├── font-awesome-6.5.2 │ │ ├── css │ │ │ ├── all.css │ │ │ ├── all.min.css │ │ │ ├── v4-shims.css │ │ │ └── v4-shims.min.css │ │ └── webfonts │ │ │ ├── fa-brands-400.ttf │ │ │ ├── fa-brands-400.woff2 │ │ │ ├── fa-regular-400.ttf │ │ │ ├── fa-regular-400.woff2 │ │ │ ├── fa-solid-900.ttf │ │ │ ├── fa-solid-900.woff2 │ │ │ ├── fa-v4compatibility.ttf │ │ │ └── fa-v4compatibility.woff2 │ ├── headroom-0.11.0 │ │ ├── headroom.min.js │ │ └── jQuery.headroom.min.js │ ├── jquery-3.6.0 │ │ ├── jquery-3.6.0.js │ │ ├── jquery-3.6.0.min.js │ │ └── jquery-3.6.0.min.map │ └── search-1.0.0 │ │ ├── autocomplete.jquery.min.js │ │ ├── fuse.min.js │ │ └── mark.min.js ├── index.html ├── katex-auto.js ├── lightswitch.js ├── link.svg ├── news │ └── index.html ├── pkgdown.js ├── pkgdown.yml ├── reference │ ├── add_statistic.html │ ├── add_stats.html │ ├── count_data.html │ ├── custom_stat.html │ ├── custom_stats.html │ ├── describe_data.html │ ├── expect_equal_models.html │ ├── helper_functions.html │ ├── index.html │ ├── quote_source.html │ ├── read_stats.html │ ├── symbol.html │ ├── tidy_stats.afex_aov.html │ ├── tidy_stats.anova.html │ ├── tidy_stats.anova.lme.html │ ├── tidy_stats.aov.html │ ├── tidy_stats.aovlist.html │ ├── tidy_stats.brmsfit.html │ ├── tidy_stats.confint.html │ ├── tidy_stats.effectsize_difference.html │ ├── tidy_stats.effsize.html │ ├── tidy_stats.emmGrid.html │ ├── tidy_stats.emm_list.html │ ├── tidy_stats.glm.html │ ├── tidy_stats.gls.html │ ├── tidy_stats.htest.html │ ├── tidy_stats.html │ ├── tidy_stats.icclist.html │ ├── tidy_stats.lavaan.html │ ├── tidy_stats.lm.html │ ├── tidy_stats.lme.html │ ├── tidy_stats.lmerMod.html │ ├── tidy_stats.lmerModLmerTest.html │ ├── tidy_stats.mixed.html │ ├── tidy_stats.nlme.html │ ├── tidy_stats.pairwise.htest.html │ ├── tidy_stats.psych.html │ ├── tidy_stats.rcorr.html │ ├── tidy_stats.summary_emm.html │ ├── tidy_stats.tidystats.html │ ├── tidy_stats.tidystats_counts.html │ ├── tidy_stats.tidystats_descriptives.html │ ├── tidy_stats_to_data_frame.html │ ├── write_stats.html │ └── write_test_stats.html ├── search.json └── sitemap.xml ├── how-to-package.R ├── inst ├── CITATION └── statistics.json ├── man ├── add_stats.Rd ├── count_data.Rd ├── custom_stat.Rd 
├── custom_stats.Rd ├── describe_data.Rd ├── helper_functions.Rd ├── quote_source.Rd ├── read_stats.Rd ├── tidy_stats.Rd ├── tidy_stats_to_data_frame.Rd └── write_stats.Rd ├── tests ├── data │ ├── BayesFactor.json │ ├── Hmisc.json │ ├── afex.json │ ├── aov.json │ ├── brms.json │ ├── confint.json │ ├── count_data.json │ ├── describe_data.json │ ├── effectsize.json │ ├── effsize.json │ ├── emmeans.json │ ├── glm.json │ ├── htest.json │ ├── irr.json │ ├── lavaan.json │ ├── lm.json │ ├── lme4.json │ ├── lmerTest.json │ ├── main-update.json │ ├── main.json │ ├── main_df.csv │ ├── marginaleffects.json │ ├── pairwise_htest.json │ ├── psych.json │ └── stats.json ├── prep │ ├── prep_BayesFactor.R │ ├── prep_Hmisc.R │ ├── prep_afex.R │ ├── prep_aov.R │ ├── prep_brms.R │ ├── prep_confint.R │ ├── prep_count_data.R │ ├── prep_describe_data.R │ ├── prep_effectsize.R │ ├── prep_effsize.R │ ├── prep_emmeans.R │ ├── prep_glm.R │ ├── prep_htest.R │ ├── prep_irr.R │ ├── prep_lavaan.R │ ├── prep_lm.R │ ├── prep_lme4.R │ ├── prep_lmerTest.R │ ├── prep_main.R │ ├── prep_ordinal.R │ ├── prep_pairwise_htest.R │ ├── prep_psych.R │ ├── prep_quantreg.R │ └── prep_stats.R ├── testthat.R ├── testthat │ ├── test_BayesFactor.R │ ├── test_Hmisc.R │ ├── test_afex.R │ ├── test_aov.R │ ├── test_confint.R │ ├── test_count_data.R │ ├── test_describe_data.R │ ├── test_effectsize.R │ ├── test_effsize.R │ ├── test_emmeans.r │ ├── test_glm.R │ ├── test_htest.R │ ├── test_irr.R │ ├── test_lm.R │ ├── test_lme4.R │ ├── test_lmerTest.R │ ├── test_main.R │ ├── test_pairwise_htest.R │ ├── test_stats.R │ └── test_tidy_stats_to_data_frame.R └── work-in-progress │ └── prep │ ├── prep_marginaleffects.R │ ├── prep_metafor.R │ ├── prep_misc.R │ └── prep_nlme.R ├── tidystats-R-package.Rproj └── vignettes ├── .gitignore ├── custom-statistics.Rmd ├── introduction-to-tidystats.Rmd ├── references.bib ├── reusing-statistics.Rmd ├── statistics.json ├── supported-functions.Rmd └── supported-functions.json /.gitignore: -------------------------------------------------------------------------------- 1 | .Rproj.user 2 | .Rhistory 3 | .RData 4 | .Ruserdata 5 | .DS_Store 6 | .Rbuildignore 7 | inst/doc 8 | -------------------------------------------------------------------------------- /CRAN-SUBMISSION: -------------------------------------------------------------------------------- 1 | Version: 0.6.3 2 | Date: 2025-04-06 10:18:47 UTC 3 | SHA: 1561ab4661b8f28f02da2d003969438fa31f07dc 4 | -------------------------------------------------------------------------------- /DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: tidystats 2 | Type: Package 3 | Title: Save Output of Statistical Tests 4 | Version: 0.6.3 5 | Authors@R: person(given = "Willem", 6 | family = "Sleegers", 7 | role = c("aut", "cre"), 8 | email = "w.sleegers@me.com") 9 | Maintainer: Willem Sleegers 10 | Description: Save the output of statistical tests in an organized file that can 11 | be shared with others or used to report statistics in scientific papers. 
12 | URL: https://willemsleegers.github.io/tidystats/ 13 | BugReports: https://github.com/WillemSleegers/tidystats/issues 14 | License: MIT + file LICENSE 15 | Encoding: UTF-8 16 | Roxygen: list(markdown = TRUE) 17 | RoxygenNote: 7.3.2 18 | LazyData: true 19 | Depends: R (>= 4.1.0) 20 | Imports: 21 | dplyr, 22 | tidyr, 23 | purrr, 24 | stringr, 25 | readr, 26 | jsonlite, 27 | tibble, 28 | checkmate 29 | Suggests: 30 | BayesFactor, 31 | knitr, 32 | lme4, 33 | lmerTest, 34 | rmarkdown, 35 | effectsize, 36 | effsize, 37 | Hmisc, 38 | afex, 39 | emmeans, 40 | irr, 41 | testthat, 42 | MEMSS, 43 | lavaan, 44 | methods, 45 | nlme, 46 | rlang, 47 | tidyselect 48 | VignetteBuilder: knitr 49 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | YEAR: 2024 2 | COPYRIGHT HOLDER: Willem Sleegers -------------------------------------------------------------------------------- /NAMESPACE: -------------------------------------------------------------------------------- 1 | # Generated by roxygen2: do not edit by hand 2 | 3 | S3method(tidy_stats,BFBayesFactor) 4 | S3method(tidy_stats,afex_aov) 5 | S3method(tidy_stats,anova) 6 | S3method(tidy_stats,anova.lme) 7 | S3method(tidy_stats,aov) 8 | S3method(tidy_stats,aovlist) 9 | S3method(tidy_stats,brmsfit) 10 | S3method(tidy_stats,confint) 11 | S3method(tidy_stats,effectsize_difference) 12 | S3method(tidy_stats,effsize) 13 | S3method(tidy_stats,emmGrid) 14 | S3method(tidy_stats,emm_list) 15 | S3method(tidy_stats,glm) 16 | S3method(tidy_stats,gls) 17 | S3method(tidy_stats,htest) 18 | S3method(tidy_stats,icclist) 19 | S3method(tidy_stats,lavaan) 20 | S3method(tidy_stats,lm) 21 | S3method(tidy_stats,lme) 22 | S3method(tidy_stats,lmerMod) 23 | S3method(tidy_stats,lmerModLmerTest) 24 | S3method(tidy_stats,mixed) 25 | S3method(tidy_stats,nlme) 26 | S3method(tidy_stats,pairwise.htest) 27 | S3method(tidy_stats,psych) 28 | S3method(tidy_stats,rcorr) 29 | S3method(tidy_stats,summary_emm) 30 | S3method(tidy_stats,tidystats) 31 | S3method(tidy_stats,tidystats_counts) 32 | S3method(tidy_stats,tidystats_descriptives) 33 | export(add_stats) 34 | export(count_data) 35 | export(custom_stat) 36 | export(custom_stats) 37 | export(describe_data) 38 | export(read_stats) 39 | export(tidy_stats) 40 | export(tidy_stats_to_data_frame) 41 | export(write_stats) 42 | -------------------------------------------------------------------------------- /R/add_stats.R: -------------------------------------------------------------------------------- 1 | #' Add statistical output to a tidystats list 2 | #' 3 | #' [add_stats()] is used to add the output of a statistical test to a 4 | #' tidystats list. 5 | #' 6 | #' @param list A tidystats list. 7 | #' @param output Output of a statistical test. 8 | #' @param identifier A string identifying the model. Automatically created if 9 | #' not provided. 10 | #' @param type A string specifying the type of analysis: primary, 11 | #' secondary, or exploratory. 12 | #' @param preregistered A boolean specifying whether the analysis was 13 | #' preregistered or not. 14 | #' @param notes A string specifying additional information. 15 | #' @param class A string to manually specify the class of the object so that 16 | #' tidystats knows how to extract the statistics. See 'Details' for a list of 17 | #' classes that are supported. 18 | #' @param args A list of additional arguments to customize which statistics 19 | #' should be extracted. 
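#' For example, for models fitted with 'brms' you can use
#' `args = list(prob = .9)` to change the width of the extracted credible
#' intervals.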
See 'Details' for a list of supported functions and 20 | #' their arguments. 21 | #' 22 | #' @details 23 | #' Many functions to perform statistical tests (e.g., [t.test()], [lm()]) return 24 | #' an object containing the statistics. These objects can be stored in variables 25 | #' and used with [add_stats()] to extract the statistics and add them to a 26 | #' list. 27 | #' 28 | #' The list can be saved to a file using the [write_stats()] function. 29 | #' 30 | #' For a list of supported functions, see `vignette("supported-functions", 31 | #' package = "tidystats")`. 32 | #' 33 | #' @examples 34 | #' # Conduct analyses 35 | #' sleep_wide <- reshape( 36 | #' sleep, 37 | #' direction = "wide", 38 | #' idvar = "ID", 39 | #' timevar = "group", 40 | #' sep = "_" 41 | #' ) 42 | #' sleep_test <- t.test(sleep_wide$extra_1, sleep_wide$extra_2, paired = TRUE) 43 | #' 44 | #' ctl <- c(4.17, 5.58, 5.18, 6.11, 4.50, 4.61, 5.17, 4.53, 5.33, 5.14) 45 | #' trt <- c(4.81, 4.17, 4.41, 3.59, 5.87, 3.83, 6.03, 4.89, 4.32, 4.69) 46 | #' group <- gl(2, 10, 20, labels = c("Ctl", "Trt")) 47 | #' weight <- c(ctl, trt) 48 | #' lm_D9 <- lm(weight ~ group) 49 | #' lm_D9_confint <- confint(lm_D9) 50 | #' 51 | #' npk_aov <- aov(yield ~ block + N * P * K, npk) 52 | #' 53 | #' # Create an empty list to store the statistics in 54 | #' statistics <- list() 55 | #' 56 | #' # Add statistics to the list 57 | #' statistics <- statistics |> 58 | #' add_stats(sleep_test, type = "primary", preregistered = TRUE) |> 59 | #' add_stats(lm_D9) |> 60 | #' add_stats(lm_D9_confint, class = "confint") |> 61 | #' add_stats(npk_aov, notes = "An ANOVA example") 62 | #' 63 | #' @export 64 | add_stats <- function(list, output, identifier = NULL, type = NULL, 65 | preregistered = NULL, notes = NULL, args = NULL, 66 | class = NULL) { 67 | if (is.null(identifier)) { 68 | if (deparse(substitute(output)) == ".") { 69 | identifier <- paste0( 70 | "M", formatC(length(list) + 1, 71 | width = "1", format = "d" 72 | ) 73 | ) 74 | } else { 75 | identifier <- deparse(substitute(output)) 76 | } 77 | } else { 78 | if (!is.null(names(list))) { 79 | if (identifier %in% names(list)) { 80 | stop("Identifier already exists.") 81 | } 82 | } 83 | } 84 | 85 | if (!is.null(class)) { 86 | class(output) <- append(class(output), class, after = 0) 87 | } 88 | 89 | analysis <- tidy_stats(output, args = args) 90 | 91 | if (!is.null(type)) { 92 | if (type == "primary") { 93 | analysis$type <- "primary" 94 | } else if (type == "secondary") { 95 | analysis$type <- "secondary" 96 | } else if (type == "exploratory") { 97 | analysis$type <- "exploratory" 98 | } else { 99 | warning(paste( 100 | "Unknown type; type should be either 'primary',", 101 | "'secondary', or 'exploratory'." 102 | )) 103 | } 104 | } 105 | 106 | if (!is.null(preregistered)) { 107 | if (preregistered) { 108 | analysis$preregistered <- "yes" 109 | } else { 110 | analysis$preregistered <- "no" 111 | } 112 | } 113 | 114 | if (!is.null(notes)) { 115 | analysis$notes <- notes 116 | } 117 | 118 | list[[identifier]] <- analysis 119 | 120 | return(list) 121 | } 122 | -------------------------------------------------------------------------------- /R/count_data.r: -------------------------------------------------------------------------------- 1 | #' Count the number of observations 2 | #' 3 | #' [count_data()] returns the number and proportion of observations for 4 | #' categorical variables. 5 | #' 6 | #' @param data A data frame. 7 | #' @param ... 
One or more unquoted (categorical) column names from the data 8 | #' frame, separated by commas. 9 | #' @param na.rm A boolean specifying whether missing values (including NaN) 10 | #' should be removed. 11 | #' @param pct A boolean indicating whether to calculate percentages instead of 12 | #' proportions. The default is `FALSE`. 13 | #' 14 | #' @details The data frame can be grouped using [dplyr::group_by()] 15 | #' so that the number of observations will be calculated within each group 16 | #' level. 17 | #' 18 | #' @examples 19 | #' count_data(quote_source, source) 20 | #' count_data(quote_source, source, sex) 21 | #' count_data(quote_source, source, sex, na.rm = TRUE) 22 | #' count_data(quote_source, source, sex, na.rm = TRUE, pct = TRUE) 23 | #' 24 | #' # Use dplyr::group_by() to calculate proportions within a group 25 | #' quote_source |> 26 | #' dplyr::group_by(source) |> 27 | #' count_data(sex) 28 | #' 29 | #' @export 30 | count_data <- function(data, ..., na.rm = FALSE, pct = FALSE) { 31 | checkmate::assert_data_frame(data) 32 | 33 | output <- dplyr::count(data, ...) 34 | 35 | # Remove missing observations if na.rm is set to TRUE 36 | if (na.rm) { 37 | output <- dplyr::filter( 38 | output, 39 | dplyr::if_all(dplyr::everything(), ~ !is.na(.)) 40 | ) 41 | } 42 | 43 | # Calculate proportion or percentage of each group per var 44 | if (pct) { 45 | output <- dplyr::mutate(output, pct = n / sum(n) * 100) 46 | } else { 47 | output <- dplyr::mutate(output, prop = n / sum(n)) 48 | } 49 | 50 | 51 | # Add a tidystats class so we can use the tidy_stats() function to parse the 52 | # the output 53 | class(output) <- c("tidystats_counts", class(output)) 54 | 55 | return(output) 56 | } 57 | -------------------------------------------------------------------------------- /R/custom_stat.r: -------------------------------------------------------------------------------- 1 | #' Create a custom statistic 2 | #' 3 | #' [custom_stat()] is used together with the [custom_stats()] function to add 4 | #' statistics from unsupported functions via [add_stats()]. See the 5 | #' [custom_stats()] function for more information. 6 | #' 7 | #' @param name A string specifying the name of the statistic. 8 | #' @param value The numeric value of the statistic. 9 | #' @param symbol A string specifying the symbol of the statistic to use when 10 | #' reporting the statistic. 11 | #' @param subscript A string specifying a subscript to use when reporting the 12 | #' statistic. 13 | #' @param interval A string specifying the type of interval if the statistic is 14 | #' a ranged statistic (e.g., 95% confidence interval) 15 | #' @param level A numeric value between 0 and 1 indicating the level of the 16 | #' interval. 17 | #' @param lower The numeric value of the lower bound of the statistic. 18 | #' @param upper The numeric value of the upper bound of the statistic. 
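#' @return A list containing the statistic, which can be passed on to
#'   [custom_stats()].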
19 | #' 20 | #' @examples 21 | #' # Example 1: A single mean value 22 | #' sample <- rnorm(1000, mean = 0, sd = 1) 23 | #' mean <- mean(sample) 24 | #' 25 | #' custom_stat(name = "mean", value = mean, symbol = "M") 26 | #' 27 | #' # Example 2: A mean with a 95% confidence interval 28 | #' sample <- rnorm(1000, mean = 0, sd = 1) 29 | #' mean <- mean(sample) 30 | #' se <- sd(sample) / sqrt(length(sample)) 31 | #' CI <- c(mean - 1.96 * se, mean + 1.96 * se) 32 | #' 33 | #' custom_stat( 34 | #' name = "mean", 35 | #' value = mean, 36 | #' symbol = "M", 37 | #' interval = "CI", 38 | #' level = .95, 39 | #' lower = CI[1], 40 | #' upper = CI[2] 41 | #' ) 42 | #' 43 | #' @export 44 | custom_stat <- function(name, value, symbol = NULL, subscript = NULL, 45 | interval = NULL, level = NULL, lower = NULL, 46 | upper = NULL) { 47 | custom_stat <- list( 48 | name = name, 49 | value = value 50 | ) 51 | 52 | if (!is.null(symbol)) custom_stat$symbol <- symbol 53 | if (!is.null(subscript)) custom_stat$subscript <- subscript 54 | 55 | if (!is.null(interval)) { 56 | custom_stat$interval <- interval 57 | custom_stat$level <- level 58 | custom_stat$lower <- lower 59 | custom_stat$upper <- upper 60 | } 61 | 62 | class(custom_stat) <- c("tidystats", "list") 63 | 64 | return(list(custom_stat)) 65 | } 66 | -------------------------------------------------------------------------------- /R/custom_stats.r: -------------------------------------------------------------------------------- 1 | #' Create a collection of custom statistics 2 | #' 3 | #' [custom_stats()] is used to create a collection of statistics from 4 | #' unsupported functions to add to a list via [add_stats()]. 5 | #' 6 | #' @param method A string specifying the method used to obtain the statistics. 7 | #' @param statistics A vector of statistics created with [custom_stat()]. 8 | #' 9 | #' @details 10 | #' [custom_stats()] supports adding a single statistic or a group of statistics. 11 | #' Multiple groups of statistics are not (yet) supported. 12 | #' 13 | #' @examples 14 | #' # Example: BIC Bayes factor (approx.) 15 | #' # Run the analysis 16 | #' lm1 <- lm(Fertility ~ ., data = swiss) 17 | #' lm2 <- update(lm1, . ~ . - Examination) 18 | #' 19 | #' BF10 <- 1 / exp((BIC(lm2) - BIC(lm1)) / 2) 20 | #' 21 | #' # Create the custom statistics 22 | #' BIC_BFs <- custom_stats( 23 | #' method = "BIC Bayes factor", 24 | #' statistics = c( 25 | #' custom_stat(name = "BF", value = BF10, subscript = "10"), 26 | #' custom_stat(name = "BF", value = 1 / BF10, subscript = "01") 27 | #' ) 28 | #' ) 29 | #' 30 | #' # Create an empty list 31 | #' statistics <- list() 32 | #' 33 | #' # Add the custom statistics to the list 34 | #' statistics <- add_stats(statistics, BIC_BFs) 35 | #' 36 | #' @export 37 | custom_stats <- function(method, statistics) { 38 | stats <- list() 39 | 40 | for (stat in statistics) { 41 | stats <- append(stats, list(stat)) 42 | } 43 | 44 | output <- list( 45 | method = method, 46 | statistics = stats 47 | ) 48 | 49 | class(output) <- c("tidystats", "list") 50 | 51 | return(output) 52 | } 53 | -------------------------------------------------------------------------------- /R/data.R: -------------------------------------------------------------------------------- 1 | #' A Many Labs replication of Lorge & Curtiss (1936) 2 | #' 3 | #' Data of multiple studies from the Many Labs project (Klein et al., 2014) 4 | #' replicating Lorge & Curtiss (1936). 
5 | #' 6 | #' @format A data frame with 6343 rows and 15 columns: 7 | #' \describe{ 8 | #' \item{ID}{participant number} 9 | #' \item{source}{attributed source of the quote: Washington or Bin 10 | #' Laden} 11 | #' \item{response}{evaluation of the quote on a 9-point Likert 12 | #' scale, with 1 indicating disagreement and 9 indicating agreement} 13 | #' \item{age}{participant's age} 14 | #' \item{sex}{participant's sex} 15 | #' \item{citizenship}{participant's citizenship} 16 | #' \item{race}{participant's race} 17 | #' \item{major}{participant's major} 18 | #' \item{native_language}{participant's native language} 19 | #' \item{referrer}{location of where the study was conducted} 20 | #' \item{compensation}{how the participant was compensated for their 21 | #' participation} 22 | #' \item{recruitment}{how the participant was recruited} 23 | #' \item{separation}{description of how the study was administered in 24 | #' terms of participant isolation} 25 | #' \item{us_or_international}{whether the study was conducted in the US or 26 | #' outside of the US (international)} 27 | #' \item{lab_or_online}{whether the study was conducted in the lab or online} 28 | #' } 29 | #' 30 | #' @details Lorge and Curtiss (1936) examined how a quotation is perceived when 31 | #' it is attributed to a liked or disliked individual. The quotation of interest 32 | #' was: "I hold it that a little rebellion, now and then, is a good thing, and 33 | #' as necessary in the political world as storms are in the physical world." 34 | #' In one condition the quotation was attributed to Thomas Jefferson, a liked 35 | #' individual, and in the other condition it was attributed to Vladimir Lenin, a 36 | #' disliked individual. More agreement was observed when the quotation was 37 | #' attributed to Jefferson than Lenin. In the replication studies, the quotation 38 | #' was: "I have sworn to only live free, even if I find bitter the taste of 39 | #' death." This quotation was attributed to either George Washington, the 40 | #' liked individual, or Osama Bin Laden, the disliked individual. 41 | #' 42 | #' @references 43 | #' Lorge, I., & Curtiss, C. C. (1936). Prestige, suggestion, and attitudes. 44 | #' The Journal of Social Psychology, 7, 386-402. 45 | #' \doi{10.1080/00224545.1936.9919891} 46 | #' 47 | #' Klein, R.A. et al. (2014) Investigating Variation in Replicability: A "Many 48 | #' Labs" Replication Project. Social Psychology, 45(3), 142-152. 49 | #' \doi{10.1027/1864-9335/a000178} 50 | #' 51 | "quote_source" 52 | -------------------------------------------------------------------------------- /R/describe_data.r: -------------------------------------------------------------------------------- 1 | #' Calculate common descriptive statistics 2 | #' 3 | #' [describe_data()] returns a set of common descriptive statistics 4 | #' (e.g., number of observations, mean, standard deviation) for one or more 5 | #' numeric variables. 6 | #' 7 | #' @param data A data frame. 8 | #' @param ... One or more unquoted column names from the data frame. 9 | #' @param na.rm A boolean indicating whether missing values (including NaN) 10 | #' should be excluded in calculating the descriptives? The default is TRUE. 11 | #' @param short A boolean indicating whether only a subset of descriptives 12 | #' should be reported? If set to `TRUE``, only the N, M, and SD will be 13 | #' returned. The default is `FALSE`. 
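#' @return A tibble with the descriptive statistics of each variable (per
#'   group, if the data are grouped), of class "tidystats_descriptives" so
#'   that it can be added to a tidystats list with [add_stats()].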
14 | #' 15 | #' @details The data can be grouped using [dplyr::group_by()] so that 16 | #' descriptives will be calculated for each group level. 17 | #' 18 | #' Skew and kurtosis are based on the [datawizard::skewness()] and 19 | #' [datawizard::kurtosis()] functions (Komsta & Novomestky, 2015). 20 | #' 21 | #' @examples 22 | #' describe_data(quote_source, response) 23 | #' 24 | #' describe_data(quote_source, response, na.rm = FALSE) 25 | #' 26 | #' quote_source |> 27 | #' dplyr::group_by(source) |> 28 | #' describe_data(response) 29 | #' 30 | #' quote_source |> 31 | #' dplyr::group_by(source) |> 32 | #' describe_data(response, short = TRUE) 33 | #' 34 | #' @export 35 | describe_data <- function( 36 | data, ..., na.rm = TRUE, short = FALSE) { 37 | checkmate::check_data_frame(data) 38 | 39 | # Check if the user provided any columns. 40 | if (length(rlang::enquos(...)) == 0) { 41 | stop("No columns found; please provide one or more columns.") 42 | } 43 | 44 | # Check if the columns exist in the data frame. 45 | column_names <- as.character(rlang::exprs(...)) 46 | if (sum(!column_names %in% names(data)) != 0) { 47 | stop( 48 | paste( 49 | "Did not find the following column(s) in the data:", 50 | paste(column_names[!column_names %in% names(data)], collapse = ", ") 51 | ) 52 | ) 53 | } 54 | 55 | # Check whether the values in the columns are numeric 56 | columns <- dplyr::select(dplyr::ungroup(data), ...) 57 | if (sum(!purrr::map_chr(columns, class) %in% c("numeric", "integer")) > 0) { 58 | stop( 59 | paste( 60 | "The following columns are not numeric:", 61 | paste( 62 | column_names[ 63 | !purrr::map_chr(columns, class) %in% c("numeric", "integer") 64 | ] 65 | ) 66 | ) 67 | ) 68 | } 69 | 70 | grouping <- dplyr::group_vars(data) 71 | 72 | data <- dplyr::select(data, dplyr::all_of(grouping), ...) 
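  # Restructure the data into a long format so that the descriptives can be
  # calculated for each variable separately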
73 | 74 | data <- tidyr::pivot_longer(data, 75 | cols = -dplyr::all_of(grouping), 76 | names_to = "var" 77 | ) 78 | 79 | # Add the var to the existing grouping 80 | data <- dplyr::group_by(data, var, .add = TRUE) 81 | 82 | # Calculate descriptives 83 | output <- data |> 84 | dplyr::summarize( 85 | missing = sum(is.na(value)), 86 | N = dplyr::n() - missing, 87 | M = mean(value, na.rm = na.rm), 88 | SD = sd(value, na.rm = na.rm), 89 | SE = SD / sqrt(N), 90 | min = min(value, na.rm = na.rm), 91 | max = max(value, na.rm = na.rm), 92 | range = diff(range(value, na.rm = na.rm)), 93 | median = median(value, na.rm = na.rm), 94 | mode = unique(value)[which.max(tabulate(match( 95 | value, 96 | unique(value) 97 | )))], 98 | skew = ( 99 | sum((value - mean(value, na.rm = na.rm))^3, na.rm = na.rm) / N 100 | ) / ( 101 | sum((value - mean(value, na.rm = na.rm))^2, na.rm = na.rm) / N 102 | )^(3 / 2), 103 | kurtosis = N * sum((value - mean(value, na.rm = na.rm))^4, 104 | na.rm = na.rm 105 | ) / (sum((value - mean(value, na.rm = na.rm))^2, na.rm = na.rm)^2) 106 | ) 107 | 108 | # Reorder the columns and return only a subset if short was set to TRUE 109 | if (short) { 110 | output <- dplyr::select( 111 | output, 112 | dplyr::all_of(c("var", grouping, "N", "M", "SD")) 113 | ) 114 | } else { 115 | output <- dplyr::relocate(output, var, dplyr::all_of(grouping)) 116 | } 117 | 118 | output <- dplyr::arrange(output, var) 119 | 120 | # Add a tidystats class so we can use the tidy_stats() function to parse the 121 | # the output 122 | # Put it at the beginning otherwise we get an error when printing the tibble 123 | class(output) <- c("tidystats_descriptives", class(output)) 124 | 125 | return(output) 126 | } 127 | -------------------------------------------------------------------------------- /R/globals.r: -------------------------------------------------------------------------------- 1 | # Trick to suppress some check warnings 2 | globalVariables( 3 | c( 4 | ":=", "identifier", "method", "variable", "value", "sd", 5 | "SD", "N", "median", "M", "pct", "DV", "group", "term", "terms", "n", 6 | "column", "name", "name1", "vars", "statistic", "statistic_name", 7 | "df_to_group", "ci_df_to_group", "fixed_cors", "ci_df_to_group", "head", 8 | "combn", "var", "name2", "corrs", "level", "lhs", "rhs", "op" 9 | ) 10 | ) 11 | -------------------------------------------------------------------------------- /R/helper_functions.r: -------------------------------------------------------------------------------- 1 | #' Helper functions in tidystats 2 | #' 3 | #' Functions used under the hood in the tidystats package. 4 | 5 | #' @describeIn helper_functions 6 | #' Function to convert matrix objects to a tidy data frame. 7 | #' 8 | #' @param m A matrix. 
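#' @param symmetric A boolean specifying whether the matrix is symmetric. If
#'   set to `TRUE`, the row and column names must match and only the upper
#'   triangle (excluding the diagonal) is returned.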
9 | #' 10 | #' @keywords internal 11 | tidy_matrix <- function(m, symmetric = TRUE) { 12 | if (!length(rownames(m)) > 0) { 13 | stop("Matrix has no row names.") 14 | } 15 | 16 | if (!length(colnames(m)) > 0) { 17 | stop("Matrix has no column names.") 18 | } 19 | 20 | if (symmetric) { 21 | if (sum(rownames(m) == colnames(m)) != length(rownames(m))) { 22 | stop("Matrix row and column names do not match.") 23 | } 24 | } 25 | 26 | # Remove the diagonal and duplicate values in case of a symmetric matrix 27 | if (symmetric) { 28 | m[lower.tri(m, diag = TRUE)] <- NA 29 | } 30 | 31 | df <- m |> 32 | as.matrix() |> 33 | tibble::as_tibble(rownames = "name1") |> 34 | tidyr::pivot_longer(-name1, names_to = "name2", values_to = "value") |> 35 | dplyr::filter(!is.na(value)) 36 | 37 | return(df) 38 | } 39 | 40 | # Tidying ----------------------------------------------------------------- 41 | 42 | #' @describeIn helper_functions 43 | #' Function to add a statistic to list. It helps create the list and ignores 44 | #' NULL values. 45 | #' 46 | #' @keywords internal 47 | add_statistic <- function(list, name, value, symbol = NULL, subscript = NULL, 48 | interval = NULL, level = NULL, lower = NULL, 49 | upper = NULL) { 50 | if (is_blank(value)) { 51 | return(list) 52 | } 53 | 54 | new_list <- list() 55 | new_list$name <- name 56 | 57 | if (!is_blank(symbol)) new_list$symbol <- symbol 58 | if (!is_blank(subscript)) new_list$subscript <- subscript 59 | 60 | new_list$value <- value 61 | 62 | if ( 63 | !is_blank(level) && 64 | !is_blank(interval) && 65 | !is_blank(lower) && 66 | !is_blank(upper) 67 | ) { 68 | new_list$interval <- interval 69 | new_list$level <- level 70 | new_list$lower <- lower 71 | new_list$upper <- upper 72 | } 73 | 74 | list <- append(list, list(new_list)) 75 | 76 | return(list) 77 | } 78 | 79 | add_attribute <- function(list, object, attribute) { 80 | value <- attr(object, attribute) 81 | if (!is.null(value)) { 82 | list[attribute] <- value 83 | } 84 | 85 | return(list) 86 | } 87 | 88 | add_package_info <- function(list, package) { 89 | list$package <- list( 90 | name = package, 91 | version = getNamespaceVersion(package)[[1]] 92 | ) 93 | 94 | return(list) 95 | } 96 | 97 | is_blank <- function(x) { 98 | return( 99 | is.null(x) || is.na(x) 100 | ) 101 | } 102 | 103 | # Symbols ----------------------------------------------------------------- 104 | 105 | #' @describeIn helper_functions 106 | #' Function to return symbols in ASCII. 107 | #' 108 | #' @keywords internal 109 | symbol <- function( 110 | x = c( 111 | "alpha", 112 | "chi_squared", 113 | "delta", 114 | "guttmans_lambda", 115 | "K_squared", 116 | "lambda", 117 | "p_hat", 118 | "R_squared", 119 | "sigma", 120 | "t_squared", 121 | "tau" 122 | )) { 123 | dplyr::case_match( 124 | x, 125 | "alpha" ~ intToUtf8(0x03b1), 126 | "chi_squared" ~ paste0(intToUtf8(0x03c7), intToUtf8(0x00b2)), 127 | "delta" ~ intToUtf8(0x03b4), 128 | "guttmans_lambda" ~ paste("Guttman's", intToUtf8(0x03bb)), 129 | "K_squared" ~ paste0("K", intToUtf8(0x00b2)), 130 | "lambda" ~ intToUtf8(0x03bb), 131 | "p_hat" ~ paste0("p", intToUtf8(0x0302)), 132 | "R_hat" ~ paste0("R", intToUtf8(0x0302)), 133 | "R_squared" ~ paste0("R", intToUtf8(0x00b2)), 134 | "sigma" ~ intToUtf8(0x03a3), 135 | "t_squared" ~ paste0("t", intToUtf8(0x00b2)), 136 | "tau" ~ intToUtf8(0x03c4) 137 | ) 138 | } 139 | 140 | # Testing ----------------------------------------------------------------- 141 | 142 | #' @describeIn helper_functions 143 | #' Function to compare tidied models during testing. 
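#' @param model A statistical model object to tidy with [tidy_stats()].
#' @param expected_tidy_model A previously tidied and saved model to compare
#'   the tidied model against.
#' @param tolerance A numeric tolerance passed on to [testthat::expect_equal()].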
144 | #' 145 | #' @keywords internal 146 | expect_equal_models <- function(model, expected_tidy_model, tolerance = 0.001) { 147 | # Convert model output to a tidystats list 148 | tidy_model <- tidy_stats(model) 149 | 150 | # Set package information to NULL because this may have changed since the 151 | # data was last saved 152 | tidy_model$package <- NULL 153 | expected_tidy_model$package <- NULL 154 | 155 | # Test whether the two lists are equal 156 | testthat::expect_equal(tidy_model, expected_tidy_model, tolerance = tolerance) 157 | } 158 | 159 | #' @describeIn helper_functions 160 | #' Function to save tidied statistics to a file. Since these files are used 161 | #' during testing, it's important to only store files with correctly tidied 162 | #' statistics, hence the prompt. 163 | #' 164 | #' @keywords internal 165 | write_test_stats <- function(x, path, digits = 6) { 166 | choice <- utils::menu( 167 | title = "Are you sure you want to save these (test) statistics?", 168 | choices = c("Yes", "No") 169 | ) 170 | 171 | if (choice == 1) { 172 | write_stats(x = x, path = path, digits = digits) 173 | } 174 | } 175 | -------------------------------------------------------------------------------- /R/read_stats.R: -------------------------------------------------------------------------------- 1 | #' Read a .json file that was produced with [write_stats()] 2 | #' 3 | #' [read_stats()] can read a .json file containing statistics that was produced 4 | #' using tidystats. It returns a list containing the statistics, with the 5 | #' identifier as the name for each list element. 6 | #' 7 | #' @param file A string specifying the path to the tidystats data file. 8 | #' 9 | #' @examples 10 | #' # A simple example, assuming there is a file called 'statistics.json' 11 | #' \dontrun{ 12 | #' statistics <- read_stats("statistics.json") 13 | #' } 14 | #' 15 | #' # A working example 16 | #' statistics <- read_stats( 17 | #' file = system.file("statistics.json", package = "tidystats") 18 | #' ) 19 | #' 20 | #' @export 21 | read_stats <- function(file) { 22 | if (tools::file_ext(file) != "json") { 23 | warning( 24 | paste( 25 | "The file does not have a .json file extension;", 26 | "make sure you have specified the correct file." 
27 | ) 28 | ) 29 | } 30 | 31 | results <- jsonlite::read_json(file) 32 | 33 | # Look for character Inf's and convert them to numeric 34 | results <- rapply( 35 | results, 36 | function(x) ifelse(x %in% c("Inf", "-Inf"), as.numeric(x), x), 37 | how = "replace" 38 | ) 39 | 40 | return(results) 41 | } 42 | -------------------------------------------------------------------------------- /R/tidy_stats.BFBayesFactor.r: -------------------------------------------------------------------------------- 1 | #' @describeIn tidy_stats tidy_stats method for class 'BayesFactor' 2 | #' @export 3 | tidy_stats.BFBayesFactor <- function(x, args = NULL) { 4 | # Create the analysis list 5 | analysis <- list() 6 | 7 | # Determine and set the method 8 | class <- class(x@numerator[[1]])[1] 9 | analysis$method <- dplyr::case_when( 10 | class == "BFoneSample" ~ "Bayesian t-test", 11 | class == "BFlinearModel" ~ "Bayesian linear regression", 12 | class == "BFcorrelation" ~ "Bayesian correlation", 13 | class == "BFcontingencyTable" ~ "Bayesian contingency table", 14 | class == "BFproportion" ~ "Bayesian analysis of proportions", 15 | class == "BFmetat" ~ "Bayesian meta-analysis" 16 | ) 17 | 18 | # Extract bayes factors 19 | bayes_factors <- BayesFactor::extractBF(x) 20 | 21 | # Extract the statistics or loop over the models 22 | if (nrow(bayes_factors) == 1) { 23 | # Create a statistics list 24 | statistics <- list() 25 | 26 | # Extract statistics 27 | statistics <- add_statistic( 28 | statistics, "BF10", bayes_factors$bf, "BF", 29 | "10" 30 | ) 31 | statistics <- add_statistic( 32 | statistics, "BF01", 1 / bayes_factors$bf, "BF", 33 | "01" 34 | ) 35 | statistics <- add_statistic( 36 | statistics, "proportional error", 37 | bayes_factors$error, "PE" 38 | ) 39 | 40 | # Add statistics to the analysis 41 | analysis$statistics <- statistics 42 | } else { 43 | # Create a list to store the different models in 44 | groups <- list(name = "Models") 45 | 46 | # Loop over the models 47 | for (i in seq_len(nrow(bayes_factors))) { 48 | # Create a list to store the model statistics in 49 | group <- list(name = rownames(bayes_factors)[i]) 50 | 51 | # Create a list to add the statistics to 52 | statistics <- list() 53 | 54 | statistics <- add_statistic( 55 | statistics, "BF10", bayes_factors$bf[i], "BF", 56 | "10" 57 | ) 58 | statistics <- add_statistic( 59 | statistics, "BF01", 1 / bayes_factors$bf[i], 60 | "BF", "01" 61 | ) 62 | statistics <- add_statistic( 63 | statistics, "proportional error", 64 | bayes_factors$error[i], "PE" 65 | ) 66 | 67 | # Add statistics to the model list 68 | group$statistics <- statistics 69 | 70 | # Add the model group to the groups list 71 | groups$groups <- append(groups$groups, list(group)) 72 | } 73 | 74 | # Add the list of models to the analysis list 75 | analysis$groups <- append(analysis$groups, list(groups)) 76 | } 77 | 78 | # Add denominator model information 79 | alternative <- list( 80 | name = x@denominator@longName, 81 | formula = x@denominator@identifier$formula 82 | ) 83 | analysis$alternative <- alternative 84 | 85 | # Add package information 86 | analysis <- add_package_info(analysis, "BayesFactor") 87 | 88 | return(analysis) 89 | } 90 | -------------------------------------------------------------------------------- /R/tidy_stats.Hmisc.r: -------------------------------------------------------------------------------- 1 | #' @describeIn tidy_stats tidy_stats method for class 'rcorr' 2 | #' @export 3 | tidy_stats.rcorr <- function(x, args = NULL) { 4 | message( 5 | paste( 6 | "Can't 
determine whether the correlations are Pearson's r or Spearman's", 7 | "rho rank correlation coefficients." 8 | ) 9 | ) 10 | 11 | analysis <- list(method = "Correlation") 12 | 13 | rs <- tidy_matrix(x$r) 14 | ns <- tidy_matrix(x$n) 15 | ps <- tidy_matrix(x$P) 16 | 17 | groups <- list(name = "Pairs") 18 | 19 | for (i in seq_len(nrow(rs))) { 20 | names <- list( 21 | list(name = rs$name1[i]), 22 | list(name = rs$name2[i]) 23 | ) 24 | 25 | group <- list(names = names) 26 | statistics <- list() 27 | 28 | statistics <- add_statistic(statistics, name = "r", value = rs$value[i]) 29 | statistics <- add_statistic(statistics, name = "n", value = ns$value[i]) 30 | statistics <- add_statistic(statistics, name = "p", value = ps$value[i]) 31 | 32 | group$statistics <- statistics 33 | groups$groups <- append(groups$groups, list(group)) 34 | } 35 | 36 | analysis$groups <- append(analysis$groups, list(groups)) 37 | 38 | analysis <- add_package_info(analysis, "Hmisc") 39 | 40 | return(analysis) 41 | } 42 | -------------------------------------------------------------------------------- /R/tidy_stats.afex.r: -------------------------------------------------------------------------------- 1 | #' @describeIn tidy_stats tidy_stats method for class 'afex_aov' 2 | #' @export 3 | tidy_stats.afex_aov <- function(x, args = NULL) { 4 | analysis <- list(method = "ANOVA") 5 | 6 | terms <- x$anova_table 7 | 8 | groups <- list(name = "Terms") 9 | 10 | for (i in seq_len(nrow(terms))) { 11 | group <- list(name = rownames(terms)[i]) 12 | 13 | statistics <- list() |> 14 | add_statistic( 15 | name = "df numerator", 16 | value = terms$`num Df`[i], 17 | symbol = "df", 18 | subscript = "num." 19 | ) |> 20 | add_statistic( 21 | name = "df denominator", 22 | value = terms$`den Df`[i], 23 | symbol = "df", 24 | subscript = "den." 25 | ) |> 26 | add_statistic( 27 | name = "MSE", 28 | value = terms$MSE[i] 29 | ) |> 30 | add_statistic( 31 | name = "statistic", 32 | value = terms$`F`[i], 33 | symbol = "F" 34 | ) |> 35 | add_statistic( 36 | name = "ges", 37 | value = terms$ges[i], 38 | symbol = paste0(intToUtf8(0x03b7), intToUtf8(0x00b2)), 39 | subscript = "G" 40 | ) |> 41 | add_statistic( 42 | name = "pes", 43 | value = terms$pes[i], 44 | symbol = paste0(intToUtf8(0x03b7), intToUtf8(0x00b2)), 45 | subscript = "p" 46 | ) |> 47 | add_statistic( 48 | name = "p", 49 | value = terms$`Pr(>F)`[i] 50 | ) 51 | 52 | group$statistics <- statistics 53 | 54 | groups$groups <- append(groups$groups, list(group)) 55 | } 56 | analysis$groups <- append(analysis$groups, list(groups)) 57 | 58 | analysis$anova_type <- attr(x, "type") 59 | analysis$p_adjustment_method <- attr(x$anova_table, "p_adjust_method") 60 | analysis$sphericity_correction_method <- attr(x$anova_table, "correction") 61 | 62 | analysis <- add_package_info(analysis, "afex") 63 | 64 | return(analysis) 65 | } 66 | 67 | #' @describeIn tidy_stats tidy_stats method for class 'mixed' 68 | #' @export 69 | tidy_stats.mixed <- function(x, args = NULL) { 70 | analysis <- list(method = "Mixed Model ANOVA") 71 | 72 | terms <- x$anova_table 73 | 74 | groups <- list(name = "Terms") 75 | 76 | for (i in seq_len(nrow(terms))) { 77 | group <- list(name = rownames(terms)[i]) 78 | 79 | group$statistics <- list() |> 80 | add_statistic( 81 | name = "df numerator", 82 | value = terms$`num Df`[i], 83 | symbol = "df", 84 | subscript = "num." 85 | ) |> 86 | add_statistic( 87 | name = "df denominator", 88 | value = terms$`den Df`[i], 89 | symbol = "df", 90 | subscript = "den."
91 | ) |> 92 | add_statistic( 93 | name = "MSE", 94 | value = terms$MSE[i] 95 | ) |> 96 | add_statistic( 97 | name = "statistic", 98 | value = terms$`F`[i], 99 | symbol = "F" 100 | ) |> 101 | add_statistic( 102 | name = "ges", 103 | value = terms$ges[i], 104 | symbol = paste0(intToUtf8(0x03b7), intToUtf8(0x00b2)), 105 | subscript = "G" 106 | ) |> 107 | add_statistic( 108 | name = "pes", 109 | value = terms$pes[i], 110 | symbol = paste0(intToUtf8(0x03b7), intToUtf8(0x00b2)), 111 | subscript = "p" 112 | ) |> 113 | add_statistic( 114 | name = "p", 115 | value = terms$`Pr(>F)`[i] 116 | ) 117 | 118 | groups$groups <- append(groups$groups, list(group)) 119 | } 120 | 121 | analysis$groups <- append(analysis$groups, list(groups)) 122 | 123 | analysis$anova_type <- attr(x, "type") 124 | 125 | analysis <- add_package_info(analysis, "afex") 126 | 127 | return(analysis) 128 | } 129 | -------------------------------------------------------------------------------- /R/tidy_stats.brmsfit.r: -------------------------------------------------------------------------------- 1 | #' @describeIn tidy_stats tidy_stats method for class 'brmsfit' 2 | #' @export 3 | tidy_stats.brmsfit <- function(x, args = NULL) { 4 | analysis <- list(method = "Bayesian regression model") 5 | 6 | prob <- .95 7 | robust <- FALSE 8 | mc_se <- FALSE 9 | 10 | if (!is.null(args)) { 11 | if (!is.null(args$prob)) { 12 | prob <- args$prob 13 | } 14 | if (!is.null(args$robust)) { 15 | robust <- args$robust 16 | } 17 | if (!is.null(args$mc_se)) { 18 | mc_se <- args$mc_se 19 | } 20 | } 21 | 22 | summary <- summary(x, prob = prob, robust = robust, mc_se = mc_se) 23 | 24 | # Group-level effects 25 | if (length(summary$group) > 0) { 26 | group1 <- list(name = "Group-Level Effects") 27 | 28 | for (group in names(summary$random)) { 29 | group2 <- list(name = group) 30 | 31 | group2 <- add_brms_statistics( 32 | summary$random[[group]], group2, 33 | prob = prob 34 | ) 35 | group1$groups <- append(group1$groups, list(group2)) 36 | } 37 | analysis$groups <- append(analysis$groups, list(group1)) 38 | } 39 | 40 | # Population-level effects 41 | group <- list(name = "Population-Level Effects") 42 | 43 | group <- add_brms_statistics(summary$fixed, group, prob = prob) 44 | analysis$groups <- append(analysis$groups, list(group)) 45 | 46 | # Family specific parameters 47 | if (nrow(summary$spec_pars) > 0) { 48 | group <- list(name = "Family Specific Parameters") 49 | 50 | group <- add_brms_statistics(summary$spec_pars, group, prob = prob) 51 | analysis$groups <- append(analysis$groups, list(group)) 52 | } 53 | 54 | analysis <- add_package_info(analysis, "brms") 55 | 56 | # Extra information 57 | analysis$robust <- robust 58 | 59 | return(analysis) 60 | } 61 | 62 | add_brms_statistics <- function(x, list, prob) { 63 | for (i in seq_len(nrow(x))) { 64 | group <- list(name = rownames(x)[i]) 65 | 66 | group$statistics <- list() |> 67 | add_statistic( 68 | "estimate", 69 | x$Estimate[i], 70 | "b", 71 | interval = "CI", 72 | level = prob, 73 | lower = x[i, paste0("l-", round(prob * 100), "% CI")], 74 | upper = x[i, paste0("u-", round(prob * 100), "% CI")] 75 | ) |> 76 | add_statistic("Monte Carlo standard error", x$MCSE[i], "MCSE") |> 77 | add_statistic("estimate error", x$Est.Error[i], "EE") |> 78 | add_statistic("R-hat", x$Rhat[i], symbol = symbol("R_hat")) |> 79 | add_statistic( 80 | "effective sample size (bulk)", 81 | x$Bulk_ESS[i], 82 | symbol = "ESS", 83 | subscript = "bulk" 84 | ) |> 85 | add_statistic( 86 | "effective sample size (tail)", 87 | 
x$Tail_ESS[i], 88 | symbol = "ESS", 89 | subscript = "tail" 90 | ) 91 | 92 | list$groups <- append(list$groups, list(group)) 93 | } 94 | 95 | return(list) 96 | } 97 | -------------------------------------------------------------------------------- /R/tidy_stats.effectsize.r: -------------------------------------------------------------------------------- 1 | #' @describeIn tidy_stats tidy_stats method for class 'effectsize_difference' 2 | #' @export 3 | tidy_stats.effectsize_difference <- function(x, args = NULL) { 4 | analysis <- list() 5 | 6 | if ("Cohens_d" %in% names(x)) { 7 | method <- "Cohen's d effect size" 8 | name <- "Cohen's d" 9 | symbol <- "d" 10 | } else if ("Hedges_g" %in% names(x)) { 11 | method <- "Hedge's g effect size" 12 | name <- "Hedge's g" 13 | symbol <- "g" 14 | } else { 15 | method <- "Glass' delta effect size" 16 | name <- "Glass' delta" 17 | symbol <- symbol("delta") 18 | } 19 | 20 | analysis$method <- method 21 | 22 | statistics <- list() 23 | statistics <- add_statistic( 24 | list = statistics, 25 | name = name, 26 | value = x[[1]], 27 | symbol = symbol, 28 | interval = "CI", 29 | level = x[[2]], 30 | lower = x[[3]], 31 | upper = x[[4]] 32 | ) 33 | analysis$statistics <- statistics 34 | 35 | analysis$alternative <- list( 36 | direction = attr(x, "alternative"), 37 | null_value = attr(x, "mu") 38 | ) 39 | analysis$paired <- attr(x, "paired") 40 | analysis$correction <- attr(x, "correction") 41 | analysis$pooled_sd <- attr(x, "pooled_sd") 42 | analysis$approximate <- attr(x, "approximate") 43 | 44 | analysis <- add_package_info(analysis, "effectsize") 45 | 46 | return(analysis) 47 | } 48 | -------------------------------------------------------------------------------- /R/tidy_stats.effsize.r: -------------------------------------------------------------------------------- 1 | #' @describeIn tidy_stats tidy_stats method for class 'effsize' 2 | #' @export 3 | tidy_stats.effsize <- function(x, args = NULL) { 4 | analysis <- list(method = paste(x$method, "effect size")) 5 | statistics <- list() 6 | 7 | # Determine the symbol, which is different from what is stored in the name 8 | # attribute in the case of a Cliff's Delta 9 | symbol <- dplyr::if_else( 10 | x$name == "delta", 11 | intToUtf8(0x03b4), 12 | x$name 13 | ) 14 | 15 | statistics <- add_statistic( 16 | list = statistics, 17 | name = x$method, 18 | symbol = symbol, 19 | value = x$estimate, 20 | interval = "CI", 21 | level = x$conf.level, 22 | lower = x$conf.int[["lower"]], 23 | upper = x$conf.int[["upper"]] 24 | ) 25 | 26 | analysis$statistics <- statistics 27 | 28 | analysis <- add_package_info(analysis, "effsize") 29 | 30 | return(analysis) 31 | } 32 | -------------------------------------------------------------------------------- /R/tidy_stats.irr.r: -------------------------------------------------------------------------------- 1 | #' @describeIn tidy_stats tidy_stats method for class 'icclist' 2 | #' @export 3 | tidy_stats.icclist <- function(x, args = NULL) { 4 | analysis <- list(method = "ICC") 5 | 6 | statistics <- list() |> 7 | add_statistic("N subjects", x$subjects, "N", "subjects") |> 8 | add_statistic("N raters", x$raters, "N", "raters") |> 9 | add_statistic( 10 | name = x$icc.name, 11 | value = x$value, 12 | interval = "CI", 13 | level = x$conf.level, 14 | lower = x$lbound, 15 | upper = x$ubound 16 | ) |> 17 | add_statistic("statistic", x$Fvalue, "F") |> 18 | add_statistic("df numerator", x$df1, "df", "num.") |> 19 | add_statistic("df denominator", x$df2, "df", "den.") |> 20 | add_statistic("p",
x$p.value) 21 | 22 | analysis$statistics <- statistics 23 | 24 | analysis$model <- x$model 25 | analysis$type <- x$type 26 | analysis$unit <- x$unit 27 | 28 | analysis$alternative <- list(null_value = x$r0) 29 | 30 | analysis <- add_package_info(analysis, "irr") 31 | 32 | return(analysis) 33 | } 34 | -------------------------------------------------------------------------------- /R/tidy_stats_to_data_frame.R: -------------------------------------------------------------------------------- 1 | #' Convert a tidystats list to a data frame 2 | #' 3 | #' [tidy_stats_to_data_frame()] converts a tidystats list to a data frame, 4 | #' which can then be used to extract specific statistics using standard 5 | #' subsetting functions (e.g., [dplyr::filter()]). 6 | #' 7 | #' @param x A tidystats list. 8 | #' 9 | #' @examples 10 | #' # Conduct analyses 11 | #' sleep_wide <- reshape( 12 | #' sleep, 13 | #' direction = "wide", 14 | #' idvar = "ID", 15 | #' timevar = "group", 16 | #' sep = "_" 17 | #' ) 18 | #' sleep_test <- t.test(sleep_wide$extra_1, sleep_wide$extra_2, paired = TRUE) 19 | #' 20 | #' ctl <- c(4.17, 5.58, 5.18, 6.11, 4.50, 4.61, 5.17, 4.53, 5.33, 5.14) 21 | #' trt <- c(4.81, 4.17, 4.41, 3.59, 5.87, 3.83, 6.03, 4.89, 4.32, 4.69) 22 | #' group <- gl(2, 10, 20, labels = c("Ctl", "Trt")) 23 | #' weight <- c(ctl, trt) 24 | #' lm_D9 <- lm(weight ~ group) 25 | #' 26 | #' npk_aov <- aov(yield ~ block + N * P * K, npk) 27 | #' 28 | #' # Create an empty list to store the statistics in 29 | #' statistics <- list() 30 | #' 31 | #' # Add statistics 32 | #' statistics <- statistics |> 33 | #' add_stats(sleep_test, type = "primary", preregistered = TRUE) |> 34 | #' add_stats(lm_D9) |> 35 | #' add_stats(npk_aov, notes = "An ANOVA example") 36 | #' 37 | #' # Convert the list to a data frame 38 | #' df <- tidy_stats_to_data_frame(statistics) 39 | #' 40 | #' # Select all the p-values 41 | #' dplyr::filter(df, statistic_name == "p") 42 | #' 43 | #' @export 44 | tidy_stats_to_data_frame <- function(x) { 45 | df <- purrr::map2_df(x, names(x), analysis_to_data_frame) 46 | 47 | df <- dplyr::rename(df, statistic_name = name) 48 | 49 | df <- dplyr::relocate(df, identifier, sort(tidyselect::peek_vars())) 50 | df <- dplyr::relocate(df, dplyr::any_of("symbol"), .after = statistic_name) 51 | df <- dplyr::relocate(df, dplyr::any_of("lower"), .before = value) 52 | df <- dplyr::relocate(df, dplyr::any_of("upper"), .after = value) 53 | df <- dplyr::relocate( 54 | df, 55 | dplyr::any_of(c("interval", "level")), 56 | .after = dplyr::last_col() 57 | ) 58 | 59 | return(df) 60 | } 61 | 62 | analysis_to_data_frame <- function(x, y) { 63 | df <- tibble::tibble(identifier = y) 64 | 65 | if ("statistics" %in% names(x)) { 66 | df <- dplyr::bind_cols( 67 | df, 68 | purrr::map_df(x$statistics, function(x) { 69 | return(x) 70 | }) 71 | ) 72 | } 73 | 74 | # Check if there are groups, if so, recursively loop through them and convert 75 | # each group to a data frame, with a level integer to keep track of each 76 | # group name 77 | if ("groups" %in% names(x)) { 78 | level <- 0 79 | df <- dplyr::bind_cols( 80 | df, 81 | purrr::map_df(x$groups, groups_to_data_frame, level) 82 | ) 83 | } 84 | 85 | if (!is.null(x$name)) { 86 | df <- dplyr::mutate(df, analysis_name = x$name, .after = identifier) 87 | } 88 | 89 | return(df) 90 | } 91 | 92 | groups_to_data_frame <- function(x, level) { 93 | level <- level + 1 94 | 95 | if ("statistics" %in% names(x)) { 96 | df <- purrr::map_df(x$statistics, function(x) { 97 | return(x) 98 | }) 99 | } 100 | 101
| if ("groups" %in% names(x)) { 102 | df <- purrr::map_df(x$groups, groups_to_data_frame, level) 103 | } 104 | 105 | # Check if there's one or more names, if so, add them to the data frame and 106 | # append the level to the name 107 | if ("name" %in% names(x)) { 108 | df <- dplyr::mutate(df, "group_name_{level}" := as.character(x$name)) 109 | } 110 | 111 | if ("names" %in% names(x)) { 112 | df <- dplyr::mutate( 113 | df, 114 | "group_name_{level}_1" := x$names[[1]]$name, 115 | "group_name_{level}_2" := x$names[[2]]$name, 116 | ) 117 | } 118 | 119 | return(df) 120 | } 121 | -------------------------------------------------------------------------------- /R/write_stats.R: -------------------------------------------------------------------------------- 1 | #' Write a tidystats list to a file 2 | #' 3 | #' [write_stats()] writes a tidystats list to a .json file. 4 | #' 5 | #' @param x A tidystats list. 6 | #' @param path A string specifying the path or connection to write to. 7 | #' @param digits The number of decimal places to use. The default is 6. 8 | #' 9 | #' @examples 10 | #' # Conduct a statistical test 11 | #' sleep_wide <- reshape( 12 | #' sleep, 13 | #' direction = "wide", 14 | #' idvar = "ID", 15 | #' timevar = "group", 16 | #' sep = "_" 17 | #' ) 18 | #' sleep_test <- t.test(sleep_wide$extra_1, sleep_wide$extra_2, paired = TRUE) 19 | #' 20 | #' # Create an empty list 21 | #' statistics <- list() 22 | #' 23 | #' # Add statistics to the list 24 | #' statistics <- add_stats(statistics, sleep_test) 25 | #' 26 | #' # Save the statistics to a file 27 | #' dir <- tempdir() 28 | #' write_stats(statistics, file.path(dir, "statistics.json")) 29 | #' 30 | #' @export 31 | write_stats <- function(x, path, digits = 6) { 32 | jsonlite::write_json( 33 | x, 34 | path = path, 35 | pretty = TRUE, 36 | auto_unbox = TRUE, 37 | digits = digits, 38 | na = "string" 39 | ) 40 | } 41 | -------------------------------------------------------------------------------- /R/zzz.r: -------------------------------------------------------------------------------- 1 | .onAttach <- function(libname, pkgname) { 2 | msg <- paste( 3 | "This version of tidystats uses a new and improved way of", 4 | "structuring statistics. This breaks previous (major) versions of", 5 | "tidystats." 
6 | ) 7 | packageStartupMessage(msg) 8 | } 9 | -------------------------------------------------------------------------------- /_pkgdown.yml: -------------------------------------------------------------------------------- 1 | url: https://willemsleegers.github.io/tidystats/ 2 | 3 | reference: 4 | - title: Main functions 5 | desc: Essential functionality 6 | contents: 7 | - add_stats 8 | - write_stats 9 | - read_stats 10 | - custom_stat 11 | - custom_stats 12 | - tidy_stats_to_data_frame 13 | - title: Analysis functions 14 | desc: Functions for performing analyses such as calculating descriptives 15 | contents: 16 | - count_data 17 | - describe_data 18 | - title: Data sets 19 | desc: Data sets that come with the package 20 | contents: 21 | - quote_source 22 | 23 | articles: 24 | - title: Vignettes 25 | navbar: ~ 26 | contents: 27 | - introduction-to-tidystats 28 | - supported-functions 29 | - custom-statistics 30 | - reusing-statistics 31 | # - title: Development 32 | # desc: Vignettes aimed at package developers 33 | # contents: 34 | # - tidystats-development 35 | 36 | template: 37 | bootstrap: 5 38 | -------------------------------------------------------------------------------- /cran-comments.md: -------------------------------------------------------------------------------- 1 | ## R CMD check results 2 | 3 | 0 errors | 0 warnings | 0 notes 4 | -------------------------------------------------------------------------------- /data/quote_source.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WillemSleegers/tidystats/7d5aa0ebb7b2159770bfa1464fdf50bd1d910436/data/quote_source.rda
35 |
36 |
40 | 41 |
42 |

Authors

43 | 44 |
  • 45 |

    Willem Sleegers. Maintainer. 46 |

    47 |
  • 48 |
49 | 50 |
51 |

Citation

52 |

Source: inst/CITATION

53 | 54 |

Sleegers WWA (2024). 55 | tidystats: Save output of statistical tests. 56 | doi:10.5281/zenodo.4041858, https://zenodo.org/doi/10.5281/zenodo.4041858. 57 |

58 |
@Manual{,
59 |   title = {tidystats: Save output of statistical tests},
60 |   author = {Willem W. A. Sleegers},
61 |   year = {2024},
62 |   doi = {10.5281/zenodo.4041858},
63 |   url = {https://zenodo.org/doi/10.5281/zenodo.4041858},
64 | }
65 |
66 | 67 |
69 | 70 | 71 |
74 | 75 | 78 | 79 |
80 | 81 | 82 | 83 | 84 | 85 | 86 | 87 | -------------------------------------------------------------------------------- /docs/deps/bootstrap-toc-1.0.1/bootstrap-toc.min.js: -------------------------------------------------------------------------------- 1 | /*! 2 | * Bootstrap Table of Contents v1.0.1 (http://afeld.github.io/bootstrap-toc/) 3 | * Copyright 2015 Aidan Feldman 4 | * Licensed under MIT (https://github.com/afeld/bootstrap-toc/blob/gh-pages/LICENSE.md) */ 5 | !function(a){"use strict";window.Toc={helpers:{findOrFilter:function(e,t){var n=e.find(t);return e.filter(t).add(n).filter(":not([data-toc-skip])")},generateUniqueIdBase:function(e){return a(e).text().trim().replace(/\'/gi,"").replace(/[& +$,:;=?@"#{}|^~[`%!'<>\]\.\/\(\)\*\\\n\t\b\v]/g,"-").replace(/-{2,}/g,"-").substring(0,64).replace(/^-+|-+$/gm,"").toLowerCase()||e.tagName.toLowerCase()},generateUniqueId:function(e){for(var t=this.generateUniqueIdBase(e),n=0;;n++){var r=t;if(0')},createChildNavList:function(e){var t=this.createNavList();return e.append(t),t},generateNavEl:function(e,t){var n=a('');n.attr("href","#"+e),n.text(t);var r=a("
  • ");return r.append(n),r},generateNavItem:function(e){var t=this.generateAnchor(e),n=a(e),r=n.data("toc-text")||n.text();return this.generateNavEl(t,r)},getTopLevel:function(e){for(var t=1;t<=6;t++){if(1 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | -------------------------------------------------------------------------------- /docs/deps/font-awesome-6.5.2/webfonts/fa-brands-400.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WillemSleegers/tidystats/7d5aa0ebb7b2159770bfa1464fdf50bd1d910436/docs/deps/font-awesome-6.5.2/webfonts/fa-brands-400.ttf -------------------------------------------------------------------------------- /docs/deps/font-awesome-6.5.2/webfonts/fa-brands-400.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WillemSleegers/tidystats/7d5aa0ebb7b2159770bfa1464fdf50bd1d910436/docs/deps/font-awesome-6.5.2/webfonts/fa-brands-400.woff2 -------------------------------------------------------------------------------- /docs/deps/font-awesome-6.5.2/webfonts/fa-regular-400.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WillemSleegers/tidystats/7d5aa0ebb7b2159770bfa1464fdf50bd1d910436/docs/deps/font-awesome-6.5.2/webfonts/fa-regular-400.ttf -------------------------------------------------------------------------------- /docs/deps/font-awesome-6.5.2/webfonts/fa-regular-400.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WillemSleegers/tidystats/7d5aa0ebb7b2159770bfa1464fdf50bd1d910436/docs/deps/font-awesome-6.5.2/webfonts/fa-regular-400.woff2 -------------------------------------------------------------------------------- /docs/deps/font-awesome-6.5.2/webfonts/fa-solid-900.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WillemSleegers/tidystats/7d5aa0ebb7b2159770bfa1464fdf50bd1d910436/docs/deps/font-awesome-6.5.2/webfonts/fa-solid-900.ttf -------------------------------------------------------------------------------- /docs/deps/font-awesome-6.5.2/webfonts/fa-solid-900.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WillemSleegers/tidystats/7d5aa0ebb7b2159770bfa1464fdf50bd1d910436/docs/deps/font-awesome-6.5.2/webfonts/fa-solid-900.woff2 -------------------------------------------------------------------------------- /docs/deps/font-awesome-6.5.2/webfonts/fa-v4compatibility.ttf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WillemSleegers/tidystats/7d5aa0ebb7b2159770bfa1464fdf50bd1d910436/docs/deps/font-awesome-6.5.2/webfonts/fa-v4compatibility.ttf -------------------------------------------------------------------------------- /docs/deps/font-awesome-6.5.2/webfonts/fa-v4compatibility.woff2: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/WillemSleegers/tidystats/7d5aa0ebb7b2159770bfa1464fdf50bd1d910436/docs/deps/font-awesome-6.5.2/webfonts/fa-v4compatibility.woff2 -------------------------------------------------------------------------------- /docs/deps/headroom-0.11.0/headroom.min.js: -------------------------------------------------------------------------------- 1 | /*! 
2 | * headroom.js v0.11.0 - Give your page some headroom. Hide your header until you need it 3 | * Copyright (c) 2020 Nick Williams - http://wicky.nillia.ms/headroom.js 4 | * License: MIT 5 | */ 6 | 7 | !function(t,n){"object"==typeof exports&&"undefined"!=typeof module?module.exports=n():"function"==typeof define&&define.amd?define(n):(t=t||self).Headroom=n()}(this,function(){"use strict";function t(){return"undefined"!=typeof window}function d(t){return function(t){return t&&t.document&&function(t){return 9===t.nodeType}(t.document)}(t)?function(t){var n=t.document,o=n.body,s=n.documentElement;return{scrollHeight:function(){return Math.max(o.scrollHeight,s.scrollHeight,o.offsetHeight,s.offsetHeight,o.clientHeight,s.clientHeight)},height:function(){return t.innerHeight||s.clientHeight||o.clientHeight},scrollY:function(){return void 0!==t.pageYOffset?t.pageYOffset:(s||o.parentNode||o).scrollTop}}}(t):function(t){return{scrollHeight:function(){return Math.max(t.scrollHeight,t.offsetHeight,t.clientHeight)},height:function(){return Math.max(t.offsetHeight,t.clientHeight)},scrollY:function(){return t.scrollTop}}}(t)}function n(t,s,e){var n,o=function(){var n=!1;try{var t={get passive(){n=!0}};window.addEventListener("test",t,t),window.removeEventListener("test",t,t)}catch(t){n=!1}return n}(),i=!1,r=d(t),l=r.scrollY(),a={};function c(){var t=Math.round(r.scrollY()),n=r.height(),o=r.scrollHeight();a.scrollY=t,a.lastScrollY=l,a.direction=ls.tolerance[a.direction],e(a),l=t,i=!1}function h(){i||(i=!0,n=requestAnimationFrame(c))}var u=!!o&&{passive:!0,capture:!1};return t.addEventListener("scroll",h,u),c(),{destroy:function(){cancelAnimationFrame(n),t.removeEventListener("scroll",h,u)}}}function o(t,n){n=n||{},Object.assign(this,o.options,n),this.classes=Object.assign({},o.options.classes,n.classes),this.elem=t,this.tolerance=function(t){return t===Object(t)?t:{down:t,up:t}}(this.tolerance),this.initialised=!1,this.frozen=!1}return o.prototype={constructor:o,init:function(){return 
o.cutsTheMustard&&!this.initialised&&(this.addClass("initial"),this.initialised=!0,setTimeout(function(t){t.scrollTracker=n(t.scroller,{offset:t.offset,tolerance:t.tolerance},t.update.bind(t))},100,this)),this},destroy:function(){this.initialised=!1,Object.keys(this.classes).forEach(this.removeClass,this),this.scrollTracker.destroy()},unpin:function(){!this.hasClass("pinned")&&this.hasClass("unpinned")||(this.addClass("unpinned"),this.removeClass("pinned"),this.onUnpin&&this.onUnpin.call(this))},pin:function(){this.hasClass("unpinned")&&(this.addClass("pinned"),this.removeClass("unpinned"),this.onPin&&this.onPin.call(this))},freeze:function(){this.frozen=!0,this.addClass("frozen")},unfreeze:function(){this.frozen=!1,this.removeClass("frozen")},top:function(){this.hasClass("top")||(this.addClass("top"),this.removeClass("notTop"),this.onTop&&this.onTop.call(this))},notTop:function(){this.hasClass("notTop")||(this.addClass("notTop"),this.removeClass("top"),this.onNotTop&&this.onNotTop.call(this))},bottom:function(){this.hasClass("bottom")||(this.addClass("bottom"),this.removeClass("notBottom"),this.onBottom&&this.onBottom.call(this))},notBottom:function(){this.hasClass("notBottom")||(this.addClass("notBottom"),this.removeClass("bottom"),this.onNotBottom&&this.onNotBottom.call(this))},shouldUnpin:function(t){return"down"===t.direction&&!t.top&&t.toleranceExceeded},shouldPin:function(t){return"up"===t.direction&&t.toleranceExceeded||t.top},addClass:function(t){this.elem.classList.add.apply(this.elem.classList,this.classes[t].split(" "))},removeClass:function(t){this.elem.classList.remove.apply(this.elem.classList,this.classes[t].split(" "))},hasClass:function(t){return this.classes[t].split(" ").every(function(t){return this.classList.contains(t)},this.elem)},update:function(t){t.isOutOfBounds||!0!==this.frozen&&(t.top?this.top():this.notTop(),t.bottom?this.bottom():this.notBottom(),this.shouldUnpin(t)?this.unpin():this.shouldPin(t)&&this.pin())}},o.options={tolerance:{up:0,down:0},offset:0,scroller:t()?window:null,classes:{frozen:"headroom--frozen",pinned:"headroom--pinned",unpinned:"headroom--unpinned",top:"headroom--top",notTop:"headroom--not-top",bottom:"headroom--bottom",notBottom:"headroom--not-bottom",initial:"headroom"}},o.cutsTheMustard=!!(t()&&function(){}.bind&&"classList"in document.documentElement&&Object.assign&&Object.keys&&requestAnimationFrame),o}); -------------------------------------------------------------------------------- /docs/deps/headroom-0.11.0/jQuery.headroom.min.js: -------------------------------------------------------------------------------- 1 | /*! 2 | * headroom.js v0.9.4 - Give your page some headroom. 
Hide your header until you need it 3 | * Copyright (c) 2017 Nick Williams - http://wicky.nillia.ms/headroom.js 4 | * License: MIT 5 | */ 6 | 7 | !function(a){a&&(a.fn.headroom=function(b){return this.each(function(){var c=a(this),d=c.data("headroom"),e="object"==typeof b&&b;e=a.extend(!0,{},Headroom.options,e),d||(d=new Headroom(this,e),d.init(),c.data("headroom",d)),"string"==typeof b&&(d[b](),"destroy"===b&&c.removeData("headroom"))})},a("[data-headroom]").each(function(){var b=a(this);b.headroom(b.data())}))}(window.Zepto||window.jQuery); -------------------------------------------------------------------------------- /docs/katex-auto.js: -------------------------------------------------------------------------------- 1 | // https://github.com/jgm/pandoc/blob/29fa97ab96b8e2d62d48326e1b949a71dc41f47a/src/Text/Pandoc/Writers/HTML.hs#L332-L345 2 | document.addEventListener("DOMContentLoaded", function () { 3 | var mathElements = document.getElementsByClassName("math"); 4 | var macros = []; 5 | for (var i = 0; i < mathElements.length; i++) { 6 | var texText = mathElements[i].firstChild; 7 | if (mathElements[i].tagName == "SPAN") { 8 | katex.render(texText.data, mathElements[i], { 9 | displayMode: mathElements[i].classList.contains("display"), 10 | throwOnError: false, 11 | macros: macros, 12 | fleqn: false 13 | }); 14 | }}}); 15 | -------------------------------------------------------------------------------- /docs/lightswitch.js: -------------------------------------------------------------------------------- 1 | 2 | /*! 3 | * Color mode toggler for Bootstrap's docs (https://getbootstrap.com/) 4 | * Copyright 2011-2023 The Bootstrap Authors 5 | * Licensed under the Creative Commons Attribution 3.0 Unported License. 6 | * Updates for {pkgdown} by the {bslib} authors, also licensed under CC-BY-3.0. 7 | */ 8 | 9 | const getStoredTheme = () => localStorage.getItem('theme') 10 | const setStoredTheme = theme => localStorage.setItem('theme', theme) 11 | 12 | const getPreferredTheme = () => { 13 | const storedTheme = getStoredTheme() 14 | if (storedTheme) { 15 | return storedTheme 16 | } 17 | 18 | return window.matchMedia('(prefers-color-scheme: dark)').matches ? 'dark' : 'light' 19 | } 20 | 21 | const setTheme = theme => { 22 | if (theme === 'auto') { 23 | document.documentElement.setAttribute('data-bs-theme', (window.matchMedia('(prefers-color-scheme: dark)').matches ? 
'dark' : 'light')) 24 | } else { 25 | document.documentElement.setAttribute('data-bs-theme', theme) 26 | } 27 | } 28 | 29 | function bsSetupThemeToggle () { 30 | 'use strict' 31 | 32 | const showActiveTheme = (theme, focus = false) => { 33 | var activeLabel, activeIcon; 34 | 35 | document.querySelectorAll('[data-bs-theme-value]').forEach(element => { 36 | const buttonTheme = element.getAttribute('data-bs-theme-value') 37 | const isActive = buttonTheme == theme 38 | 39 | element.classList.toggle('active', isActive) 40 | element.setAttribute('aria-pressed', isActive) 41 | 42 | if (isActive) { 43 | activeLabel = element.textContent; 44 | activeIcon = element.querySelector('span').classList.value; 45 | } 46 | }) 47 | 48 | const themeSwitcher = document.querySelector('#dropdown-lightswitch') 49 | if (!themeSwitcher) { 50 | return 51 | } 52 | 53 | themeSwitcher.setAttribute('aria-label', activeLabel) 54 | themeSwitcher.querySelector('span').classList.value = activeIcon; 55 | 56 | if (focus) { 57 | themeSwitcher.focus() 58 | } 59 | } 60 | 61 | window.matchMedia('(prefers-color-scheme: dark)').addEventListener('change', () => { 62 | const storedTheme = getStoredTheme() 63 | if (storedTheme !== 'light' && storedTheme !== 'dark') { 64 | setTheme(getPreferredTheme()) 65 | } 66 | }) 67 | 68 | window.addEventListener('DOMContentLoaded', () => { 69 | showActiveTheme(getPreferredTheme()) 70 | 71 | document 72 | .querySelectorAll('[data-bs-theme-value]') 73 | .forEach(toggle => { 74 | toggle.addEventListener('click', () => { 75 | const theme = toggle.getAttribute('data-bs-theme-value') 76 | setTheme(theme) 77 | setStoredTheme(theme) 78 | showActiveTheme(theme, true) 79 | }) 80 | }) 81 | }) 82 | } 83 | 84 | setTheme(getPreferredTheme()); 85 | bsSetupThemeToggle(); 86 | -------------------------------------------------------------------------------- /docs/link.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 8 | 12 | 13 | -------------------------------------------------------------------------------- /docs/pkgdown.yml: -------------------------------------------------------------------------------- 1 | pandoc: '3.6' 2 | pkgdown: 2.1.1 3 | pkgdown_sha: ~ 4 | articles: 5 | custom-statistics: custom-statistics.html 6 | introduction-to-tidystats: introduction-to-tidystats.html 7 | reusing-statistics: reusing-statistics.html 8 | supported-functions: supported-functions.html 9 | last_built: 2025-04-06T09:45Z 10 | urls: 11 | reference: https://willemsleegers.github.io/tidystats/reference 12 | article: https://willemsleegers.github.io/tidystats/articles 13 | -------------------------------------------------------------------------------- /docs/reference/add_statistic.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/expect_equal_models.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/symbol.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.afex_aov.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 
5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.anova.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.anova.lme.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.aov.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.aovlist.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.brmsfit.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.confint.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.effectsize_difference.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.effsize.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.emmGrid.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.emm_list.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.glm.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.gls.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.htest.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.icclist.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.lavaan.html: 
-------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.lm.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.lme.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.lmerMod.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.lmerModLmerTest.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.mixed.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.nlme.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.pairwise.htest.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.psych.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.rcorr.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.summary_emm.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.tidystats.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.tidystats_counts.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/tidy_stats.tidystats_descriptives.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | -------------------------------------------------------------------------------- /docs/reference/write_test_stats.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 
-------------------------------------------------------------------------------- /how-to-package.R: -------------------------------------------------------------------------------- 1 | # Todos ------------------------------------------------------------------- 2 | 3 | # TODO: Call deviance dfs df numerator and df denominator? 4 | # TODO: Store all sample estimates in the case of a 4-sample chi-squared test? 5 | # TODO: Simplify the method name of Kolmogorov-Smirnov tests? 6 | # TODO: Rename the name of degrees of freedom and related values to 'parameter'?
7 | # TODO: Use the checkmate package 8 | # TODO: Create a vignette to describe the development process of adding support 9 | # for an analysis 10 | # TODO: Improve tidystats count function (redo the loop function) 11 | 12 | # Load functions ---------------------------------------------------------- 13 | 14 | pkgload::load_all() 15 | 16 | # Documentation ----------------------------------------------------------- 17 | 18 | # Run once 19 | # usethis::use_roxygen_md() 20 | 21 | # Update documentation 22 | devtools::document() 23 | 24 | # Inspect documentation 25 | pkgload::dev_help("add_stats") 26 | 27 | # Installation ------------------------------------------------------------ 28 | 29 | # Install the dev version 30 | devtools::install() 31 | # .rs.restartR() 32 | 33 | # Add dependency ---------------------------------------------------------- 34 | 35 | # usethis::use_package("lavaan", "Suggests") 36 | 37 | # Testing ----------------------------------------------------------------- 38 | 39 | # Add a test 40 | # usethis::use_test("add_stats") 41 | 42 | # Test all tests 43 | devtools::test() 44 | testthat::test_dir(path = "tests/testthat/tests/") 45 | 46 | # Test a specific test 47 | testthat::test_file("tests/testthat/test_effectsize.R") 48 | 49 | # Create a vignette ------------------------------------------------------- 50 | 51 | # usethis::use_vignette("supported-functions") 52 | 53 | # Add a data set ---------------------------------------------------------- 54 | 55 | # usethis::use_data(quote_source, overwrite = TRUE) 56 | 57 | # Build website ----------------------------------------------------------- 58 | 59 | # Update documentation 60 | devtools::document() 61 | 62 | # Update README 63 | #devtools::build_readme() 64 | knitr::knit(input = "README.Rmd") 65 | 66 | # Run to build the website 67 | pkgdown::build_site_github_pages() 68 | 69 | # Preview the site 70 | pkgdown::preview_site() 71 | 72 | # Delete website files 73 | pkgdown::clean_site() 74 | 75 | # CRAN submission --------------------------------------------------------- 76 | 77 | # Update README 78 | knitr::knit(input = "README.Rmd") 79 | 80 | # Update website 81 | pkgdown::build_site_github_pages() 82 | 83 | # Spellcheck 84 | devtools::spell_check() 85 | 86 | # Check examples 87 | devtools::run_examples() 88 | 89 | # CRAN comments 90 | usethis::use_cran_comments() 91 | 92 | # Check tests 93 | devtools::test() 94 | 95 | # Check package 96 | # devtools::load_all() 97 | devtools::check() 98 | devtools::check(args = c("--no-tests")) # Without tests 99 | devtools::check(args = c("--as-cran")) 100 | 101 | # run R CMD check on CRAN’s servers 102 | devtools::check_win_devel() 103 | devtools::check_win_release() 104 | 105 | # R-hub 106 | devtools::check_rhub() 107 | 108 | # Submit 109 | devtools::release() 110 | 111 | # Setup ------------------------------------------------------------------- 112 | 113 | # Create README file 114 | usethis::use_readme_rmd(open = rlang::is_interactive()) 115 | 116 | # Create pkgdown website 117 | usethis::use_pkgdown() 118 | -------------------------------------------------------------------------------- /inst/CITATION: -------------------------------------------------------------------------------- 1 | bibentry( 2 | bibtype = "Manual", 3 | title = "tidystats: Save output of statistical tests", 4 | author = "Willem W. A. 
Sleegers", 5 | year = "2024", 6 | doi = "10.5281/zenodo.4041858", 7 | url = "https://zenodo.org/doi/10.5281/zenodo.4041858" 8 | ) 9 | -------------------------------------------------------------------------------- /man/add_stats.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/add_stats.R 3 | \name{add_stats} 4 | \alias{add_stats} 5 | \title{Add statistical output to a tidystats list} 6 | \usage{ 7 | add_stats( 8 | list, 9 | output, 10 | identifier = NULL, 11 | type = NULL, 12 | preregistered = NULL, 13 | notes = NULL, 14 | args = NULL, 15 | class = NULL 16 | ) 17 | } 18 | \arguments{ 19 | \item{list}{A tidystats list.} 20 | 21 | \item{output}{Output of a statistical test.} 22 | 23 | \item{identifier}{A string identifying the model. Automatically created if 24 | not provided.} 25 | 26 | \item{type}{A string specifying the type of analysis: primary, 27 | secondary, or exploratory.} 28 | 29 | \item{preregistered}{A boolean specifying whether the analysis was 30 | preregistered or not.} 31 | 32 | \item{notes}{A string specifying additional information.} 33 | 34 | \item{args}{A list of additional arguments to customize which statistics 35 | should be extracted. See 'Details' for a list of supported functions and 36 | their arguments.} 37 | 38 | \item{class}{A string to manually specify the class of the object so that 39 | tidystats knows how to extract the statistics. See 'Details' for a list of 40 | classes that are supported.} 41 | } 42 | \description{ 43 | \code{\link[=add_stats]{add_stats()}} is used to add the output of a statistical test to a 44 | tidystats list. 45 | } 46 | \details{ 47 | Many functions to perform statistical tests (e.g., \code{\link[=t.test]{t.test()}}, \code{\link[=lm]{lm()}}) return 48 | an object containing the statistics. These objects can be stored in variables 49 | and used with \code{\link[=add_stats]{add_stats()}} to extract the statistics and add them to a 50 | list. 51 | 52 | The list can be saved to a file using the \code{\link[=write_stats]{write_stats()}} function. 53 | 54 | For a list of supported functions, see \code{vignette("supported-functions", package = "tidystats")}. 
55 | } 56 | \examples{ 57 | # Conduct analyses 58 | sleep_wide <- reshape( 59 | sleep, 60 | direction = "wide", 61 | idvar = "ID", 62 | timevar = "group", 63 | sep = "_" 64 | ) 65 | sleep_test <- t.test(sleep_wide$extra_1, sleep_wide$extra_2, paired = TRUE) 66 | 67 | ctl <- c(4.17, 5.58, 5.18, 6.11, 4.50, 4.61, 5.17, 4.53, 5.33, 5.14) 68 | trt <- c(4.81, 4.17, 4.41, 3.59, 5.87, 3.83, 6.03, 4.89, 4.32, 4.69) 69 | group <- gl(2, 10, 20, labels = c("Ctl", "Trt")) 70 | weight <- c(ctl, trt) 71 | lm_D9 <- lm(weight ~ group) 72 | lm_D9_confint <- confint(lm_D9) 73 | 74 | npk_aov <- aov(yield ~ block + N * P * K, npk) 75 | 76 | # Create an empty list to store the statistics in 77 | statistics <- list() 78 | 79 | # Add statistics to the list 80 | statistics <- statistics |> 81 | add_stats(sleep_test, type = "primary", preregistered = TRUE) |> 82 | add_stats(lm_D9) |> 83 | add_stats(lm_D9_confint, class = "confint") |> 84 | add_stats(npk_aov, notes = "An ANOVA example") 85 | 86 | } 87 | -------------------------------------------------------------------------------- /man/count_data.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/count_data.r 3 | \name{count_data} 4 | \alias{count_data} 5 | \title{Count the number of observations} 6 | \usage{ 7 | count_data(data, ..., na.rm = FALSE, pct = FALSE) 8 | } 9 | \arguments{ 10 | \item{data}{A data frame.} 11 | 12 | \item{...}{One or more unquoted (categorical) column names from the data 13 | frame, separated by commas.} 14 | 15 | \item{na.rm}{A boolean specifying whether missing values (including NaN) 16 | should be removed.} 17 | 18 | \item{pct}{A boolean indicating whether to calculate percentages instead of 19 | proportions. The default is \code{FALSE}.} 20 | } 21 | \description{ 22 | \code{\link[=count_data]{count_data()}} returns the number and proportion of observations for 23 | categorical variables. 24 | } 25 | \details{ 26 | The data frame can be grouped using \code{\link[dplyr:group_by]{dplyr::group_by()}} 27 | so that the number of observations will be calculated within each group 28 | level. 
29 | } 30 | \examples{ 31 | count_data(quote_source, source) 32 | count_data(quote_source, source, sex) 33 | count_data(quote_source, source, sex, na.rm = TRUE) 34 | count_data(quote_source, source, sex, na.rm = TRUE, pct = TRUE) 35 | 36 | # Use dplyr::group_by() to calculate proportions within a group 37 | quote_source |> 38 | dplyr::group_by(source) |> 39 | count_data(sex) 40 | 41 | } 42 | -------------------------------------------------------------------------------- /man/custom_stat.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/custom_stat.r 3 | \name{custom_stat} 4 | \alias{custom_stat} 5 | \title{Create a custom statistic} 6 | \usage{ 7 | custom_stat( 8 | name, 9 | value, 10 | symbol = NULL, 11 | subscript = NULL, 12 | interval = NULL, 13 | level = NULL, 14 | lower = NULL, 15 | upper = NULL 16 | ) 17 | } 18 | \arguments{ 19 | \item{name}{A string specifying the name of the statistic.} 20 | 21 | \item{value}{The numeric value of the statistic.} 22 | 23 | \item{symbol}{A string specifying the symbol of the statistic to use when 24 | reporting the statistic.} 25 | 26 | \item{subscript}{A string specifying a subscript to use when reporting the 27 | statistic.} 28 | 29 | \item{interval}{A string specifying the type of interval if the statistic is 30 | a ranged statistic (e.g., 95\% confidence interval)} 31 | 32 | \item{level}{A numeric value between 0 and 1 indicating the level of the 33 | interval.} 34 | 35 | \item{lower}{The numeric value of the lower bound of the statistic.} 36 | 37 | \item{upper}{The numeric value of the upper bound of the statistic.} 38 | } 39 | \description{ 40 | \code{\link[=custom_stat]{custom_stat()}} is used together with the \code{\link[=custom_stats]{custom_stats()}} function to add 41 | statistics from unsupported functions via \code{\link[=add_stats]{add_stats()}}. See the 42 | \code{\link[=custom_stats]{custom_stats()}} function for more information. 43 | } 44 | \examples{ 45 | # Example 1: A single mean value 46 | sample <- rnorm(1000, mean = 0, sd = 1) 47 | mean <- mean(sample) 48 | 49 | custom_stat(name = "mean", value = mean, symbol = "M") 50 | 51 | # Example 2: A mean with a 95\% confidence interval 52 | sample <- rnorm(1000, mean = 0, sd = 1) 53 | mean <- mean(sample) 54 | se <- sd(sample) / sqrt(length(sample)) 55 | CI <- c(mean - 1.96 * se, mean + 1.96 * se) 56 | 57 | custom_stat( 58 | name = "mean", 59 | value = mean, 60 | symbol = "M", 61 | interval = "CI", 62 | level = .95, 63 | lower = CI[1], 64 | upper = CI[2] 65 | ) 66 | 67 | } 68 | -------------------------------------------------------------------------------- /man/custom_stats.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/custom_stats.r 3 | \name{custom_stats} 4 | \alias{custom_stats} 5 | \title{Create a collection of custom statistics} 6 | \usage{ 7 | custom_stats(method, statistics) 8 | } 9 | \arguments{ 10 | \item{method}{A string specifying the method used to obtain the statistics.} 11 | 12 | \item{statistics}{A vector of statistics created with \code{\link[=custom_stat]{custom_stat()}}.} 13 | } 14 | \description{ 15 | \code{\link[=custom_stats]{custom_stats()}} is used to create a collection of statistics from 16 | unsupported functions to add to a list via \code{\link[=add_stats]{add_stats()}}. 
17 | } 18 | \details{ 19 | \code{\link[=custom_stats]{custom_stats()}} supports adding a single statistic or a group of statistics. 20 | Multiple groups of statistics are not (yet) supported. 21 | } 22 | \examples{ 23 | # Example: BIC Bayes factor (approx.) 24 | # Run the analysis 25 | lm1 <- lm(Fertility ~ ., data = swiss) 26 | lm2 <- update(lm1, . ~ . - Examination) 27 | 28 | BF10 <- 1 / exp((BIC(lm2) - BIC(lm1)) / 2) 29 | 30 | # Create the custom statistics 31 | BIC_BFs <- custom_stats( 32 | method = "BIC Bayes factor", 33 | statistics = c( 34 | custom_stat(name = "BF", value = BF10, subscript = "10"), 35 | custom_stat(name = "BF", value = 1 / BF10, subscript = "01") 36 | ) 37 | ) 38 | 39 | # Create an empty list 40 | statistics <- list() 41 | 42 | # Add the custom statistics to the list 43 | statistics <- add_stats(statistics, BIC_BFs) 44 | 45 | } 46 | -------------------------------------------------------------------------------- /man/describe_data.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/describe_data.r 3 | \name{describe_data} 4 | \alias{describe_data} 5 | \title{Calculate common descriptive statistics} 6 | \usage{ 7 | describe_data(data, ..., na.rm = TRUE, short = FALSE) 8 | } 9 | \arguments{ 10 | \item{data}{A data frame.} 11 | 12 | \item{...}{One or more unquoted column names from the data frame.} 13 | 14 | \item{na.rm}{A boolean indicating whether missing values (including NaN) 15 | should be excluded in calculating the descriptives? The default is TRUE.} 16 | 17 | \item{short}{A boolean indicating whether only a subset of descriptives 18 | should be reported? If set to \verb{TRUE``, only the N, M, and SD will be returned. The default is }FALSE`.} 19 | } 20 | \description{ 21 | \code{\link[=describe_data]{describe_data()}} returns a set of common descriptive statistics 22 | (e.g., number of observations, mean, standard deviation) for one or more 23 | numeric variables. 24 | } 25 | \details{ 26 | The data can be grouped using \code{\link[dplyr:group_by]{dplyr::group_by()}} so that 27 | descriptives will be calculated for each group level. 28 | 29 | Skew and kurtosis are based on the \code{\link[datawizard:skewness]{datawizard::skewness()}} and 30 | \code{\link[datawizard:skewness]{datawizard::kurtosis()}} functions (Komsta & Novomestky, 2015). 
31 | } 32 | \examples{ 33 | describe_data(quote_source, response) 34 | 35 | describe_data(quote_source, response, na.rm = FALSE) 36 | 37 | quote_source |> 38 | dplyr::group_by(source) |> 39 | describe_data(response) 40 | 41 | quote_source |> 42 | dplyr::group_by(source) |> 43 | describe_data(response, short = TRUE) 44 | 45 | } 46 | -------------------------------------------------------------------------------- /man/helper_functions.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/helper_functions.r 3 | \name{tidy_matrix} 4 | \alias{tidy_matrix} 5 | \alias{add_statistic} 6 | \alias{symbol} 7 | \alias{expect_equal_models} 8 | \alias{write_test_stats} 9 | \title{Helper functions in tidystats} 10 | \usage{ 11 | tidy_matrix(m, symmetric = TRUE) 12 | 13 | add_statistic( 14 | list, 15 | name, 16 | value, 17 | symbol = NULL, 18 | subscript = NULL, 19 | interval = NULL, 20 | level = NULL, 21 | lower = NULL, 22 | upper = NULL 23 | ) 24 | 25 | symbol( 26 | x = c("alpha", "chi_squared", "delta", "guttmans_lambda", "K_squared", "lambda", 27 | "p_hat", "R_squared", "sigma", "t_squared", "tau") 28 | ) 29 | 30 | expect_equal_models(model, expected_tidy_model, tolerance = 0.001) 31 | 32 | write_test_stats(x, path, digits = 6) 33 | } 34 | \arguments{ 35 | \item{m}{A matrix.} 36 | } 37 | \description{ 38 | Functions used under the hood in the tidystats package. 39 | } 40 | \section{Functions}{ 41 | \itemize{ 42 | \item \code{tidy_matrix()}: Function to convert matrix objects to a tidy data frame. 43 | 44 | \item \code{add_statistic()}: Function to add a statistic to list. It helps create the list and ignores 45 | NULL values. 46 | 47 | \item \code{symbol()}: Function to return symbols in ASCII. 48 | 49 | \item \code{expect_equal_models()}: Function to compare tidied models during testing. 50 | 51 | \item \code{write_test_stats()}: Function to save tidied statistics to a file. Since these files are used 52 | during testing, it's important to only store files with correctly tidied 53 | statistics, hence the prompt. 
54 | 55 | }} 56 | \keyword{internal} 57 | -------------------------------------------------------------------------------- /man/quote_source.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/data.R 3 | \docType{data} 4 | \name{quote_source} 5 | \alias{quote_source} 6 | \title{A Many Labs replication of Lorge & Curtiss (1936)} 7 | \format{ 8 | A data frame with 6343 rows and 15 columns: 9 | \describe{ 10 | \item{ID}{participant number} 11 | \item{source}{attributed source of the quote: Washington or Bin 12 | Laden} 13 | \item{response}{evaluation of the quote on a 9-point Likert 14 | scale, with 1 indicating disagreement and 9 indicating agreement} 15 | \item{age}{participant's age} 16 | \item{sex}{participant's sex} 17 | \item{citizenship}{participant's citizenship} 18 | \item{race}{participant's race} 19 | \item{major}{participant's major} 20 | \item{native_language}{participant's native language} 21 | \item{referrer}{location of where the study was conducted} 22 | \item{compensation}{how the participant was compensated for their 23 | participation} 24 | \item{recruitment}{how the participant was recruited} 25 | \item{separation}{description of how the study was administered in 26 | terms of participant isolation} 27 | \item{us_or_international}{whether the study was conducted in the US or 28 | outside of the US (international)} 29 | \item{lab_or_online}{whether the study was conducted in the lab or online} 30 | } 31 | } 32 | \usage{ 33 | quote_source 34 | } 35 | \description{ 36 | Data of multiple studies from the Many Labs project (Klein et al., 2014) 37 | replicating Lorge & Curtiss (1936). 38 | } 39 | \details{ 40 | Lorge and Curtiss (1936) examined how a quotation is perceived when 41 | it is attributed to a liked or disliked individual. The quotation of interest 42 | was: "I hold it that a little rebellion, now and then, is a good thing, and 43 | as necessary in the political world as storms are in the physical world." 44 | In one condition the quotation was attributed to Thomas Jefferson, a liked 45 | individual, and in the other condition it was attributed to Vladimir Lenin, a 46 | disliked individual. More agreement was observed when the quotation was 47 | attributed to Jefferson than Lenin. In the replication studies, the quotation 48 | was: "I have sworn to only live free, even if I find bitter the taste of 49 | death." This quotation was attributed to either George Washington, the 50 | liked individual, or Osama Bin Laden, the disliked individual. 51 | } 52 | \references{ 53 | Lorge, I., & Curtiss, C. C. (1936). Prestige, suggestion, and attitudes. 54 | The Journal of Social Psychology, 7, 386-402. 55 | \doi{10.1080/00224545.1936.9919891} 56 | 57 | Klein, R.A. et al. (2014) Investigating Variation in Replicability: A "Many 58 | Labs" Replication Project. Social Psychology, 45(3), 142-152. 
59 | \doi{10.1027/1864-9335/a000178} 60 | } 61 | \keyword{datasets} 62 | -------------------------------------------------------------------------------- /man/read_stats.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/read_stats.R 3 | \name{read_stats} 4 | \alias{read_stats} 5 | \title{Read a .json file that was produced with \code{\link[=write_stats]{write_stats()}}} 6 | \usage{ 7 | read_stats(file) 8 | } 9 | \arguments{ 10 | \item{file}{A string specifying the path to the tidystats data file.} 11 | } 12 | \description{ 13 | \code{\link[=read_stats]{read_stats()}} can read a .json file containing statistics that was produced 14 | using tidystats. It returns a list containing the statistics, with the 15 | identifier as the name for each list element. 16 | } 17 | \examples{ 18 | # A simple example, assuming there is a file called 'statistics.json' 19 | \dontrun{ 20 | statistics <- read_stats("statistics.json") 21 | } 22 | 23 | # A working example 24 | statistics <- read_stats( 25 | file = system.file("statistics.json", package = "tidystats") 26 | ) 27 | 28 | } 29 | -------------------------------------------------------------------------------- /man/tidy_stats_to_data_frame.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/tidy_stats_to_data_frame.R 3 | \name{tidy_stats_to_data_frame} 4 | \alias{tidy_stats_to_data_frame} 5 | \title{Convert a tidystats list to a data frame} 6 | \usage{ 7 | tidy_stats_to_data_frame(x) 8 | } 9 | \arguments{ 10 | \item{x}{A tidystats list.} 11 | } 12 | \description{ 13 | \code{\link[=tidy_stats_to_data_frame]{tidy_stats_to_data_frame()}} converts a tidystats list to a data frame, 14 | which can then be used to extract specific statistics using standard 15 | subsetting functions (e.g., \code{\link[dplyr:filter]{dplyr::filter()}}). 
16 | } 17 | \examples{ 18 | # Conduct analyses 19 | sleep_wide <- reshape( 20 | sleep, 21 | direction = "wide", 22 | idvar = "ID", 23 | timevar = "group", 24 | sep = "_" 25 | ) 26 | sleep_test <- t.test(sleep_wide$extra_1, sleep_wide$extra_2, paired = TRUE) 27 | 28 | ctl <- c(4.17, 5.58, 5.18, 6.11, 4.50, 4.61, 5.17, 4.53, 5.33, 5.14) 29 | trt <- c(4.81, 4.17, 4.41, 3.59, 5.87, 3.83, 6.03, 4.89, 4.32, 4.69) 30 | group <- gl(2, 10, 20, labels = c("Ctl", "Trt")) 31 | weight <- c(ctl, trt) 32 | lm_D9 <- lm(weight ~ group) 33 | 34 | npk_aov <- aov(yield ~ block + N * P * K, npk) 35 | 36 | # Create an empty list to store the statistics in 37 | statistics <- list() 38 | 39 | # Add statistics 40 | statistics <- statistics |> 41 | add_stats(sleep_test, type = "primary", preregistered = TRUE) |> 42 | add_stats(lm_D9) |> 43 | add_stats(npk_aov, notes = "An ANOVA example") 44 | 45 | # Convert the list to a data frame 46 | df <- tidy_stats_to_data_frame(statistics) 47 | 48 | # Select all the p-values 49 | dplyr::filter(df, statistic_name == "p") 50 | 51 | } 52 | -------------------------------------------------------------------------------- /man/write_stats.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/write_stats.R 3 | \name{write_stats} 4 | \alias{write_stats} 5 | \title{Write a tidystats list to a file} 6 | \usage{ 7 | write_stats(x, path, digits = 6) 8 | } 9 | \arguments{ 10 | \item{x}{A tidystats list.} 11 | 12 | \item{path}{A string specifying the path or connection to write to.} 13 | 14 | \item{digits}{The number of decimal places to use. The default is 6.} 15 | } 16 | \description{ 17 | \code{\link[=write_stats]{write_stats()}} writes a tidystats list to a .json file. 
18 | } 19 | \examples{ 20 | # Conduct a statistical test 21 | sleep_wide <- reshape( 22 | sleep, 23 | direction = "wide", 24 | idvar = "ID", 25 | timevar = "group", 26 | sep = "_" 27 | ) 28 | sleep_test <- t.test(sleep_wide$extra_1, sleep_wide$extra_2, paired = TRUE) 29 | 30 | # Create an empty list 31 | statistics <- list() 32 | 33 | # Add statistics to the list 34 | statistics <- add_stats(statistics, sleep_test) 35 | 36 | # Save the statistics to a file 37 | dir <- tempdir() 38 | write_stats(statistics, file.path(dir, "statistics.json")) 39 | 40 | } 41 | -------------------------------------------------------------------------------- /tests/data/effsize.json: -------------------------------------------------------------------------------- 1 | { 2 | "cohen_d": { 3 | "method": "Cohen's d effect size", 4 | "statistics": [ 5 | { 6 | "name": "Cohen's d", 7 | "symbol": "d", 8 | "value": 1.99598, 9 | "interval": "CI", 10 | "level": 0.95, 11 | "lower": 1.654645, 12 | "upper": 2.337314 13 | } 14 | ], 15 | "package": { 16 | "name": "effsize", 17 | "version": "0.8.1" 18 | } 19 | }, 20 | "hedges_g": { 21 | "method": "Hedges's g effect size", 22 | "statistics": [ 23 | { 24 | "name": "Hedges's g", 25 | "symbol": "g", 26 | "value": 1.988409, 27 | "interval": "CI", 28 | "level": 0.95, 29 | "lower": 1.648797, 30 | "upper": 2.328021 31 | } 32 | ], 33 | "package": { 34 | "name": "effsize", 35 | "version": "0.8.1" 36 | } 37 | }, 38 | "vda": { 39 | "method": "Vargha and Delaney A effect size", 40 | "statistics": [ 41 | { 42 | "name": "Vargha and Delaney A", 43 | "symbol": "A", 44 | "value": 0.9286 45 | } 46 | ], 47 | "package": { 48 | "name": "effsize", 49 | "version": "0.8.1" 50 | } 51 | }, 52 | "cliffs_delta": { 53 | "method": "Cliff's Delta effect size", 54 | "statistics": [ 55 | { 56 | "name": "Cliff's Delta", 57 | "symbol": "δ", 58 | "value": -0.25, 59 | "interval": "CI", 60 | "level": 0.95, 61 | "lower": -0.726585, 62 | "upper": 0.389006 63 | } 64 | ], 65 | "package": { 66 | "name": "effsize", 67 | "version": "0.8.1" 68 | } 69 | } 70 | } 71 | -------------------------------------------------------------------------------- /tests/data/irr.json: -------------------------------------------------------------------------------- 1 | { 2 | "ICC_anxiety": { 3 | "method": "ICC", 4 | "statistics": [ 5 | { 6 | "name": "N subjects", 7 | "symbol": "N", 8 | "subscript": "subjects", 9 | "value": 20 10 | }, 11 | { 12 | "name": "N raters", 13 | "symbol": "N", 14 | "subscript": "raters", 15 | "value": 3 16 | }, 17 | { 18 | "name": "ICC(A,1)", 19 | "value": 0.197998, 20 | "interval": "CI", 21 | "level": 0.95, 22 | "lower": -0.038911, 23 | "upper": 0.493574 24 | }, 25 | { 26 | "name": "statistic", 27 | "symbol": "F", 28 | "value": 1.826772 29 | }, 30 | { 31 | "name": "df numerator", 32 | "symbol": "df", 33 | "subscript": "num.", 34 | "value": 19 35 | }, 36 | { 37 | "name": "df denominator", 38 | "symbol": "df", 39 | "subscript": "den.", 40 | "value": 39.746041 41 | }, 42 | { 43 | "name": "p", 44 | "value": 0.054266 45 | } 46 | ], 47 | "model": "twoway", 48 | "type": "agreement", 49 | "unit": "single", 50 | "alernative": { 51 | "null_value": 0 52 | }, 53 | "package": { 54 | "name": "irr", 55 | "version": "0.84.1" 56 | } 57 | }, 58 | "ICC_high_consistency": { 59 | "method": "ICC", 60 | "statistics": [ 61 | { 62 | "name": "N subjects", 63 | "symbol": "N", 64 | "subscript": "subjects", 65 | "value": 20 66 | }, 67 | { 68 | "name": "N raters", 69 | "symbol": "N", 70 | "subscript": "raters", 71 | "value": 3 72 | }, 73 | { 74 | 
"name": "ICC(C,1)", 75 | "value": 0.845615, 76 | "interval": "CI", 77 | "level": 0.95, 78 | "lower": 0.70808, 79 | "upper": 0.929968 80 | }, 81 | { 82 | "name": "statistic", 83 | "symbol": "F", 84 | "value": 17.431948 85 | }, 86 | { 87 | "name": "df numerator", 88 | "symbol": "df", 89 | "subscript": "num.", 90 | "value": 19 91 | }, 92 | { 93 | "name": "df denominator", 94 | "symbol": "df", 95 | "subscript": "den.", 96 | "value": 38 97 | }, 98 | { 99 | "name": "p", 100 | "value": 2.852714e-13 101 | } 102 | ], 103 | "model": "twoway", 104 | "type": "consistency", 105 | "unit": "single", 106 | "alernative": { 107 | "null_value": 0 108 | }, 109 | "package": { 110 | "name": "irr", 111 | "version": "0.84.1" 112 | } 113 | }, 114 | "ICC_low_agreement": { 115 | "method": "ICC", 116 | "statistics": [ 117 | { 118 | "name": "N subjects", 119 | "symbol": "N", 120 | "subscript": "subjects", 121 | "value": 20 122 | }, 123 | { 124 | "name": "N raters", 125 | "symbol": "N", 126 | "subscript": "raters", 127 | "value": 3 128 | }, 129 | { 130 | "name": "ICC(A,1)", 131 | "value": 0.106051, 132 | "interval": "CI", 133 | "level": 0.95, 134 | "lower": -0.002193, 135 | "upper": 0.345521 136 | }, 137 | { 138 | "name": "statistic", 139 | "symbol": "F", 140 | "value": 17.431948 141 | }, 142 | { 143 | "name": "df numerator", 144 | "symbol": "df", 145 | "subscript": "num.", 146 | "value": 19 147 | }, 148 | { 149 | "name": "df denominator", 150 | "symbol": "df", 151 | "subscript": "den.", 152 | "value": 2.345547 153 | }, 154 | { 155 | "name": "p", 156 | "value": 0.037717 157 | } 158 | ], 159 | "model": "twoway", 160 | "type": "agreement", 161 | "unit": "single", 162 | "alernative": { 163 | "null_value": 0 164 | }, 165 | "package": { 166 | "name": "irr", 167 | "version": "0.84.1" 168 | } 169 | } 170 | } 171 | -------------------------------------------------------------------------------- /tests/data/stats.json: -------------------------------------------------------------------------------- 1 | { 2 | "BF_stats": { 3 | "method": "BF BIC method", 4 | "statistics": [ 5 | { 6 | "name": "BF", 7 | "value": 3.820709, 8 | "subscript": "10" 9 | }, 10 | { 11 | "name": "BF", 12 | "value": 0.261732, 13 | "subscript": "01" 14 | } 15 | ], 16 | "notes": "Wagenmakers (2007) method for calculating Bayes factors" 17 | } 18 | } 19 | -------------------------------------------------------------------------------- /tests/prep/prep_BayesFactor.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | library(BayesFactor) 4 | 5 | statistics <- list() 6 | 7 | # generalTestBF() --------------------------------------------------------- 8 | 9 | set.seed(1) 10 | 11 | data(puzzles) 12 | 13 | generalTestBF <- generalTestBF( 14 | RT ~ shape * color + ID, 15 | data = puzzles, 16 | whichRandom = "ID", 17 | neverExclude = "ID", 18 | progress = FALSE 19 | ) 20 | 21 | statistics <- add_stats(statistics, generalTestBF) 22 | 23 | generalTestBF 24 | 25 | # lmBF() ------------------------------------------------------------------ 26 | 27 | set.seed(1) 28 | 29 | data(puzzles) 30 | 31 | bfFull <- lmBF( 32 | RT ~ shape + color + shape:color + ID, 33 | data = puzzles, 34 | whichRandom = "ID" 35 | ) 36 | 37 | set.seed(1) 38 | 39 | bfMain <- lmBF(RT ~ shape + color + ID, data = puzzles, whichRandom = "ID") 40 | bfMainFull <- bfMain / bfFull 41 | 42 | statistics <- statistics |> 43 | add_stats(bfFull) |> 44 | add_stats(bfMain) |> 45 | 
add_stats(bfMainFull) 46 | 47 | bfFull 48 | bfMain 49 | bfMainFull 50 | 51 | # regressionBF() ---------------------------------------------------------- 52 | 53 | set.seed(1) 54 | 55 | data(attitude) 56 | 57 | attitudeBF <- regressionBF(rating ~ ., data = attitude, progress = FALSE) 58 | attitudeBFBest <- attitudeBF / attitudeBF[63] 59 | 60 | statistics <- statistics |> 61 | add_stats(attitudeBF) |> 62 | add_stats(attitudeBFBest) 63 | 64 | attitudeBF 65 | attitudeBFBest 66 | 67 | # ttestBF() --------------------------------------------------------------- 68 | 69 | set.seed(1) 70 | 71 | diffScores <- sleep$extra[1:10] - sleep$extra[11:20] 72 | 73 | sleepTTestBF <- ttestBF( 74 | x = sleep$extra[sleep$group == 1], 75 | y = sleep$extra[sleep$group == 2], 76 | paired = TRUE 77 | ) 78 | sleepTTestBF_interval <- ttestBF(x = diffScores, nullInterval = c(-Inf, 0)) 79 | 80 | statistics <- statistics |> 81 | add_stats(sleepTTestBF) |> 82 | add_stats(sleepTTestBF_interval) 83 | 84 | sleepTTestBF 85 | sleepTTestBF_interval 86 | 87 | # anovaBF() --------------------------------------------------------------- 88 | 89 | set.seed(1) 90 | 91 | data(puzzles) 92 | 93 | sleepAnovaBF <- anovaBF( 94 | extra ~ group + ID, 95 | data = sleep, 96 | whichRandom = "ID", 97 | progress = FALSE 98 | ) 99 | 100 | set.seed(1) 101 | 102 | puzzlesAnovaBF <- anovaBF( 103 | RT ~ shape * color + ID, 104 | data = puzzles, 105 | whichRandom = "ID", 106 | whichModels = "top", 107 | progress = FALSE 108 | ) 109 | 110 | statistics <- statistics |> 111 | add_stats(sleepAnovaBF) |> 112 | add_stats(puzzlesAnovaBF) 113 | 114 | sleepAnovaBF 115 | puzzlesAnovaBF 116 | 117 | # correlationBF() --------------------------------------------------------- 118 | 119 | set.seed(1) 120 | 121 | correlationBF <- correlationBF(y = iris$Sepal.Length, x = iris$Sepal.Width) 122 | 123 | statistics <- add_stats(statistics, correlationBF) 124 | 125 | correlationBF 126 | 127 | # contingencyTableBF() ---------------------------------------------------- 128 | 129 | set.seed(1) 130 | 131 | data(raceDolls) 132 | 133 | contingencyTableBF <- contingencyTableBF( 134 | raceDolls, 135 | sampleType = "indepMulti", 136 | fixedMargin = "cols" 137 | ) 138 | 139 | statistics <- add_stats(statistics, contingencyTableBF) 140 | 141 | contingencyTableBF 142 | 143 | # proportionBF() ---------------------------------------------------------- 144 | 145 | set.seed(1) 146 | 147 | proportionBF <- proportionBF(y = 15, N = 25, p = .5) 148 | 149 | statistics <- add_stats(statistics, proportionBF) 150 | 151 | proportionBF 152 | 153 | # meta.ttestBF() ---------------------------------------------------------- 154 | 155 | set.seed(1) 156 | 157 | t <- c(-.15, 2.39, 2.42, 2.43) 158 | N <- c(100, 150, 97, 99) 159 | 160 | metaBF <- meta.ttestBF(t, N, rscale = 1, nullInterval = c(0, Inf)) 161 | 162 | statistics <- add_stats(statistics, metaBF) 163 | 164 | metaBF 165 | 166 | # tidy_stats_to_data_frame() ---------------------------------------------- 167 | 168 | df <- tidy_stats_to_data_frame(statistics) 169 | 170 | # write_stats() ----------------------------------------------------------- 171 | 172 | write_test_stats(statistics, "tests/data/BayesFactor.json") 173 | 174 | # Cleanup ----------------------------------------------------------------- 175 | 176 | rm( 177 | attitude, attitudeBF, attitudeBFBest, bfFull, bfMain, bfMainFull, 178 | contingencyTableBF, correlationBF, generalTestBF, metaBF, proportionBF, 179 | puzzles, puzzlesAnovaBF, raceDolls, sleepAnovaBF, sleepTTestBF, 180 | 
sleepTTestBF_interval, diffScores, N, t, df, statistics 181 | ) 182 | -------------------------------------------------------------------------------- /tests/prep/prep_Hmisc.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | library(Hmisc) 4 | 5 | statistics <- list() 6 | 7 | # rcorr() ----------------------------------------------------------------- 8 | 9 | x <- c(-2, -1, 0, 1, 2) 10 | y <- c(4, 1, 0, 1, 4) 11 | z <- c(1, 2, 3, 4, NA) 12 | v <- c(1, 2, 3, 4, 5) 13 | 14 | rcorr <- rcorr(cbind(x, y, z, v), type = "pearson") 15 | rcorr_spearman <- rcorr(cbind(x, y, z, v), type = "spearman") 16 | 17 | statistics <- statistics |> 18 | add_stats(rcorr) |> 19 | add_stats(rcorr_spearman) 20 | 21 | rcorr 22 | rcorr_spearman 23 | 24 | # tidy_stats_to_data_frame() ---------------------------------------------- 25 | 26 | df <- tidy_stats_to_data_frame(statistics) 27 | 28 | # write_stats() ----------------------------------------------------------- 29 | 30 | write_test_stats(statistics, "tests/data/Hmisc.json") 31 | 32 | # Cleanup ----------------------------------------------------------------- 33 | 34 | rm(x, y, z, v, rcorr, rcorr_spearman, df, statistics) 35 | -------------------------------------------------------------------------------- /tests/prep/prep_aov.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | statistics <- list() 4 | 5 | # aov(): aov -------------------------------------------------------------- 6 | 7 | aov <- aov(yield ~ block + N * P * K, npk) 8 | aov_order <- aov(terms(yield ~ block + N * P + K, keep.order = TRUE), npk) 9 | 10 | statistics <- statistics |> 11 | add_stats(aov) |> 12 | add_stats(aov_order) 13 | 14 | summary(aov) 15 | summary(aov_order) 16 | 17 | # aov(): aovlist ---------------------------------------------------------- 18 | 19 | aov_error <- aov(yield ~ N * P * K + Error(block), npk) 20 | 21 | statistics <- add_stats(statistics, aov_error) 22 | 23 | summary(aov_error) 24 | 25 | # tidy_stats_to_data_frame() ---------------------------------------------- 26 | 27 | df <- tidy_stats_to_data_frame(statistics) 28 | 29 | # write_stats() ----------------------------------------------------------- 30 | 31 | write_test_stats(statistics, "tests/data/aov.json") 32 | 33 | # Cleanup ----------------------------------------------------------------- 34 | 35 | rm(aov, aov_error, aov_order, df, statistics) 36 | -------------------------------------------------------------------------------- /tests/prep/prep_brms.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | library(brms) 4 | 5 | statistics <- list() 6 | 7 | # brm() ------------------------------------------------------------------- 8 | 9 | poisson_regression <- brm( 10 | count ~ zAge + zBase * Trt + (1 | patient), 11 | data = epilepsy, 12 | family = poisson(), 13 | prior = c( 14 | prior(normal(0, 10), class = b), 15 | prior(cauchy(0, 2), class = sd) 16 | ), 17 | seed = 1, 18 | file = "./tests/models/brms-poisson-regression" 19 | ) 20 | 21 | ordinal_regression <- brm( 22 | rating ~ period + carry + cs(treat), 23 | data = inhaler, 24 | family = sratio("logit"), 25 | prior = set_prior("normal(0,5)"), 26 | seed = 1, 27 | file = "./tests/models/brms-ordinal-regression" 28 | 
) 29 | 30 | survival_regression <- brm( 31 | time | cens(censored) ~ age * sex + disease + (1 | patient), 32 | data = kidney, 33 | family = lognormal(), 34 | seed = 1, 35 | file = "./tests/models/brms-survival-regression" 36 | ) 37 | 38 | nonlinear_gaussian <- brm( 39 | bf( 40 | cum ~ ult * (1 - exp(-(dev / theta)^omega)), 41 | ult ~ 1 + (1 | AY), 42 | omega ~ 1, 43 | theta ~ 1, 44 | nl = TRUE 45 | ), 46 | data = loss, family = gaussian(), 47 | prior = c( 48 | prior(normal(5000, 1000), nlpar = "ult"), 49 | prior(normal(1, 2), nlpar = "omega"), 50 | prior(normal(45, 10), nlpar = "theta") 51 | ), 52 | control = list(adapt_delta = 0.9), 53 | seed = 1, 54 | file = "./tests/models/brms-nonlinear-gaussian" 55 | ) 56 | 57 | set.seed(1) 58 | heterogeneous_variances <- brm( 59 | bf(y ~ x, sigma ~ 0 + x), 60 | data = data.frame( 61 | y = c(rnorm(50), rnorm(50, 1, 2)), 62 | x = factor(rep(c("a", "b"), each = 50)) 63 | ), 64 | file = "./tests/models/brms-heterogeneous-variances" 65 | ) 66 | 67 | set.seed(1) 68 | quantile_regression <- brm( 69 | bf(y ~ x, quantile = 0.25), 70 | data = data.frame( 71 | y = c(rnorm(50), rnorm(50, 1, 2)), 72 | x = factor(rep(c("a", "b"), each = 50)) 73 | ), 74 | family = asym_laplace(), 75 | seed = 1, 76 | file = "./tests/models/brms-quantile-regression" 77 | ) 78 | 79 | statistics <- statistics |> 80 | add_stats(poisson_regression, args = list(robust = TRUE, mc_se = TRUE)) |> 81 | add_stats(ordinal_regression) |> 82 | add_stats(survival_regression) |> 83 | add_stats(nonlinear_gaussian) |> 84 | add_stats(heterogeneous_variances) |> 85 | add_stats(quantile_regression) 86 | 87 | summary(poisson_regression, robust = TRUE, mc_se = TRUE) 88 | summary(ordinal_regression) 89 | summary(survival_regression) 90 | summary(nonlinear_gaussian) 91 | summary(heterogeneous_variances) 92 | summary(quantile_regression) 93 | 94 | # tidy_stats_to_data_frame() ---------------------------------------------- 95 | 96 | df <- tidy_stats_to_data_frame(statistics) 97 | 98 | # write_stats() ----------------------------------------------------------- 99 | 100 | write_test_stats(statistics, "tests/data/brms.json") 101 | 102 | # Cleanup ----------------------------------------------------------------- 103 | 104 | rm( 105 | poisson_regression, ordinal_regression, survival_regression, 106 | nonlinear_gaussian, heterogeneous_variances, quantile_regression, fit1, 107 | fit2, loo1, loo2, loo_compare, ntrials, success, x, data4, data_het, df, 108 | statistics 109 | ) 110 | -------------------------------------------------------------------------------- /tests/prep/prep_confint.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | statistics <- list() 4 | 5 | # confint() --------------------------------------------------------------- 6 | 7 | D93 <- tibble::tibble( 8 | counts = c(18, 17, 15, 20, 10, 20, 25, 13, 12), 9 | outcome = gl(3, 1, 9), 10 | treatment = gl(3, 3) 11 | ) 12 | 13 | fit <- lm(100 / mpg ~ disp + hp + wt + am, data = mtcars) 14 | CI_fit <- confint(fit) 15 | CI_fit_wt <- confint(fit, "wt") 16 | 17 | glm_D93 <- glm(counts ~ outcome + treatment, data = D93, family = poisson()) 18 | CI_glm_D93_MASS <- confint(glm_D93) # based on profile likelihood 19 | CI_glm_D93_default <- confint.default(glm_D93) # based on asymptotic normality 20 | 21 | statistics <- statistics |> 22 | add_stats(CI_fit, class = "confint") |> 23 | add_stats(CI_fit_wt, class = "confint") |> 24 | 
add_stats(CI_glm_D93_MASS, class = "confint") |> 25 | add_stats(CI_glm_D93_default, class = "confint") 26 | 27 | CI_fit 28 | CI_fit_wt 29 | CI_glm_D93_MASS 30 | CI_glm_D93_default 31 | 32 | # tidy_stats_to_data_frame() ---------------------------------------------- 33 | 34 | df <- tidy_stats_to_data_frame(statistics) 35 | 36 | # write_stats() ----------------------------------------------------------- 37 | 38 | write_test_stats(statistics, "tests/data/confint.json") 39 | 40 | # Cleanup ----------------------------------------------------------------- 41 | 42 | rm( 43 | CI_fit, CI_fit_wt, CI_glm_D93_MASS, CI_glm_D93_default, D93, fit, glm_D93, 44 | df, statistics 45 | ) 46 | -------------------------------------------------------------------------------- /tests/prep/prep_count_data.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | statistics <- list() 4 | 5 | # count_data() ------------------------------------------------------------ 6 | 7 | no_group <- count_data(quote_source) 8 | single_group <- count_data(quote_source, source) 9 | two_groups <- count_data(quote_source, source, sex, pct = TRUE) 10 | 11 | grouped_group <- quote_source |> 12 | dplyr::group_by(source) |> 13 | count_data(sex) 14 | 15 | grouped_group_na_rm <- quote_source |> 16 | dplyr::group_by(source) |> 17 | count_data(sex, na.rm = TRUE) 18 | 19 | statistics <- statistics |> 20 | add_stats(no_group) |> 21 | add_stats(single_group) |> 22 | add_stats(two_groups) |> 23 | add_stats(grouped_group) |> 24 | add_stats(grouped_group_na_rm) 25 | 26 | no_group 27 | single_group 28 | two_groups 29 | grouped_group 30 | grouped_group_na_rm 31 | 32 | # tidy_stats_to_data_frame() ---------------------------------------------- 33 | 34 | df <- tidy_stats_to_data_frame(statistics) 35 | 36 | # write_stats() ----------------------------------------------------------- 37 | 38 | write_test_stats(statistics, "tests/data/count_data.json") 39 | 40 | # Cleanup ----------------------------------------------------------------- 41 | 42 | rm( 43 | no_group, single_group, two_groups, grouped_group, grouped_group_na_rm, df, 44 | statistics 45 | ) 46 | -------------------------------------------------------------------------------- /tests/prep/prep_describe_data.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | statistics <- list() 4 | 5 | # describe_data() --------------------------------------------------------- 6 | 7 | single_var <- describe_data(quote_source, response) 8 | 9 | single_var_w_group <- quote_source |> 10 | dplyr::group_by(source) |> 11 | describe_data(response) 12 | 13 | multiple_vars <- describe_data(quote_source, response, age) 14 | 15 | single_var_w_groups <- quote_source |> 16 | dplyr::group_by(source, sex) |> 17 | describe_data(response) 18 | 19 | single_var_w_groups_wo_na <- quote_source |> 20 | dplyr::group_by(source, sex) |> 21 | describe_data(response, na.rm = FALSE) 22 | 23 | multiple_vars_w_group <- quote_source |> 24 | dplyr::group_by(source) |> 25 | describe_data(response, age) 26 | 27 | single_var_subset <- describe_data(quote_source, response, short = TRUE) 28 | 29 | statistics <- statistics |> 30 | add_stats(single_var) |> 31 | add_stats(single_var_w_group) |> 32 | add_stats(multiple_vars) |> 33 | add_stats(single_var_w_groups) |> 34 | add_stats(single_var_w_groups_wo_na) |> 35 |
add_stats(multiple_vars_w_group) |> 36 | add_stats(single_var_subset) 37 | 38 | single_var 39 | single_var_subset 40 | single_var_w_group 41 | single_var_w_groups 42 | single_var_w_groups_wo_na 43 | multiple_vars 44 | multiple_vars_w_group 45 | 46 | # tidy_stats_to_data_frame() ---------------------------------------------- 47 | 48 | df <- tidy_stats_to_data_frame(statistics) 49 | 50 | # write_stats() ----------------------------------------------------------- 51 | 52 | write_test_stats(statistics, "tests/data/describe_data.json") 53 | 54 | # Cleanup ----------------------------------------------------------------- 55 | 56 | rm( 57 | single_var, single_var_subset, single_var_w_group, single_var_w_groups, 58 | single_var_w_groups_wo_na, multiple_vars, multiple_vars_w_group, df, 59 | statistics 60 | ) 61 | -------------------------------------------------------------------------------- /tests/prep/prep_effectsize.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | library(effectsize) 4 | 5 | statistics <- list() 6 | 7 | # cohens_d() -------------------------------------------------------------- 8 | 9 | cohens_d <- cohens_d(mpg ~ am, data = mtcars) 10 | cohens_d_not_pooled <- cohens_d(mpg ~ am, data = mtcars, pooled_sd = FALSE) 11 | cohens_d_mu <- cohens_d(mpg ~ am, data = mtcars, mu = -5) 12 | cohens_d_less <- cohens_d(mpg ~ am, data = mtcars, alternative = "less") 13 | cohens_d_one_sample <- cohens_d(wt ~ 1, data = mtcars) 14 | cohens_d_paired <- cohens_d( 15 | Pair(extra[group == 1], extra[group == 2]) ~ 1, 16 | data = sleep 17 | ) 18 | 19 | statistics <- statistics |> 20 | add_stats(cohens_d) |> 21 | add_stats(cohens_d_not_pooled) |> 22 | add_stats(cohens_d_mu) |> 23 | add_stats(cohens_d_less) |> 24 | add_stats(cohens_d_one_sample) |> 25 | add_stats(cohens_d_paired) 26 | 27 | cohens_d 28 | cohens_d_not_pooled 29 | cohens_d_mu 30 | cohens_d_less 31 | cohens_d_one_sample 32 | cohens_d_paired 33 | 34 | # hedges_g() -------------------------------------------------------------- 35 | 36 | hedges_g <- hedges_g(mpg ~ am, data = mtcars) 37 | hedges_g_not_pooled <- hedges_g(mpg ~ am, data = mtcars, pooled_sd = FALSE) 38 | hedges_g_mu <- hedges_g(mpg ~ am, data = mtcars, mu = -5) 39 | hedges_g_less <- hedges_g(mpg ~ am, data = mtcars, alternative = "less") 40 | hedges_g_one_sample <- hedges_g(wt ~ 1, data = mtcars) 41 | hedges_g_paired <- hedges_g( 42 | Pair(extra[group == 1], extra[group == 2]) ~ 1, 43 | data = sleep 44 | ) 45 | 46 | statistics <- statistics |> 47 | add_stats(hedges_g) |> 48 | add_stats(hedges_g_not_pooled) |> 49 | add_stats(hedges_g_mu) |> 50 | add_stats(hedges_g_less) |> 51 | add_stats(hedges_g_one_sample) |> 52 | add_stats(hedges_g_paired) 53 | 54 | hedges_g 55 | hedges_g_not_pooled 56 | hedges_g_mu 57 | hedges_g_less 58 | hedges_g_one_sample 59 | hedges_g_paired 60 | 61 | # glass_delta() ----------------------------------------------------------- 62 | 63 | glass_delta <- glass_delta(mpg ~ am, data = mtcars) 64 | glass_delta_mu <- glass_delta(mpg ~ am, data = mtcars, mu = -5) 65 | glass_delta_less <- glass_delta(mpg ~ am, data = mtcars, alternative = "less") 66 | 67 | statistics <- statistics |> 68 | add_stats(glass_delta) |> 69 | add_stats(glass_delta_mu) |> 70 | add_stats(glass_delta_less) 71 | 72 | glass_delta 73 | glass_delta_mu 74 | glass_delta_less 75 | 76 | # tidy_stats_to_data_frame() ---------------------------------------------- 77 | 78 | df <- 
tidy_stats_to_data_frame(statistics) 79 | 80 | # write_stats() ----------------------------------------------------------- 81 | 82 | write_test_stats(statistics, "tests/data/effectsize.json") 83 | 84 | # Cleanup ----------------------------------------------------------------- 85 | 86 | rm( 87 | statistics, cohens_d, cohens_d_not_pooled, cohens_d_mu, cohens_d_less, 88 | cohens_d_one_sample, cohens_d_paired, glass_delta, glass_delta_less, 89 | glass_delta_mu, hedges_g, hedges_g_less, hedges_g_mu, hedges_g_not_pooled, 90 | hedges_g_one_sample, hedges_g_paired, df 91 | ) 92 | -------------------------------------------------------------------------------- /tests/prep/prep_effsize.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | library(effsize) 4 | 5 | statistics <- list() 6 | 7 | # cohen.d() --------------------------------------------------------------- 8 | 9 | set.seed(1) 10 | treatment <- rnorm(100, mean = 10) 11 | control <- rnorm(100, mean = 12) 12 | d <- c(treatment, control) 13 | f <- rep(c("Treatment", "Control"), each = 100) 14 | 15 | cohen_d <- cohen.d(d ~ f) 16 | hedges_g <- cohen.d(d ~ f, hedges.correction = TRUE) 17 | 18 | statistics <- statistics |> 19 | add_stats(cohen_d) |> 20 | add_stats(hedges_g) 21 | 22 | cohen_d 23 | hedges_g 24 | 25 | # VD.A() ------------------------------------------------------------------ 26 | 27 | vda <- VD.A(d ~ f) 28 | 29 | statistics <- add_stats(statistics, vda) 30 | 31 | vda 32 | 33 | # cliff.delta() ----------------------------------------------------------- 34 | 35 | treatment <- c(10, 10, 20, 20, 20, 30, 30, 30, 40, 50) 36 | control <- c(10, 20, 30, 40, 40, 50) 37 | 38 | cliffs_delta <- cliff.delta(treatment, control, return.dm = TRUE) 39 | 40 | statistics <- add_stats(statistics, cliffs_delta) 41 | 42 | cliffs_delta 43 | 44 | # tidy_stats_to_data_frame() ---------------------------------------------- 45 | 46 | df <- tidy_stats_to_data_frame(statistics) 47 | 48 | # write_stats() ----------------------------------------------------------- 49 | 50 | write_test_stats(statistics, "tests/data/effsize.json") 51 | 52 | # Cleanup ----------------------------------------------------------------- 53 | 54 | rm( 55 | statistics, control, d, f, treatment, cohen_d, hedges_g, vda, cliffs_delta, 56 | df 57 | ) 58 | -------------------------------------------------------------------------------- /tests/prep/prep_glm.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | statistics <- list() 4 | 5 | # glm() ------------------------------------------------------------------- 6 | 7 | # Get data 8 | d.AD <- tibble::tibble( 9 | treatment = gl(3, 3), 10 | outcome = gl(3, 1, 9), 11 | counts = c(18, 17, 15, 20, 10, 20, 25, 13, 12) 12 | ) 13 | 14 | anorexia <- tibble::tibble( 15 | Treat = c( 16 | "Cont", "Cont", "Cont", "Cont", "Cont", "Cont", "Cont", "Cont", "Cont", 17 | "Cont", "Cont", "Cont", "Cont", "Cont", "Cont", "Cont", "Cont", "Cont", 18 | "Cont", "Cont", "Cont", "Cont", "Cont", "Cont", "Cont", "Cont", "CBT", 19 | "CBT", "CBT", "CBT", "CBT", "CBT", "CBT", "CBT", "CBT", "CBT", "CBT", 20 | "CBT", "CBT", "CBT", "CBT", "CBT", "CBT", "CBT", "CBT", "CBT", "CBT", "CBT", 21 | "CBT", "CBT", "CBT", "CBT", "CBT", "CBT", "CBT", "FT", "FT", "FT", "FT", 22 | "FT", "FT", "FT", "FT", "FT", "FT", "FT", "FT", "FT", "FT", "FT", "FT", "FT" 23 
| ), 24 | Prewt = c( 25 | 80.7, 89.4, 91.8, 74.0, 78.1, 88.3, 87.3, 75.1, 80.6, 78.4, 77.6, 88.7, 26 | 81.3, 78.1, 70.5, 77.3, 85.2, 86.0, 84.1, 79.7, 85.5, 84.4, 79.6, 77.5, 27 | 72.3, 89.0, 80.5, 84.9, 81.5, 82.6, 79.9, 88.7, 94.9, 76.3, 81.0, 80.5, 28 | 85.0, 89.2, 81.3, 76.5, 70.0, 80.4, 83.3, 83.0, 87.7, 84.2, 86.4, 76.5, 29 | 80.2, 87.8, 83.3, 79.7, 84.5, 80.8, 87.4, 83.8, 83.3, 86.0, 82.5, 86.7, 30 | 79.6, 76.9, 94.2, 73.4, 80.5, 81.6, 82.1, 77.6, 83.5, 89.9, 86.0, 87.3 31 | ), 32 | Postwt = c( 33 | 80.2, 80.1, 86.4, 86.3, 76.1, 78.1, 75.1, 86.7, 73.5, 84.6, 77.4, 79.5, 34 | 89.6, 81.4, 81.8, 77.3, 84.2, 75.4, 79.5, 73.0, 88.3, 84.7, 81.4, 81.2, 35 | 88.2, 78.8, 82.2, 85.6, 81.4, 81.9, 76.4, 103.6, 98.4, 93.4, 73.4, 82.1, 36 | 96.7, 95.3, 82.4, 72.5, 90.9, 71.3, 85.4, 81.6, 89.1, 83.9, 82.7, 75.7, 37 | 82.6, 100.4, 85.2, 83.6, 84.6, 96.2, 86.7, 95.2, 94.3, 91.5, 91.9, 100.3, 38 | 76.7, 76.8, 101.6, 94.9, 75.2, 77.8, 95.5, 90.7, 92.5, 93.8, 91.7, 98.0 39 | ) 40 | ) 41 | 42 | clotting <- tibble::tibble( 43 | u = c(5, 10, 15, 20, 30, 40, 60, 80, 100), 44 | lot1 = c(118, 58, 42, 35, 27, 25, 21, 19, 18), 45 | lot2 = c(69, 35, 26, 21, 18, 16, 13, 12, 12) 46 | ) 47 | 48 | admission <- tibble::tibble( 49 | admit = c(0, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0), 50 | gre = c(380, 660, 800, 640, 520, 760, 560, 400, 540, 700, 800), 51 | gpa = c(3.61, 3.67, 4.00, 3.19, 2.93, 3.00, 2.98, 3.08, 3.39, 3.92, 4.00), 52 | rank = c(3, 3, 1, 4, 4, 2, 1, 2, 3, 2, 4) 53 | ) 54 | 55 | glm_poisson <- glm( 56 | counts ~ outcome + treatment, 57 | data = d.AD, 58 | family = poisson() 59 | ) 60 | glm_gaussian <- glm(Postwt ~ Prewt + Treat + offset(Prewt), data = anorexia) 61 | glm_gamma <- glm(lot1 ~ log(u), data = clotting, family = Gamma) 62 | glm_gamma_fs <- glm(lot2 ~ log(u) + log(u^2), data = clotting, family = Gamma) 63 | glm_binomial <- glm( 64 | admit ~ gre + gpa + rank, 65 | data = admission, 66 | family = binomial(link = "logit") 67 | ) 68 | 69 | statistics <- statistics |> 70 | add_stats(glm_poisson) |> 71 | add_stats(glm_gaussian) |> 72 | add_stats(glm_gamma) |> 73 | add_stats(glm_gamma_fs) |> 74 | add_stats(glm_binomial) 75 | 76 | summary(glm_poisson) 77 | summary(glm_gaussian) 78 | summary(glm_gamma) 79 | summary(glm_gamma_fs) 80 | summary(glm_binomial) 81 | 82 | # anova() ----------------------------------------------------------------- 83 | 84 | d_AD <- tibble::tibble( 85 | treatment = gl(3, 3), 86 | outcome = gl(3, 1, 9), 87 | counts = c(18, 17, 15, 20, 10, 20, 25, 13, 12) 88 | ) 89 | 90 | glm_D93 <- glm(counts ~ outcome + treatment, family = poisson(), data = d_AD) 91 | glm_D93a <- update(glm_D93, ~ treatment * outcome) 92 | 93 | anova_glm <- anova(glm_D93) 94 | anova_glm_cp <- anova(glm_D93, test = "Cp") 95 | anova_glm_chisq <- anova(glm_D93, test = "Chisq") 96 | anova_glm_rao <- anova(glm_D93, glm_D93a, test = "Rao") 97 | 98 | statistics <- statistics |> 99 | add_stats(anova_glm) |> 100 | add_stats(anova_glm_cp) |> 101 | add_stats(anova_glm_chisq) |> 102 | add_stats(anova_glm_rao) 103 | 104 | anova_glm 105 | anova_glm_cp 106 | anova_glm_chisq 107 | anova_glm_rao 108 | 109 | # tidy_stats_to_data_frame() ---------------------------------------------- 110 | 111 | df <- tidy_stats_to_data_frame(statistics) 112 | 113 | # write_stats() ----------------------------------------------------------- 114 | 115 | write_test_stats(statistics, "tests/data/glm.json") 116 | 117 | # Cleanup ----------------------------------------------------------------- 118 | 119 | rm( 120 | admission, anorexia, clotting, d_AD, 
glm_binomial, glm_gamma, glm_gamma_fs, 121 | glm_gaussian, glm_poisson, glm_D93, glm_D93a, anova_glm, anova_glm_cp, 122 | anova_glm_chisq, anova_glm_rao, df, statistics 123 | ) 124 | -------------------------------------------------------------------------------- /tests/prep/prep_irr.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | library(irr) 4 | 5 | statistics <- list() 6 | 7 | # icc() ------------------------------------------------------------------- 8 | 9 | set.seed(1) 10 | 11 | data(anxiety) 12 | 13 | r1 <- round(rnorm(20, 10, 4)) 14 | r2 <- round(r1 + 10 + rnorm(20, 0, 2)) 15 | r3 <- round(r1 + 20 + rnorm(20, 0, 2)) 16 | 17 | ICC_anxiety <- icc(anxiety, model = "twoway", type = "agreement") 18 | ICC_high_consistency <- icc(cbind(r1, r2, r3), "twoway") 19 | ICC_low_agreement <- icc(cbind(r1, r2, r3), "twoway", "agreement") 20 | 21 | statistics <- statistics |> 22 | add_stats(ICC_anxiety) |> 23 | add_stats(ICC_high_consistency) |> 24 | add_stats(ICC_low_agreement) 25 | 26 | ICC_anxiety 27 | ICC_high_consistency 28 | ICC_low_agreement 29 | 30 | # tidy_stats_to_data_frame() ---------------------------------------------- 31 | 32 | df <- tidy_stats_to_data_frame(statistics) 33 | 34 | # write_stats() ----------------------------------------------------------- 35 | 36 | write_test_stats(statistics, "tests/data/irr.json") 37 | 38 | # Cleanup ----------------------------------------------------------------- 39 | 40 | rm( 41 | anxiety, ICC_anxiety, ICC_high_consistency, ICC_low_agreement, statistics, df, 42 | r1, r2, r3 43 | ) 44 | -------------------------------------------------------------------------------- /tests/prep/prep_lm.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | statistics <- list() 4 | 5 | # lm() -------------------------------------------------------------------- 6 | 7 | ctl <- c(4.17, 5.58, 5.18, 6.11, 4.50, 4.61, 5.17, 4.53, 5.33, 5.14) 8 | trt <- c(4.81, 4.17, 4.41, 3.59, 5.87, 3.83, 6.03, 4.89, 4.32, 4.69) 9 | group <- gl(2, 10, 20, labels = c("Ctl", "Trt")) 10 | weight <- c(ctl, trt) 11 | 12 | lm <- lm(weight ~ group) 13 | lm_wo_intercept <- lm(weight ~ group - 1) # omitting intercept 14 | 15 | statistics <- statistics |> 16 | add_stats(lm) |> 17 | add_stats(lm_wo_intercept) 18 | 19 | summary(lm) 20 | summary(lm_wo_intercept) 21 | 22 | # anova() ----------------------------------------------------------------- 23 | 24 | fit <- lm(sr ~ ., data = LifeCycleSavings) 25 | fit0 <- lm(sr ~ 1, data = LifeCycleSavings) 26 | fit1 <- update(fit0, . ~ . + pop15) 27 | fit2 <- update(fit1, . ~ . + pop75) 28 | fit3 <- update(fit2, . ~ . + dpi) 29 | fit4 <- update(fit3, . ~ . 
+ ddpi) 30 | 31 | anova_lm <- anova(fit) 32 | anova_lm_fits <- anova(fit0, fit1, fit2, fit3, fit4, test = "F") 33 | anova_lm_order <- anova(fit4, fit2, fit0, test = "F") 34 | anova_lm_chisq <- anova(fit4, fit2, fit0, test = "Chisq") 35 | anova_lm_cp <- anova(fit4, fit2, fit0, test = "Cp") 36 | 37 | statistics <- statistics |> 38 | add_stats(anova_lm) |> 39 | add_stats(anova_lm_fits) |> 40 | add_stats(anova_lm_order) |> 41 | add_stats(anova_lm_chisq) |> 42 | add_stats(anova_lm_cp) 43 | 44 | anova_lm 45 | anova_lm_fits 46 | anova_lm_order 47 | anova_lm_chisq 48 | anova_lm_cp 49 | 50 | # tidy_stats_to_data_frame() ---------------------------------------------- 51 | 52 | df <- tidy_stats_to_data_frame(statistics) 53 | 54 | # write_stats() ----------------------------------------------------------- 55 | 56 | write_test_stats(statistics, "tests/data/lm.json") 57 | 58 | # Cleanup ----------------------------------------------------------------- 59 | 60 | rm( 61 | df, lm, lm_wo_intercept, statistics, ctl, group, trt, weight, 62 | fit, fit0, fit1, fit2, fit3, fit4, anova_lm, anova_lm_chisq, anova_lm_cp, 63 | anova_lm_fits, anova_lm_order 64 | ) 65 | -------------------------------------------------------------------------------- /tests/prep/prep_lme4.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | statistics <- list() 4 | 5 | # lmer() ------------------------------------------------------------------ 6 | 7 | lme4 <- lme4::lmer(Reaction ~ Days + (1 | Subject), lme4::sleepstudy) 8 | lme4_ML <- lme4::lmer( 9 | Reaction ~ Days + (1 | Subject), 10 | lme4::sleepstudy, 11 | REML = FALSE 12 | ) 13 | lme4_slopes <- lme4::lmer(Reaction ~ Days + (Days || Subject), lme4::sleepstudy) 14 | 15 | statistics <- statistics |> 16 | add_stats(lme4) |> 17 | add_stats(lme4_ML) |> 18 | add_stats(lme4_slopes) 19 | 20 | summary(lme4) 21 | summary(lme4_ML) 22 | summary(lme4_slopes) 23 | 24 | # anova() ----------------------------------------------------------------- 25 | 26 | anova_lme4 <- anova(lme4) 27 | anova_models <- anova(lme4, lme4_slopes) 28 | 29 | statistics <- statistics |> 30 | add_stats(anova_lme4) |> 31 | add_stats(anova_models) 32 | 33 | anova_lme4 34 | anova_models 35 | 36 | # tidy_stats_to_data_frame() ---------------------------------------------- 37 | 38 | df <- tidy_stats_to_data_frame(statistics) 39 | 40 | # write_stats() ----------------------------------------------------------- 41 | 42 | write_test_stats(statistics, "tests/data/lme4.json") 43 | 44 | # Cleanup ----------------------------------------------------------------- 45 | 46 | rm(anova_lme4, anova_models, lme4, lme4_ML, lme4_slopes, df, statistics) 47 | -------------------------------------------------------------------------------- /tests/prep/prep_lmerTest.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | statistics <- list() 4 | 5 | # lmer() ------------------------------------------------------------------ 6 | 7 | lmerTest1 <- lmerTest::lmer( 8 | Reaction ~ Days + (Days | Subject), lme4::sleepstudy 9 | ) 10 | lmerTest2 <- lmerTest::lmer( 11 | Informed.liking ~ 12 | Gender + Information * Product + (1 | Consumer) + (1 | Consumer:Product), 13 | data = lmerTest::ham 14 | ) 15 | lmerTest_ML <- lmerTest::lmer( 16 | Reaction ~ Days + (Days | Subject), lme4::sleepstudy, 17 | REML = FALSE 18 | ) 19 | 20 | 
statistics <- statistics |> 21 | add_stats(lmerTest1) |> 22 | add_stats(lmerTest2) |> 23 | add_stats(lmerTest_ML) 24 | 25 | summary(lmerTest1) 26 | summary(lmerTest2) 27 | summary(lmerTest_ML) 28 | 29 | # anova() ----------------------------------------------------------------- 30 | 31 | m0 <- lmerTest::lmer(Reaction ~ Days + (1 | Subject), lme4::sleepstudy) 32 | m <- lmerTest::lmer(Reaction ~ Days + (Days | Subject), lme4::sleepstudy) 33 | 34 | anova_lmerTest <- anova(m) 35 | anova_lmerTest_lme4 <- anova(m, ddf = "lme4") 36 | anova_lmerTest_fit <- anova(m0, m) 37 | 38 | statistics <- statistics |> 39 | add_stats(anova_lmerTest) |> 40 | add_stats(anova_lmerTest_lme4) |> 41 | add_stats(anova_lmerTest_fit) 42 | 43 | anova_lmerTest 44 | anova_lmerTest_lme4 45 | anova_lmerTest_fit 46 | 47 | # tidy_stats_to_data_frame() ---------------------------------------------- 48 | 49 | df <- tidy_stats_to_data_frame(statistics) 50 | 51 | # write_stats() ----------------------------------------------------------- 52 | 53 | write_test_stats(statistics, "tests/data/lmerTest.json") 54 | 55 | # Cleanup ----------------------------------------------------------------- 56 | 57 | rm( 58 | anova_lmerTest, anova_lmerTest_fit, anova_lmerTest_lme4, lmerTest_ML, 59 | lmerTest1, lmerTest2, m, m0, df, statistics 60 | ) 61 | -------------------------------------------------------------------------------- /tests/prep/prep_main.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | statistics <- list() 4 | 5 | # t.test(), lm(), and aov() ----------------------------------------------- 6 | 7 | sleep_wide <- reshape( 8 | sleep, 9 | direction = "wide", 10 | idvar = "ID", 11 | timevar = "group", 12 | sep = "_" 13 | ) 14 | 15 | D9 <- tibble::tibble( 16 | group = gl(2, 10, 20, labels = c("Ctl", "Trt")), 17 | weight = c( 18 | 4.17, 5.58, 5.18, 6.11, 4.50, 4.61, 5.17, 4.53, 5.33, 5.14, 4.81, 4.17, 19 | 4.41, 3.59, 5.87, 3.83, 6.03, 4.89, 4.32, 4.69 20 | ) 21 | ) 22 | 23 | sleep_t_test <- t.test(sleep_wide$extra_1, sleep_wide$extra_2, paired = TRUE) 24 | D9_lm <- lm(weight ~ group, data = D9) 25 | npk_aov <- aov(yield ~ block + N * P * K, npk) 26 | 27 | statistics <- statistics |> 28 | add_stats(sleep_t_test, type = "primary") |> 29 | add_stats(D9_lm, preregistered = FALSE) |> 30 | add_stats(npk_aov, notes = "An ANOVA example") 31 | 32 | sleep_t_test 33 | summary(D9_lm) 34 | summary(npk_aov) 35 | 36 | # tidy_stats_to_data_frame() ---------------------------------------------- 37 | 38 | df <- tidy_stats_to_data_frame(statistics) 39 | readr::write_csv(df, "tests/data/main_df.csv") 40 | 41 | # write_stats() ----------------------------------------------------------- 42 | 43 | write_test_stats(statistics, "tests/data/main.json") 44 | 45 | # Cleanup ----------------------------------------------------------------- 46 | 47 | rm(sleep_wide, sleep_t_test, D9, D9_lm, npk_aov, df, statistics) 48 | -------------------------------------------------------------------------------- /tests/prep/prep_ordinal.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | library(ordinal) 4 | 5 | statistics <- list() 6 | 7 | # clm() ------------------------------------------------------------------- 8 | 9 | data(wine) 10 | 11 | fm1 <- clm(rating ~ temp * contact, data = wine) 12 | 13 | statistics <- statistics |> 14 | 
add_stats(fm1) 15 | 16 | summary(fm1) 17 | 18 | # anova() ----------------------------------------------------------------- 19 | 20 | fm1 <- clm(rating ~ temp * contact, data = wine) 21 | fm2 <- update(fm1, ~ . - temp:contact) 22 | 23 | anova <- anova(fm1, fm2) 24 | 25 | statistics <- statistics |> 26 | add_stats(anova) 27 | 28 | anova 29 | 30 | # update() ---------------------------------------------------------------- 31 | 32 | fm1_prt <- update(fm1, link = "probit") 33 | fm1_ll <- update(fm1, link = "loglog") 34 | fm1_cll <- update(fm1, link = "cloglog") 35 | fm1_cct <- update(fm1, link = "cauchit") 36 | fm1_symmetric <- update(fm1, threshold = "symmetric") 37 | fm1_equidistant <- update(fm1, threshold = "equidistant") 38 | 39 | statistics <- statistics |> 40 | add_stats(fm1_prt) |> 41 | add_stats(fm1_ll) |> 42 | add_stats(fm1_cll) |> 43 | add_stats(fm1_cct) |> 44 | add_stats(fm1_symmetric) |> 45 | add_stats(fm1_equidistant) 46 | 47 | summary(fm1_prt) 48 | summary(fm1_cll) 49 | summary(fm1_ll) 50 | summary(fm1_cct) 51 | summary(fm1_symmetric) 52 | summary(fm1_equidistant) 53 | 54 | # tidy_stats_to_data_frame() ---------------------------------------------- 55 | 56 | df <- tidy_stats_to_data_frame(statistics) 57 | 58 | # write_stats() ----------------------------------------------------------- 59 | 60 | write_test_stats(statistics, "tests/data/ordinal.json") 61 | 62 | # Cleanup ----------------------------------------------------------------- 63 | 64 | rm( 65 | fm1, fm2, anova, fm1_prt, fm1_cll, fm1_ll, fm1_cct, fm1_symmetric, 66 | fm1_equidistant, df, statistics, wine 67 | ) 68 | -------------------------------------------------------------------------------- /tests/prep/prep_pairwise_htest.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | statistics <- list() 4 | 5 | # pairwise.t.test() ------------------------------------------------------- 6 | 7 | Month <- factor(airquality$Month, labels = month.abb[5:9]) 8 | 9 | pairwise_t_test <- pairwise.t.test(airquality$Ozone, Month) 10 | pairwise_t_test_nonpooled <- pairwise.t.test( 11 | airquality$Ozone, 12 | Month, 13 | p.adjust.method = "bonf", 14 | pool.sd = FALSE 15 | ) 16 | pairwise_t_test_paired <- pairwise.t.test( 17 | c(1, 2, 3, 1, 2, 4), 18 | c(1, 1, 2, 2, 3, 3), 19 | paired = TRUE 20 | ) 21 | 22 | statistics <- statistics |> 23 | add_stats(pairwise_t_test) |> 24 | add_stats(pairwise_t_test_paired) |> 25 | add_stats(pairwise_t_test_nonpooled) 26 | 27 | pairwise_t_test 28 | pairwise_t_test_paired 29 | pairwise_t_test_nonpooled 30 | 31 | # pairwise.prop.test() ---------------------------------------------------- 32 | 33 | smokers <- c(83, 90, 129, 70) 34 | patients <- c(86, 93, 136, 82) 35 | 36 | pairwise_prop_test <- pairwise.prop.test(smokers, patients) 37 | 38 | statistics <- add_stats(statistics, pairwise_prop_test) 39 | 40 | pairwise_prop_test 41 | 42 | # pairwise.wilcox.test() -------------------------------------------------- 43 | 44 | pairwise_wilcox_test <- pairwise.wilcox.test( 45 | c(1, 2, 3, 4, 5, 6, 7, 8, 9, 11), 46 | c(1, 1, 1, 1, 1, 2, 2, 2, 2, 2) 47 | ) 48 | pairwise_wilcox_test_paired <- pairwise.wilcox.test( 49 | PlantGrowth$weight, 50 | PlantGrowth$group, 51 | p.adjust.method = "BH", 52 | paired = TRUE 53 | ) 54 | 55 | statistics <- statistics |> 56 | add_stats(pairwise_wilcox_test) |> 57 | add_stats(pairwise_wilcox_test_paired) 58 | 59 | pairwise_wilcox_test 60 | pairwise_wilcox_test_paired 61 | 
62 | # tidy_stats_to_data_frame() ---------------------------------------------- 63 | 64 | df <- tidy_stats_to_data_frame(statistics) 65 | 66 | # write_stats() ----------------------------------------------------------- 67 | 68 | write_test_stats(statistics, "tests/data/pairwise_htest.json") 69 | 70 | # Cleanup ----------------------------------------------------------------- 71 | 72 | rm( 73 | Month, pairwise_prop_test, pairwise_t_test, pairwise_t_test_nonpooled, 74 | pairwise_t_test_paired, pairwise_wilcox_test, pairwise_wilcox_test_paired, 75 | patients, smokers, statistics 76 | ) 77 | -------------------------------------------------------------------------------- /tests/prep/prep_psych.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | library(psych) 4 | 5 | statistics <- list() 6 | 7 | # alpha() ----------------------------------------------------------------- 8 | 9 | set.seed(42) 10 | 11 | r4 <- sim.congeneric(loads = c(0.8, 0.7, 0.6, 0.5), low = -3, high = 3) 12 | alpha_r4 <- alpha(r4) 13 | 14 | r9 <- sim.hierarchical() 15 | alpha_r9 <- alpha(r9) 16 | 17 | two_f <- sim.item(8) 18 | alpha_two_f <- alpha(two_f, keys = c("V3", "V4", "V5", "V6"), n.iter = 500) 19 | 20 | cov_two <- cov(two_f) 21 | alpha_cov_two <- alpha(cov_two, check.keys = TRUE) 22 | 23 | alpha_two_f_check <- alpha(two_f, check.keys = TRUE) 24 | 25 | items <- sim.congeneric( 26 | N = 500, short = FALSE, low = -2, high = 2, 27 | categorical = TRUE 28 | ) 29 | alpha_a4 <- alpha(items$observed) 30 | 31 | statistics <- statistics |> 32 | add_stats(alpha_r4) |> 33 | add_stats(alpha_r9) |> 34 | add_stats(alpha_two_f) |> 35 | add_stats(alpha_cov_two) |> 36 | add_stats(alpha_two_f_check) |> 37 | add_stats(alpha_a4) 38 | 39 | alpha_r4 40 | alpha_r9 41 | alpha_two_f 42 | alpha_cov_two 43 | alpha_two_f_check 44 | alpha_a4 45 | 46 | # corr.test() ------------------------------------------------------------- 47 | 48 | ct <- corr.test(attitude) 49 | cts <- corr.test(attitude[1:3], attitude[4:6]) 50 | cts_single <- corr.test(attitude[1:2], attitude[1:2]) 51 | cts_spearman <- corr.test(attitude[1:3], attitude[4:6], method = "spearman") 52 | cts_kendall <- corr.test(attitude[1:3], attitude[4:6], method = "kendall") 53 | sats <- corr.test(sat.act[1:3], sat.act[4:6], adjust = "none") 54 | sats_no_ci <- corr.test(sat.act[1:3], sat.act[4:6], adjust = "none", ci = FALSE) 55 | sats_alpha <- corr.test(sat.act[1:3], sat.act[4:6], adjust = "none", alpha = .1) 56 | 57 | statistics <- statistics |> 58 | add_stats(ct) |> 59 | add_stats(cts) |> 60 | add_stats(sats) |> 61 | add_stats(sats_no_ci) |> 62 | add_stats(sats_alpha) 63 | 64 | print(ct, short = FALSE) 65 | cts 66 | print(cts_kendall, short = FALSE) 67 | sats 68 | sats_no_ci 69 | sats_alpha 70 | 71 | # mardia() ---------------------------------------------------------------- 72 | 73 | set.seed(1) 74 | 75 | x <- matrix(rnorm(1000), ncol = 10) 76 | 77 | mardia_attitude <- mardia(attitude, plot = FALSE) 78 | mardia_x <- mardia(x, plot = FALSE) 79 | 80 | statistics <- statistics |> 81 | add_stats(mardia_attitude) |> 82 | add_stats(mardia_x) 83 | 84 | mardia_attitude 85 | mardia_x 86 | 87 | # ICC() ------------------------------------------------------------------- 88 | 89 | sf <- matrix( 90 | ncol = 4, byrow = TRUE, 91 | c( 92 | 9, 2, 5, 8, 93 | 6, 1, 3, 2, 94 | 8, 4, 6, 8, 95 | 7, 1, 2, 6, 96 | 10, 5, 6, 9, 97 | 6, 2, 4, 7 98 | ) 99 | ) 100 | colnames(sf) <- paste("J", 1:4, 
sep = "") 101 | rownames(sf) <- paste("S", 1:6, sep = "") 102 | 103 | icc <- ICC(sf, lmer = FALSE, alpha = .2) 104 | 105 | statistics <- add_stats(statistics, icc) 106 | 107 | icc 108 | 109 | # tidy_stats_to_data_frame() ---------------------------------------------- 110 | 111 | df <- tidy_stats_to_data_frame(statistics) 112 | 113 | # Save stats -------------------------------------------------------------- 114 | 115 | write_test_stats(statistics, "tests/data/psych.json") 116 | 117 | # Cleanup ----------------------------------------------------------------- 118 | 119 | rm( 120 | statistics, df, alpha_r4, r4, r9, two_f, items, alpha_r9, alpha_two_f, 121 | alpha_cov_two, alpha_two_f_check, alpha_a4, ct, cts, cts_kendall, sats, 122 | sats_no_ci, sats_alpha, x, mardia_attitude, mardia_x, sf, icc 123 | ) 124 | -------------------------------------------------------------------------------- /tests/prep/prep_quantreg.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | library(quantreg) 4 | 5 | statistics <- list() 6 | 7 | # rq() -------------------------------------------------------------------- 8 | 9 | data(stackloss) 10 | data(engel) 11 | 12 | rq_median <- rq( 13 | stack.loss ~ stack.x, 14 | tau = .5, 15 | stackloss 16 | ) 17 | 18 | rq_sequence <- rq( 19 | foodexp ~ income, 20 | tau = c(.05, .1, .25, .75, .9, .95), 21 | engel 22 | ) 23 | 24 | statistics <- statistics |> 25 | add_stats(rq_median) |> 26 | add_stats(rq_sequence) 27 | 28 | summary(rq_median) 29 | summary(rq_median, se = "boot") 30 | summary(rq_sequence) 31 | 32 | # crq() ------------------------------------------------------------------- 33 | 34 | # An artificial Powell example 35 | set.seed(2345) 36 | x <- sqrt(rnorm(100)^2) 37 | y <- -0.5 + x + (.25 + .25 * x) * rnorm(100) 38 | s <- (y > 0) 39 | yLatent <- y 40 | y <- pmax(0, y) 41 | yc <- rep(0, 100) 42 | tau <- 0.2 43 | f <- crq(Curv(y, yc) ~ x, tau = tau, method = "Pow") 44 | summary(f) 45 | 46 | # crq example with left censoring 47 | set.seed(1968) 48 | n <- 200 49 | x <- rnorm(n) 50 | y <- 5 + x + rnorm(n) 51 | c <- 4 + x + rnorm(n) 52 | d <- (y > c) 53 | f <- crq(survival::Surv(pmax(y, c), d, type = "left") ~ x, method = "Portnoy") 54 | 55 | summary(f) 56 | 57 | # anova() ----------------------------------------------------------------- 58 | 59 | data(barro) 60 | 61 | fit1 <- rq( 62 | y.net ~ lgdp2 + fse2 + gedy2 + Iy2 + gcony2, 63 | data = barro 64 | ) 65 | fit2 <- rq( 66 | y.net ~ lgdp2 + fse2 + gedy2 + Iy2 + gcony2, 67 | data = barro, 68 | tau = .75 69 | ) 70 | fit3 <- rq( 71 | y.net ~ lgdp2 + fse2 + gedy2 + Iy2 + gcony2, 72 | data = barro, 73 | tau = .25 74 | ) 75 | 76 | anova_joint <- anova(fit1, fit2, fit3) 77 | anova_distinct <- anova(fit1, fit2, fit3, joint = FALSE) 78 | 79 | statistics <- statistics |> 80 | add_stats(anova_joint) |> 81 | add_stats(anova_distinct) 82 | 83 | anova_joint 84 | anova_distinct 85 | 86 | # tidy_stats_to_data_frame() ---------------------------------------------- 87 | 88 | df <- tidy_stats_to_data_frame(statistics) 89 | 90 | # write_stats() ----------------------------------------------------------- 91 | 92 | write_test_stats(statistics, "tests/data/quantreg.json") 93 | 94 | # Cleanup ----------------------------------------------------------------- 95 | 96 | rm( 97 | rq_median, rq_sequence, stackloss, engel, fit1, fit2, fit3, anova_joint, 98 | anova_distinct, barro, df, statistics 99 | ) 100 | 
-------------------------------------------------------------------------------- /tests/prep/prep_stats.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | statistics <- list() 4 | 5 | # stats() and stat() ------------------------------------------------------ 6 | 7 | lm1 <- lm(Fertility ~ ., data = swiss) 8 | lm2 <- update(lm1, . ~ . - Examination) 9 | 10 | BF10 <- 1 / exp((BIC(lm2) - BIC(lm1)) / 2) 11 | 12 | BF_stats <- stats( 13 | method = "BF BIC method", 14 | statistics = c( 15 | stat(name = "BF", value = BF10, subscript = "10"), 16 | stat(name = "BF", value = 1 / BF10, subscript = "01") 17 | ) 18 | ) 19 | 20 | statistics <- add_stats( 21 | list = statistics, 22 | output = BF_stats, 23 | notes = "Wagenmakers (2007) method for calculating Bayes factors" 24 | ) 25 | 26 | stats 27 | 28 | # tidy_stats_to_data_frame() ---------------------------------------------- 29 | 30 | df <- tidy_stats_to_data_frame(statistics) 31 | 32 | # write_stats() ----------------------------------------------------------- 33 | 34 | write_test_stats(statistics, "tests/data/stats.json") 35 | 36 | # Cleanup ----------------------------------------------------------------- 37 | 38 | rm(df, statistics, BF10, BF_stats, lm1, lm2) 39 | -------------------------------------------------------------------------------- /tests/testthat.R: -------------------------------------------------------------------------------- 1 | library(testthat) 2 | library(tidystats) 3 | 4 | test_check("tidystats") 5 | -------------------------------------------------------------------------------- /tests/testthat/test_Hmisc.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | library(Hmisc) 4 | 5 | expected_statistics <- read_stats("../data/Hmisc.json") 6 | 7 | # aov() ------------------------------------------------------------------- 8 | 9 | test_that("Hmisc's rcorr works", { 10 | x <- c(-2, -1, 0, 1, 2) 11 | y <- c(4, 1, 0, 1, 4) 12 | z <- c(1, 2, 3, 4, NA) 13 | v <- c(1, 2, 3, 4, 5) 14 | 15 | model <- rcorr(cbind(x, y, z, v)) 16 | 17 | expect_equal_models( 18 | model = model, 19 | expected_tidy_model = expected_statistics$rcorr 20 | ) 21 | }) 22 | -------------------------------------------------------------------------------- /tests/testthat/test_aov.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | expected_statistics <- read_stats("../data/aov.json") 4 | 5 | # aov() ------------------------------------------------------------------- 6 | 7 | test_that("aov works", { 8 | model <- aov(yield ~ block + N * P * K, npk) 9 | 10 | expect_equal_models( 11 | model = model, 12 | expected_tidy_model = expected_statistics$aov 13 | ) 14 | }) 15 | 16 | test_that("aov order works", { 17 | model <- aov(terms(yield ~ block + N * P + K, keep.order = TRUE), npk) 18 | 19 | expect_equal_models( 20 | model = model, 21 | expected_tidy_model = expected_statistics$aov_order 22 | ) 23 | }) 24 | 25 | test_that("aov error works", { 26 | model <- aov(yield ~ N * P * K + Error(block), npk) 27 | 28 | expect_equal_models( 29 | model = model, 30 | expected_tidy_model = expected_statistics$aov_error 31 | ) 32 | }) 33 | -------------------------------------------------------------------------------- 
/tests/testthat/test_confint.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | expected_statistics <- read_stats("../data/confint.json") 4 | 5 | # lm() -------------------------------------------------------------------- 6 | 7 | test_that("confint works", { 8 | fit <- lm(100 / mpg ~ disp + hp + wt + am, data = mtcars) 9 | model <- confint(fit) 10 | 11 | class(model) <- append(class(model), "confint", after = 0) 12 | 13 | expect_equal_models( 14 | model = model, 15 | expected_tidy_model = expected_statistics$CI_fit 16 | ) 17 | }) 18 | 19 | test_that("single coefficient confint works", { 20 | fit <- lm(100 / mpg ~ disp + hp + wt + am, data = mtcars) 21 | model <- confint(fit, "wt") 22 | 23 | class(model) <- append(class(model), "confint", after = 0) 24 | 25 | expect_equal_models( 26 | model = model, 27 | expected_tidy_model = expected_statistics$CI_fit_wt 28 | ) 29 | }) 30 | 31 | test_that("profile likelihood confint works", { 32 | D93 <- tibble::tibble( 33 | counts = c(18, 17, 15, 20, 10, 20, 25, 13, 12), 34 | outcome = gl(3, 1, 9), 35 | treatment = gl(3, 3) 36 | ) 37 | 38 | fit <- glm(counts ~ outcome + treatment, data = D93, family = poisson()) 39 | model <- confint(fit) 40 | 41 | class(model) <- append(class(model), "confint", after = 0) 42 | 43 | expect_equal_models( 44 | model = model, 45 | expected_tidy_model = expected_statistics$CI_glm_D93_MASS 46 | ) 47 | }) 48 | 49 | test_that("asymptotic normality confint works", { 50 | D93 <- tibble::tibble( 51 | counts = c(18, 17, 15, 20, 10, 20, 25, 13, 12), 52 | outcome = gl(3, 1, 9), 53 | treatment = gl(3, 3) 54 | ) 55 | 56 | fit <- glm(counts ~ outcome + treatment, data = D93, family = poisson()) 57 | model <- confint.default(fit) 58 | 59 | class(model) <- append(class(model), "confint", after = 0) 60 | 61 | expect_equal_models( 62 | model = model, 63 | expected_tidy_model = expected_statistics$CI_glm_D93_default 64 | ) 65 | }) 66 | -------------------------------------------------------------------------------- /tests/testthat/test_count_data.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | expected_statistics <- read_stats("../data/count_data.json") 4 | 5 | # count_data() ------------------------------------------------------------ 6 | 7 | test_that("count data without groups works", { 8 | model <- count_data(quote_source) 9 | 10 | expect_equal_models( 11 | model = model, 12 | expected_tidy_model = expected_statistics$no_group 13 | ) 14 | }) 15 | 16 | test_that("count data with one group works", { 17 | model <- count_data(quote_source, source) 18 | 19 | expect_equal_models( 20 | model = model, 21 | expected_tidy_model = expected_statistics$single_group 22 | ) 23 | }) 24 | 25 | test_that("count data with two groups works", { 26 | model <- count_data(quote_source, source, sex, pct = TRUE) 27 | 28 | expect_equal_models( 29 | model = model, 30 | expected_tidy_model = expected_statistics$two_groups 31 | ) 32 | }) 33 | 34 | test_that("grouped count data with one group works", { 35 | model <- quote_source |> 36 | dplyr::group_by(source) |> 37 | count_data(sex) 38 | 39 | expect_equal_models( 40 | model = model, 41 | expected_tidy_model = expected_statistics$grouped_group 42 | ) 43 | }) 44 | 45 | test_that("grouped count data with one group without missings works", { 46 | model <- quote_source |> 47 | 
dplyr::group_by(source) |> 48 | count_data(sex, na.rm = TRUE) 49 | 50 | expect_equal_models( 51 | model = model, 52 | expected_tidy_model = expected_statistics$grouped_group_na_rm 53 | ) 54 | }) 55 | -------------------------------------------------------------------------------- /tests/testthat/test_describe_data.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | expected_statistics <- read_stats("../data/describe_data.json") 4 | 5 | # describe_data() --------------------------------------------------------- 6 | 7 | test_that("describe data works", { 8 | model <- describe_data(quote_source, response) 9 | 10 | expect_equal_models( 11 | model = model, 12 | expected_tidy_model = expected_statistics$single_var 13 | ) 14 | }) 15 | 16 | test_that("describe data with one group works", { 17 | model <- quote_source |> 18 | dplyr::group_by(source) |> 19 | describe_data(response) 20 | 21 | expect_equal_models( 22 | model = model, 23 | expected_tidy_model = expected_statistics$single_var_w_group 24 | ) 25 | }) 26 | 27 | test_that("multiple vars describe data works", { 28 | model <- describe_data(quote_source, response, age) 29 | 30 | expect_equal_models( 31 | model = model, 32 | expected_tidy_model = expected_statistics$multiple_vars 33 | ) 34 | }) 35 | 36 | test_that("describe data with multiple groups works", { 37 | model <- quote_source |> 38 | dplyr::group_by(source, sex) |> 39 | describe_data(response) 40 | 41 | expect_equal_models( 42 | model = model, 43 | expected_tidy_model = expected_statistics$single_var_w_groups 44 | ) 45 | }) 46 | 47 | test_that("describe data with multiple groups without missings works", { 48 | model <- quote_source |> 49 | dplyr::group_by(source, sex) |> 50 | describe_data(response, na.rm = FALSE) 51 | 52 | expect_equal_models( 53 | model = model, 54 | expected_tidy_model = expected_statistics$single_var_w_groups_wo_na 55 | ) 56 | }) 57 | 58 | test_that("describe data with multiple vars and a group works", { 59 | model <- quote_source |> 60 | dplyr::group_by(source) |> 61 | describe_data(response, age) 62 | 63 | expect_equal_models( 64 | model = model, 65 | expected_tidy_model = expected_statistics$multiple_vars_w_group 66 | ) 67 | }) 68 | 69 | test_that("describe data with subset works", { 70 | model <- describe_data(quote_source, response, short = TRUE) 71 | 72 | expect_equal_models( 73 | model = model, 74 | expected_tidy_model = expected_statistics$single_var_subset 75 | ) 76 | }) 77 | -------------------------------------------------------------------------------- /tests/testthat/test_effectsize.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | library(effectsize) 4 | expected_statistics <- read_stats("../data/effectsize.json") 5 | 6 | # cohens_d() -------------------------------------------------------------- 7 | 8 | test_that("effectsize's Cohen's d works", { 9 | model <- cohens_d(mpg ~ am, data = mtcars) 10 | 11 | expect_equal_models( 12 | model = model, 13 | expected_tidy_model = expected_statistics$cohens_d 14 | ) 15 | }) 16 | 17 | test_that("effectsize's Cohen's d not pooled works", { 18 | model <- cohens_d(mpg ~ am, data = mtcars, pooled_sd = FALSE) 19 | 20 | expect_equal_models( 21 | model = model, 22 | expected_tidy_model = expected_statistics$cohens_d_not_pooled 23 | ) 24 | }) 25 | 26 | test_that("effectsize's Cohen's 
d mu works", { 27 | model <- cohens_d(mpg ~ am, data = mtcars, mu = -5) 28 | 29 | expect_equal_models( 30 | model = model, 31 | expected_tidy_model = expected_statistics$cohens_d_mu 32 | ) 33 | }) 34 | 35 | test_that("effectsize's Cohen's d less works", { 36 | model <- cohens_d(mpg ~ am, data = mtcars, alternative = "less") 37 | 38 | expect_equal_models( 39 | model = model, 40 | expected_tidy_model = expected_statistics$cohens_d_less 41 | ) 42 | }) 43 | 44 | test_that("effectsize's Cohen's d one sample works", { 45 | model <- cohens_d(wt ~ 1, data = mtcars) 46 | 47 | expect_equal_models( 48 | model = model, 49 | expected_tidy_model = expected_statistics$cohens_d_one_sample 50 | ) 51 | }) 52 | 53 | test_that("effectsize's Cohen's d paired works", { 54 | model <- cohens_d( 55 | Pair(extra[group == 1], extra[group == 2]) ~ 1, 56 | data = sleep 57 | ) 58 | 59 | expect_equal_models( 60 | model = model, 61 | expected_tidy_model = expected_statistics$cohens_d_paired 62 | ) 63 | }) 64 | 65 | # hedges_g() -------------------------------------------------------------- 66 | 67 | test_that("effectsize's Hedges' g works", { 68 | model <- hedges_g(mpg ~ am, data = mtcars) 69 | 70 | expect_equal_models( 71 | model = model, 72 | expected_tidy_model = expected_statistics$hedges_g 73 | ) 74 | }) 75 | 76 | test_that("effectsize's Hedges' g not pooled works", { 77 | model <- hedges_g(mpg ~ am, data = mtcars, pooled_sd = FALSE) 78 | 79 | expect_equal_models( 80 | model = model, 81 | expected_tidy_model = expected_statistics$hedges_g_not_pooled 82 | ) 83 | }) 84 | 85 | test_that("effectsize's Hedges' g mu works", { 86 | model <- hedges_g(mpg ~ am, data = mtcars, mu = -5) 87 | 88 | expect_equal_models( 89 | model = model, 90 | expected_tidy_model = expected_statistics$hedges_g_mu 91 | ) 92 | }) 93 | 94 | test_that("effectsize's Hedges' g less works", { 95 | model <- hedges_g(mpg ~ am, data = mtcars, alternative = "less") 96 | 97 | expect_equal_models( 98 | model = model, 99 | expected_tidy_model = expected_statistics$hedges_g_less 100 | ) 101 | }) 102 | 103 | test_that("effectsize's Hedges' g one sample works", { 104 | model <- hedges_g(wt ~ 1, data = mtcars) 105 | 106 | expect_equal_models( 107 | model = model, 108 | expected_tidy_model = expected_statistics$hedges_g_one_sample 109 | ) 110 | }) 111 | 112 | test_that("effectsize's Hedges' g one sample works", { 113 | model <- hedges_g( 114 | Pair(extra[group == 1], extra[group == 2]) ~ 1, 115 | data = sleep 116 | ) 117 | 118 | expect_equal_models( 119 | model = model, 120 | expected_tidy_model = expected_statistics$hedges_g_paired 121 | ) 122 | }) 123 | 124 | # glass_delta() -------------------------------------------------------------- 125 | 126 | test_that("effectsize's Glass's delta works", { 127 | model <- glass_delta(mpg ~ am, data = mtcars) 128 | 129 | expect_equal_models( 130 | model = model, 131 | expected_tidy_model = expected_statistics$glass_delta 132 | ) 133 | }) 134 | 135 | test_that("effectsize's Glass's delta mu works", { 136 | model <- glass_delta(mpg ~ am, data = mtcars, mu = -5) 137 | 138 | expect_equal_models( 139 | model = model, 140 | expected_tidy_model = expected_statistics$glass_delta_mu 141 | ) 142 | }) 143 | 144 | test_that("effectsize's Glass's delta less works", { 145 | model <- glass_delta( 146 | mpg ~ am, 147 | data = mtcars, alternative = "less" 148 | ) 149 | 150 | expect_equal_models( 151 | model = model, 152 | expected_tidy_model = expected_statistics$glass_delta_less 153 | ) 154 | }) 155 | 
-------------------------------------------------------------------------------- /tests/testthat/test_effsize.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | expected_statistics <- read_stats("../data/effsize.json") 4 | 5 | # cohen.d() --------------------------------------------------------------- 6 | 7 | test_that("effsize's Cohen's d works", { 8 | set.seed(1) 9 | 10 | treatment <- rnorm(100, mean = 10) 11 | control <- rnorm(100, mean = 12) 12 | d <- c(treatment, control) 13 | f <- rep(c("Treatment", "Control"), each = 100) 14 | 15 | model <- effsize::cohen.d(d ~ f) 16 | 17 | expect_equal_models( 18 | model = model, 19 | expected_tidy_model = expected_statistics$cohen_d 20 | ) 21 | }) 22 | 23 | test_that("effsize's Hedges' g works", { 24 | set.seed(1) 25 | 26 | treatment <- rnorm(100, mean = 10) 27 | control <- rnorm(100, mean = 12) 28 | d <- c(treatment, control) 29 | f <- rep(c("Treatment", "Control"), each = 100) 30 | 31 | model <- effsize::cohen.d(d ~ f, hedges.correction = TRUE) 32 | 33 | expect_equal_models( 34 | model = model, 35 | expected_tidy_model = expected_statistics$hedges_g 36 | ) 37 | }) 38 | 39 | test_that("effsize's VDA works", { 40 | set.seed(1) 41 | 42 | treatment <- rnorm(100, mean = 10) 43 | control <- rnorm(100, mean = 12) 44 | d <- c(treatment, control) 45 | f <- rep(c("Treatment", "Control"), each = 100) 46 | 47 | model <- effsize::VD.A(d ~ f) 48 | 49 | expect_equal_models( 50 | model = model, 51 | expected_tidy_model = expected_statistics$vda 52 | ) 53 | }) 54 | 55 | test_that("effsize's Cliff's delta works", { 56 | treatment <- c(10, 10, 20, 20, 20, 30, 30, 30, 40, 50) 57 | control <- c(10, 20, 30, 40, 40, 50) 58 | 59 | model <- effsize::cliff.delta(treatment, control, return.dm = TRUE) 60 | 61 | expect_equal_models( 62 | model = model, 63 | expected_tidy_model = expected_statistics$cliffs_delta 64 | ) 65 | }) 66 | -------------------------------------------------------------------------------- /tests/testthat/test_irr.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | library(irr) 4 | 5 | expected_statistics <- read_stats("../data/irr.json") 6 | 7 | # aov() ------------------------------------------------------------------- 8 | 9 | test_that("irr's icc twoway agreement works", { 10 | data(anxiety) 11 | 12 | model <- icc(anxiety, model = "twoway", type = "agreement") 13 | 14 | expect_equal_models( 15 | model = model, 16 | expected_tidy_model = expected_statistics$ICC_anxiety 17 | ) 18 | }) 19 | 20 | test_that("irrs's icc twoway consistency works", { 21 | set.seed(1) 22 | 23 | r1 <- round(rnorm(20, 10, 4)) 24 | r2 <- round(r1 + 10 + rnorm(20, 0, 2)) 25 | r3 <- round(r1 + 20 + rnorm(20, 0, 2)) 26 | 27 | model <- icc(cbind(r1, r2, r3), "twoway") 28 | 29 | expect_equal_models( 30 | model = model, 31 | expected_tidy_model = expected_statistics$ICC_high_consistency 32 | ) 33 | }) 34 | -------------------------------------------------------------------------------- /tests/testthat/test_lm.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | expected_statistics <- read_stats("../data/lm.json") 4 | 5 | # lm() -------------------------------------------------------------------- 6 | 7 | test_that("lm works", { 8 | 
ctl <- c(4.17, 5.58, 5.18, 6.11, 4.50, 4.61, 5.17, 4.53, 5.33, 5.14) 9 | trt <- c(4.81, 4.17, 4.41, 3.59, 5.87, 3.83, 6.03, 4.89, 4.32, 4.69) 10 | group <- gl(2, 10, 20, labels = c("Ctl", "Trt")) 11 | weight <- c(ctl, trt) 12 | 13 | model <- lm(weight ~ group) 14 | 15 | expect_equal_models( 16 | model = model, 17 | expected_tidy_model = expected_statistics$lm 18 | ) 19 | }) 20 | 21 | test_that("lm without an intercept works", { 22 | ctl <- c(4.17, 5.58, 5.18, 6.11, 4.50, 4.61, 5.17, 4.53, 5.33, 5.14) 23 | trt <- c(4.81, 4.17, 4.41, 3.59, 5.87, 3.83, 6.03, 4.89, 4.32, 4.69) 24 | group <- gl(2, 10, 20, labels = c("Ctl", "Trt")) 25 | weight <- c(ctl, trt) 26 | 27 | model <- lm(weight ~ group - 1) 28 | 29 | expect_equal_models( 30 | model = model, 31 | expected_tidy_model = expected_statistics$lm_wo_intercept 32 | ) 33 | }) 34 | 35 | test_that("lm anova works", { 36 | fit <- lm(sr ~ ., data = LifeCycleSavings) 37 | 38 | model <- anova(fit) 39 | 40 | expect_equal_models( 41 | model = model, 42 | expected_tidy_model = expected_statistics$anova_lm 43 | ) 44 | }) 45 | 46 | test_that("lm model comparison anova works", { 47 | fit <- lm(sr ~ ., data = LifeCycleSavings) 48 | fit0 <- lm(sr ~ 1, data = LifeCycleSavings) 49 | fit1 <- update(fit0, . ~ . + pop15) 50 | fit2 <- update(fit1, . ~ . + pop75) 51 | fit3 <- update(fit2, . ~ . + dpi) 52 | fit4 <- update(fit3, . ~ . + ddpi) 53 | 54 | model <- anova(fit0, fit1, fit2, fit3, fit4, test = "F") 55 | 56 | expect_equal_models( 57 | model = model, 58 | expected_tidy_model = expected_statistics$anova_lm_fits 59 | ) 60 | }) 61 | 62 | test_that("lm model comparison anova in another order works", { 63 | fit0 <- lm(sr ~ 1, data = LifeCycleSavings) 64 | fit1 <- update(fit0, . ~ . + pop15) 65 | fit2 <- update(fit1, . ~ . + pop75) 66 | fit3 <- update(fit2, . ~ . + dpi) 67 | fit4 <- update(fit3, . ~ . + ddpi) 68 | 69 | model <- anova(fit4, fit2, fit0, test = "F") 70 | 71 | expect_equal_models( 72 | model = model, 73 | expected_tidy_model = expected_statistics$anova_lm_order 74 | ) 75 | }) 76 | 77 | test_that("lm model comparison anova chi-squared works", { 78 | fit0 <- lm(sr ~ 1, data = LifeCycleSavings) 79 | fit1 <- update(fit0, . ~ . + pop15) 80 | fit2 <- update(fit1, . ~ . + pop75) 81 | fit3 <- update(fit2, . ~ . + dpi) 82 | fit4 <- update(fit3, . ~ . + ddpi) 83 | 84 | model <- anova(fit4, fit2, fit0, test = "Chisq") 85 | 86 | expect_equal_models( 87 | model = model, 88 | expected_tidy_model = expected_statistics$anova_lm_chisq 89 | ) 90 | }) 91 | 92 | test_that("lm model comparison anova Cp works", { 93 | fit0 <- lm(sr ~ 1, data = LifeCycleSavings) 94 | fit1 <- update(fit0, . ~ . + pop15) 95 | fit2 <- update(fit1, . ~ . + pop75) 96 | fit3 <- update(fit2, . ~ . + dpi) 97 | fit4 <- update(fit3, . ~ . 
+ ddpi) 98 | 99 | model <- anova(fit4, fit2, fit0, test = "Cp") 100 | 101 | expect_equal_models( 102 | model = model, 103 | expected_tidy_model = expected_statistics$anova_lm_cp 104 | ) 105 | }) 106 | -------------------------------------------------------------------------------- /tests/testthat/test_lme4.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | expected_statistics <- read_stats("../data/lme4.json") 4 | 5 | # lmer() ------------------------------------------------------------------ 6 | 7 | test_that("lme4 works", { 8 | model <- lme4::lmer(Reaction ~ Days + (1 | Subject), lme4::sleepstudy) 9 | 10 | expect_equal_models( 11 | model = model, 12 | expected_tidy_model = expected_statistics$lme4 13 | ) 14 | }) 15 | 16 | test_that("lme4 ML works", { 17 | model <- lme4::lmer(Reaction ~ Days + (1 | Subject), lme4::sleepstudy, 18 | REML = FALSE 19 | ) 20 | 21 | expect_equal_models( 22 | model = model, 23 | expected_tidy_model = expected_statistics$lme4_ML 24 | ) 25 | }) 26 | 27 | test_that("lme4 slopes works", { 28 | model <- lme4::lmer(Reaction ~ Days + (Days || Subject), lme4::sleepstudy) 29 | 30 | expect_equal_models( 31 | model = model, 32 | expected_tidy_model = expected_statistics$lme4_slopes 33 | ) 34 | }) 35 | 36 | # anova.merMod() ---------------------------------------------------------- 37 | 38 | test_that("lme4 anova works", { 39 | lme4 <- lme4::lmer(Reaction ~ Days + (1 | Subject), lme4::sleepstudy) 40 | model <- anova(lme4) 41 | 42 | expect_equal_models( 43 | model = model, 44 | expected_tidy_model = expected_statistics$anova_lme4 45 | ) 46 | }) 47 | 48 | test_that("lme4 anova model comparison works", { 49 | lme4 <- lme4::lmer(Reaction ~ Days + (1 | Subject), lme4::sleepstudy) 50 | lme4_slopes <- lme4::lmer( 51 | Reaction ~ Days + (Days || Subject), 52 | lme4::sleepstudy 53 | ) 54 | model <- anova(lme4, lme4_slopes) 55 | 56 | expect_equal_models( 57 | model = model, 58 | expected_tidy_model = expected_statistics$anova_models 59 | ) 60 | }) 61 | -------------------------------------------------------------------------------- /tests/testthat/test_lmerTest.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | expected_statistics <- read_stats("../data/lmerTest.json") 4 | 5 | # lmer() ------------------------------------------------------------------ 6 | 7 | test_that("lmerTest 1 works", { 8 | model <- lmerTest::lmer( 9 | Reaction ~ Days + (Days | Subject), lme4::sleepstudy 10 | ) 11 | 12 | expect_equal_models( 13 | model = model, 14 | expected_tidy_model = expected_statistics$lmerTest1 15 | ) 16 | }) 17 | 18 | test_that("lmerTest 2 works", { 19 | model <- lmerTest::lmer( 20 | Informed.liking ~ Gender + Information * Product + (1 | Consumer) + 21 | (1 | Consumer:Product), 22 | data = lmerTest::ham 23 | ) 24 | 25 | expect_equal_models( 26 | model = model, 27 | expected_tidy_model = expected_statistics$lmerTest2 28 | ) 29 | }) 30 | 31 | test_that("lmerTest ML works", { 32 | model <- lmerTest::lmer( 33 | Reaction ~ Days + (Days | Subject), sleepstudy, 34 | REML = FALSE 35 | ) 36 | 37 | expect_equal_models( 38 | model = model, 39 | expected_tidy_model = expected_statistics$lmerTest_ML 40 | ) 41 | }) 42 | 43 | # anova.merMod() ---------------------------------------------------------- 44 | 45 | test_that("lmerTest anova works", { 46 | m <- 
lmerTest::lmer(Reaction ~ Days + (Days | Subject), lme4::sleepstudy) 47 | model <- anova(m) 48 | 49 | expect_equal_models( 50 | model = model, 51 | expected_tidy_model = expected_statistics$anova_lmerTest 52 | ) 53 | }) 54 | 55 | test_that("lmerTest anova lme4 works", { 56 | m <- lmerTest::lmer(Reaction ~ Days + (Days | Subject), lme4::sleepstudy) 57 | model <- anova(m, ddf = "lme4") 58 | 59 | expect_equal_models( 60 | model = model, 61 | expected_tidy_model = expected_statistics$anova_lmerTest_lme4 62 | ) 63 | }) 64 | 65 | test_that("lmerTest anova fit works", { 66 | m0 <- lmerTest::lmer(Reaction ~ Days + (1 | Subject), sleepstudy) 67 | m <- lmerTest::lmer(Reaction ~ Days + (Days | Subject), sleepstudy) 68 | model <- anova(m0, m) 69 | 70 | expect_equal_models( 71 | model = model, 72 | expected_tidy_model = expected_statistics$anova_lmerTest_fit 73 | ) 74 | }) 75 | -------------------------------------------------------------------------------- /tests/testthat/test_main.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | expected_statistics <- read_stats("../data/main.json") 4 | 5 | tolerance <- 0.001 6 | 7 | # add_stats() ------------------------------------------------------------- 8 | 9 | test_that("the t-test in main works", { 10 | sleep_wide <- reshape( 11 | sleep, 12 | direction = "wide", 13 | idvar = "ID", 14 | timevar = "group", 15 | sep = "_" 16 | ) 17 | sleep_t_test <- t.test(sleep_wide$extra_1, sleep_wide$extra_2, paired = TRUE) 18 | 19 | statistics <- add_stats(list(), sleep_t_test, type = "primary") 20 | 21 | statistics$sleep_t_test$package <- NULL 22 | expected_statistics$sleep_t_test$package <- NULL 23 | 24 | expect_equal( 25 | object = statistics$sleep_t_test, 26 | expected = expected_statistics$sleep_t_test, 27 | tolerance = tolerance 28 | ) 29 | }) 30 | 31 | test_that("the linear regression in main works", { 32 | D9 <- tibble::tibble( 33 | group = gl(2, 10, 20, labels = c("Ctl", "Trt")), 34 | weight = c( 35 | 4.17, 5.58, 5.18, 6.11, 4.50, 4.61, 5.17, 4.53, 5.33, 5.14, 4.81, 36 | 4.17, 4.41, 3.59, 5.87, 3.83, 6.03, 4.89, 4.32, 4.69 37 | ) 38 | ) 39 | 40 | D9_lm <- lm(weight ~ group, data = D9) 41 | 42 | statistics <- add_stats(list(), D9_lm, preregistered = FALSE) 43 | 44 | statistics$D9_lm$package <- NULL 45 | expected_statistics$D9_lm$package <- NULL 46 | 47 | expect_equal( 48 | object = statistics$D9_lm, 49 | expected = expected_statistics$D9_lm, 50 | tolerance = tolerance 51 | ) 52 | }) 53 | 54 | test_that("the ANOVA in main works", { 55 | npk_aov <- aov(yield ~ block + N * P * K, npk) 56 | 57 | statistics <- add_stats(list(), npk_aov, notes = "An ANOVA example") 58 | 59 | statistics$npk_aov$package <- NULL 60 | expected_statistics$npk_aov$package <- NULL 61 | 62 | expect_equal( 63 | object = statistics$npk_aov, 64 | expected = expected_statistics$npk_aov, 65 | tolerance = tolerance 66 | ) 67 | }) 68 | -------------------------------------------------------------------------------- /tests/testthat/test_pairwise_htest.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | expected_statistics <- read_stats("../data/pairwise_htest.json") 4 | 5 | # pairwise.t.test() ------------------------------------------------------- 6 | 7 | test_that("pairwise t-tests with pooled SD works", { 8 | Month <- factor(airquality$Month, labels = 
month.abb[5:9]) 9 | 10 | model <- pairwise.t.test(airquality$Ozone, Month) 11 | 12 | expect_equal_models( 13 | model = model, 14 | expected_tidy_model = expected_statistics$pairwise_t_test 15 | ) 16 | }) 17 | 18 | test_that("pairwise t-tests with non-pooled SD works", { 19 | Month <- factor(airquality$Month, labels = month.abb[5:9]) 20 | 21 | model <- pairwise.t.test( 22 | airquality$Ozone, 23 | Month, 24 | p.adjust.method = "bonf", 25 | pool.sd = FALSE 26 | ) 27 | 28 | expect_equal_models( 29 | model = model, 30 | expected_tidy_model = expected_statistics$pairwise_t_test_nonpooled 31 | ) 32 | }) 33 | 34 | test_that("pairwise paired t-tests works", { 35 | model <- pairwise.t.test( 36 | c(1, 2, 3, 1, 2, 4), 37 | c(1, 1, 2, 2, 3, 3), 38 | paired = TRUE 39 | ) 40 | 41 | expect_equal_models( 42 | model = model, 43 | expected_tidy_model = expected_statistics$pairwise_t_test_paired 44 | ) 45 | }) 46 | 47 | # pairwise.prop.test() ---------------------------------------------------- 48 | 49 | test_that("pairwise comparison of proportions works", { 50 | smokers <- c(83, 90, 129, 70) 51 | patients <- c(86, 93, 136, 82) 52 | 53 | model <- suppressWarnings(pairwise.prop.test(smokers, patients)) 54 | 55 | expect_equal_models( 56 | model = model, 57 | expected_tidy_model = expected_statistics$pairwise_prop_test 58 | ) 59 | }) 60 | 61 | # pairwise.wilcox.test() -------------------------------------------------- 62 | 63 | test_that("pairwise Wilcoxon rank sum exact test works", { 64 | model <- pairwise.wilcox.test( 65 | c(1, 2, 3, 4, 5, 6, 7, 8, 9, 11), 66 | c(1, 1, 1, 1, 1, 2, 2, 2, 2, 2) 67 | ) 68 | 69 | expect_equal_models( 70 | model = model, 71 | expected_tidy_model = expected_statistics$pairwise_wilcox_test 72 | ) 73 | }) 74 | 75 | test_that("pairwise Wilcoxon signed rank exact test works", { 76 | model <- pairwise.wilcox.test( 77 | PlantGrowth$weight, 78 | PlantGrowth$group, 79 | p.adjust.method = "BH", 80 | paired = TRUE 81 | ) 82 | 83 | expect_equal_models( 84 | model = model, 85 | expected_tidy_model = expected_statistics$pairwise_wilcox_test_paired 86 | ) 87 | }) 88 | -------------------------------------------------------------------------------- /tests/testthat/test_stats.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | expected_statistics <- read_stats("../data/stats.json") 4 | 5 | # custom_stat() and custom_stats() ---------------------------------------- 6 | 7 | test_that("BF stats works", { 8 | statistics <- list() 9 | 10 | lm1 <- lm(Fertility ~ ., data = swiss) 11 | lm2 <- update(lm1, . ~ . 
- Examination) 12 | 13 | BF10 <- 1 / exp((BIC(lm2) - BIC(lm1)) / 2) 14 | 15 | BF_stats <- custom_stats( 16 | method = "BF BIC method", 17 | statistics = c( 18 | custom_stat(name = "BF", value = BF10, subscript = "10"), 19 | custom_stat(name = "BF", value = 1 / BF10, subscript = "01") 20 | ) 21 | ) 22 | 23 | statistics <- add_stats( 24 | list = statistics, 25 | output = BF_stats, 26 | notes = "Wagenmakers (2007) method for calculating Bayes factors" 27 | ) 28 | statistics$BF_stats$statistics[[1]] 29 | expected_statistics$BF_stats$statistics[[1]] 30 | 31 | expect_equivalent( 32 | statistics$BF_stats, 33 | expected_statistics$BF_stats, 34 | tolerance = 0.00001 35 | ) 36 | }) 37 | -------------------------------------------------------------------------------- /tests/testthat/test_tidy_stats_to_data_frame.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | expected_df <- readr::read_csv("../data/main_df.csv") 4 | 5 | # Drop the spec_tbl_df class 6 | expected_df <- expected_df[] 7 | 8 | tolerance <- 0.001 9 | 10 | # tidy_stats_to_data_frame() ---------------------------------------------- 11 | 12 | test_that("tidy stats to data frame works", { 13 | statistics <- read_stats("../data/main.json") 14 | 15 | df <- tidy_stats_to_data_frame(statistics) 16 | 17 | expect_equal(df, expected_df, tolerance = tolerance) 18 | }) 19 | -------------------------------------------------------------------------------- /tests/work-in-progress/prep/prep_misc.R: -------------------------------------------------------------------------------- 1 | # MANOVA 2 | 3 | data <- iris 4 | 5 | model7_1 <- summary(manova(cbind(Sepal.Length, Petal.Length) ~ Species, 6 | data = iris 7 | ), test = "Roy") 8 | 9 | model7_1 10 | 11 | model7_2 <- summary(manova(cbind(Sepal.Length, Petal.Length) ~ Species, 12 | data = iris 13 | ), test = "Pillai") 14 | model7_2 15 | 16 | model7_3 <- summary(manova(cbind(Sepal.Length, Petal.Length) ~ Species, 17 | data = iris 18 | ), test = "Wilks") 19 | model7_3 20 | 21 | model7_4 <- summary(manova(cbind(Sepal.Length, Petal.Length) ~ Species * 22 | Petal.Width, data = iris), test = "Hotelling-Lawley") 23 | model7_4 24 | 25 | ## Set orthogonal contrasts. 
26 | op <- options(contrasts = c("contr.helmert", "contr.poly")) 27 | ## Fake a 2nd response variable 28 | npk2 <- within(npk, foo <- rnorm(24)) 29 | (npk2.aov <- manova(cbind(yield, foo) ~ block + N * P * K, npk2)) 30 | summary(npk2.aov) 31 | (npk2.aovE <- manova(cbind(yield, foo) ~ N * P * K + Error(block), npk2)) 32 | summary(npk2.aovE) 33 | 34 | # Analysis: ppcor’s pcor.test() ------------------------------------------- 35 | 36 | # Load package 37 | library(ppcor) 38 | 39 | # Get data 40 | y.data <- data.frame( 41 | hl = c(7, 15, 19, 15, 21, 22, 57, 15, 20, 18), 42 | disp = c(0, 0.964, 0, 0, 0.921, 0, 0, 1.006, 0, 1.011), 43 | deg = c(9, 2, 3, 4, 1, 3, 1, 3, 6, 1), 44 | BC = c( 45 | 1.78e-02, 1.05e-06, 1.37e-05, 7.18e-03, 0.00e+00, 0.00e+00, 0.00e+00, 46 | 4.48e-03, 2.10e-06, 0.00e+00 47 | ) 48 | ) 49 | 50 | # Run analysis 51 | pcor_correlation <- pcor.test(y.data$hl, y.data$disp, y.data[, c("deg", "BC")]) 52 | pcor_correlation 53 | -------------------------------------------------------------------------------- /tests/work-in-progress/prep/prep_nlme.R: -------------------------------------------------------------------------------- 1 | # Setup ------------------------------------------------------------------- 2 | 3 | library(nlme) 4 | 5 | statistics <- list() 6 | 7 | # lme() ------------------------------------------------------------------- 8 | 9 | lme_fm1 <- lme( 10 | distance ~ age, 11 | random = ~ 1 + age | Subject / Sex, 12 | data = Orthodont 13 | ) 14 | lme_fm2 <- lme(distance ~ age + Sex, data = Orthodont, random = ~1) 15 | 16 | statistics <- statistics |> 17 | add_stats(lme_fm1) |> 18 | add_stats(lme_fm2) 19 | 20 | summary(lme_fm1) 21 | summary(lme_fm2) 22 | 23 | 24 | lme( 25 | distance ~ age, 26 | random = ~ 1 + age | Subject / Sex, 27 | data = Orthodont 28 | ) 29 | 30 | 31 | 32 | 33 | # nlme() ------------------------------------------------------------------ 34 | 35 | # Run models 36 | nlme_fm1 <- nlme(height ~ SSasymp(age, Asym, R0, lrc), 37 | data = Loblolly, 38 | fixed = Asym + R0 + lrc ~ 1, random = Asym ~ 1, 39 | start = c(Asym = 103, R0 = -8.5, lrc = -3.3) 40 | ) 41 | nlme_fm2 <- update(nlme_fm1, random = pdDiag(Asym + lrc ~ 1)) 42 | 43 | summary(nlme_fm1) 44 | summary(nlme_fm2) 45 | 46 | # Tidy stats 47 | temp <- tidy_stats(nlme_fm1) 48 | temp <- tidy_stats(nlme_fm2) 49 | 50 | # Add stats 51 | results <- results |> 52 | add_stats(nlme_fm1) |> 53 | add_stats(nlme_fm2) 54 | 55 | # gls() ------------------------------------------------------------------- 56 | 57 | # Run models 58 | gls_fm1 <- gls(follicles ~ sin(2 * pi * Time) + cos(2 * pi * Time), Ovary, 59 | correlation = corAR1(form = ~ 1 | Mare) 60 | ) 61 | gls_fm2 <- update(gls_fm1, weights = varPower()) 62 | 63 | summary(gls_fm1) 64 | summary(gls_fm2) 65 | 66 | # Tidy stats 67 | temp <- tidy_stats(gls_fm1) 68 | temp <- tidy_stats(gls_fm2) 69 | 70 | # Add stats 71 | results <- results |> 72 | add_stats(gls_fm1) |> 73 | add_stats(gls_fm2) 74 | 75 | # anova() ----------------------------------------------------------------- 76 | 77 | # Run models 78 | fm1 <- lme(distance ~ age, Orthodont, random = ~ age | Subject) 79 | anova_fm1 <- anova(fm1) 80 | anova_fm1 81 | 82 | fm2 <- update(fm1, random = pdDiag(~age)) 83 | anova_fm1_fm2 <- anova(fm1, fm2) 84 | anova_fm1_fm2 85 | 86 | fm1Orth.gls <- gls(distance ~ Sex * I(age - 11), Orthodont, 87 | correlation = corSymm(form = ~ 1 | Subject), 88 | weights = varIdent(form = ~ 1 | age) 89 | ) 90 | fm2Orth.gls <- update(fm1Orth.gls, corr = corCompSymm(form = ~ 1 | Subject)) 91 | 
anova_fm10_fm20 <- anova(fm1Orth.gls, fm2Orth.gls) 92 | anova_fm10_fm20 93 | 94 | fm3Orth.gls <- update(fm2Orth.gls, weights = NULL) 95 | anova_fm20_fm30 <- anova(fm2Orth.gls, fm3Orth.gls) 96 | anova_fm20_fm30 97 | 98 | fm4Orth.gls <- update(fm3Orth.gls, weights = varIdent(form = ~ 1 | Sex)) 99 | anova_fm30_fm40 <- anova(fm3Orth.gls, fm4Orth.gls) 100 | anova_fm30_fm40 101 | 102 | fm3Orth.lme <- lme(distance ~ Sex * I(age - 11), 103 | data = Orthodont, 104 | random = ~ I(age - 11) | Subject, 105 | weights = varIdent(form = ~ 1 | Sex) 106 | ) 107 | 108 | anova_fm30_fm40_no_test <- anova(fm3Orth.lme, fm4Orth.gls, test = FALSE) 109 | anova_fm30_fm40_no_test 110 | 111 | op <- options(contrasts = c("contr.treatment", "contr.poly")) 112 | 113 | fm1BW.lme <- lme(weight ~ Time * Diet, BodyWeight, random = ~Time) 114 | fm2BW.lme <- update(fm1BW.lme, weights = varPower()) 115 | 116 | anova_fm2BW <- anova(fm2BW.lme, L = c("Time:Diet2" = 1, "Time:Diet3" = -1)) 117 | anova_fm2BW 118 | 119 | fm1Theo.lis <- nlsList(conc ~ SSfol(Dose, Time, lKe, lKa, lCl), data = Theoph) 120 | fm1Theo.nlme <- nlme(fm1Theo.lis) 121 | fm2Theo.nlme <- update(fm1Theo.nlme, random = pdDiag(lKe + lKa + lCl ~ 1)) 122 | fm3Theo.nlme <- update(fm2Theo.nlme, random = pdDiag(lKa + lCl ~ 1)) 123 | 124 | # Comparing the 3 nlme models 125 | anova_fm1_fm3_fm2 <- anova(fm1Theo.nlme, fm3Theo.nlme, fm2Theo.nlme) 126 | anova_fm1_fm3_fm2 127 | 128 | options(op) # (set back to previous state) 129 | 130 | # Tidy stats 131 | temp <- tidy_stats(anova_fm1) 132 | temp <- tidy_stats(anova_fm1_fm2) 133 | temp <- tidy_stats(anova_fm10_fm20) 134 | temp <- tidy_stats(anova_fm20_fm30) 135 | temp <- tidy_stats(anova_fm30_fm40) 136 | temp <- tidy_stats(anova_fm30_fm40_no_test) 137 | temp <- tidy_stats(anova_fm2BW) 138 | temp <- tidy_stats(anova_fm1_fm3_fm2) 139 | 140 | # Add stats 141 | results <- results %>% 142 | add_stats(anova_fm1) %>% 143 | add_stats(anova_fm1_fm2) %>% 144 | add_stats(anova_fm10_fm20) %>% 145 | add_stats(anova_fm20_fm30) %>% 146 | add_stats(anova_fm30_fm40) %>% 147 | add_stats(anova_fm30_fm40_no_test) %>% 148 | add_stats(anova_fm2BW) %>% 149 | add_stats(anova_fm1_fm3_fm2) 150 | 151 | # Write stats ------------------------------------------------------------- 152 | 153 | write_stats(results, "inst/test_data/nlme.json") 154 | -------------------------------------------------------------------------------- /tidystats-R-package.Rproj: -------------------------------------------------------------------------------- 1 | Version: 1.0 2 | 3 | RestoreWorkspace: Default 4 | SaveWorkspace: Default 5 | AlwaysSaveHistory: Default 6 | 7 | EnableCodeIndexing: Yes 8 | UseSpacesForTab: Yes 9 | NumSpacesForTab: 2 10 | Encoding: UTF-8 11 | 12 | RnwWeave: Sweave 13 | LaTeX: pdfLaTeX 14 | 15 | BuildType: Package 16 | PackageUseDevtools: Yes 17 | PackageInstallArgs: --no-multiarch --with-keep.source 18 | -------------------------------------------------------------------------------- /vignettes/.gitignore: -------------------------------------------------------------------------------- 1 | *.html 2 | *.R 3 | -------------------------------------------------------------------------------- /vignettes/references.bib: -------------------------------------------------------------------------------- 1 | 2 | 3 | @article{wagenmakers2007, 4 | title = {A Practical Solution to the Pervasive Problems of p Values}, 5 | author = {Wagenmakers, Eric-Jan}, 6 | year = {2007}, 7 | month = oct, 8 | volume = {14}, 9 | pages = {779--804}, 10 | issn = {1069-9384, 1531-5320}, 11 
| doi = {10.3758/BF03194105}, 12 | abstract = {In the field of psychology, the practice of p value null-hypothesis testing is as widespread as ever. Despite this popularity, or perhaps because of it, most psychologists are not aware of the statistical peculiarities of the p value procedure. In particular, p values are based on data that were never observed, and these hypothetical data are themselves influenced by subjective intentions. Moreover, p values do not quantify statistical evidence. This article reviews these p value problems and illustrates each problem with concrete examples. The three problems are familiar to statisticians but may be new to psychologists. A practical solution to these p value problems is to adopt a model selection perspective and use the Bayesian information criterion (BIC) for statistical inference (Raftery, 1995). The BIC provides an approximation to a Bayesian hypothesis test, does not require the specification of priors, and can be easily calculated from SPSS output.}, 13 | file = {/Users/willem/Zotero/storage/GIUXN78A/Wagenmakers - 2007 - A practical solution to the pervasive problems ofp.pdf}, 14 | journal = {Psychonomic Bulletin \& Review}, 15 | keywords = {Bayesian statistics,BIC,p-values,statistics}, 16 | language = {en}, 17 | number = {5} 18 | } 19 | 20 | 21 | 22 | -------------------------------------------------------------------------------- /vignettes/reusing-statistics.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Reusing statistics" 3 | date: "`r Sys.Date()`" 4 | output: rmarkdown::html_vignette 5 | vignette: > 6 | %\VignetteIndexEntry{Reusing statistics} 7 | %\VignetteEngine{knitr::rmarkdown} 8 | %\VignetteEncoding{UTF-8} 9 | --- 10 | 11 | ```{r, include = FALSE} 12 | knitr::opts_chunk$set( 13 | collapse = TRUE, 14 | comment = "#>" 15 | ) 16 | ``` 17 | 18 | ```{r, echo = FALSE} 19 | library(knitr) 20 | options(knitr.kable.NA = "-") 21 | ``` 22 | 23 | Besides using tidystats in combination with a text editor add-in to report 24 | statistics, you can also use tidystats to read and reuse the statistics for other 25 | purposes. For example, researchers can extract specific statistics 26 | and perform analyses such as meta-analyses or p-curve analyses on the 27 | extracted statistics. 28 | 29 | One particularly useful function created for this purpose is 30 | `tidy_stats_to_data_frame()`. This function converts a tidystats list of 31 | statistics to a standard data frame. That means you can use common data 32 | manipulation functions such as `filter()` on the data to retrieve the statistics 33 | of interest. 34 | 35 | ## An example 36 | 37 | Below is an example of how to convert a list of statistics to a data frame and 38 | perform several simple operations. 39 | 40 | In the example below we read the tidystats list and select all the p-values. 41 | 42 | ```{r, results = "hide", message = FALSE} 43 | library(tidystats) 44 | library(dplyr) 45 | 46 | # Read the .json file containing the statistics and immediately convert it to 47 | # a data frame 48 | statistics <- read_stats("statistics.json") |> 49 | tidy_stats_to_data_frame() 50 | 51 | # Extract all the p-values 52 | p_values <- filter(statistics, statistic_name == "p") 53 | 54 | p_values 55 | ``` 56 | 57 | ```{r, echo = FALSE} 58 | kable(p_values, format = "markdown") 59 | ``` 60 | 61 | Alternatively, we can also easily select all significant p-values. 
62 | 63 | ```{r} 64 | sig_p_values <- filter(statistics, statistic_name == "p" & value < .05) 65 | ``` 66 | 67 | ```{r example2_eval, echo = FALSE} 68 | kable(sig_p_values, format = "markdown") 69 | ``` 70 | 71 | This could be useful if you want to conduct a 72 | [p-curve analysis](https://doi.org/10.1177/1745691614553988). 73 | Do note, however, that you should not blindly select all *p*-values. You should 74 | select only the *p*-values that are relevant to a particular hypothesis. If 75 | researchers provide the correct meta-information for each test 76 | (e.g., by indicating whether it is a primary analysis), this could help 77 | meta-researchers make correct decisions about which statistics to include in 78 | their analyses. 79 | 80 | ## Summary 81 | 82 | By importing a tidystats-produced file of statistics, you can convert 83 | the statistics to a data frame using the `tidy_stats_to_data_frame()` function and 84 | apply common data manipulation functions to extract specific statistics. These 85 | statistics can then be used in meta-analyses, p-curve analyses, 86 | or other follow-up analyses. 87 | 88 | ## References 89 | 90 | Simonsohn, U., Nelson, L. D., & Simmons, J. P. (2014). p-curve and effect size: Correcting for publication bias using only significant results. *Perspectives on Psychological Science*, *9*(6), 666-681. https://doi.org/10.1177/1745691614553988 91 | -------------------------------------------------------------------------------- /vignettes/supported-functions.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Supported functions" 3 | output: rmarkdown::html_vignette 4 | vignette: > 5 | %\VignetteIndexEntry{Supported functions} 6 | %\VignetteEngine{knitr::rmarkdown} 7 | %\VignetteEncoding{UTF-8} 8 | --- 9 | 10 | This page contains a list of all functions supported by tidystats. 11 | 12 | Note that if a function is not yet supported, you can request support for it by 13 | creating an [issue](https://github.com/WillemSleegers/tidystats/issues) on 14 | GitHub or by using the `custom_stats()` function. See 15 | `vignette("custom-statistics")` for more information. 
16 | 17 | ## Packages and supported functions 18 | 19 | ```{r, echo = FALSE, results = "asis"} 20 | functions <- jsonlite::read_json("supported-functions.json") 21 | 22 | purrr::walk( 23 | functions, 24 | function(x) { 25 | cat(paste("###", x$package)) 26 | cat("\n") 27 | 28 | purrr::walk( 29 | x$functions, 30 | function(y) { 31 | cat(paste("-", y, "\n")) 32 | } 33 | ) 34 | 35 | cat("\n") 36 | 37 | return(invisible("")) 38 | } 39 | ) 40 | ``` 41 | -------------------------------------------------------------------------------- /vignettes/supported-functions.json: -------------------------------------------------------------------------------- 1 | [ 2 | { 3 | "package": "afex", 4 | "functions": ["aov_ez()", "aov_car()", "aov_4()", "mixed()"] 5 | }, 6 | { 7 | "package": "BayesFactor", 8 | "functions": [ 9 | "generalTestBF()", 10 | "lmBF()", 11 | "regressionBF()", 12 | "ttestBF()", 13 | "anovaBF()", 14 | "correlationBF()", 15 | "contingencyTableBF()", 16 | "proportionBF()", 17 | "meta.ttestBF()" 18 | ] 19 | }, 20 | { 21 | "package": "effectsize", 22 | "functions": ["cohens_d()", "hedges_g()", "glass_delta()"] 23 | }, 24 | { 25 | "package": "effsize", 26 | "functions": ["cohen.d()", "VD.A()", "cliff.delta()"] 27 | }, 28 | { 29 | "package": "emmeans", 30 | "functions": [ 31 | "emmeans()", 32 | "contrast()", 33 | "test()", 34 | "mvcontrast()", 35 | "eff_size()", 36 | "emtrends()", 37 | "joint_tests()", 38 | "ref_grid()" 39 | ] 40 | }, 41 | { 42 | "package": "Hmisc", 43 | "functions": ["rcorr()"] 44 | }, 45 | { 46 | "package": "irr", 47 | "functions": ["icc()"] 48 | }, 49 | { 50 | "package": "lme4", 51 | "functions": ["lmer()", "anova()"] 52 | }, 53 | { 54 | "package": "lmerTest", 55 | "functions": ["lmer()", "anova()"] 56 | }, 57 | { 58 | "package": "tidystats", 59 | "functions": ["count_data()", "describe_data()"] 60 | }, 61 | { 62 | "package": "stats", 63 | "functions": [ 64 | "anova()", 65 | "ansari.test()", 66 | "aov()", 67 | "bartlett.test()", 68 | "binom.test()", 69 | "Box.test()", 70 | "chisq.test()", 71 | "confint()", 72 | "cor.test()", 73 | "fisher.test()", 74 | "fligner.test()", 75 | "friedman.test()", 76 | "glm()", 77 | "kruskal.test()", 78 | "ks.test()", 79 | "lm()", 80 | "mantelhaen.test()", 81 | "mauchly.test()", 82 | "mcnemar.test()", 83 | "mood.test()", 84 | "oneway.test()", 85 | "pairwise.t.test()", 86 | "pairwise.prop.test()", 87 | "pairwise.wilcox.test()", 88 | "poisson.test()", 89 | "PP.test()", 90 | "prop.test()", 91 | "prop.trend.test()", 92 | "quade.test()", 93 | "shapiro.test()", 94 | "t.test()", 95 | "var.test()", 96 | "wilcox.test()" 97 | ] 98 | } 99 | ] 100 | --------------------------------------------------------------------------------