.
21 | Depends:
22 | R (>= 3.2.4)
23 | Imports:
24 | dplyr,
25 | tidyr,
26 | purrr,
27 | ggplot2,
28 | afex,
29 | lme4,
30 | broom.mixed,
31 | faux,
32 | lmerTest
33 | Suggests:
34 | knitr,
35 | rmarkdown,
36 | papaja,
37 | shiny,
38 | shinydashboard,
39 | shinyjs,
40 | MASS
41 | Remotes:
42 | crsh/papaja
43 | VignetteBuilder: knitr
44 | RoxygenNote: 7.1.1
45 | Encoding: UTF-8
46 | LazyData: true
47 | URL: https://github.com/debruine/lmem_sim
48 | BugReports: https://github.com/debruine/lmem_sim/issues
49 | License: CC BY 4.0
50 |
--------------------------------------------------------------------------------
/R/functions.R:
--------------------------------------------------------------------------------
1 | #' Launch Shiny App
2 | #'
3 | #' Launch a local copy of the shiny app that accompanies Simulating LMEM
4 | #'
5 | #' @param ... arguments to pass to shiny::runApp
6 | #'
7 | #' @export
8 | #'
9 | app <- function(...) {
10 | appDir <- system.file("app", package = "lmem.sim")
11 | shiny::runApp(appDir, ...)
12 | }
13 |
14 | #' Open Simulating LMEM paper
15 | #'
16 | #' Open the manuscript
17 | #'
18 | #' @param type The type of manuscript to open (pdf, html, or Rmd)
19 | #'
20 | #' @export
21 | #'
22 | paper <- function(type = c("pdf", "html", "Rmd")) {
23 | type <- match.arg(type)
24 | filename <- paste0("paper/01.AMPSS_LMEM.", type)
25 | f <- system.file(filename, package = "lmem.sim")
26 | utils::browseURL(f)
27 | }
28 |
29 | #' Open Simulating LMEM appendices
30 | #'
31 | #' @param i Which appendix to open
32 | #' @param filename Where to save the appendix
33 | #'
34 | #' @export
35 | #'
36 | appendix <- function(i = c("1a", "1b", "1c", "2", "3a", "3b"), filename = NULL) {
37 | i <- match.arg(i)
38 | dir <- system.file("appendices/", package = "lmem.sim")
39 | files <- list.files(dir, ".Rmd", full.names = TRUE)
40 | n <- grepl(paste0("appendix", i), files)
41 | f <- files[n][[1]]
42 |
43 | if (is.null(filename)) filename <- basename(f)
44 |
45 | file.copy(f, filename)
46 | utils::browseURL(filename)
47 | }
48 |
49 |
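# Usage sketch (assumes the package is installed under its DESCRIPTION name, lmem.sim;
# these are the same calls shown in the README):
# lmem.sim::app()          # launch the local copy of the shiny app
# lmem.sim::paper("html")  # open the html version of the manuscript
# lmem.sim::appendix("1a") # copy appendix 1a to the working directory and open it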
--------------------------------------------------------------------------------
/vignettes/sims/ext_sims_2.csv:
--------------------------------------------------------------------------------
1 | effect,group,term,estimate,std.error,statistic,df,p.value
2 | fixed,NA,(Intercept),800.1520312010771,10.824044905189528,73.92356907328134,238.64763132142974,1.774890900354849e-166
3 | fixed,NA,X_e,-8.143875258984634,7.000678457746131,-1.1632980014920662,137.371432463037,0.24672530227557052
4 | fixed,NA,X_c,56.143812618290205,10.028565572437712,5.598389142769792,192.69299139929353,7.368755336570842e-8
5 | fixed,NA,X_e:X_c,82.39589560789746,10.032571144314398,8.212839403046985,144.45383938260895,1.1076255072625297e-13
6 | ran_pars,subj_id,sd__(Intercept),102.19878574854683,NA,NA,NA,NA
7 | ran_pars,subj_id,cor__(Intercept).X_e,0.31318827449875714,NA,NA,NA,NA
8 | ran_pars,subj_id,cor__(Intercept).X_c,0.34871622889073606,NA,NA,NA,NA
9 | ran_pars,subj_id,cor__(Intercept).X_e:X_c,0.11156080618878045,NA,NA,NA,NA
10 | ran_pars,subj_id,sd__X_e,45.25860205339935,NA,NA,NA,NA
11 | ran_pars,subj_id,cor__X_e.X_c,0.2823068424164505,NA,NA,NA,NA
12 | ran_pars,subj_id,cor__X_e.X_e:X_c,0.11842738071053396,NA,NA,NA,NA
13 | ran_pars,subj_id,sd__X_c,85.14224221245728,NA,NA,NA,NA
14 | ran_pars,subj_id,cor__X_c.X_e:X_c,0.011528766388017453,NA,NA,NA,NA
15 | ran_pars,subj_id,sd__X_e:X_c,81.17459208550507,NA,NA,NA,NA
16 | ran_pars,item_id,sd__(Intercept),79.3118684387006,NA,NA,NA,NA
17 | ran_pars,item_id,cor__(Intercept).X_e,0.01632039800618323,NA,NA,NA,NA
18 | ran_pars,item_id,cor__(Intercept).X_c,0.38445920912737014,NA,NA,NA,NA
19 | ran_pars,item_id,cor__(Intercept).X_e:X_c,0.04121436179741903,NA,NA,NA,NA
20 | ran_pars,item_id,sd__X_e,55.34865518730321,NA,NA,NA,NA
21 | ran_pars,item_id,cor__X_e.X_c,0.1406742082999369,NA,NA,NA,NA
22 | ran_pars,item_id,cor__X_e.X_e:X_c,0.11506623132043559,NA,NA,NA,NA
23 | ran_pars,item_id,sd__X_c,74.96203248090238,NA,NA,NA,NA
24 | ran_pars,item_id,cor__X_c.X_e:X_c,0.17201202047210634,NA,NA,NA,NA
25 | ran_pars,item_id,sd__X_e:X_c,59.307404873047425,NA,NA,NA,NA
26 | ran_pars,Residual,sd__Observation,201.6563569439757,NA,NA,NA,NA
27 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Understanding mixed effects models through data simulation
2 |
3 | *Lisa M. DeBruine & Dale J. Barr*
4 |
5 | This repository contains all materials needed to reproduce the manuscript, as well as supplemental scripts and shiny apps.
6 |
7 | **Abstract** Experimental designs that sample both subjects and stimuli from a larger population need to account for random effects of both subjects and stimuli using mixed effects models. However, much of this research is analyzed using ANOVA on aggregated responses because researchers are not confident specifying and interpreting mixed effects models. The tutorial will explain how to simulate data with random effects structure and analyse the data using linear mixed effects regression (with the lme4 R package), with a focus on interpreting the output in light of the simulated parameters. Data simulation can not only enhance understanding of how these models work, but also enable researchers to perform power calculations for complex designs.
8 |
9 |
10 | * [AMPPS paper](https://doi.org/10.1177/2515245920965119)
11 | * [PsyArXiv preprint](https://psyarxiv.com/xp5cy)
12 | * [Example code](https://debruine.github.io/lmem_sim/articles/)
13 | * Shiny App [Simulating LMEM](https://shiny.psy.gla.ac.uk/lmem_sim/)
14 | * Shiny App [Crossed Random Effects](https://shiny.psy.gla.ac.uk/Dale/crossed/)
15 |
16 | ## Installation
17 |
18 | You can install the development version of a package that includes all the packages you'll need for the examples, plus a local version of the shiny app, from [GitHub](https://github.com/debruine/lmem_sim) with:
19 |
20 | ``` r
21 | devtools::install_github("debruine/lmem_sim")
22 | ```
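
If the devtools package is not already installed, install it from CRAN first:

``` r
install.packages("devtools")
```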
23 |
24 | Here are some functions included in the package:
25 |
26 | ``` r
27 | lmem.sim::paper() # open the pdf version of the paper
28 | lmem.sim::paper("html") # open the html version of the paper
29 | lmem.sim::appendix("1a") # open appendix 1a
30 | lmem.sim::app() # start the app
31 | citation("lmem.sim") # get the citation
32 | ```
33 |
34 |
--------------------------------------------------------------------------------
/inst/app/R/plot_funcs.R:
--------------------------------------------------------------------------------
1 | plot_dat <- function(dat, b0 = 0, view = c("violin", "boxplot"), grp = "") {
2 | min_RT <- min(dat$RT)
3 | max_RT <- max(dat$RT)
4 |
5 | # aggregate over subjects or stimuli if grp is set
6 | if (grp == "subj") {
7 | dat <- dat %>%
8 | group_by(subj_id, category) %>%
9 | summarise(RT = mean(RT))
10 | } else if (grp == "item") {
11 | dat <- dat %>%
12 | group_by(item_id, category) %>%
13 | summarise(RT = mean(RT))
14 | }
15 |
16 | plot <- ggplot(dat, aes(category, RT, color = category)) +
17 | geom_hline(yintercept = b0) +
18 | xlab("Stimulus Type") +
19 | ylab("Rating") +
20 | scale_color_discrete(name = "Stimulus Type") +
21 | coord_cartesian(ylim = c(min_RT, max_RT)) +
22 | theme(legend.position="bottom")
23 |
24 | if ("violin" %in% view) {
25 | plot <- plot + geom_violin(alpha = 0.5, show.legend = FALSE)
26 | }
27 |
28 | if ("boxplot" %in% view) {
29 | plot <- plot + geom_boxplot(width = 0.2, show.legend = FALSE,
30 | position = position_dodge(width = 0.9))
31 | }
32 |
33 | return(plot)
34 | }
35 |
36 | plot_power_lmer <- function(dat) {
37 | dat %>%
38 | filter(type == "power", analysis == "lmer") %>%
39 | mutate(analysis = recode(analysis,
40 | "lmer" = "LMER")) %>%
42 | ggplot(aes(es)) +
43 | geom_density() +
44 | xlab("Effect Size (raw estimate)") +
45 | ggtitle("LMER")
46 | }
47 |
48 | plot_power_anova <- function(dat) {
49 | dat %>%
50 | filter(type == "power", analysis != "lmer") %>%
51 | mutate(analysis = recode(analysis,
52 | "anova_subj" = "By-Subjects ANOVA",
53 | "anova_item" = "By-Items ANOVA")) %>%
54 | ggplot(aes(es, color = analysis)) +
55 | geom_density(show.legend = TRUE) +
56 | xlab("Effect Size (Cohen's d)") +
57 | theme(legend.position="bottom") +
58 | ggtitle("ANOVAs")
59 | }
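
# Usage sketch (dat is the long table returned by sim_power() in R/lmer_funcs.R,
# with columns analysis, type, es, and p; parameter values are illustrative):
# dat <- purrr::map_df(1:10, sim_power, beta_0 = 800, beta_1 = 50)
# plot_power_lmer(dat)   # density of raw LMER effect estimates
# plot_power_anova(dat)  # densities of Cohen's d for the two ANOVAs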
--------------------------------------------------------------------------------
/inst/app/comp_tab.R:
--------------------------------------------------------------------------------
1 | ### comp_tab ----
2 | comp_tab <- tabItem(
3 | tabName = "comp_tab",
4 | h3("Compare ANOVAs and LMER for an individual simulation"),
5 | fluidRow(
6 | box(
7 | title = "Descriptives",
8 | width = 8,
9 | tableOutput("descr_table")
10 | ),
11 | box(
12 | title = "Plot Type",
13 | width = 4,
14 | checkboxGroupInput(
15 | "dat_plot_view",
16 | "View:",
17 | c("violin" = "violin",
18 | "boxplot" = "boxplot"),
19 | selected = c("violin", "boxplot")
20 | )
21 | )
22 | ),
23 | fluidRow(
24 | column(
25 | width = 8,
26 | box(
27 | title = "By-Item ANOVA",
28 | width = NULL,
29 | tableOutput("item_coef")
30 | )
31 | ),
32 | column(
33 | width = 4,
34 | box(
35 | title = "Aggregated by Items",
36 | width = NULL,
37 | plotOutput(outputId = "dat_item_plot", height = "auto")
38 | )
39 | )
40 | ),
41 | fluidRow(
42 | column(
43 | width = 8,
44 | box(
45 | title = "By-Subjects ANOVA",
46 | width = NULL,
47 | tableOutput("subj_coef")
48 | )
49 | ),
50 | column(
51 | width = 4,
52 | box(
53 | title = "Aggregated by Subject",
54 | width = NULL,
55 | plotOutput(outputId = "dat_subj_plot", height = "auto")
56 | )
57 | )
58 | ),
59 | fluidRow(
60 | column(
61 | width = 8,
62 | box(
63 | title = "LMER",
64 | width = NULL,
65 | p("The p-value for the main effect in LMER will be near-identical to the by-stimuli ANOVA if the random slope for the main effect is set to zero (i.e., where between-subject variation in the effect of stimulus type is 0)."),
66 | tableOutput("lmer_coef")
67 | )
68 | ),
69 | column(
70 | width = 4,
71 | box(
72 | title = "Not Aggregated",
73 | width = NULL,
74 | plotOutput(outputId = "dat_plot", height = "auto")
75 | )
76 | )
77 | )
78 | )
79 |
--------------------------------------------------------------------------------
/docs/bootstrap-toc.css:
--------------------------------------------------------------------------------
1 | /*!
2 | * Bootstrap Table of Contents v0.4.1 (http://afeld.github.io/bootstrap-toc/)
3 | * Copyright 2015 Aidan Feldman
4 | * Licensed under MIT (https://github.com/afeld/bootstrap-toc/blob/gh-pages/LICENSE.md) */
5 |
6 | /* modified from https://github.com/twbs/bootstrap/blob/94b4076dd2efba9af71f0b18d4ee4b163aa9e0dd/docs/assets/css/src/docs.css#L548-L601 */
7 |
8 | /* All levels of nav */
9 | nav[data-toggle='toc'] .nav > li > a {
10 | display: block;
11 | padding: 4px 20px;
12 | font-size: 13px;
13 | font-weight: 500;
14 | color: #767676;
15 | }
16 | nav[data-toggle='toc'] .nav > li > a:hover,
17 | nav[data-toggle='toc'] .nav > li > a:focus {
18 | padding-left: 19px;
19 | color: #563d7c;
20 | text-decoration: none;
21 | background-color: transparent;
22 | border-left: 1px solid #563d7c;
23 | }
24 | nav[data-toggle='toc'] .nav > .active > a,
25 | nav[data-toggle='toc'] .nav > .active:hover > a,
26 | nav[data-toggle='toc'] .nav > .active:focus > a {
27 | padding-left: 18px;
28 | font-weight: bold;
29 | color: #563d7c;
30 | background-color: transparent;
31 | border-left: 2px solid #563d7c;
32 | }
33 |
34 | /* Nav: second level (shown on .active) */
35 | nav[data-toggle='toc'] .nav .nav {
36 | display: none; /* Hide by default, but at >768px, show it */
37 | padding-bottom: 10px;
38 | }
39 | nav[data-toggle='toc'] .nav .nav > li > a {
40 | padding-top: 1px;
41 | padding-bottom: 1px;
42 | padding-left: 30px;
43 | font-size: 12px;
44 | font-weight: normal;
45 | }
46 | nav[data-toggle='toc'] .nav .nav > li > a:hover,
47 | nav[data-toggle='toc'] .nav .nav > li > a:focus {
48 | padding-left: 29px;
49 | }
50 | nav[data-toggle='toc'] .nav .nav > .active > a,
51 | nav[data-toggle='toc'] .nav .nav > .active:hover > a,
52 | nav[data-toggle='toc'] .nav .nav > .active:focus > a {
53 | padding-left: 28px;
54 | font-weight: 500;
55 | }
56 |
57 | /* from https://github.com/twbs/bootstrap/blob/e38f066d8c203c3e032da0ff23cd2d6098ee2dd6/docs/assets/css/src/docs.css#L631-L634 */
58 | nav[data-toggle='toc'] .nav > .active > ul {
59 | display: block;
60 | }
61 |
--------------------------------------------------------------------------------
/inst/app/power_tab.R:
--------------------------------------------------------------------------------
1 | ### power_tab ----
2 | power_tab <- tabItem(
3 | tabName = "power_tab",
4 | h3("Compare False Positive Rate and Power"),
5 | p("This function will run the number of simulations with the parameters you've set and report the proportion of runs that gave a significant effect of condition (given the alpha you set). It will also report the false positive rate for the same simulations with the main effect of condition set to 0. If you set the main effect of condition to 0, then power will be equal to the false positive rate."),
6 | fluidRow(
7 | column(
8 | width = 4,
9 | actionButton("calc_power", "Calculate"),
10 | sliderInput("n_reps", "Number of Simulations to Run:",
11 | min = 10, max = 100, value = 10, step = 10),
12 | sliderInput("alpha", "(Justify Your) Alpha",
13 | min = .005, max = .100, value = 0.05, step = 0.005)
14 | ),
15 | column(
16 | width = 8,
17 | title = "False Positive/Power Calculations",
18 | tableOutput("power_table")
19 | )
20 | ),
21 | p("It is not an error that the false positive rate for the by-subjects ANOVA is very high. With this type of within-subjects, between-items design, you can get very high false positive rates if items have some variation in their mean DV (i.e., where faces tend to vary in expressiveness). For this type of design (no between-subject factors), the by-items ANOVA will have a closer-to-nominal false positive rate, but will have a more inflated false positive rate for designs with between-subject factors where subjects have some random variation in their mean repsonses."),
22 | p("If you set the Item Intercept SD to 0, you will see that the by-subjects ANOVA has a false positive rate closer to the nominal alpha (defaults to 0.05). However, this models a very unrealistic situation where the variation in expressiveness of faces is 0."),
23 | h4("Simulated Effect Size Distribution"),
24 | fluidRow(
25 | column(
26 | width = 6,
27 | plotOutput(outputId = "power_plot_anova", height = "auto")
28 | ),
29 | column(
30 | width = 6,
31 | plotOutput(outputId = "power_plot_lmer", height = "auto")
32 | )
33 | )
34 | )
35 |
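# Sketch of the calculation this tab reports (sim_power() is defined in
# R/lmer_funcs.R; 10 runs and alpha = .05 are illustrative):
# reps <- purrr::map_df(1:10, sim_power, beta_0 = 800, beta_1 = 50)
# reps %>%
#   dplyr::group_by(analysis, type) %>%
#   dplyr::summarise(estimate = mean(p < .05))
# # type == "power" rows give power; "false positive" rows give the false positive rate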
--------------------------------------------------------------------------------
/docs/docsearch.js:
--------------------------------------------------------------------------------
1 | $(function() {
2 |
3 | // register a handler to move the focus to the search bar
4 | // upon pressing shift + "/" (i.e. "?")
5 | $(document).on('keydown', function(e) {
6 | if (e.shiftKey && e.keyCode == 191) {
7 | e.preventDefault();
8 | $("#search-input").focus();
9 | }
10 | });
11 |
12 | $(document).ready(function() {
13 | // do keyword highlighting
14 | /* modified from https://jsfiddle.net/julmot/bL6bb5oo/ */
15 | var mark = function() {
16 |
17 | var referrer = document.URL;
18 | var paramKey = "q";
19 |
20 | if (referrer.indexOf("?") !== -1) {
21 | var qs = referrer.substr(referrer.indexOf('?') + 1);
22 | var qs_noanchor = qs.split('#')[0];
23 | var qsa = qs_noanchor.split('&');
24 | var keyword = "";
25 |
26 | for (var i = 0; i < qsa.length; i++) {
27 | var currentParam = qsa[i].split('=');
28 |
29 | if (currentParam.length !== 2) {
30 | continue;
31 | }
32 |
33 | if (currentParam[0] == paramKey) {
34 | keyword = decodeURIComponent(currentParam[1].replace(/\+/g, "%20"));
35 | }
36 | }
37 |
38 | if (keyword !== "") {
39 | $(".contents").unmark({
40 | done: function() {
41 | $(".contents").mark(keyword);
42 | }
43 | });
44 | }
45 | }
46 | };
47 |
48 | mark();
49 | });
50 | });
51 |
52 | /* Search term highlighting ------------------------------*/
53 |
54 | function matchedWords(hit) {
55 | var words = [];
56 |
57 | var hierarchy = hit._highlightResult.hierarchy;
58 | // loop to fetch from lvl0, lvl1, etc.
59 | for (var idx in hierarchy) {
60 | words = words.concat(hierarchy[idx].matchedWords);
61 | }
62 |
63 | var content = hit._highlightResult.content;
64 | if (content) {
65 | words = words.concat(content.matchedWords);
66 | }
67 |
68 | // return unique words
69 | var words_uniq = [...new Set(words)];
70 | return words_uniq;
71 | }
72 |
73 | function updateHitURL(hit) {
74 |
75 | var words = matchedWords(hit);
76 | var url = "";
77 |
78 | if (hit.anchor) {
79 | url = hit.url_without_anchor + '?q=' + escape(words.join(" ")) + '#' + hit.anchor;
80 | } else {
81 | url = hit.url + '?q=' + escape(words.join(" "));
82 | }
83 |
84 | return url;
85 | }
86 |
--------------------------------------------------------------------------------
/inst/app/R/data_funcs.R:
--------------------------------------------------------------------------------
1 | sim_dat <- function(...) {
2 | trials <- sim_trials(...)
3 | dat_code(trials, ...)
4 | }
5 |
6 | sim_trials <- function(
7 | n_subj = 100, # number of subjects
8 | n_ingroup = 25, # number of ingroup stimuli
9 | n_outgroup = 25, # number of outgroup stimuli
10 | omega_0 = 80, # by-item random intercept sd
11 | tau_0 = 100, # by-subject random intercept sd
12 | tau_1 = 40, # by-subject random slope sd
13 | rho = 0.2, # correlation between intercept and slope
14 | sigma = 200, ...) { # residual (standard deviation)
15 |
16 | # simulate a sample of items
17 | items <- data.frame(
18 | item_id = seq_len(n_ingroup + n_outgroup),
19 | category = rep(c("ingroup", "outgroup"), c(n_ingroup, n_outgroup)),
20 | X_i = rep(c(-0.5, 0.5), c(n_ingroup, n_outgroup)),
21 | O_0i = rnorm(n = n_ingroup + n_outgroup, mean = 0, sd = omega_0)
22 | )
23 |
24 | # simulate a sample of subjects
25 | subjects <- rnorm_multi(
26 | n = n_subj, mu = 0, sd = c(tau_0, tau_1), r = rho,
27 | varnames = c("T_0s", "T_1s")
28 | )
29 | subjects$subj_id <- 1:n_subj
30 |
31 | # cross subject and item IDs
32 | crossing(subjects, items) %>%
33 | mutate(e_si = rnorm(nrow(.), mean = 0, sd = sigma))
34 | }
35 |
36 |
37 | dat_code <- function(trials, beta_0, beta_1 = 0, ...) { # beta_0 = grand mean, beta_1 = fixed effect of category
38 | mutate(trials,
39 | RT = beta_0 + O_0i + T_0s + (beta_1 + T_1s) * X_i + e_si,
40 | RT_null = beta_0 + O_0i + T_0s + ( 0 + T_1s) * X_i + e_si # same trials with the fixed effect zeroed
41 | )
42 | }
43 |
44 |
45 | descr <- function(dat) {
46 | subj_table <- dat %>%
47 | group_by(subj_id, category) %>%
48 | summarise(RT = mean(RT)) %>%
49 | ungroup() %>%
50 | group_by(category) %>%
51 | summarise(sd = sd(RT), n = n()) %>%
52 | ungroup() %>%
53 | unite(cell, category, sep = " ") %>%
54 | spread(cell, sd) %>%
55 | mutate(`grouped by` = "subjects", stat = "sd")
56 |
57 | item_table <- dat %>%
58 | group_by(item_id, category) %>%
59 | summarise(RT = mean(RT)) %>%
60 | ungroup() %>%
61 | group_by(category) %>%
62 | summarise(sd = sd(RT), n = n()*2) %>% # x2 because 2 groups of stim
63 | ungroup() %>%
64 | unite(cell, category, sep = " ") %>%
65 | spread(cell, sd) %>%
66 | mutate(`grouped by` = "items", stat = "sd")
67 |
68 | all_table <- dat %>%
69 | group_by(category) %>%
70 | summarise(sd = sd(RT), n = n()) %>%
71 | ungroup() %>%
72 | unite(cell, category, sep = " ") %>%
73 | spread(cell, sd) %>%
74 | mutate(`grouped by` = "all", stat = "sd")
75 |
76 | all_table_RT <- dat %>%
77 | group_by(category) %>%
78 | summarise(RT = mean(RT), n = n()) %>%
79 | ungroup() %>%
80 | unite(cell, category, sep = " ") %>%
81 | spread(cell, RT) %>%
82 | mutate(`grouped by` = "", stat = "mean")
83 |
84 | bind_rows(
85 | all_table_RT,
86 | all_table,
87 | subj_table,
88 | item_table
89 | ) %>%
90 | select(`grouped by`, n, stat, ingroup, outgroup)
91 | }
92 |
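# Usage sketch (beta_0 is required by dat_code(); values are illustrative, and
# dplyr/tidyr are assumed to be loaded, as in the app):
# dat <- sim_dat(beta_0 = 800, beta_1 = 50, n_subj = 100)
# descr(dat)  # cell means and SDs aggregated by subjects, by items, and over all trials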
--------------------------------------------------------------------------------
/docs/pkgdown.js:
--------------------------------------------------------------------------------
1 | /* http://gregfranko.com/blog/jquery-best-practices/ */
2 | (function($) {
3 | $(function() {
4 |
5 | $('.navbar-fixed-top').headroom();
6 |
7 | $('body').css('padding-top', $('.navbar').height() + 10);
8 | $(window).resize(function(){
9 | $('body').css('padding-top', $('.navbar').height() + 10);
10 | });
11 |
12 | $('[data-toggle="tooltip"]').tooltip();
13 |
14 | var cur_path = paths(location.pathname);
15 | var links = $("#navbar ul li a");
16 | var max_length = -1;
17 | var pos = -1;
18 | for (var i = 0; i < links.length; i++) {
19 | if (links[i].getAttribute("href") === "#")
20 | continue;
21 | // Ignore external links
22 | if (links[i].host !== location.host)
23 | continue;
24 |
25 | var nav_path = paths(links[i].pathname);
26 |
27 | var length = prefix_length(nav_path, cur_path);
28 | if (length > max_length) {
29 | max_length = length;
30 | pos = i;
31 | }
32 | }
33 |
34 | // Add class to parent <li>, and enclosing <li> if in dropdown
35 | if (pos >= 0) {
36 | var menu_anchor = $(links[pos]);
37 | menu_anchor.parent().addClass("active");
38 | menu_anchor.closest("li.dropdown").addClass("active");
39 | }
40 | });
41 |
42 | function paths(pathname) {
43 | var pieces = pathname.split("/");
44 | pieces.shift(); // always starts with /
45 |
46 | var end = pieces[pieces.length - 1];
47 | if (end === "index.html" || end === "")
48 | pieces.pop();
49 | return(pieces);
50 | }
51 |
52 | // Returns -1 if not found
53 | function prefix_length(needle, haystack) {
54 | if (needle.length > haystack.length)
55 | return(-1);
56 |
57 | // Special case for length-0 haystack, since for loop won't run
58 | if (haystack.length === 0) {
59 | return(needle.length === 0 ? 0 : -1);
60 | }
61 |
62 | for (var i = 0; i < haystack.length; i++) {
63 | if (needle[i] != haystack[i])
64 | return(i);
65 | }
66 |
67 | return(haystack.length);
68 | }
69 |
70 | /* Clipboard --------------------------*/
71 |
72 | function changeTooltipMessage(element, msg) {
73 | var tooltipOriginalTitle=element.getAttribute('data-original-title');
74 | element.setAttribute('data-original-title', msg);
75 | $(element).tooltip('show');
76 | element.setAttribute('data-original-title', tooltipOriginalTitle);
77 | }
78 |
79 | if(ClipboardJS.isSupported()) {
80 | $(document).ready(function() {
81 | var copyButton = "<button type='button' class='btn btn-primary btn-copy-ex' title='Copy to clipboard' aria-label='Copy to clipboard' data-toggle='tooltip' data-placement='left auto' data-trigger='hover' data-clipboard-copy><i class='fa fa-copy'></i></button>";
82 |
83 | $(".examples, div.sourceCode").addClass("hasCopyButton");
84 |
85 | // Insert copy buttons:
86 | $(copyButton).prependTo(".hasCopyButton");
87 |
88 | // Initialize tooltips:
89 | $('.btn-copy-ex').tooltip({container: 'body'});
90 |
91 | // Initialize clipboard:
92 | var clipboardBtnCopies = new ClipboardJS('[data-clipboard-copy]', {
93 | text: function(trigger) {
94 | return trigger.parentNode.textContent;
95 | }
96 | });
97 |
98 | clipboardBtnCopies.on('success', function(e) {
99 | changeTooltipMessage(e.trigger, 'Copied!');
100 | e.clearSelection();
101 | });
102 |
103 | clipboardBtnCopies.on('error', function(e) {
104 | changeTooltipMessage(e.trigger,'Press Ctrl+C or Command+C to copy');
105 | });
106 | });
107 | }
108 | })(window.jQuery || window.$)
109 |
--------------------------------------------------------------------------------
/inst/app/R/lmer_funcs.R:
--------------------------------------------------------------------------------
1 | sim_lmer <- function(dat_sim) {
2 | mod_sim <- lmer(RT ~ X_i + (1 | item_id) + (1 + X_i | subj_id),
3 | dat_sim, REML = TRUE)
4 |
5 | return(mod_sim)
6 | }
7 |
8 | sim_subj_anova <- function(dat) {
9 | dat_sub <- dat %>%
10 | group_by(subj_id, category, X_i) %>%
11 | summarise(RT = mean(RT))
12 |
13 | mod <- afex::aov_4(RT ~ (X_i | subj_id),
14 | factorize = FALSE,
15 | data = dat_sub)
16 |
17 | mod.sum <- anova(mod)
18 |
19 | # within cohen's d
20 | x <- filter(dat_sub, category == "ingroup") %>% pull(RT)
21 | y <- filter(dat_sub, category == "outgroup") %>% pull(RT)
22 | mod.sum$d <- cohen_d(x, y, TRUE)
23 |
24 | return(mod.sum)
25 | }
26 |
27 | sim_item_anova <- function(dat) {
28 | dat_item <- dat %>%
29 | group_by(item_id, category, X_i) %>%
30 | summarise(RT = mean(RT))
31 |
32 | mod <- afex::aov_4(RT ~ X_i + (1 | item_id),
33 | factorize = FALSE,
34 | data = dat_item)
35 |
36 | mod.sum <- anova(mod)
37 |
38 | # between cohen's d
39 | x <- filter(dat_item, category == "ingroup") %>% pull(RT)
40 | y <- filter(dat_item, category == "outgroup") %>% pull(RT)
41 | mod.sum$d <- cohen_d(x, y, FALSE)
42 |
43 | return(mod.sum)
44 | }
45 |
46 | sim_power <- function(rep = 0, ...) {
47 | dat <- sim_dat(...)
48 | dots <- list(...)
49 |
50 | # run models to calculate power
51 | mod.lmer <- sim_lmer(dat)
52 | mod.subj <- sim_subj_anova(dat)
53 | mod.item <- sim_item_anova(dat)
54 |
55 | if (dots$beta_1 != 0) {
56 | # run models for null effect to calculate false positives
57 | dat$RT <- dat$RT_null
58 |
59 | mod.lmer.null <- sim_lmer(dat)
60 | mod.subj.null <- sim_subj_anova(dat)
61 | mod.item.null <- sim_item_anova(dat)
62 | }
63 |
64 | # get output into tables
65 | table.lmer <- summary(mod.lmer)$coefficients %>%
66 | as_tibble(rownames = "effect") %>%
67 | filter(effect != "(Intercept)") %>%
68 | select(effect, es = Estimate, p = 6) %>%
69 | mutate(analysis = "lmer", type = "power")
70 |
71 | table.subj <- mod.subj %>%
72 | as_tibble(rownames = "effect") %>%
73 | select(effect, es = d, p = 7) %>%
74 | mutate(analysis = "anova_subj", type = "power")
75 |
76 | table.item <- mod.item %>%
77 | as_tibble(rownames = "effect") %>%
78 | select(effect, es = d, p = 7) %>%
79 | mutate(analysis = "anova_item", type = "power")
80 |
81 | if (dots$beta_1 == 0) {
82 | # avoid duplicate models if effect is null
83 | table.lmer.null <- mutate(table.lmer, type = "false positive")
84 | table.subj.null <- mutate(table.subj, type = "false positive")
85 | table.item.null <- mutate(table.item, type = "false positive")
86 | } else {
87 | table.lmer.null <- summary(mod.lmer.null)$coefficients %>%
88 | as_tibble(rownames = "effect") %>%
89 | filter(effect != "(Intercept)") %>%
90 | select(effect, es = Estimate, p = 6) %>%
91 | mutate(analysis = "lmer", type = "false positive")
92 |
93 | table.subj.null <- mod.subj.null %>%
94 | as_tibble(rownames = "effect") %>%
95 | select(effect, es = d, p = 7) %>%
96 | mutate(analysis = "anova_subj", type = "false positive")
97 |
98 | table.item.null <- mod.item.null %>%
99 | as_tibble(rownames = "effect") %>%
100 | select(effect, es = d, p = 7) %>%
101 | mutate(analysis = "anova_item", type = "false positive")
102 | }
103 |
104 | bind_rows(table.lmer, table.subj, table.item,
105 | table.lmer.null, table.subj.null, table.item.null) %>%
106 | mutate(rep = rep)
107 | }
108 |
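# Usage sketch (illustrative parameter values; assumes lmerTest is loaded so the
# lmer() coefficient table has p-values in column 6, as select(p = 6) expects):
# dat <- sim_dat(beta_0 = 800, beta_1 = 50)
# summary(sim_lmer(dat))  # mixed-effects model
# sim_subj_anova(dat)     # by-subjects ANOVA with paired Cohen's d
# sim_item_anova(dat)     # by-items ANOVA with between-groups Cohen's d
# sim_power(rep = 1, beta_0 = 800, beta_1 = 50)  # one power/false-positive run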
--------------------------------------------------------------------------------
/vignettes/r-references.bib:
--------------------------------------------------------------------------------
1 | @Manual{R-base,
2 | title = {R: A Language and Environment for Statistical Computing},
3 | author = {{R Core Team}},
4 | organization = {R Foundation for Statistical Computing},
5 | address = {Vienna, Austria},
6 | year = {2018},
7 | url = {https://www.R-project.org/},
8 | }
9 | @Manual{R-broom.mixed,
10 | title = {broom.mixed: Tidying Methods for Mixed Models},
11 | author = {Ben Bolker and David Robinson},
12 | year = {2019},
13 | note = {R package version 0.2.4},
14 | url = {https://CRAN.R-project.org/package=broom.mixed},
15 | }
16 | @Manual{R-dplyr,
17 | title = {dplyr: A Grammar of Data Manipulation},
18 | author = {Hadley Wickham and Romain François and Lionel Henry and Kirill Müller},
19 | year = {2019},
20 | note = {R package version 0.8.1},
21 | url = {https://CRAN.R-project.org/package=dplyr},
22 | }
23 | @Manual{R-forcats,
24 | title = {forcats: Tools for Working with Categorical Variables (Factors)},
25 | author = {Hadley Wickham},
26 | year = {2019},
27 | note = {R package version 0.4.0},
28 | url = {https://CRAN.R-project.org/package=forcats},
29 | }
30 | @Book{R-ggplot2,
31 | author = {Hadley Wickham},
32 | title = {ggplot2: Elegant Graphics for Data Analysis},
33 | publisher = {Springer-Verlag New York},
34 | year = {2016},
35 | isbn = {978-3-319-24277-4},
36 | url = {https://ggplot2.tidyverse.org},
37 | }
38 | @Article{R-lme4,
39 | title = {Fitting Linear Mixed-Effects Models Using {lme4}},
40 | author = {Douglas Bates and Martin M{\"a}chler and Ben Bolker and Steve Walker},
41 | journal = {Journal of Statistical Software},
42 | year = {2015},
43 | volume = {67},
44 | number = {1},
45 | pages = {1--48},
46 | doi = {10.18637/jss.v067.i01},
47 | }
48 | @Article{R-lmerTest,
49 | title = {{lmerTest} Package: Tests in Linear Mixed Effects Models},
50 | author = {Alexandra Kuznetsova and Per B. Brockhoff and Rune H. B. Christensen},
51 | journal = {Journal of Statistical Software},
52 | year = {2017},
53 | volume = {82},
54 | number = {13},
55 | pages = {1--26},
56 | doi = {10.18637/jss.v082.i13},
57 | }
58 | @Manual{R-Matrix,
59 | title = {Matrix: Sparse and Dense Matrix Classes and Methods},
60 | author = {Douglas Bates and Martin Maechler},
61 | year = {2018},
62 | note = {R package version 1.2-15},
63 | url = {https://CRAN.R-project.org/package=Matrix},
64 | }
65 | @Manual{R-papaja,
66 | author = {Frederik Aust and Marius Barth},
67 | title = {{papaja}: {Create} {APA} manuscripts with {R Markdown}},
68 | year = {2018},
69 | note = {R package version 0.1.0.9842},
70 | url = {https://github.com/crsh/papaja},
71 | }
72 | @Manual{R-purrr,
73 | title = {purrr: Functional Programming Tools},
74 | author = {Lionel Henry and Hadley Wickham},
75 | year = {2019},
76 | note = {R package version 0.3.2},
77 | url = {https://CRAN.R-project.org/package=purrr},
78 | }
79 | @Manual{R-readr,
80 | title = {readr: Read Rectangular Text Data},
81 | author = {Hadley Wickham and Jim Hester and Romain Francois},
82 | year = {2018},
83 | note = {R package version 1.3.1},
84 | url = {https://CRAN.R-project.org/package=readr},
85 | }
86 | @Manual{R-stringr,
87 | title = {stringr: Simple, Consistent Wrappers for Common String Operations},
88 | author = {Hadley Wickham},
89 | year = {2019},
90 | note = {R package version 1.4.0},
91 | url = {https://CRAN.R-project.org/package=stringr},
92 | }
93 | @Manual{R-tibble,
94 | title = {tibble: Simple Data Frames},
95 | author = {Kirill Müller and Hadley Wickham},
96 | year = {2019},
97 | note = {R package version 2.1.1},
98 | url = {https://CRAN.R-project.org/package=tibble},
99 | }
100 | @Manual{R-tidyr,
101 | title = {tidyr: Easily Tidy Data with 'spread()' and 'gather()' Functions},
102 | author = {Hadley Wickham and Lionel Henry},
103 | year = {2019},
104 | note = {R package version 0.8.3},
105 | url = {https://CRAN.R-project.org/package=tidyr},
106 | }
107 | @Manual{R-tidyverse,
108 | title = {tidyverse: Easily Install and Load the 'Tidyverse'},
109 | author = {Hadley Wickham},
110 | year = {2017},
111 | note = {R package version 1.2.1},
112 | url = {https://CRAN.R-project.org/package=tidyverse},
113 | }
114 | @Manual{R-afex,
115 | title = {afex: Analysis of Factorial Experiments},
116 | author = {Henrik Singmann and Ben Bolker and Jake Westfall and Frederik Aust},
117 | year = {2019},
118 | note = {R package version 0.23-0},
119 | url = {https://CRAN.R-project.org/package=afex},
120 | }
121 |
122 | @Manual{R-faux,
123 | title = {faux: Simulation for Factorial Designs},
124 | author = {Lisa DeBruine},
125 | doi = {10.5281/zenodo.2669586},
126 | publisher = {Zenodo},
127 | year = {2020},
128 | month = {August},
129 | note = {R package version 0.0.1.2},
130 | url = {https://debruine.github.io/faux/},
131 | }
132 |
--------------------------------------------------------------------------------
/inst/paper/r-references.bib:
--------------------------------------------------------------------------------
1 | @Manual{R-base,
2 | title = {R: A Language and Environment for Statistical Computing},
3 | author = {{R Core Team}},
4 | organization = {R Foundation for Statistical Computing},
5 | address = {Vienna, Austria},
6 | year = {2018},
7 | url = {https://www.R-project.org/},
8 | }
9 | @Manual{R-broom.mixed,
10 | title = {broom.mixed: Tidying Methods for Mixed Models},
11 | author = {Ben Bolker and David Robinson},
12 | year = {2019},
13 | note = {R package version 0.2.4},
14 | url = {https://CRAN.R-project.org/package=broom.mixed},
15 | }
16 | @Manual{R-dplyr,
17 | title = {dplyr: A Grammar of Data Manipulation},
18 | author = {Hadley Wickham and Romain François and Lionel Henry and Kirill Müller},
19 | year = {2019},
20 | note = {R package version 0.8.1},
21 | url = {https://CRAN.R-project.org/package=dplyr},
22 | }
23 | @Manual{R-forcats,
24 | title = {forcats: Tools for Working with Categorical Variables (Factors)},
25 | author = {Hadley Wickham},
26 | year = {2019},
27 | note = {R package version 0.4.0},
28 | url = {https://CRAN.R-project.org/package=forcats},
29 | }
30 | @Book{R-ggplot2,
31 | author = {Hadley Wickham},
32 | title = {ggplot2: Elegant Graphics for Data Analysis},
33 | publisher = {Springer-Verlag New York},
34 | year = {2016},
35 | isbn = {978-3-319-24277-4},
36 | url = {https://ggplot2.tidyverse.org},
37 | }
38 | @Article{R-lme4,
39 | title = {Fitting Linear Mixed-Effects Models Using {lme4}},
40 | author = {Douglas Bates and Martin M{\"a}chler and Ben Bolker and Steve Walker},
41 | journal = {Journal of Statistical Software},
42 | year = {2015},
43 | volume = {67},
44 | number = {1},
45 | pages = {1--48},
46 | doi = {10.18637/jss.v067.i01},
47 | }
48 | @Article{R-lmerTest,
49 | title = {{lmerTest} Package: Tests in Linear Mixed Effects Models},
50 | author = {Alexandra Kuznetsova and Per B. Brockhoff and Rune H. B. Christensen},
51 | journal = {Journal of Statistical Software},
52 | year = {2017},
53 | volume = {82},
54 | number = {13},
55 | pages = {1--26},
56 | doi = {10.18637/jss.v082.i13},
57 | }
58 | @Manual{R-Matrix,
59 | title = {Matrix: Sparse and Dense Matrix Classes and Methods},
60 | author = {Douglas Bates and Martin Maechler},
61 | year = {2018},
62 | note = {R package version 1.2-15},
63 | url = {https://CRAN.R-project.org/package=Matrix},
64 | }
65 | @Manual{R-papaja,
66 | author = {Frederik Aust and Marius Barth},
67 | title = {{papaja}: {Create} {APA} manuscripts with {R Markdown}},
68 | year = {2018},
69 | note = {R package version 0.1.0.9842},
70 | url = {https://github.com/crsh/papaja},
71 | }
72 | @Manual{R-purrr,
73 | title = {purrr: Functional Programming Tools},
74 | author = {Lionel Henry and Hadley Wickham},
75 | year = {2019},
76 | note = {R package version 0.3.2},
77 | url = {https://CRAN.R-project.org/package=purrr},
78 | }
79 | @Manual{R-readr,
80 | title = {readr: Read Rectangular Text Data},
81 | author = {Hadley Wickham and Jim Hester and Romain Francois},
82 | year = {2018},
83 | note = {R package version 1.3.1},
84 | url = {https://CRAN.R-project.org/package=readr},
85 | }
86 | @Manual{R-stringr,
87 | title = {stringr: Simple, Consistent Wrappers for Common String Operations},
88 | author = {Hadley Wickham},
89 | year = {2019},
90 | note = {R package version 1.4.0},
91 | url = {https://CRAN.R-project.org/package=stringr},
92 | }
93 | @Manual{R-tibble,
94 | title = {tibble: Simple Data Frames},
95 | author = {Kirill Müller and Hadley Wickham},
96 | year = {2019},
97 | note = {R package version 2.1.1},
98 | url = {https://CRAN.R-project.org/package=tibble},
99 | }
100 | @Manual{R-tidyr,
101 | title = {tidyr: Easily Tidy Data with 'spread()' and 'gather()' Functions},
102 | author = {Hadley Wickham and Lionel Henry},
103 | year = {2019},
104 | note = {R package version 0.8.3},
105 | url = {https://CRAN.R-project.org/package=tidyr},
106 | }
107 | @Manual{R-tidyverse,
108 | title = {tidyverse: Easily Install and Load the 'Tidyverse'},
109 | author = {Hadley Wickham},
110 | year = {2017},
111 | note = {R package version 1.2.1},
112 | url = {https://CRAN.R-project.org/package=tidyverse},
113 | }
114 | @Manual{R-afex,
115 | title = {afex: Analysis of Factorial Experiments},
116 | author = {Henrik Singmann and Ben Bolker and Jake Westfall and Frederik Aust},
117 | year = {2019},
118 | note = {R package version 0.23-0},
119 | url = {https://CRAN.R-project.org/package=afex},
120 | }
121 |
122 | @Manual{R-faux,
123 | title = {faux: Simulation for Factorial Designs},
124 | author = {Lisa DeBruine},
125 | doi = {10.5281/zenodo.2669586},
126 | publisher = {Zenodo},
127 | year = {2020},
128 | month = {August},
129 | note = {R package version 0.0.1.2},
130 | url = {https://debruine.github.io/faux/},
131 | }
132 |
--------------------------------------------------------------------------------
/inst/app/www/custom.css:
--------------------------------------------------------------------------------
1 | .sidebar .box {
2 | background-color: inherit;
3 | }
4 |
5 | .sidebar .box-title {
6 | color: white;
7 | }
8 |
9 | pre.shiny-text-output {
10 | white-space: pre-wrap; /* css-3 */
11 | white-space: -moz-pre-wrap; /* Mozilla, since 1999 */
12 | white-space: -pre-wrap; /* Opera 4-6 */
13 | white-space: -o-pre-wrap; /* Opera 7 */
14 | word-wrap: break-word; /* Internet Explorer 5.5+ */
15 | }
16 |
17 | p { font-size: 120%; }
18 |
19 | #broom_output table tbody tr td:nth-child(3) { color: white; }
20 | ul.sidebar-menu label { width: 100%; padding: 1px 5px; }
21 |
22 | label[for="n_subj"] { background-color: inherit; }
23 | label[for="n_ingroup"] { background-color: inherit; }
24 | label[for="n_outgroup"] { background-color: inherit; }
25 | #broom_output table tbody tr:nth-child(1) td:nth-child(3) { background-color: #E50F45; }
26 | #broom_output table tbody tr:nth-child(2) td:nth-child(3) { background-color: #E76839; }
27 | #broom_output table tbody tr:nth-child(3) td:nth-child(3) { background-color: #B29700; }
28 | #broom_output table tbody tr:nth-child(4) td:nth-child(3) { background-color: #086F50; }
29 | #broom_output table tbody tr:nth-child(5) td:nth-child(3) { background-color: #015FA1; }
30 | #broom_output table tbody tr:nth-child(6) td:nth-child(3) { background-color: #702981; }
31 | #broom_output table tbody tr:nth-child(7) td:nth-child(3) { background-color: #C20FA7; }
32 |
33 | label[for="beta_0"] + span.irs span.irs-bar,
34 | label[for="beta_0"] + span.irs span.irs-bar-edge,
35 | label[for="beta_0_"] + span.irs span.irs-bar,
36 | label[for="beta_0_"] + span.irs span.irs-bar-edge
37 | {
38 | background-color: #E50F45;
39 | border-color: #E50F45;
40 | }
41 |
42 | label[for="beta_1"] + span.irs span.irs-bar,
43 | label[for="beta_1"] + span.irs span.irs-bar-edge,
44 | label[for="beta_1_"] + span.irs span.irs-bar,
45 | label[for="beta_1_"] + span.irs span.irs-bar-edge
46 | {
47 | background-color: #E76839;
48 | border-color: #E76839;
49 | }
50 |
51 | label[for="tau_0"] + span.irs span.irs-bar,
52 | label[for="tau_0"] + span.irs span.irs-bar-edge,
53 | label[for="tau_0_"] + span.irs span.irs-bar,
54 | label[for="tau_0_"] + span.irs span.irs-bar-edge
55 | {
56 | background-color: #B29700;
57 | border-color: #B29700;
58 | }
59 |
60 | label[for="tau_1"] + span.irs span.irs-bar,
61 | label[for="tau_1"] + span.irs span.irs-bar-edge,
62 | label[for="tau_1_"] + span.irs span.irs-bar,
63 | label[for="tau_1_"] + span.irs span.irs-bar-edge
64 | {
65 | background-color: #086F50;
66 | border-color: #086F50;
67 | }
68 |
69 | label[for="rho"] + span.irs span.irs-bar,
70 | label[for="rho"] + span.irs span.irs-bar-edge,
71 | label[for="rho_"] + span.irs span.irs-bar,
72 | label[for="rho_"] + span.irs span.irs-bar-edge
73 | {
74 | background-color: #015FA1;
75 | border-color: #015FA1;
76 | }
77 |
78 | label[for="omega_0"] + span.irs span.irs-bar,
79 | label[for="omega_0"] + span.irs span.irs-bar-edge,
80 | label[for="omega_0_"] + span.irs span.irs-bar,
81 | label[for="omega_0_"] + span.irs span.irs-bar-edge {
82 | background-color: #702981;
83 | border-color: #702981;
84 | }
85 |
86 | label[for="sigma"] + span.irs span.irs-bar,
87 | label[for="sigma"] + span.irs span.irs-bar-edge,
88 | label[for="sigma_"] + span.irs span.irs-bar,
89 | label[for="sigma_"] + span.irs span.irs-bar-edge {
90 | background-color: #C20FA7;
91 | border-color: #C20FA7;
92 | }
93 |
94 | label[for="n_subj"] + span.irs span.irs-bar,
95 | label[for="n_subj"] + span.irs span.irs-bar-edge,
96 | label[for="n_subj_"] + span.irs span.irs-bar,
97 | label[for="n_subj_"] + span.irs span.irs-bar-edge {
98 | background-color: #F5AABF;
99 | border-color: #F5AABF;
100 | }
101 |
102 | label[for="n_ingroup"] + span.irs span.irs-bar,
103 | label[for="n_ingroup"] + span.irs span.irs-bar-edge,
104 | label[for="n_ingroup_"] + span.irs span.irs-bar,
105 | label[for="n_ingroup_"] + span.irs span.irs-bar-edge,
106 | label[for="n_outgroup"] + span.irs span.irs-bar,
107 | label[for="n_outgroup"] + span.irs span.irs-bar-edge,
108 | label[for="n_outgroup_"] + span.irs span.irs-bar,
109 | label[for="n_outgroup_"] + span.irs span.irs-bar-edge{
110 | background-color: #64D2E0;
111 | border-color: #64D2E0;
112 | }
113 |
114 | #resim, #reset { display: inline-block; width: 40%;}
115 |
116 |
117 | .RT, .beta_0, .beta_1, .T0s, .T1s, .rho, .O0i, .err, .X1, .n_subj, .n_item {
118 | background-color: transparent;
119 | color: black;
120 | border-bottom: 2px solid transparent;
121 | border-radius: 0;
122 | }
123 | .beta_0 { border-color: #E50F45; }
124 | .beta_1 { border-color: #E76839; }
125 | .T0s { border-color: #B29700; }
126 | .T1s { border-color: #086F50; }
127 | .rho { border-color: #3B5CA7; }
128 | .O0i { border-color: #702981; }
129 | .err { border-color: #C20FA7; }
130 | .n_subj { border-color: #F5AABF; }
131 | .n_item { border-color: #64D2E0; }
132 |
133 | #lmer_output {
134 | font-family: Courier, monospace;
135 | }
--------------------------------------------------------------------------------
/docs/bootstrap-toc.js:
--------------------------------------------------------------------------------
1 | /*!
2 | * Bootstrap Table of Contents v0.4.1 (http://afeld.github.io/bootstrap-toc/)
3 | * Copyright 2015 Aidan Feldman
4 | * Licensed under MIT (https://github.com/afeld/bootstrap-toc/blob/gh-pages/LICENSE.md) */
5 | (function() {
6 | 'use strict';
7 |
8 | window.Toc = {
9 | helpers: {
10 | // return all matching elements in the set, or their descendants
11 | findOrFilter: function($el, selector) {
12 | // http://danielnouri.org/notes/2011/03/14/a-jquery-find-that-also-finds-the-root-element/
13 | // http://stackoverflow.com/a/12731439/358804
14 | var $descendants = $el.find(selector);
15 | return $el.filter(selector).add($descendants).filter(':not([data-toc-skip])');
16 | },
17 |
18 | generateUniqueIdBase: function(el) {
19 | var text = $(el).text();
20 | var anchor = text.trim().toLowerCase().replace(/[^A-Za-z0-9]+/g, '-');
21 | return anchor || el.tagName.toLowerCase();
22 | },
23 |
24 | generateUniqueId: function(el) {
25 | var anchorBase = this.generateUniqueIdBase(el);
26 | for (var i = 0; ; i++) {
27 | var anchor = anchorBase;
28 | if (i > 0) {
29 | // add suffix
30 | anchor += '-' + i;
31 | }
32 | // check if ID already exists
33 | if (!document.getElementById(anchor)) {
34 | return anchor;
35 | }
36 | }
37 | },
38 |
39 | generateAnchor: function(el) {
40 | if (el.id) {
41 | return el.id;
42 | } else {
43 | var anchor = this.generateUniqueId(el);
44 | el.id = anchor;
45 | return anchor;
46 | }
47 | },
48 |
49 | createNavList: function() {
50 | return $('<ul class="nav"></ul>');
51 | },
52 |
53 | createChildNavList: function($parent) {
54 | var $childList = this.createNavList();
55 | $parent.append($childList);
56 | return $childList;
57 | },
58 |
59 | generateNavEl: function(anchor, text) {
60 | var $a = $('<a></a>');
61 | $a.attr('href', '#' + anchor);
62 | $a.text(text);
63 | var $li = $('<li></li>');
64 | $li.append($a);
65 | return $li;
66 | },
67 |
68 | generateNavItem: function(headingEl) {
69 | var anchor = this.generateAnchor(headingEl);
70 | var $heading = $(headingEl);
71 | var text = $heading.data('toc-text') || $heading.text();
72 | return this.generateNavEl(anchor, text);
73 | },
74 |
75 | // Find the first heading level (`<h1>`, then `<h2>`, etc.) that has more than one element. Defaults to 1 (for `<h1>`).
76 | getTopLevel: function($scope) {
77 | for (var i = 1; i <= 6; i++) {
78 | var $headings = this.findOrFilter($scope, 'h' + i);
79 | if ($headings.length > 1) {
80 | return i;
81 | }
82 | }
83 |
84 | return 1;
85 | },
86 |
87 | // returns the elements for the top level, and the next below it
88 | getHeadings: function($scope, topLevel) {
89 | var topSelector = 'h' + topLevel;
90 |
91 | var secondaryLevel = topLevel + 1;
92 | var secondarySelector = 'h' + secondaryLevel;
93 |
94 | return this.findOrFilter($scope, topSelector + ',' + secondarySelector);
95 | },
96 |
97 | getNavLevel: function(el) {
98 | return parseInt(el.tagName.charAt(1), 10);
99 | },
100 |
101 | populateNav: function($topContext, topLevel, $headings) {
102 | var $context = $topContext;
103 | var $prevNav;
104 |
105 | var helpers = this;
106 | $headings.each(function(i, el) {
107 | var $newNav = helpers.generateNavItem(el);
108 | var navLevel = helpers.getNavLevel(el);
109 |
110 | // determine the proper $context
111 | if (navLevel === topLevel) {
112 | // use top level
113 | $context = $topContext;
114 | } else if ($prevNav && $context === $topContext) {
115 | // create a new level of the tree and switch to it
116 | $context = helpers.createChildNavList($prevNav);
117 | } // else use the current $context
118 |
119 | $context.append($newNav);
120 |
121 | $prevNav = $newNav;
122 | });
123 | },
124 |
125 | parseOps: function(arg) {
126 | var opts;
127 | if (arg.jquery) {
128 | opts = {
129 | $nav: arg
130 | };
131 | } else {
132 | opts = arg;
133 | }
134 | opts.$scope = opts.$scope || $(document.body);
135 | return opts;
136 | }
137 | },
138 |
139 | // accepts a jQuery object, or an options object
140 | init: function(opts) {
141 | opts = this.helpers.parseOps(opts);
142 |
143 | // ensure that the data attribute is in place for styling
144 | opts.$nav.attr('data-toggle', 'toc');
145 |
146 | var $topContext = this.helpers.createChildNavList(opts.$nav);
147 | var topLevel = this.helpers.getTopLevel(opts.$scope);
148 | var $headings = this.helpers.getHeadings(opts.$scope, topLevel);
149 | this.helpers.populateNav($topContext, topLevel, $headings);
150 | }
151 | };
152 |
153 | $(function() {
154 | $('nav[data-toggle="toc"]').each(function(i, el) {
155 | var $nav = $(el);
156 | Toc.init($nav);
157 | });
158 | });
159 | })();
160 |
--------------------------------------------------------------------------------
/inst/paper/bibliography.bib:
--------------------------------------------------------------------------------
1 | @article{matuschek_et_al_2017,
2 | title={Balancing Type {I} error and power in linear mixed models},
3 | author={Matuschek, Hannes and Kliegl, Reinhold and Vasishth, Shravan and Baayen, Harald and Bates, Douglas},
4 | journal={Journal of Memory and Language},
5 | volume={94},
6 | pages={305--315},
7 | year={2017},
8 | publisher={Elsevier}
9 | }
11 |
12 | @article{barr_2013,
13 | title={Random effects structure for testing interactions in linear mixed-effects models},
14 | author={Barr, Dale J},
15 | journal={{Frontiers in Psychology}},
16 | volume={4},
17 | pages={328},
18 | year={2013}
19 | }
20 |
21 | @article{baayen_davidson_bates_2008,
22 | author = {Baayen, R. H. and Davidson, D. J. and Bates, D. M.},
23 | journal = {Journal of Memory and Language},
24 | pages = {390--412},
25 | title = {Mixed-effects modeling with crossed random effects for subjects and items},
26 | volume = {59},
27 | year = {2008}
28 | }
29 |
30 | @incollection{barr_2018,
31 | title={{Generalizing over encounters: Statistical and theoretical considerations}},
32 | author={Barr, Dale J},
33 | year={2018},
34 | editor={S.-A. Rueschemeyer and M. G. Gaskell},
35 | booktitle={Oxford Handbook of Psycholinguistics},
36 | publisher={Oxford University Press}
37 | }
38 |
39 | @article{barr_et_al_2013,
40 | title={{Random effects structure for confirmatory hypothesis testing: Keep it maximal}},
41 | author={Barr, Dale J and Levy, Roger and Scheepers, Christoph and Tily, Harry J},
42 | journal={{Journal of Memory and Language}},
43 | volume={68},
44 | number={3},
45 | pages={255--278},
46 | year={2013}
47 | }
48 |
49 | @article{bedny_aguirre_thompson-schill_2007,
50 | title={Item analysis in functional magnetic resonance imaging},
51 | author={Bedny, Marina and Aguirre, Geoffrey K and Thompson-Schill, Sharon L},
52 | journal={Neuroimage},
53 | volume={35},
54 | number={3},
55 | pages={1093--1102},
56 | year={2007},
57 | publisher={Elsevier}
58 | }
59 |
60 |
61 | @article{judd_westfall_kenny_2012,
62 | title={{Treating stimuli as a random factor in social psychology: A new and comprehensive solution to a pervasive but largely ignored problem.}},
63 | author={Judd, Charles M and Westfall, Jacob and Kenny, David A},
64 | journal={{Journal of Personality and Social Psychology}},
65 | volume={103},
66 | pages={54},
67 | year={2012}
68 | }
69 |
70 |
71 | @article{locker_hoffman_bovaird_2007,
72 | author = {Locker, Lawrence and Hoffman, Lesa and Bovaird, James},
73 | journal = {Behavior Research Methods},
74 | pages = {723--730},
75 | title = {On the use of multilevel modeling as an alternative to items analysis in psycholinguistic research},
76 | volume = {39},
77 | year = {2007}
78 | }
79 |
80 |
81 | @article{clark_1973,
82 | title={{The language-as-fixed-effect fallacy: A critique of language statistics in psychological research}},
83 | author={Clark, Herbert H},
84 | journal={{Journal of Verbal Learning and Verbal Behavior}},
85 | volume={12},
86 | pages={335--359},
87 | year={1973},
88 | publisher={Elsevier}
89 | }
90 |
91 | @article{coleman_1964,
92 | title={Generalizing to a language population},
93 | author={Coleman, Edmund B},
94 | journal={Psychological Reports},
95 | volume={14},
96 | pages={219--226},
97 | year={1964}
98 | }
99 |
100 | @article{forster_dickinson_1976,
101 | author = {Forster, K. and Dickinson, R.},
102 | journal = {{Journal of Verbal Learning and Verbal Behavior}},
103 | pages = {135--142},
104 | title = {More on the language-as-fixed-effect fallacy: Monte Carlo estimates of error rates for {$F_1$}, {$F_2$}, {$F'$}, and min {$F'$}},
105 | volume = {15},
106 | year = {1976}
107 | }
108 |
109 | @article{luke-2017,
110 | title={Evaluating significance in linear mixed-effects models in R},
111 | author={Luke, Steven G},
112 | journal={{Behavior Research Methods}},
113 | volume={49},
114 | number={4},
115 | pages={1494--1502},
116 | year={2017},
117 | publisher={Springer}
118 | }
119 |
120 | @article{westfall_yarkoni_2016,
121 | title={{Fixing the stimulus-as-fixed-effect fallacy in task fMRI}},
122 | author={Westfall, Jacob and Nichols, Thomas E and Yarkoni, Tal},
123 | journal={{Wellcome Open Research}},
124 | volume={1},
125 | year={2016},
126 | publisher={The Wellcome Trust}
127 | }
128 |
129 | @article{westfall_2014,
130 | author = {Westfall, Jacob and Kenny, David A. and Judd, Charles M.},
131 | title = {Statistical power and optimal design in experiments in which samples of participants respond to samples of stimuli},
132 | journal = {Journal of Experimental Psychology: General},
133 | volume = {143},
134 | number = {5},
135 | pages = {2020--2045},
136 | year = {2014},
137 | doi = {}
138 | }
139 |
140 | @article{yarkoni-GC,
141 | title={The generalizability crisis},
142 | author={Yarkoni, Tal},
143 | year={2019},
144 | publisher={PsyArXiv},
145 | URL = {https://psyarxiv.com/jqw35}
146 | }
147 |
148 | @article{TOSTtutorial,
149 | author = {Daniël Lakens and Anne M. Scheel and Peder M. Isager},
150 | title = {Equivalence Testing for Psychological Research: A Tutorial},
151 | journal = {Advances in Methods and Practices in Psychological Science},
152 | volume = {1},
153 | number = {2},
154 | pages = {259--269},
155 | year = {2018},
156 | doi = {10.1177/2515245918770963},
157 | URL = {https://doi.org/10.1177/2515245918770963}
158 | }
159 |
160 | @book{wickham-advr,
161 | title={{Advanced R}},
162 | author={Wickham, Hadley},
163 | year={2019},
164 | publisher={CRC press},
165 | URL = {http://adv-r.had.co.nz/}
166 | }
--------------------------------------------------------------------------------
/vignettes/bibliography.bib:
--------------------------------------------------------------------------------
1 | @article{matuschek_et_al_2017,
2 | title={Balancing Type {I} error and power in linear mixed models},
3 | author={Matuschek, Hannes and Kliegl, Reinhold and Vasishth, Shravan and Baayen, Harald and Bates, Douglas},
4 | journal={Journal of Memory and Language},
5 | volume={94},
6 | pages={305--315},
7 | year={2017},
8 | publisher={Elsevier}
9 | }
11 |
12 | @article{barr_2013,
13 | title={Random effects structure for testing interactions in linear mixed-effects models},
14 | author={Barr, Dale J},
15 | journal={{Frontiers in Psychology}},
16 | volume={4},
17 | pages={328},
18 | year={2013}
19 | }
20 |
21 | @article{baayen_davidson_bates_2008,
22 | author = {Baayen, R. H. and Davidson, D. J. and Bates, D. M.},
23 | journal = {Journal of Memory and Language},
24 | pages = {390--412},
25 | title = {Mixed-effects modeling with crossed random effects for subjects and items},
26 | volume = {59},
27 | year = {2008}
28 | }
29 |
30 | @incollection{barr_2018,
31 | title={{Generalizing over encounters: Statistical and theoretical considerations}},
32 | author={Barr, Dale J},
33 | year={2018},
34 | editor={S.-A. Rueschemeyer and M. G. Gaskell},
35 | booktitle={Oxford Handbook of Psycholinguistics},
36 | publisher={Oxford University Press}
37 | }
38 |
39 | @article{barr_et_al_2013,
40 | title={{Random effects structure for confirmatory hypothesis testing: Keep it maximal}},
41 | author={Barr, Dale J and Levy, Roger and Scheepers, Christoph and Tily, Harry J},
42 | journal={{Journal of Memory and Language}},
43 | volume={68},
44 | number={3},
45 | pages={255--278},
46 | year={2013}
47 | }
48 |
49 | @article{bedny_aguirre_thompson-schill_2007,
50 | title={Item analysis in functional magnetic resonance imaging},
51 | author={Bedny, Marina and Aguirre, Geoffrey K and Thompson-Schill, Sharon L},
52 | journal={Neuroimage},
53 | volume={35},
54 | number={3},
55 | pages={1093--1102},
56 | year={2007},
57 | publisher={Elsevier}
58 | }
59 |
60 |
61 | @article{judd_westfall_kenny_2012,
62 | title={{Treating stimuli as a random factor in social psychology: A new and comprehensive solution to a pervasive but largely ignored problem.}},
63 | author={Judd, Charles M and Westfall, Jacob and Kenny, David A},
64 | journal={{Journal of Personality and Social Psychology}},
65 | volume={103},
66 | pages={54},
67 | year={2012}
68 | }
69 |
70 |
71 | @article{locker_hoffman_bovaird_2007,
72 | author = {Locker, Lawrence and Hoffman, Lesa and Bovaird, James},
73 | journal = {Behavior Research Methods},
74 | pages = {723--730},
75 | title = {On the use of multilevel modeling as an alternative to items analysis in psycholinguistic research},
76 | volume = {39},
77 | year = {2007}
78 | }
79 |
80 |
81 | @article{clark_1973,
82 | title={{The language-as-fixed-effect fallacy: A critique of language statistics in psychological research}},
83 | author={Clark, Herbert H},
84 | journal={{Journal of Verbal Learning and Verbal Behavior}},
85 | volume={12},
86 | pages={335--359},
87 | year={1973},
88 | publisher={Elsevier}
89 | }
90 |
91 | @article{coleman_1964,
92 | title={Generalizing to a language population},
93 | author={Coleman, Edmund B},
94 | journal={Psychological Reports},
95 | volume={14},
96 | pages={219--226},
97 | year={1964}
98 | }
99 |
100 | @article{forster_dickinson_1976,
101 | author = {Forster, K. and Dickinson, R.},
102 | journal = {{Journal of Verbal Learning and Verbal Behavior}},
103 | pages = {135--142},
104 | title = {More on the language-as-fixed-effect fallacy: Monte Carlo estimates of error rates for {$F_1$}, {$F_2$}, {$F'$}, and min {$F'$}},
105 | volume = {15},
106 | year = {1976}
107 | }
108 |
109 | @article{luke-2017,
110 | title={Evaluating significance in linear mixed-effects models in R},
111 | author={Luke, Steven G},
112 | journal={{Behavior Research Methods}},
113 | volume={49},
114 | number={4},
115 | pages={1494--1502},
116 | year={2017},
117 | publisher={Springer}
118 | }
119 |
120 | @article{westfall_yarkoni_2016,
121 | title={{Fixing the stimulus-as-fixed-effect fallacy in task fMRI}},
122 | author={Westfall, Jacob and Nichols, Thomas E and Yarkoni, Tal},
123 | journal={{Wellcome Open Research}},
124 | volume={1},
125 | year={2016},
126 | publisher={The Wellcome Trust}
127 | }
128 |
129 | @article{westfall_2014,
130 | author = {Westfall, Jacob and Kenny, David A. and Judd, Charles M.},
131 | title = {Statistical power and optimal design in experiments in which samples of participants respond to samples of stimuli},
132 | journal = {Journal of Experimental Psychology: General},
133 | volume = {143},
134 | number = {5},
135 |   pages = {2020--2045},
136 |   year = {2014}
138 | }
139 |
140 | @article{yarkoni-GC,
141 | title={The generalizability crisis},
142 | author={Yarkoni, Tal},
143 | year={2019},
144 | publisher={PsyArXiv},
145 | URL = {https://psyarxiv.com/jqw35}
146 | }
147 |
148 | @article{TOSTtutorial,
149 |   author = {Lakens, Daniël and Scheel, Anne M. and Isager, Peder M.},
150 | title = {Equivalence Testing for Psychological Research: A Tutorial},
151 | journal = {Advances in Methods and Practices in Psychological Science},
152 | volume = {1},
153 | number = {2},
154 | pages = {259--269},
155 | year = {2018},
156 | doi = {10.1177/2515245918770963},
157 |   url = {https://doi.org/10.1177/2515245918770963}
158 | }
159 |
160 | @book{wickham-advr,
161 | title={{Advanced R}},
162 | author={Wickham, Hadley},
163 | year={2019},
164 | publisher={CRC Press},
165 | URL = {http://adv-r.had.co.nz/}
166 | }
--------------------------------------------------------------------------------
/inst/app/R/misc_funcs.R:
--------------------------------------------------------------------------------
1 | cohen_d <- function(x, y, paired = TRUE) {
2 | # https://t.co/GmRX4y7gCl
3 | # adapted from https://github.com/Lakens/anchor_based_methods_SESOI/blob/master/effect_size_d_paired_function.R
4 |
5 | m_diff <- mean(y-x) # mean difference
6 | sd1 <- sd(x) #standard deviation of measurement 1
7 | sd2 <- sd(y) #standard deviation of measurement 2
8 | s_diff <- sd(y-x) #standard deviation of the difference scores
9 | N <- length(x) #number of observations of measurement 1
10 | N2 <- length(y) #number of observations of measurement 2
11 |
12 | # design-specific pooled standard deviation and
13 | # bias correction (unb)
14 | if (paired) {
15 | s_av <- sqrt((sd1^2+sd2^2)/2)
16 | unb <- 1-(3/(4*(N-1)-1))
17 | } else {
18 | ss_x <- sum((x - mean(x))^2)
19 | ss_y <- sum((y - mean(y))^2)
20 | Ns <- N + N2 - 2
21 | s_av <- sqrt((ss_x + ss_y)/Ns)
22 | unb <- 1-(3/(4*(N+N2)-9))
23 | }
24 |
25 | #Cohen's d_av, using s_av as standardizer
26 | d_av <- m_diff/s_av
27 | d_av_unb <- unb*d_av
28 |
29 | d_av_unb
30 | }
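# usage sketch (hypothetical data, for illustration only):
#   pre  <- rnorm(30, 100, 15)
#   post <- rnorm(30, 105, 15)
#   cohen_d(pre, post, paired = TRUE)  # bias-corrected d_av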
31 |
32 | ## Functions copied from the faux package
33 |
34 | rnorm_multi <- function (n, vars = NULL, mu = 0, sd = 1, r = 0, varnames = NULL,
35 | empirical = FALSE, as.matrix = FALSE) {
36 | if (!is.numeric(n) || n%%1 > 0 || n < 1) {
37 | stop("n must be an integer > 0")
38 | }
39 | if (!(empirical %in% c(TRUE, FALSE))) {
40 | stop("empirical must be TRUE or FALSE")
41 | }
42 | if (is.null(vars)) {
43 | if (!is.null(varnames)) {
44 | vars <- length(varnames)
45 | } else if (length(mu) > 1) {
46 | vars <- length(mu)
47 | } else if (length(sd) > 1) {
48 | vars <- length(sd)
49 | } else if (is.matrix(r)) {
50 | vars <- ncol(r)
51 | } else {
52 | stop("The number of variables (vars) was not explicitly set and can't be guessed from the input.")
53 | }
54 | }
55 | if (length(mu) == 1) {
56 | mu <- rep(mu, vars)
57 | } else if (length(mu) != vars) {
58 | stop("the length of mu must be 1 or vars")
59 | } else {
60 | mu <- as.matrix(mu) %>% as.vector()
61 | }
62 | if (length(sd) == 1) {
63 | sd <- rep(sd, vars)
64 | } else if (length(sd) != vars) {
65 | stop("the length of sd must be 1 or vars")
66 | } else {
67 | sd <- as.matrix(sd) %>% as.vector()
68 | }
69 |   if (n == 1 && empirical) {
70 | warning("When n = 1 and empirical = TRUE, returned data are equal to mu")
71 | mvn <- mu
72 | cor_mat <- r
73 | } else {
74 | cor_mat <- cormat(r, vars)
75 | sigma <- (sd %*% t(sd)) * cor_mat
76 | err <- "The correlated variables could not be generated."
77 | if (empirical) err <- paste(err, "Try increasing the N or setting empirical = FALSE.")
78 | p <- length(mu)
79 | if (!all(dim(sigma) == c(p, p))) stop(err)
80 | eS <- eigen(sigma, symmetric = TRUE)
81 | ev <- eS$values
82 | if (!all(ev >= -0.000001 * abs(ev[1L]))) stop(paste(err))
83 | X <- matrix(stats::rnorm(p * n), n)
84 | if (empirical) {
85 | X <- scale(X, TRUE, FALSE)
86 | X <- X %*% svd(X, nu = 0)$v
87 | X <- scale(X, FALSE, TRUE)
88 | }
89 | tryCatch({
90 | X <- drop(mu) + eS$vectors %*% diag(sqrt(pmax(ev,
91 | 0)), p) %*% t(X)
92 | }, error = function(e) {
93 | stop(err)
94 | })
95 | mvn <- t(X)
96 | }
97 | if (n == 1) mvn <- matrix(mvn, nrow = 1)
98 | if (length(varnames) == vars) {
99 | colnames(mvn) <- varnames
100 | } else if (!is.null(colnames(cor_mat))) {
101 | colnames(mvn) <- colnames(cor_mat)
102 | } else {
103 | colnames(mvn) <- make_id(ncol(mvn), "X")
104 | }
105 | if (as.matrix == TRUE)
106 | mvn
107 | else data.frame(mvn, check.names = FALSE)
108 | }
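# usage sketch (hypothetical values): simulate correlated by-subject
# intercepts and slopes, as in the app's data-generating process
#   re <- rnorm_multi(n = 100, mu = 0, sd = c(100, 40), r = 0.2,
#                     varnames = c("T_0s", "T_1s"))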
109 |
110 | cormat <- function (cors = 0, vars = 3) {
111 | if (is.numeric(cors) & length(cors) == 1) {
112 | if (cors >= -1 & cors <= 1) {
113 | cors = rep(cors, vars * (vars - 1)/2)
114 | } else {
115 | stop("cors must be between -1 and 1")
116 | }
117 | }
118 | if (vars == 1) {
119 | cor_mat <- matrix(1, nrow = 1)
120 | } else if (is.matrix(cors)) {
121 | if (!is.numeric(cors)) {
122 | stop("cors matrix not numeric")
123 | } else if (dim(cors)[1] != vars || dim(cors)[2] != vars) {
124 | stop("cors matrix wrong dimensions")
125 | } else if (sum(cors == t(cors)) != (nrow(cors)^2)) {
126 | stop("cors matrix not symmetric")
127 | } else {
128 | cor_mat <- cors
129 | }
130 | } else if (length(cors) == vars * vars) {
131 | cor_mat <- matrix(cors, vars)
132 | } else if (length(cors) == vars * (vars - 1)/2) {
133 | cor_mat <- cormat_from_triangle(cors)
134 | }
135 | if (!is_pos_def(cor_mat)) {
136 | stop("correlation matrix not positive definite")
137 | }
138 | return(cor_mat)
139 | }
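# e.g. cormat(0.5, 3) returns a 3 x 3 correlation matrix with 1s on
# the diagonal and 0.5 everywhere else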
140 |
141 | cormat_from_triangle <- function (cors) {
142 | vars <- ceiling(sqrt(2 * length(cors)))
143 | if (length(cors) != vars * (vars - 1)/2)
144 | stop("you don't have the right number of correlations")
145 | cor_mat <- matrix(nrow = vars, ncol = vars)
146 | upcounter = 1
147 | lowcounter = 1
148 | for (col in 1:vars) {
149 | for (row in 1:vars) {
150 | if (row == col) {
151 | cor_mat[row, col] = 1
152 | } else if (row > col) {
153 | cor_mat[row, col] = cors[lowcounter]
154 | lowcounter <- lowcounter + 1
155 | }
156 | }
157 | }
158 | for (row in 1:vars) {
159 | for (col in 1:vars) {
160 | if (row < col) {
161 | cor_mat[row, col] = cors[upcounter]
162 | upcounter <- upcounter + 1
163 | }
164 | }
165 | }
166 | cor_mat
167 | }
168 |
169 | is_pos_def <- function (cor_mat, tol = 0.00000001) {
170 | ev <- eigen(cor_mat, only.values = TRUE)$values
171 | sum(ev < tol) == 0
172 | }
173 |
174 | make_id <- function (n = 100, prefix = "S", digits = 0, suffix = "")
175 | {
176 | if (!is.numeric(n))
177 | stop("n must be numeric")
178 | if (length(n) == 1)
179 | n <- 1:n
180 | max_digits <- as.character(n) %>% nchar() %>% max() %>% max(digits)
181 | max_decimal <- as.character(n) %>% sub("(^\\d*\\.|^\\d*$)",
182 | "", .) %>% nchar() %>% max()
183 | fmt <- paste0(prefix, "%0", max_digits, ".", max_decimal,
184 | "f", suffix)
185 | sprintf(fmt, n)
186 | }
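# e.g. make_id(3) returns "S1" "S2" "S3"; make_id(10) zero-pads to a
# constant width: "S01" ... "S10"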
--------------------------------------------------------------------------------
/docs/404.html:
--------------------------------------------------------------------------------
[pkgdown-generated page; HTML markup stripped in this dump. Recoverable content — title: "Page not found (404) • lmem.sim"; body: "Content not found. Please use links in the navbar."]
--------------------------------------------------------------------------------
/docs/articles/index.html:
--------------------------------------------------------------------------------
[pkgdown-generated page; HTML markup stripped in this dump. Recoverable content — title: "Articles • lmem.sim" (index of the package vignettes).]
--------------------------------------------------------------------------------
/docs/reference/app.html:
--------------------------------------------------------------------------------
[pkgdown-generated reference page; HTML markup stripped in this dump. Recoverable content — title: "Launch Shiny App — app • lmem.sim"; description: "Launch a local copy of the shiny app that accompanies Simulating LMEM"; usage: app(...); argument ...: "arguments to pass to shiny::runApp".]
--------------------------------------------------------------------------------
/docs/reference/paper.html:
--------------------------------------------------------------------------------
[pkgdown-generated reference page; HTML markup stripped in this dump. Recoverable content — title: "Open Simulating LMEM paper — paper • lmem.sim"; description: "Open the manuscript"; usage: paper(type = c("pdf", "html", "Rmd")); argument type: "The type of manuscript to open (pdf, html, or Rmd)".]
--------------------------------------------------------------------------------
/docs/reference/index.html:
--------------------------------------------------------------------------------
[pkgdown-generated page; HTML markup stripped in this dump. Recoverable content — title: "Function reference • lmem.sim". All functions: app() "Launch Shiny App"; appendix() "Open Simulating LMEM appendices"; paper() "Open Simulating LMEM paper".]
--------------------------------------------------------------------------------
/docs/authors.html:
--------------------------------------------------------------------------------
[pkgdown-generated page; HTML markup stripped in this dump. Recoverable content — title: "Citation and Authors • lmem.sim".]

DeBruine, L. M., & Barr, D. J. (2021). Understanding mixed effects models through data simulation. Advances in Methods and Practices in Psychological Science. https://doi.org/10.1177/2515245920965119

@Article{,
  title = {Understanding mixed effects models through data simulation},
  author = {Lisa M. DeBruine and Dale J. Barr},
  doi = {10.1177/2515245920965119},
  journal = {Advances in Methods and Practices in Psychological Science},
  volume = {4},
  number = {1},
  pages = {2515245920965119},
  year = {2021},
  url = {https://debruine.github.io/lmem_sim/},
}
--------------------------------------------------------------------------------
/docs/reference/appendix.html:
--------------------------------------------------------------------------------
[pkgdown-generated reference page; HTML markup stripped in this dump. Recoverable content — title: "Open Simulating LMEM appendices — appendix • lmem.sim"; usage: appendix(i = c("1a", "1b", "1c", "2", "3a", "3b"), filename = NULL); argument i: "Which appendix to open"; argument filename: "Where to save the appendix".]
--------------------------------------------------------------------------------
/vignettes/appendix3a_binomial.Rmd:
--------------------------------------------------------------------------------
1 | ---
2 | title: 'Appendix 3a: Binomial Example'
3 | subtitle: "Understanding mixed effects models through data simulation"
4 | author: "Lisa M. DeBruine & Dale J. Barr"
5 | output: rmarkdown::html_vignette
6 | vignette: >
7 | %\VignetteIndexEntry{Appendix 3a: Binomial Example}
8 | %\VignetteEngine{knitr::rmarkdown}
9 | %\VignetteEncoding{UTF-8}
10 | ---
11 |
12 | ```{r setup, include = FALSE}
13 | knitr::opts_chunk$set(
14 | echo = TRUE,
15 | warning = FALSE,
16 | message = FALSE,
17 | fig.width = 8,
18 | fig.height = 5,
19 | out.width = "100%"
20 | )
21 | ```
22 |
23 | [Download the .Rmd for this example](https://github.com/debruine/lmem_sim/blob/master/vignettes/appendix3a_binomial.Rmd)
24 |
25 | ## Simulating binomial data with crossed random factors
26 |
27 | To give an overview of the simulation task, we will simulate data from a design with crossed random factors of subjects and stimuli, fit a model to the simulated data, and then try to recover the parameter values we put in from the output. In this hypothetical study, subjects classify the emotional expressions of faces as quickly as possible, and we use accuracy (correct/incorrect) as the primary dependent variable. The faces are of two types: either from the subject's ingroup or from an outgroup. For simplicity, we further assume that each face appears only once in the stimulus set. The key question is whether there is any difference in classification accuracy across the type of face.
28 |
29 | The important parts of the design are:
30 |
31 | * Random factor 1: subjects (associated variables will be prefixed by T, $\tau$, or `tau`)
32 | * Random factor 2: faces (associated variables will be prefixed by O, $\omega$, or `omega`)
33 | * Fixed factor 1: category (level = ingroup, outgroup)
34 | * within subject: subjects see both ingroup and outgroup faces
35 | * between face: faces are either ingroup or outgroup
36 |
37 | ### Required software
38 |
39 | ```{r, message=FALSE}
40 | # load required packages
41 | library("lme4") # model specification / estimation
42 | library("afex") # anova and deriving p-values from lmer
43 | library("broom.mixed") # extracting data from model fits
44 | library("faux") # data simulation
45 | library("tidyverse") # data wrangling and visualisation
46 |
47 | # ensure this script returns the same results on each run
48 | set.seed(8675309)
49 | faux_options(verbose = FALSE)
50 | ```
51 |
52 | This example presents a simulation for a binomial logistic mixed regression. Part of the process involves converting between probabilities and their log odds (logits). The functions below convert in each direction, and the plot shows the mapping.
53 |
54 | ```{r fig.width = 8, fig.height = 3}
55 |
56 | logit <- function(x) { log(x / (1 - x)) }
57 | inv_logit <- function(x) { 1 / (1 + exp(-x)) }
58 |
59 | data.frame(
60 | prob = seq(0,1,.01)
61 | ) %>%
62 | mutate(logit = logit(prob)) %>%
63 | ggplot(aes(prob, logit)) +
64 | geom_point()
65 |
66 | ```
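
As a quick check of the two functions: a probability of .5 corresponds to a log odds of 0, and the two transformations invert each other.

```{r}
logit(0.5)              # 0: even odds
inv_logit(logit(0.75))  # recovers 0.75
```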
67 |
68 | ### Data simulation function
69 |
70 | The data-generating process is slightly different for binomial logistic regression. The random effects and their correlations are set up the same way as for a Gaussian model (you'll need some pilot data to estimate reasonable parameters), but we don't need a residual error term: trial-level variability comes from the Bernoulli sampling itself.
71 |
72 |
73 | ```{r}
74 |
75 | # set up the custom data simulation function
76 | my_bin_data <- function(
77 | n_subj = 100, # number of subjects
78 | n_ingroup = 25, # number of faces in ingroup
79 | n_outgroup = 25, # number of faces in outgroup
80 | beta_0 = 0, # intercept
81 | beta_1 = 0, # effect of category
82 | omega_0 = 1, # by-item random intercept sd
83 | tau_0 = 1, # by-subject random intercept sd
84 | tau_1 = 1, # by-subject random slope sd
85 | rho = 0 # correlation between intercept and slope
86 | ) {
87 | # simulate a sample of items
88 | items <- data.frame(
89 | item_id = 1:(n_ingroup + n_outgroup),
90 | category = rep(c("ingroup", "outgroup"),
91 | c(n_ingroup, n_outgroup)),
92 | O_0i = rnorm(n_ingroup + n_outgroup, 0, omega_0)
93 | )
94 |
95 | # effect code category
96 | items$X_i <- recode(items$category,
97 | "ingroup" = -0.5,
98 | "outgroup" = 0.5)
99 |
100 | # simulate a sample of subjects
101 | subjects <- faux::rnorm_multi(
102 | n = n_subj,
103 | mu = 0,
104 | sd = c(tau_0, tau_1),
105 | r = rho,
106 | varnames = c("T_0s", "T_1s")
107 | )
108 | subjects$subj_id <- 1:n_subj
109 |
110 | # cross subject and item IDs
111 | crossing(subjects, items) %>%
112 | mutate(
113 |       # calculate the linear predictor (log odds); no residual term needed
114 |       Y = beta_0 + T_0s + O_0i + (beta_1 + T_1s) * X_i,
115 |       pr = inv_logit(Y), # transform log odds to probability of getting 1
116 |       Y_bin = rbinom(nrow(.), 1, pr) # sample from a Bernoulli distribution
117 | ) %>%
118 | select(subj_id, item_id, category, X_i, Y, Y_bin)
119 | }
120 | ```
121 |
122 |
123 | ```{r}
124 | dat_sim <- my_bin_data()
125 | head(dat_sim)
126 | ```
127 |
128 | ### Power function
129 |
130 | ```{r}
131 | single_run <- function(filename = NULL, ...) {
132 |   # ... is a shortcut that forwards any arguments to my_bin_data()
133 | dat_sim <- my_bin_data(...)
134 | mod_sim <- glmer(Y_bin ~ 1 + X_i + (1 | item_id) + (1 + X_i | subj_id),
135 | data = dat_sim, family = "binomial")
136 |
137 | sim_results <- broom.mixed::tidy(mod_sim)
138 |
139 | # append the results to a file if filename is set
140 | if (!is.null(filename)) {
141 | append <- file.exists(filename) # append if the file exists
142 | write_csv(sim_results, filename, append = append)
143 | }
144 |
145 | # return the tidy table
146 | sim_results
147 | }
148 |
149 | ```
150 |
151 | ```{r}
152 | # run one model with default parameters
153 | single_run()
154 | ```
155 |
156 | The following function converts the probability of getting a 1 in each of the two category levels into values of beta_0 (the mean log odds) and beta_1 (the difference in log odds between the levels). You will need to work out a custom conversion function like this for each design, or estimate the fixed-effect parameters from analysis of pilot data.
157 |
158 | ```{r}
159 | prob2param <- function(a = 0, b = 0) {
160 | list(
161 | beta_0 = (logit(a) + logit(b))/2,
162 | beta_1 = logit(a) - logit(b)
163 | )
164 | }
165 | ```
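
As a quick check, the cell probabilities of .4 and .6 used in the simulations below give an intercept of 0 on the log-odds scale and a slope of logit(.4) - logit(.6), approximately -0.81:

```{r}
prob2param(.4, .6)
```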
166 |
167 | To get an accurate estimate of power, you need to run the simulation many times. We use 20 replications here as an example because the analysis is very slow, but the more replications you run, the more accurate your results will be. How many you need depends on the specifics of your analysis, but we recommend at least 1000 replications.
168 |
169 | ```{r}
170 | # run simulations and save to a file on each rep
171 | filename <- "sims/binomial.csv"
172 | reps <- 20
173 | b <- prob2param(.4, .6)
174 | tau_0 <- 1
175 | tau_1 <- 1
176 | omega_0 <- 1
177 | rho <- 0.5
178 |
179 | if (!file.exists(filename)) {
180 | # run simulations and save to a file
181 | sims <- purrr::map_df(1:reps, ~single_run(
182 | filename = filename,
183 | beta_0 = b$beta_0,
184 | beta_1 = b$beta_1,
185 | tau_0 = tau_0,
186 | tau_1 = tau_1,
187 | omega_0 = omega_0,
188 |     rho = rho)
189 | )
190 | }
191 |
192 | # read saved simulation data
193 | ct <- cols( # read group and term as ordered factors so rows keep a consistent order
194 |   group = col_factor(ordered = TRUE),
195 |   term = col_factor(ordered = TRUE))
196 | sims <- read_csv(filename, col_types = ct)
197 | ```
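
The next section focuses on recovering the simulation parameters, but power for the effect of category can also be computed from the saved runs. A minimal sketch, assuming an alpha level of .05:

```{r}
# power = proportion of runs where the fixed effect of category is significant
alpha <- 0.05
sims %>%
  filter(effect == "fixed", term == "X_i") %>%
  summarise(power = mean(p.value < alpha))
```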
198 |
199 |
200 | ### Calculate mean estimates and cell probabilities
201 |
202 | ```{r}
203 | est <- sims %>%
204 | group_by(group, term) %>%
205 | summarise(
206 | mean_estimate = mean(estimate),
207 | .groups = "drop"
208 | )
209 |
210 | int_est <- filter(est, is.na(group), term == "(Intercept)") %>%
211 | pull(mean_estimate)
212 | cat_est <- filter(est, is.na(group), term == "X_i") %>%
213 | pull(mean_estimate)
214 |
215 | pr0 <- inv_logit(int_est) %>% round(2)
216 | pr1_plus <- inv_logit(int_est + .5*cat_est) %>% round(2)
217 | pr1_minus <- inv_logit(int_est - .5*cat_est) %>% round(2)
218 |
219 | est %>%
220 | arrange(!is.na(group), group, term) %>%
221 | mutate(
222 | sim = c(b$beta_0, b$beta_1, omega_0, rho, tau_0, tau_1),
223 | prob = c(pr0, paste0(pr1_minus, ":", pr1_plus), rep(NA, 4))
224 | ) %>%
225 | mutate_if(is.numeric, round, 2)
226 | ```
227 |
228 |
--------------------------------------------------------------------------------
/inst/appendices/appendix3a_binomial.Rmd:
--------------------------------------------------------------------------------
[Duplicate file: /inst/appendices/appendix3a_binomial.Rmd is the copy of /vignettes/appendix3a_binomial.Rmd shipped with the installed package; its contents are identical to the vignette above and are omitted here.]
--------------------------------------------------------------------------------
/inst/app/intro_tab.R:
--------------------------------------------------------------------------------
1 | ### intro_tab ----
2 | intro_tab <- tabItem(
3 | tabName = "intro_tab",
4 | h3("Introduction"),
5 | p("This app is a companion to Understanding mixed effects models through data simulation by Lisa M. DeBruine and Dale J. Barr."),
6 | tags$a(href="https://psyarxiv.com/xp5cy", "Preprint on PsyArXiv"),
7 | span(" | "),
8 | tags$a(href="https://debruine.github.io/lmem_sim/articles/appendix1_example_code.html", "Example R code"),
9 | span(" | "),
10 | tags$a(href="https://github.com/debruine/lmem_sim/tree/master/inst/app", "Code for this app"),
11 |
12 |
13 | p("Set the parameters in the sidebar menu for a crossed design where raters (subjects) classify the emotional expression of faces (items) as fast as possible. Faces are either from an ingroup or an outgroup category (X_i). The hypothesis is that people will classify the emotions of ingroup faces more quickly than outgroup faces. Learn more about what these parameters mean below."),
14 |   HTML("<p>Click on <b>Simulating LMER</b> in the sidebar menu to view the output of the lmer summary and see how the parameters you specified affect the output. Click on <b>Compare ANOVA & LMER</b> to compare the results of the mixed effects model with by-subject and by-item aggregated ANOVA. Click on <b>Power & False Positives</b> to run a power analysis using your parameters and compare power and false positive rates between lmer and ANOVA.</p>"),
15 |
16 |   HTML("<p>RT<sub>si</sub> = &beta;<sub>0</sub> + T<sub>0s</sub> + O<sub>0i</sub> + (&beta;<sub>1</sub> + T<sub>1s</sub>) &times; X<sub>i</sub> + e<sub>si</sub></p>"),
26 |
27 | tabBox(width = 12,
28 | tabPanel("Data Generating Process",
29 |            HTML("<p>The response time for subject <i>s</i> on item <i>i</i> (RT<sub>si</sub>) is decomposed into:</p>"),
30 |            HTML("<ul>
31 |                  <li>an intercept &beta;<sub>0</sub> (population grand mean)</li>
32 |                  <li>a fixed slope &beta;<sub>1</sub> (effect of face category)</li>
33 |                  <li>a by-subject random intercept T<sub>0s</sub></li>
34 |                  <li>a by-subject random slope T<sub>1s</sub> (T<sub>0s</sub> and T<sub>1s</sub> are correlated with correlation rho)</li>
35 |                  <li>a by-item random intercept O<sub>0i</sub></li>
36 |                  <li>a trial-level residual e<sub>si</sub></li>
37 |                  <li>the numeric value of the predictor X<sub>i</sub> (ingroup = -0.5, outgroup = +0.5)</li>
38 |                  </ul>"),
40 |            HTML("<p>Our data-generating process is fully determined by seven parameters: two fixed effects (intercept beta_0 and slope beta_1), four parameters governing the random effects (tau_0, tau_1, rho, and omega_0), and one parameter governing the trial-level variance (sigma).</p>")
41 | ),
42 | tabPanel("Fixed Effects",
43 |            HTML("<p>The parameters &beta;<sub>0</sub> and &beta;<sub>1</sub> are <b>fixed effects</b>: they characterize properties of the population of encounters between subjects and stimuli. The grand mean, or intercept (beta_0), is the mean RT for a typical subject encountering a typical stimulus. The main effect of category, or slope (beta_1), is how much faster RT is for ingroup than outgroup faces, on average.</p>"),
44 |
45 | sliderInput("beta_0_", "intercept (beta_0)",
46 | min = 600, max = 1000, value = 800, step = 100),
47 | sliderInput("beta_1_", "effect of category (beta_1)",
48 | min = -200, max = 200, value = 50, step = 10),
49 |
50 |            HTML("<p>Our predictor is categorical, so we need to assign a numeric value to the levels <i>ingroup</i> and <i>outgroup</i>. In this app, we will use deviation coding, where ingroup = -0.5 and outgroup = +0.5. See coding categorical predictor variables in factorial designs for further discussion.</p>")
51 | ),
52 |
53 | tabPanel("Random Intercepts",
54 |
55 |            HTML("<p>Subjects are not identical in their response characteristics: some will be faster than average, and some slower. We can characterize the difference from the grand mean (beta_0) for each subject s in terms of a random effect T<sub>0s</sub>. In other words, we assign each subject a unique <b>random intercept</b>.</p>"),
56 |
57 | HTML("The actual values for T0s in our sampled dataset will depend on which subjects we happened to have sampled from their respective populations. Although the individual values will differ for each sample, we can set a fixed standard deviation (tau_0) for the population to use when we sample subjects.
"),
58 | sliderInput("tau_0_", "subject intercept SD (tau_0)",
59 | min = 0, max = 200, value = 100, step = 10),
60 |            HTML("<p>Likewise, it is unrealistic to assume that it is equally easy to categorize emotional expressions across all faces in the dataset; some will be easier than others. We incorporate this assumption by including by-item random intercepts O<sub>0i</sub>. Again, although the individual values will differ for each sample, we can set a fixed standard deviation (omega_0) for the population to use when we sample items.</p>"),
61 | sliderInput("omega_0_", "item intercept SD (omega_0)",
62 | min = 0, max = 200, value = 80, step = 10)
63 | ),
64 |
65 | tabPanel("Random Slopes",
66 |            HTML("<p>The random slope T<sub>1s</sub> is how much the effect of face category for subject s differs from the population mean effect beta_1. Again, we can set a fixed standard deviation (tau_1) for the population to use when we sample subjects.</p>"),
67 |
68 | sliderInput("tau_1_", "subject slope SD (tau_1)",
69 | min = 0, max = 200, value = 40, step = 10)
70 | ),
71 |
72 | tabPanel("Random Correlations",
73 |            HTML("<p>Note that we are sampling two random effects for each subject s: a random intercept T<sub>0s</sub> and a random slope T<sub>1s</sub>. It is possible for these values to be correlated, in which case we should not sample them independently. For instance, perhaps people who are faster than average overall (negative random intercept) also show a smaller-than-average effect of the ingroup/outgroup manipulation (negative random slope) due to allocating less attention to the task. We can capture this by allowing for a small correlation between the two factors, rho.</p>"),
74 | sliderInput("rho_", "subject intercept*slope correlation (rho)",
75 | min = -0.9, max = 0.9, value = 0.2, step = 0.1)
76 | ),
77 |
78 | tabPanel("Residual Error",
79 |            HTML("<p>Finally, we need to characterize the trial-level noise in the study (e<sub>si</sub>) in terms of its standard deviation, sigma.</p>"),
80 | sliderInput("sigma_", "residual SD (sigma)",
81 | min = 0, max = 400, value = 200, step = 10)
82 | ),
83 |
84 | tabPanel("Sample Size",
85 | p("Set the number of subjects and faces per group."),
86 | sliderInput("n_subj_", "number of subjects (n_subj)",
87 | min = 10, max = 200, value = 100, step = 10),
88 | sliderInput("n_ingroup_", "faces in the ingroup (n_ingroup)",
89 | min = 5, max = 50, value = 25, step = 5),
90 | sliderInput("n_outgroup_", "faces in the outgroup (n_outgroup)",
91 | min = 5, max = 50, value = 25, step = 5)
92 | )
93 | )
94 | )
95 |
--------------------------------------------------------------------------------
/docs/pkgdown.css:
--------------------------------------------------------------------------------
1 | /* Sticky footer */
2 |
3 | /**
4 | * Basic idea: https://philipwalton.github.io/solved-by-flexbox/demos/sticky-footer/
5 | * Details: https://github.com/philipwalton/solved-by-flexbox/blob/master/assets/css/components/site.css
6 | *
7 | * .Site -> body > .container
8 | * .Site-content -> body > .container .row
9 | * .footer -> footer
10 | *
11 | * Key idea seems to be to ensure that .container and __all its parents__
12 | * have height set to 100%
13 | *
14 | */
15 |
16 | html, body {
17 | height: 100%;
18 | }
19 |
20 | body {
21 | position: relative;
22 | }
23 |
24 | body > .container {
25 | display: flex;
26 | height: 100%;
27 | flex-direction: column;
28 | }
29 |
30 | body > .container .row {
31 | flex: 1 0 auto;
32 | }
33 |
34 | footer {
35 | margin-top: 45px;
36 | padding: 35px 0 36px;
37 | border-top: 1px solid #e5e5e5;
38 | color: #666;
39 | display: flex;
40 | flex-shrink: 0;
41 | }
42 | footer p {
43 | margin-bottom: 0;
44 | }
45 | footer div {
46 | flex: 1;
47 | }
48 | footer .pkgdown {
49 | text-align: right;
50 | }
51 | footer p {
52 | margin-bottom: 0;
53 | }
54 |
55 | img.icon {
56 | float: right;
57 | }
58 |
59 | img {
60 | max-width: 100%;
61 | }
62 |
63 | /* Fix bug in bootstrap (only seen in firefox) */
64 | summary {
65 | display: list-item;
66 | }
67 |
68 | /* Typographic tweaking ---------------------------------*/
69 |
70 | .contents .page-header {
71 | margin-top: calc(-60px + 1em);
72 | }
73 |
74 | dd {
75 | margin-left: 3em;
76 | }
77 |
78 | /* Section anchors ---------------------------------*/
79 |
80 | a.anchor {
81 | margin-left: -30px;
82 | display:inline-block;
83 | width: 30px;
84 | height: 30px;
85 | visibility: hidden;
86 |
87 | background-image: url(./link.svg);
88 | background-repeat: no-repeat;
89 | background-size: 20px 20px;
90 | background-position: center center;
91 | }
92 |
93 | .hasAnchor:hover a.anchor {
94 | visibility: visible;
95 | }
96 |
97 | @media (max-width: 767px) {
98 | .hasAnchor:hover a.anchor {
99 | visibility: hidden;
100 | }
101 | }
102 |
103 |
104 | /* Fixes for fixed navbar --------------------------*/
105 |
106 | .contents h1, .contents h2, .contents h3, .contents h4 {
107 | padding-top: 60px;
108 | margin-top: -40px;
109 | }
110 |
111 | /* Navbar submenu --------------------------*/
112 |
113 | .dropdown-submenu {
114 | position: relative;
115 | }
116 |
117 | .dropdown-submenu>.dropdown-menu {
118 | top: 0;
119 | left: 100%;
120 | margin-top: -6px;
121 | margin-left: -1px;
122 | border-radius: 0 6px 6px 6px;
123 | }
124 |
125 | .dropdown-submenu:hover>.dropdown-menu {
126 | display: block;
127 | }
128 |
129 | .dropdown-submenu>a:after {
130 | display: block;
131 | content: " ";
132 | float: right;
133 | width: 0;
134 | height: 0;
135 | border-color: transparent;
136 | border-style: solid;
137 | border-width: 5px 0 5px 5px;
138 | border-left-color: #cccccc;
139 | margin-top: 5px;
140 | margin-right: -10px;
141 | }
142 |
143 | .dropdown-submenu:hover>a:after {
144 | border-left-color: #ffffff;
145 | }
146 |
147 | .dropdown-submenu.pull-left {
148 | float: none;
149 | }
150 |
151 | .dropdown-submenu.pull-left>.dropdown-menu {
152 | left: -100%;
153 | margin-left: 10px;
154 | border-radius: 6px 0 6px 6px;
155 | }
156 |
157 | /* Sidebar --------------------------*/
158 |
159 | #pkgdown-sidebar {
160 | margin-top: 30px;
161 | position: -webkit-sticky;
162 | position: sticky;
163 | top: 70px;
164 | }
165 |
166 | #pkgdown-sidebar h2 {
167 | font-size: 1.5em;
168 | margin-top: 1em;
169 | }
170 |
171 | #pkgdown-sidebar h2:first-child {
172 | margin-top: 0;
173 | }
174 |
175 | #pkgdown-sidebar .list-unstyled li {
176 | margin-bottom: 0.5em;
177 | }
178 |
179 | /* bootstrap-toc tweaks ------------------------------------------------------*/
180 |
181 | /* All levels of nav */
182 |
183 | nav[data-toggle='toc'] .nav > li > a {
184 | padding: 4px 20px 4px 6px;
185 | font-size: 1.5rem;
186 | font-weight: 400;
187 | color: inherit;
188 | }
189 |
190 | nav[data-toggle='toc'] .nav > li > a:hover,
191 | nav[data-toggle='toc'] .nav > li > a:focus {
192 | padding-left: 5px;
193 | color: inherit;
194 | border-left: 1px solid #878787;
195 | }
196 |
197 | nav[data-toggle='toc'] .nav > .active > a,
198 | nav[data-toggle='toc'] .nav > .active:hover > a,
199 | nav[data-toggle='toc'] .nav > .active:focus > a {
200 | padding-left: 5px;
201 | font-size: 1.5rem;
202 | font-weight: 400;
203 | color: inherit;
204 | border-left: 2px solid #878787;
205 | }
206 |
207 | /* Nav: second level (shown on .active) */
208 |
209 | nav[data-toggle='toc'] .nav .nav {
210 | display: none; /* Hide by default, but at >768px, show it */
211 | padding-bottom: 10px;
212 | }
213 |
214 | nav[data-toggle='toc'] .nav .nav > li > a {
215 | padding-left: 16px;
216 | font-size: 1.35rem;
217 | }
218 |
219 | nav[data-toggle='toc'] .nav .nav > li > a:hover,
220 | nav[data-toggle='toc'] .nav .nav > li > a:focus {
221 | padding-left: 15px;
222 | }
223 |
224 | nav[data-toggle='toc'] .nav .nav > .active > a,
225 | nav[data-toggle='toc'] .nav .nav > .active:hover > a,
226 | nav[data-toggle='toc'] .nav .nav > .active:focus > a {
227 | padding-left: 15px;
228 | font-weight: 500;
229 | font-size: 1.35rem;
230 | }
231 |
232 | /* orcid ------------------------------------------------------------------- */
233 |
234 | .orcid {
235 | font-size: 16px;
236 | color: #A6CE39;
237 | /* margins are required by official ORCID trademark and display guidelines */
238 | margin-left:4px;
239 | margin-right:4px;
240 | vertical-align: middle;
241 | }
242 |
243 | /* Reference index & topics ----------------------------------------------- */
244 |
245 | .ref-index th {font-weight: normal;}
246 |
247 | .ref-index td {vertical-align: top; min-width: 100px}
248 | .ref-index .icon {width: 40px;}
249 | .ref-index .alias {width: 40%;}
250 | .ref-index-icons .alias {width: calc(40% - 40px);}
251 | .ref-index .title {width: 60%;}
252 |
253 | .ref-arguments th {text-align: right; padding-right: 10px;}
254 | .ref-arguments th, .ref-arguments td {vertical-align: top; min-width: 100px}
255 | .ref-arguments .name {width: 20%;}
256 | .ref-arguments .desc {width: 80%;}
257 |
258 | /* Nice scrolling for wide elements --------------------------------------- */
259 |
260 | table {
261 | display: block;
262 | overflow: auto;
263 | }
264 |
265 | /* Syntax highlighting ---------------------------------------------------- */
266 |
267 | pre {
268 | word-wrap: normal;
269 | word-break: normal;
270 | border: 1px solid #eee;
271 | }
272 |
273 | pre, code {
274 | background-color: #f8f8f8;
275 | color: #333;
276 | }
277 |
278 | pre code {
279 | overflow: auto;
280 | word-wrap: normal;
281 | white-space: pre;
282 | }
283 |
284 | pre .img {
285 | margin: 5px 0;
286 | }
287 |
288 | pre .img img {
289 | background-color: #fff;
290 | display: block;
291 | height: auto;
292 | }
293 |
294 | code a, pre a {
295 | color: #375f84;
296 | }
297 |
298 | a.sourceLine:hover {
299 | text-decoration: none;
300 | }
301 |
302 | .fl {color: #1514b5;}
303 | .fu {color: #000000;} /* function */
304 | .ch,.st {color: #036a07;} /* string */
305 | .kw {color: #264D66;} /* keyword */
306 | .co {color: #888888;} /* comment */
307 |
308 | .message { color: black; font-weight: bolder;}
309 | .error { color: orange; font-weight: bolder;}
310 | .warning { color: #6A0366; font-weight: bolder;}
311 |
312 | /* Clipboard --------------------------*/
313 |
314 | .hasCopyButton {
315 | position: relative;
316 | }
317 |
318 | .btn-copy-ex {
319 | position: absolute;
320 | right: 0;
321 | top: 0;
322 | visibility: hidden;
323 | }
324 |
325 | .hasCopyButton:hover button.btn-copy-ex {
326 | visibility: visible;
327 | }
328 |
329 | /* headroom.js ------------------------ */
330 |
331 | .headroom {
332 | will-change: transform;
333 | transition: transform 200ms linear;
334 | }
335 | .headroom--pinned {
336 | transform: translateY(0%);
337 | }
338 | .headroom--unpinned {
339 | transform: translateY(-100%);
340 | }
341 |
342 | /* mark.js ----------------------------*/
343 |
344 | mark {
345 | background-color: rgba(255, 255, 51, 0.5);
346 | border-bottom: 2px solid rgba(255, 153, 51, 0.3);
347 | padding: 1px;
348 | }
349 |
350 | /* vertical spacing after htmlwidgets */
351 | .html-widget {
352 | margin-bottom: 10px;
353 | }
354 |
355 | /* fontawesome ------------------------ */
356 |
357 | .fab {
358 | font-family: "Font Awesome 5 Brands" !important;
359 | }
360 |
361 | /* don't display links in code chunks when printing */
362 | /* source: https://stackoverflow.com/a/10781533 */
363 | @media print {
364 | code a:link:after, code a:visited:after {
365 | content: "";
366 | }
367 | }
368 |
--------------------------------------------------------------------------------
/vignettes/sims/binomial.csv:
--------------------------------------------------------------------------------
1 | effect,group,term,estimate,std.error,statistic,p.value
2 | fixed,NA,(Intercept),-0.2790543567783174,0.15044489465776653,-1.854860927073085,0.06361608887967492
3 | fixed,NA,X_i,-0.5522216533089404,0.2531985411845776,-2.1809827605064274,0.029184691614451672
4 | ran_pars,subj_id,sd__(Intercept),0.9166478855850864,NA,NA,NA
5 | ran_pars,subj_id,cor__(Intercept).X_i,0.48677493391671384,NA,NA,NA
6 | ran_pars,subj_id,sd__X_i,0.8484240630694556,NA,NA,NA
7 | ran_pars,item_id,sd__(Intercept),0.8092943607437812,NA,NA,NA
8 | fixed,NA,(Intercept),0.058349330107387465,0.18019801284784223,0.3238067345207473,0.7460843572641592
9 | fixed,NA,X_i,-1.1113817712326362,0.3110502561159185,-3.5729974477772477,3.5291820209701645e-4
10 | ran_pars,subj_id,sd__(Intercept),1.0620482428504974,NA,NA,NA
11 | ran_pars,subj_id,cor__(Intercept).X_i,0.43495512237683176,NA,NA,NA
12 | ran_pars,subj_id,sd__X_i,1.0952690822738052,NA,NA,NA
13 | ran_pars,item_id,sd__(Intercept),0.9963106328292758,NA,NA,NA
14 | fixed,NA,(Intercept),0.2897887720443473,0.19892913064453013,1.4567437715403069,0.14518712227685276
15 | fixed,NA,X_i,-0.5579545349781531,0.35419494756508,-1.5752752511401482,0.11519292401345298
16 | ran_pars,subj_id,sd__(Intercept),1.0477579596178144,NA,NA,NA
17 | ran_pars,subj_id,cor__(Intercept).X_i,0.48190283120150296,NA,NA,NA
18 | ran_pars,subj_id,sd__X_i,1.0539402091159416,NA,NA,NA
19 | ran_pars,item_id,sd__(Intercept),1.1681357704870772,NA,NA,NA
20 | fixed,NA,(Intercept),-0.18056432038500042,0.14355841980250234,-1.2577758980170457,0.20847281456319647
21 | fixed,NA,X_i,-0.941327043555318,0.23271269167978442,-4.045018072544989,5.231908790052255e-5
22 | ran_pars,subj_id,sd__(Intercept),0.9619777952790854,NA,NA,NA
23 | ran_pars,subj_id,cor__(Intercept).X_i,0.47005459777672876,NA,NA,NA
24 | ran_pars,subj_id,sd__X_i,0.9354880680360328,NA,NA,NA
25 | ran_pars,item_id,sd__(Intercept),0.7135152561910739,NA,NA,NA
26 | fixed,NA,(Intercept),-0.021572147137987486,0.1882310054537968,-0.11460464276849748,0.9087584995632025
27 | fixed,NA,X_i,-1.7504522772579376,0.32404046978401946,-5.401955744677987,6.591821517021169e-8
28 | ran_pars,subj_id,sd__(Intercept),1.1158532566630763,NA,NA,NA
29 | ran_pars,subj_id,cor__(Intercept).X_i,0.44994296409565165,NA,NA,NA
30 | ran_pars,subj_id,sd__X_i,1.1417198120158971,NA,NA,NA
31 | ran_pars,item_id,sd__(Intercept),1.0357556328485624,NA,NA,NA
32 | fixed,NA,(Intercept),0.09256767356463563,0.19454240533153064,0.47582260231071916,0.6342007659494275
33 | fixed,NA,X_i,-0.9948632011268881,0.34385716882012535,-2.893245484863832,0.0038128316566379172
34 | ran_pars,subj_id,sd__(Intercept),1.024404219855788,NA,NA,NA
35 | ran_pars,subj_id,cor__(Intercept).X_i,0.5040997107442479,NA,NA,NA
36 | ran_pars,subj_id,sd__X_i,0.938505318850399,NA,NA,NA
37 | ran_pars,item_id,sd__(Intercept),1.1408795599731847,NA,NA,NA
38 | fixed,NA,(Intercept),-0.2812568897014096,0.1966452285476631,-1.4302756887550834,0.15263790948761125
39 | fixed,NA,X_i,-0.8907708778532234,0.3494343118135254,-2.5491797678087798,0.010797662532245119
40 | ran_pars,subj_id,sd__(Intercept),1.0791370724084515,NA,NA,NA
41 | ran_pars,subj_id,cor__(Intercept).X_i,0.6522970969372482,NA,NA,NA
42 | ran_pars,subj_id,sd__X_i,1.1841191742762298,NA,NA,NA
43 | ran_pars,item_id,sd__(Intercept),1.1327049845820347,NA,NA,NA
44 | fixed,NA,(Intercept),-0.013096539605749073,0.1894666042028745,-0.06912320860369534,0.9448915474150642
45 | fixed,NA,X_i,-0.49511025218608573,0.32722730427618835,-1.5130468812229674,0.13026775090983167
46 | ran_pars,subj_id,sd__(Intercept),1.0508682968783938,NA,NA,NA
47 | ran_pars,subj_id,cor__(Intercept).X_i,0.5062400372134357,NA,NA,NA
48 | ran_pars,subj_id,sd__X_i,0.8778071775944949,NA,NA,NA
49 | ran_pars,item_id,sd__(Intercept),1.0866354513986556,NA,NA,NA
50 | fixed,NA,(Intercept),-0.22479812321407963,0.19874928088927502,-1.1310638318199333,0.25802822590689345
51 | fixed,NA,X_i,-0.908156529247558,0.3502592046385577,-2.5928127433073747,0.009519458624612195
52 | ran_pars,subj_id,sd__(Intercept),1.064435367211626,NA,NA,NA
53 | ran_pars,subj_id,cor__(Intercept).X_i,0.6221023897545939,NA,NA,NA
54 | ran_pars,subj_id,sd__X_i,0.9989285149152696,NA,NA,NA
55 | ran_pars,item_id,sd__(Intercept),1.1575389812884453,NA,NA,NA
56 | fixed,NA,(Intercept),0.008885731592084192,0.16593188956202648,0.05355047553268924,0.9572933146524119
57 | fixed,NA,X_i,-0.41638201920765217,0.2869544212769083,-1.4510388700575116,0.14676903687750173
58 | ran_pars,subj_id,sd__(Intercept),0.9941885859639187,NA,NA,NA
59 | ran_pars,subj_id,cor__(Intercept).X_i,0.5765737005647944,NA,NA,NA
60 | ran_pars,subj_id,sd__X_i,1.0851144391434566,NA,NA,NA
61 | ran_pars,item_id,sd__(Intercept),0.9075332582584981,NA,NA,NA
62 | fixed,NA,(Intercept),-0.005273363002535106,0.1795565672415611,-0.029368811642744002,0.9765704467736235
63 | fixed,NA,X_i,-0.6586402110968452,0.2901890255395078,-2.2696937276395195,0.023226172248957715
64 | ran_pars,subj_id,sd__(Intercept),1.1452858643507167,NA,NA,NA
65 | ran_pars,subj_id,cor__(Intercept).X_i,0.6098820906358986,NA,NA,NA
66 | ran_pars,subj_id,sd__X_i,0.8846625381446019,NA,NA,NA
67 | ran_pars,item_id,sd__(Intercept),0.9450787540363549,NA,NA,NA
68 | fixed,NA,(Intercept),0.11389054333055869,0.17466135153861354,0.6520649378198596,0.5143592816213459
69 | fixed,NA,X_i,-0.5230336108142616,0.30456663185739613,-1.7173043797495053,0.08592357222650121
70 | ran_pars,subj_id,sd__(Intercept),1.0034800268572788,NA,NA,NA
71 | ran_pars,subj_id,cor__(Intercept).X_i,0.4659251917237009,NA,NA,NA
72 | ran_pars,subj_id,sd__X_i,1.0501669364946622,NA,NA,NA
73 | ran_pars,item_id,sd__(Intercept),0.9805886805906191,NA,NA,NA
74 | fixed,NA,(Intercept),0.30507238299594996,0.19970207255386588,1.5276375407353993,0.12660255559673175
75 | fixed,NA,X_i,-1.2282371305891648,0.36193006434403635,-3.3935758633790956,6.898644755343256e-4
76 | ran_pars,subj_id,sd__(Intercept),1.0015631414262507,NA,NA,NA
77 | ran_pars,subj_id,cor__(Intercept).X_i,0.45523720808205853,NA,NA,NA
78 | ran_pars,subj_id,sd__X_i,1.0763521795614037,NA,NA,NA
79 | ran_pars,item_id,sd__(Intercept),1.1921613425999726,NA,NA,NA
80 | fixed,NA,(Intercept),-0.18255827575717665,0.14985863275609798,-1.2182032653020323,0.22314673912254404
81 | fixed,NA,X_i,-0.6881631570866568,0.25360843167195085,-2.713486900060223,0.006657920058708701
82 | ran_pars,subj_id,sd__(Intercept),0.9387412680941728,NA,NA,NA
83 | ran_pars,subj_id,cor__(Intercept).X_i,0.4872317231761919,NA,NA,NA
84 | ran_pars,subj_id,sd__X_i,0.9874589835719453,NA,NA,NA
85 | ran_pars,item_id,sd__(Intercept),0.7901088959185377,NA,NA,NA
86 | fixed,NA,(Intercept),0.017005088311317525,0.18125268314901097,0.09381978802121979,0.9252523124489418
87 | fixed,NA,X_i,-0.708680426446844,0.31932969687106166,-2.2192750420359233,0.026468018485061407
88 | ran_pars,subj_id,sd__(Intercept),1.030260552518965,NA,NA,NA
89 | ran_pars,subj_id,cor__(Intercept).X_i,0.49257539123730504,NA,NA,NA
90 | ran_pars,subj_id,sd__X_i,1.1404566858952971,NA,NA,NA
91 | ran_pars,item_id,sd__(Intercept),1.024254792936185,NA,NA,NA
92 | fixed,NA,(Intercept),-0.0755597101762786,0.1693534902535769,-0.4461656506939497,0.6554775930298791
93 | fixed,NA,X_i,-1.1505478206358095,0.2932133255721912,-3.9239274626778053,8.711697587805981e-5
94 | ran_pars,subj_id,sd__(Intercept),0.9666621087971922,NA,NA,NA
95 | ran_pars,subj_id,cor__(Intercept).X_i,0.349520674441369,NA,NA,NA
96 | ran_pars,subj_id,sd__X_i,0.930342901065913,NA,NA,NA
97 | ran_pars,item_id,sd__(Intercept),0.9506953203903903,NA,NA,NA
98 | fixed,NA,(Intercept),0.12774566493826522,0.17151942296478842,0.7447883320158464,0.45639968963576266
99 | fixed,NA,X_i,-0.7772197416324267,0.30264804892813424,-2.5680646030431955,0.010226809476005173
100 | ran_pars,subj_id,sd__(Intercept),0.9653538736872311,NA,NA,NA
101 | ran_pars,subj_id,cor__(Intercept).X_i,0.5832042037402057,NA,NA,NA
102 | ran_pars,subj_id,sd__X_i,1.0583307420144388,NA,NA,NA
103 | ran_pars,item_id,sd__(Intercept),0.9719722818142493,NA,NA,NA
104 | fixed,NA,(Intercept),-0.24013253380687766,0.18568042207409274,-1.2932571518555505,0.19592213486796384
105 | fixed,NA,X_i,-1.3798768163950828,0.319829464964103,-4.314414297475554,1.6002659768005322e-5
106 | ran_pars,subj_id,sd__(Intercept),1.1019783563828898,NA,NA,NA
107 | ran_pars,subj_id,cor__(Intercept).X_i,0.4785035759364629,NA,NA,NA
108 | ran_pars,subj_id,sd__X_i,1.1395678951133201,NA,NA,NA
109 | ran_pars,item_id,sd__(Intercept),1.0219973686046804,NA,NA,NA
110 | fixed,NA,(Intercept),0.01137962228262114,0.15401677015382223,0.0738856052575047,0.9411014098802906
111 | fixed,NA,X_i,-0.7271223587745965,0.264918434967017,-2.7447027567754017,0.006056572783571321
112 | ran_pars,subj_id,sd__(Intercept),0.9066981923045057,NA,NA,NA
113 | ran_pars,subj_id,cor__(Intercept).X_i,0.4538641565495015,NA,NA,NA
114 | ran_pars,subj_id,sd__X_i,0.9044937644486217,NA,NA,NA
115 | ran_pars,item_id,sd__(Intercept),0.8474886114480868,NA,NA,NA
116 | fixed,NA,(Intercept),-0.282125989112989,0.15943355307907797,-1.7695521655536108,0.07680177341450482
117 | fixed,NA,X_i,-0.6907969122636091,0.26919949586658126,-2.566115178038732,0.010284467188435644
118 | ran_pars,subj_id,sd__(Intercept),1.0142032424974767,NA,NA,NA
119 | ran_pars,subj_id,cor__(Intercept).X_i,0.43606603844721653,NA,NA,NA
120 | ran_pars,subj_id,sd__X_i,1.093598706643477,NA,NA,NA
121 | ran_pars,item_id,sd__(Intercept),0.8339034710654499,NA,NA,NA
122 |
--------------------------------------------------------------------------------
/docs/index.html:
--------------------------------------------------------------------------------
1 |
2 |
3 |
4 |
5 |
6 |
7 |
8 | Simulating for LMEM • lmem.sim
9 |
10 |
11 |
12 |
13 |
14 |
15 |
16 |
20 |
21 |
22 |
23 |
98 |
99 |
100 |
102 |
Lisa M. DeBruine & Dale J. Barr
103 |
This repository contains all materials needed to reproduce the manuscript, as well as supplemental scripts and shiny apps.
104 |
Abstract Experimental designs that sample both subjects and stimuli from a larger population need to account for random effects of both subjects and stimuli using mixed effects models. However, much of this research is analyzed using ANOVA on aggregated responses because researchers are not confident specifying and interpreting mixed effects models. The tutorial will explain how to simulate data with random effects structure and analyse the data using linear mixed effects regression (with the lme4 R package), with a focus on interpreting the output in light of the simulated parameters. Data simulation can not only enhance understanding of how these models work, but also enables researchers to perform power calculations for complex designs.
105 |
114 |
115 |
116 | Installation
117 |
You can install the development version of a package that include all the packages you’ll need for the examples, plus a local version of the shiny app, from GitHub with:
118 |
120 |
Here are some functions included in the app:
121 |
122 | lmem.sim :: paper ( ) # open the pdf version of the paper
123 | lmem.sim :: paper ( "html" ) # open the html version of the paper
124 | lmem.sim :: appendix ( "1a" ) # open appendix 1a
125 | lmem.sim :: app ( ) # start the app
126 | citation ( "lmem.sim" ) # get the citation
--------------------------------------------------------------------------------
/inst/appendices/appendix1c_sensitivity.Rmd:
--------------------------------------------------------------------------------
1 | ---
2 | title: 'Appendix 1c: Sensitivity Analysis'
3 | author: "Lisa M. DeBruine & Dale J. Barr"
4 | subtitle: Understanding mixed effects models through data simulation
5 | output: html_document
6 | ---
7 |
8 | ```{r setup, include = FALSE}
9 | knitr::opts_chunk$set(
10 | echo = TRUE,
11 | warning = FALSE,
12 | message = FALSE,
13 | fig.width = 8,
14 | fig.height = 5,
15 | out.width = "100%"
16 | )
17 | ```
18 |
19 | [Download the .Rmd for this example](https://github.com/debruine/lmem_sim/blob/master/vignettes/appendix1c_sensitivity.Rmd)
20 |
21 | ### Required software
22 |
23 | ```{r, message=FALSE}
24 | # load required packages
25 | library("lme4") # model specification / estimation
26 | library("lmerTest") # deriving p-values from lmer
27 | library("broom.mixed") # extracting data from model fits
28 | library("faux") # generate correlated values
29 | library("tidyverse") # data wrangling and visualisation
30 |
31 | # ensure this script returns the same results on each run
32 | set.seed(8675309)
33 | faux_options(verbose = FALSE)
34 | ```
35 |
36 | ### Data simulation function
37 |
38 | The custom simulation function for the example in Appendix 1A is copied below.
39 |
40 | ```{r}
41 | # set up the custom data simulation function
42 | my_sim_data <- function(
43 | n_subj = 100, # number of subjects
44 | n_ingroup = 25, # number of ingroup stimuli
45 | n_outgroup = 25, # number of outgroup stimuli
46 | beta_0 = 800, # grand mean
47 | beta_1 = 50, # effect of category
48 | omega_0 = 80, # by-item random intercept sd
49 | tau_0 = 100, # by-subject random intercept sd
50 | tau_1 = 40, # by-subject random slope sd
51 | rho = 0.2, # correlation between intercept and slope
52 | sigma = 200) { # residual (standard deviation)
53 |
54 | # simulate a sample of items
55 | items <- data.frame(
56 | item_id = seq_len(n_ingroup + n_outgroup),
57 | category = rep(c("ingroup", "outgroup"), c(n_ingroup, n_outgroup)),
58 | X_i = rep(c(-0.5, 0.5), c(n_ingroup, n_outgroup)),
59 | O_0i = rnorm(n = n_ingroup + n_outgroup, mean = 0, sd = omega_0)
60 | )
61 |
62 | # simulate a sample of subjects
63 | subjects <- faux::rnorm_multi(
64 | n = n_subj, mu = 0, sd = c(tau_0, tau_1), r = rho,
65 | varnames = c("T_0s", "T_1s")
66 | )
67 | subjects$subj_id <- 1:n_subj
68 |
69 | # cross subject and item IDs
70 | crossing(subjects, items) %>%
71 | mutate(
72 | e_si = rnorm(nrow(.), mean = 0, sd = sigma),
73 | RT = beta_0 + T_0s + O_0i + (beta_1 + T_1s) * X_i + e_si
74 | ) %>%
75 | select(subj_id, item_id, category, X_i, RT)
76 | }
77 | ```
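To get a feel for what `my_sim_data()` returns, you can generate and inspect a small data set first (a quick added illustration; the exact values depend on the random seed):

```{r}
# peek at a small simulated data set: 4 subjects x 4 items
head(my_sim_data(n_subj = 4, n_ingroup = 2, n_outgroup = 2))
```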
78 |
79 |
80 | ## Power calculation function
81 |
82 | The power calculation function is slightly more complicated than the one for the basic example. Since sensitivity analyses usually push models into parameter spaces where they produce warnings, we capture any warnings and add them to the results table. We also add the parameters themselves to the results table so you can group by the different parameter settings when visualising the results.
83 |
84 | ```{r}
85 | # set up the power function
86 | single_run <- function(filename = NULL, ...) {
87 | # ... is a shortcut that forwards any arguments to my_sim_data()
88 | dat_sim <- my_sim_data(...)
89 |
90 | # run lmer and capture any warnings
91 | ww <- ""
92 | suppressMessages(suppressWarnings(
93 | mod_sim <- withCallingHandlers({
94 | lmer(RT ~ X_i + (1 | item_id) + (1 + X_i | subj_id),
95 | dat_sim, REML = FALSE)},
96 |          warning = function(w) { ww <<- w$message } # only the last warning is kept
97 | )
98 | ))
99 |
100 |   # get the results table and add any warnings
101 | sim_results <- broom.mixed::tidy(mod_sim) %>%
102 | mutate(warnings = ww)
103 |
104 | # add columns for the specified parameters
105 | params <- list(...)
106 | for (name in names(params)) {
107 | sim_results[name] <- params[name]
108 | }
109 |
110 | # append the results to a file if filename is set
111 | if (!is.null(filename)) {
112 | append <- file.exists(filename) # append if the file exists
113 | write_csv(sim_results, filename, append = append)
114 | }
115 |
116 | sim_results
117 | }
118 |
119 | ```
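Before launching a long simulation, it can help to confirm that a single run works (an added check; estimates will vary from run to run):

```{r}
# one small run with no file output; returns a tidy results table
# with the warnings and parameter columns appended
single_run(n_subj = 20, n_ingroup = 10, n_outgroup = 10)
```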
120 |
121 | ### Example 1: Effect size
122 |
123 | Set up a data table with all of the parameter combinations you want to test. For example, the code below sets up 100 replications for effects of category ranging from 0 to 100 ms in steps of 10. All of the other parameters are held constant, but we specify them anyway so they are saved in the results table.
124 |
125 | ```{r}
126 | filename1 <- "sims/sens1.csv"
127 | nreps <- 100 # number of replications per parameter combo
128 |
129 | params <- crossing(
130 | rep = 1:nreps, # repeats each combo nreps times
131 | n_subj = 100, # number of subjects
132 | n_ingroup = 25, # number of ingroup stimuli
133 | n_outgroup = 25, # number of outgroup stimuli
134 | beta_0 = 800, # grand mean
135 | beta_1 = seq(0, 100, by = 10), # effect of category
136 | omega_0 = 100, # by-item random intercept sd
137 | tau_0 = 80, # by-subject random intercept sd
138 | tau_1 = 40, # by-subject random slope sd
139 | rho = 0.2, # correlation between intercept and slope
140 | sigma = 200 # residual (standard deviation)
141 | ) %>%
142 | select(-rep) # remove rep column
143 | ```
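As a quick sanity check, you can confirm how many model fits this table implies before running anything:

```{r}
nrow(params) # 100 reps x 11 effect sizes = 1100 model fits
```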
144 |
145 | This table has 1100 rows, so the code below will run 1100 simulations. The results are saved to the named file, or appended to it if the file already exists. Start with a small number of replicates, and add more once you're sure the code works and you have a sense of how long each run takes.
146 |
147 | ```{r, message = FALSE}
148 | if (!file.exists(filename1)) {
149 | # run a simulation for each row of params
150 | # and save to a file on each rep
151 | sims1 <- purrr::pmap_df(params, single_run, filename = filename1)
152 | }
153 |
154 | # read saved simulation data
155 | # NB: col_types is set for warnings in case
156 | # the first 1000 rows don't have any
157 | ct <- cols(warnings = col_character(),
158 | # makes sure plots display in this order
159 | group = col_factor(ordered = TRUE),
160 | term = col_factor(ordered = TRUE))
161 | sims1 <- read_csv(filename1, col_types = ct)
162 | ```
163 |
164 | If the named file already exists, the chunk above just reads the saved data instead of re-running the simulations. The code below calculates the mean estimates and power for each group. Make sure to set the `group_by()` variables to the parameters you varied above.
165 |
166 |
167 | ```{r, message=FALSE}
168 | # calculate mean estimates and power for specified alpha
169 | alpha <- 0.05
170 |
171 | power1 <- sims1 %>%
172 | filter(effect == "fixed", term == "X_i") %>%
173 | group_by(term, beta_1) %>%
174 | summarise(
175 | mean_estimate = mean(estimate),
176 | mean_se = mean(std.error),
177 | power = mean(p.value < alpha),
178 | .groups = "drop"
179 | )
180 |
181 | power1 %>%
182 | ggplot(aes(beta_1, power)) +
183 | geom_point() +
184 | geom_smooth(se = FALSE) +
185 | ylim(0, 1) +
186 | scale_x_continuous(name = "Effect of category in ms (beta_1)",
187 | breaks = seq(0, 100, 10)) +
188 | ggtitle("Power for designs varying in effect size")
189 | ```
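Because `single_run()` stored any warning messages, you can also check how often the models had estimation problems, for example:

```{r}
# proportion of runs that produced a warning, by effect size
sims1 %>%
  filter(effect == "fixed", term == "X_i") %>%
  group_by(beta_1) %>%
  summarise(warned = mean(!is.na(warnings) & warnings != ""),
            .groups = "drop")
```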
190 |
191 | ### Example 2: Number of subjects and items
192 |
193 | The code below sets up 50 replications for each of the 20 combinations of 10 to 50 subjects (in steps of 10) and 10 to 25 stimuli per group (in steps of 5).
194 |
195 | ```{r}
196 | filename2 <- "sims/sens2.csv"
197 | nreps <- 50 # number of replications per parameter combo
198 |
199 | params <- crossing(
200 | rep = 1:nreps,
201 | n_subj = seq(10, 50, by = 10),
202 | n_ingroup = seq(10, 25, by = 5),
203 | beta_0 = 800, # grand mean
204 | beta_1 = 100, # effect of category
205 | omega_0 = 100, # by-item random intercept sd
206 | tau_0 = 80, # by-subject random intercept sd
207 | tau_1 = 40, # by-subject random slope sd
208 | rho = 0.2, # correlation between intercept and slope
209 | sigma = 200 # residual (standard deviation)
210 | ) %>%
211 |   mutate(n_outgroup = n_ingroup) %>% # keep the two stimulus groups the same size
212 | select(-rep) # remove rep column
213 | ```
214 |
215 |
216 | ```{r, message = FALSE}
217 | if (!file.exists(filename2)) {
218 | # run a simulation for each row of params
219 | # and save to a file on each rep
220 | sims2 <- purrr::pmap_df(params, single_run, filename = filename2)
221 | }
222 |
223 | # read saved simulation data
224 | sims2 <- read_csv(filename2, col_types = ct)
225 | ```
226 |
227 | ```{r}
228 | # calculate mean estimates and power for specified alpha
229 | alpha <- 0.05
230 |
231 | power2 <- sims2 %>%
232 | filter(effect == "fixed", term == "X_i") %>%
233 | group_by(term, n_subj, n_ingroup) %>%
234 | summarise(
235 | mean_estimate = mean(estimate),
236 | mean_se = mean(std.error),
237 | power = mean(p.value < alpha),
238 | .groups = "drop"
239 | )
240 |
241 | power2 %>%
242 | ggplot(aes(n_subj, n_ingroup, fill = power)) +
243 | geom_tile(show.legend = FALSE) +
244 | geom_text(aes(label = round(power, 2)),
245 | color = "black", size = 6) +
246 | scale_x_continuous(name = "Number of subjects",
247 | breaks = seq(10, 50, 10)) +
248 | scale_y_continuous(name = "Number of items/group",
249 | breaks = seq(5, 25, 5)) +
250 | scale_fill_viridis_c(limits = c(0, 1)) +
251 | ggtitle("Power for designs varying in number of subjects and items")
252 | ```
253 |
254 |
255 | ### Example 3: Random intercept SDs
256 |
257 | The code below sets up 50 replications for each of the 25 combinations of by-item and by-subject random intercept SDs ranging from 20 to 100 in steps of 20, for designs with 50 subjects and 10 items per group.
258 |
259 | ```{r}
260 |
261 | filename3 <- "sims/sens3.csv"
262 | nreps <- 50 # number of replications per parameter combo
263 |
264 | params <- crossing(
265 | rep = 1:nreps,
266 | n_subj = 50, # number of subjects
267 | n_ingroup = 10, # number of ingroup items
268 | n_outgroup = 10, # number of outgroup items
269 | beta_0 = 800, # grand mean
270 | beta_1 = 50, # effect of category
271 | omega_0 = seq(20, 100, by = 20), # by-item random intercept sd
272 | tau_0 = seq(20, 100, by = 20), # by-subject random intercept sd
273 | tau_1 = 40, # by-subject random slope sd
274 | rho = 0.2, # correlation between intercept and slope
275 | sigma = 200 # residual (standard deviation)
276 | ) %>%
277 | select(-rep) # remove rep column
278 | ```
279 |
280 | ```{r, message = FALSE}
281 | if (!file.exists(filename3)) {
282 | # run a simulation for each row of params
283 | # and save to a file on each rep
284 | sims3 <- purrr::pmap_df(params, single_run, filename = filename3)
285 | }
286 |
287 | # read saved simulation data
288 | sims3 <- read_csv(filename3, col_types = ct)
289 | ```
290 |
291 | ```{r, message=FALSE}
292 | # calculate mean estimates and power for specified alpha
293 | alpha <- 0.05
294 |
295 | power3 <- sims3 %>%
296 | filter(effect == "fixed", term == "X_i") %>%
297 | group_by(term, omega_0, tau_0) %>%
298 | summarise(
299 | mean_estimate = mean(estimate),
300 | mean_se = mean(std.error),
301 | power = mean(p.value < alpha),
302 | .groups = "drop"
303 | )
304 |
305 | power3 %>%
306 | ggplot(aes(omega_0, tau_0, fill = power)) +
307 | geom_tile(show.legend = FALSE) +
308 | geom_text(aes(label = round(power, 2)),
309 | color = "white", size = 6) +
310 | scale_x_continuous(name = "By-item random intercept SD (omega_0)",
311 | breaks = seq(20, 100, 20)) +
312 | scale_y_continuous(name = "By-subject random intercept SD (tau_0)",
313 | breaks = seq(20, 100, 20)) +
314 | scale_fill_viridis_c(limits = c(0, 1)) +
315 | ggtitle("Power for designs varying in random intercept SDs")
316 | ```
317 |
318 |
--------------------------------------------------------------------------------
/vignettes/appendix1c_sensitivity.Rmd:
--------------------------------------------------------------------------------
1 | ---
2 | title: 'Appendix 1c: Sensitivity Analysis'
3 | author: "Lisa M. DeBruine & Dale J. Barr"
4 | subtitle: Understanding mixed effects models through data simulation
5 | output: rmarkdown::html_vignette
6 | vignette: >
7 | %\VignetteIndexEntry{Appendix 1c: Sensitivity Analysis}
8 | %\VignetteEngine{knitr::rmarkdown}
9 | %\VignetteEncoding{UTF-8}
10 | ---
11 |
12 | ```{r setup, include = FALSE}
13 | knitr::opts_chunk$set(
14 | echo = TRUE,
15 | warning = FALSE,
16 | message = FALSE,
17 | fig.width = 8,
18 | fig.height = 5,
19 | out.width = "100%"
20 | )
21 | ```
22 |
23 | [Download the .Rmd for this example](https://github.com/debruine/lmem_sim/blob/master/vignettes/appendix1c_sensitivity.Rmd)
24 |
25 | ### Required software
26 |
27 | ```{r, message=FALSE}
28 | # load required packages
29 | library("lme4") # model specification / estimation
30 | library("lmerTest") # deriving p-values from lmer
31 | library("broom.mixed") # extracting data from model fits
32 | library("faux") # generate correlated values
33 | library("tidyverse") # data wrangling and visualisation
34 |
35 | # ensure this script returns the same results on each run
36 | set.seed(8675309)
37 | faux_options(verbose = FALSE)
38 | ```
39 |
40 | ### Data simulation function
41 |
42 | The custom simulation function for the example in Appendix 1A is copied below.
43 |
44 | ```{r}
45 | # set up the custom data simulation function
46 | my_sim_data <- function(
47 | n_subj = 100, # number of subjects
48 | n_ingroup = 25, # number of ingroup stimuli
49 | n_outgroup = 25, # number of outgroup stimuli
50 | beta_0 = 800, # grand mean
51 | beta_1 = 50, # effect of category
52 | omega_0 = 80, # by-item random intercept sd
53 | tau_0 = 100, # by-subject random intercept sd
54 | tau_1 = 40, # by-subject random slope sd
55 | rho = 0.2, # correlation between intercept and slope
56 | sigma = 200) { # residual (standard deviation)
57 |
58 | # simulate a sample of items
59 | items <- data.frame(
60 | item_id = seq_len(n_ingroup + n_outgroup),
61 | category = rep(c("ingroup", "outgroup"), c(n_ingroup, n_outgroup)),
62 | X_i = rep(c(-0.5, 0.5), c(n_ingroup, n_outgroup)),
63 | O_0i = rnorm(n = n_ingroup + n_outgroup, mean = 0, sd = omega_0)
64 | )
65 |
66 | # simulate a sample of subjects
67 | subjects <- faux::rnorm_multi(
68 | n = n_subj, mu = 0, sd = c(tau_0, tau_1), r = rho,
69 | varnames = c("T_0s", "T_1s")
70 | )
71 | subjects$subj_id <- 1:n_subj
72 |
73 | # cross subject and item IDs
74 | crossing(subjects, items) %>%
75 | mutate(
76 | e_si = rnorm(nrow(.), mean = 0, sd = sigma),
77 | RT = beta_0 + T_0s + O_0i + (beta_1 + T_1s) * X_i + e_si
78 | ) %>%
79 | select(subj_id, item_id, category, X_i, RT)
80 | }
81 | ```
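To get a feel for what `my_sim_data()` returns, you can generate and inspect a small data set first (a quick added illustration; the exact values depend on the random seed):

```{r}
# peek at a small simulated data set: 4 subjects x 4 items
head(my_sim_data(n_subj = 4, n_ingroup = 2, n_outgroup = 2))
```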
82 |
83 |
84 | ## Power calculation function
85 |
86 | The power calculation function is slightly more complicated than the one for the basic example. Since sensitivity analyses usually push models into parameter spaces where they produce warnings, we capture any warnings and add them to the results table. We also add the parameters themselves to the results table so you can group by the different parameter settings when visualising the results.
87 |
88 | ```{r}
89 | # set up the power function
90 | single_run <- function(filename = NULL, ...) {
91 | # ... is a shortcut that forwards any arguments to my_sim_data()
92 | dat_sim <- my_sim_data(...)
93 |
94 | # run lmer and capture any warnings
95 | ww <- ""
96 | suppressMessages(suppressWarnings(
97 | mod_sim <- withCallingHandlers({
98 | lmer(RT ~ X_i + (1 | item_id) + (1 + X_i | subj_id),
99 | dat_sim, REML = FALSE)},
100 |          warning = function(w) { ww <<- w$message } # only the last warning is kept
101 | )
102 | ))
103 |
104 |   # get the results table and add any warnings
105 | sim_results <- broom.mixed::tidy(mod_sim) %>%
106 | mutate(warnings = ww)
107 |
108 | # add columns for the specified parameters
109 | params <- list(...)
110 | for (name in names(params)) {
111 | sim_results[name] <- params[name]
112 | }
113 |
114 | # append the results to a file if filename is set
115 | if (!is.null(filename)) {
116 | append <- file.exists(filename) # append if the file exists
117 | write_csv(sim_results, filename, append = append)
118 | }
119 |
120 | sim_results
121 | }
122 |
123 | ```
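Before launching a long simulation, it can help to confirm that a single run works (an added check; estimates will vary from run to run):

```{r}
# one small run with no file output; returns a tidy results table
# with the warnings and parameter columns appended
single_run(n_subj = 20, n_ingroup = 10, n_outgroup = 10)
```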
124 |
125 | ### Example 1: Effect size
126 |
127 | Set up a data table with all of the parameter combinations you want to test. For example, the code below sets up 100 replications for effects of category ranging from 0 to 100 ms in steps of 10. All of the other parameters are held constant, but we specify them anyway so they are saved in the results table.
128 |
129 | ```{r}
130 | filename1 <- "sims/sens1.csv"
131 | nreps <- 100 # number of replications per parameter combo
132 |
133 | params <- crossing(
134 | rep = 1:nreps, # repeats each combo nreps times
135 | n_subj = 100, # number of subjects
136 | n_ingroup = 25, # number of ingroup stimuli
137 | n_outgroup = 25, # number of outgroup stimuli
138 | beta_0 = 800, # grand mean
139 | beta_1 = seq(0, 100, by = 10), # effect of category
140 | omega_0 = 100, # by-item random intercept sd
141 | tau_0 = 80, # by-subject random intercept sd
142 | tau_1 = 40, # by-subject random slope sd
143 | rho = 0.2, # correlation between intercept and slope
144 | sigma = 200 # residual (standard deviation)
145 | ) %>%
146 | select(-rep) # remove rep column
147 | ```
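As a quick sanity check, you can confirm how many model fits this table implies before running anything:

```{r}
nrow(params) # 100 reps x 11 effect sizes = 1100 model fits
```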
148 |
149 | This table has 1100 rows, so the code below will run 1100 simulations. The results are saved to the named file, or appended to it if the file already exists. Start with a small number of replicates, and add more once you're sure the code works and you have a sense of how long each run takes.
150 |
151 | ```{r, message = FALSE}
152 | if (!file.exists(filename1)) {
153 | # run a simulation for each row of params
154 | # and save to a file on each rep
155 | sims1 <- purrr::pmap_df(params, single_run, filename = filename1)
156 | }
157 |
158 | # read saved simulation data
159 | # NB: col_types is set for warnings in case
160 | # the first 1000 rows don't have any
161 | ct <- cols(warnings = col_character(),
162 | # makes sure plots display in this order
163 | group = col_factor(ordered = TRUE),
164 | term = col_factor(ordered = TRUE))
165 | sims1 <- read_csv(filename1, col_types = ct)
166 | ```
167 |
168 | If the named file already exists, the chunk above just reads the saved data instead of re-running the simulations. The code below calculates the mean estimates and power for each group. Make sure to set the `group_by()` variables to the parameters you varied above.
169 |
170 |
171 | ```{r, message=FALSE}
172 | # calculate mean estimates and power for specified alpha
173 | alpha <- 0.05
174 |
175 | power1 <- sims1 %>%
176 | filter(effect == "fixed", term == "X_i") %>%
177 | group_by(term, beta_1) %>%
178 | summarise(
179 | mean_estimate = mean(estimate),
180 | mean_se = mean(std.error),
181 | power = mean(p.value < alpha),
182 | .groups = "drop"
183 | )
184 |
185 | power1 %>%
186 | ggplot(aes(beta_1, power)) +
187 | geom_point() +
188 | geom_smooth(se = FALSE) +
189 | ylim(0, 1) +
190 | scale_x_continuous(name = "Effect of category in ms (beta_1)",
191 | breaks = seq(0, 100, 10)) +
192 | ggtitle("Power for designs varying in effect size")
193 | ```
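Because `single_run()` stored any warning messages, you can also check how often the models had estimation problems, for example:

```{r}
# proportion of runs that produced a warning, by effect size
sims1 %>%
  filter(effect == "fixed", term == "X_i") %>%
  group_by(beta_1) %>%
  summarise(warned = mean(!is.na(warnings) & warnings != ""),
            .groups = "drop")
```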
194 |
195 | ### Example 2: Number of subjects and items
196 |
197 | The code below sets up 50 replications for each of the 20 combinations of 10 to 50 subjects (in steps of 10) and 10 to 25 stimuli per group (in steps of 5).
198 |
199 | ```{r}
200 | filename2 <- "sims/sens2.csv"
201 | nreps <- 50 # number of replications per parameter combo
202 |
203 | params <- crossing(
204 | rep = 1:nreps,
205 | n_subj = seq(10, 50, by = 10),
206 | n_ingroup = seq(10, 25, by = 5),
207 | beta_0 = 800, # grand mean
208 | beta_1 = 100, # effect of category
209 | omega_0 = 100, # by-item random intercept sd
210 | tau_0 = 80, # by-subject random intercept sd
211 | tau_1 = 40, # by-subject random slope sd
212 | rho = 0.2, # correlation between intercept and slope
213 | sigma = 200 # residual (standard deviation)
214 | ) %>%
215 |   mutate(n_outgroup = n_ingroup) %>% # keep the two stimulus groups the same size
216 | select(-rep) # remove rep column
217 | ```
218 |
219 |
220 | ```{r, message = FALSE}
221 | if (!file.exists(filename2)) {
222 | # run a simulation for each row of params
223 | # and save to a file on each rep
224 | sims2 <- purrr::pmap_df(params, single_run, filename = filename2)
225 | }
226 |
227 | # read saved simulation data
228 | sims2 <- read_csv(filename2, col_types = ct)
229 | ```
230 |
231 | ```{r}
232 | # calculate mean estimates and power for specified alpha
233 | alpha <- 0.05
234 |
235 | power2 <- sims2 %>%
236 | filter(effect == "fixed", term == "X_i") %>%
237 | group_by(term, n_subj, n_ingroup) %>%
238 | summarise(
239 | mean_estimate = mean(estimate),
240 | mean_se = mean(std.error),
241 | power = mean(p.value < alpha),
242 | .groups = "drop"
243 | )
244 |
245 | power2 %>%
246 | ggplot(aes(n_subj, n_ingroup, fill = power)) +
247 | geom_tile(show.legend = FALSE) +
248 | geom_text(aes(label = round(power, 2)),
249 | color = "black", size = 6) +
250 | scale_x_continuous(name = "Number of subjects",
251 | breaks = seq(10, 50, 10)) +
252 | scale_y_continuous(name = "Number of items/group",
253 | breaks = seq(5, 25, 5)) +
254 | scale_fill_viridis_c(limits = c(0, 1)) +
255 | ggtitle("Power for designs varying in number of subjects and items")
256 | ```
257 |
258 |
259 | ### Example 3: Random intercept SDs
260 |
261 | The code below sets up 50 replications for each of the 25 combinations of by-item and by-subject random intercept SDs ranging from 20 to 100 in steps of 20, for designs with 50 subjects and 10 items per group.
262 |
263 | ```{r}
264 |
265 | filename3 <- "sims/sens3.csv"
266 | nreps <- 50 # number of replications per parameter combo
267 |
268 | params <- crossing(
269 | rep = 1:nreps,
270 | n_subj = 50, # number of subjects
271 | n_ingroup = 10, # number of ingroup items
272 | n_outgroup = 10, # number of outgroup items
273 | beta_0 = 800, # grand mean
274 | beta_1 = 50, # effect of category
275 | omega_0 = seq(20, 100, by = 20), # by-item random intercept sd
276 | tau_0 = seq(20, 100, by = 20), # by-subject random intercept sd
277 | tau_1 = 40, # by-subject random slope sd
278 | rho = 0.2, # correlation between intercept and slope
279 | sigma = 200 # residual (standard deviation)
280 | ) %>%
281 | select(-rep) # remove rep column
282 | ```
283 |
284 | ```{r, message = FALSE}
285 | if (!file.exists(filename3)) {
286 | # run a simulation for each row of params
287 | # and save to a file on each rep
288 | sims3 <- purrr::pmap_df(params, single_run, filename = filename3)
289 | }
290 |
291 | # read saved simulation data
292 | sims3 <- read_csv(filename3, col_types = ct)
293 | ```
294 |
295 | ```{r, message=FALSE}
296 | # calculate mean estimates and power for specified alpha
297 | alpha <- 0.05
298 |
299 | power3 <- sims3 %>%
300 | filter(effect == "fixed", term == "X_i") %>%
301 | group_by(term, omega_0, tau_0) %>%
302 | summarise(
303 | mean_estimate = mean(estimate),
304 | mean_se = mean(std.error),
305 | power = mean(p.value < alpha),
306 | .groups = "drop"
307 | )
308 |
309 | power3 %>%
310 | ggplot(aes(omega_0, tau_0, fill = power)) +
311 | geom_tile(show.legend = FALSE) +
312 | geom_text(aes(label = round(power, 2)),
313 | color = "white", size = 6) +
314 | scale_x_continuous(name = "By-item random intercept SD (omega_0)",
315 | breaks = seq(20, 100, 20)) +
316 | scale_y_continuous(name = "By-subject random intercept SD (tau_0)",
317 | breaks = seq(20, 100, 20)) +
318 | scale_fill_viridis_c(limits = c(0, 1)) +
319 | ggtitle("Power for designs varying in random intercept SDs")
320 | ```
321 |
322 |
--------------------------------------------------------------------------------