├── .Rbuildignore
├── .github
├── .gitignore
└── workflows
│ └── deploy_bookdown.yml
├── .gitignore
├── .nojekyll
├── 01-module-1.Rmd
├── 02-multiple-regression.Rmd
├── 03-module-3.Rmd
├── 04-module-4.Rmd
├── 05-module-5.Rmd
├── 06-module-6.Rmd
├── 07-module-7.Rmd
├── 08-module-8.Rmd
├── 09-module-9.Rmd
├── 10-module-10.Rmd
├── 11-module-11.Rmd
├── 12-module-12.Rmd
├── 13-appendix.Rmd
├── LICENSE.txt
├── README.md
├── Rscripts
├── 02-multiple-regression.R
├── 03-module-3.R
├── 04-module-4.R
├── 05-module-5.R
├── 06-module-6.R
├── 07-module-7.R
├── 08-module-8.R
├── 09-module-9.R
├── 10-module-10.R
├── 11-module-11.R
└── 12-module-12.R
├── _book
├── img
│ └── effect_types.png
├── open_mlm_materials.epub
├── open_mlm_materials_files
│ └── figure-html
│ │ ├── graph-1.png
│ │ ├── unnamed-chunk-101-1.png
│ │ ├── unnamed-chunk-109-1.png
│ │ ├── unnamed-chunk-112-1.png
│ │ ├── unnamed-chunk-113-1.png
│ │ ├── unnamed-chunk-115-1.png
│ │ ├── unnamed-chunk-115-2.png
│ │ ├── unnamed-chunk-115-3.png
│ │ ├── unnamed-chunk-115-4.png
│ │ ├── unnamed-chunk-115-5.png
│ │ ├── unnamed-chunk-120-1.png
│ │ ├── unnamed-chunk-121-1.png
│ │ ├── unnamed-chunk-123-1.png
│ │ ├── unnamed-chunk-124-1.png
│ │ ├── unnamed-chunk-127-1.png
│ │ ├── unnamed-chunk-129-1.png
│ │ ├── unnamed-chunk-130-1.png
│ │ ├── unnamed-chunk-131-1.png
│ │ ├── unnamed-chunk-134-1.png
│ │ ├── unnamed-chunk-136-1.png
│ │ ├── unnamed-chunk-137-1.png
│ │ ├── unnamed-chunk-138-1.png
│ │ ├── unnamed-chunk-138-2.png
│ │ ├── unnamed-chunk-139-1.png
│ │ ├── unnamed-chunk-139-2.png
│ │ ├── unnamed-chunk-142-1.png
│ │ ├── unnamed-chunk-143-1.png
│ │ ├── unnamed-chunk-144-1.png
│ │ ├── unnamed-chunk-145-1.png
│ │ ├── unnamed-chunk-146-1.png
│ │ ├── unnamed-chunk-16-1.png
│ │ ├── unnamed-chunk-17-1.png
│ │ ├── unnamed-chunk-25-1.png
│ │ ├── unnamed-chunk-42-1.png
│ │ ├── unnamed-chunk-46-1.png
│ │ ├── unnamed-chunk-53-1.png
│ │ ├── unnamed-chunk-6-1.png
│ │ ├── unnamed-chunk-92-1.png
│ │ ├── unnamed-chunk-93-1.png
│ │ └── unnamed-chunk-94-1.png
├── reference-keys.txt
├── search_index.json
└── style.css
├── _bookdown.yml
├── _output.yml
├── book.bib
├── data
├── casto2016.csv
├── heck2011.csv
├── hoffman2007.csv
├── rb2002.csv
└── teachsat.csv
├── docs
├── 01-module-1.html
├── 02-multiple-regression.html
├── 03-module-3.html
├── 04-module-4.html
├── 05-module-5.html
├── 06-module-6.html
├── 07-module-7.html
├── 08-module-8.html
├── 09-module-9.html
├── 10-module-10.html
├── 11-module-11.html
├── 12-module-12.html
├── 13-appendix.html
├── 404.html
├── CNAME
├── img
│ └── effect_types.png
├── index.html
├── libs
│ ├── anchor-sections-1.0.1
│ │ ├── anchor-sections.css
│ │ └── anchor-sections.js
│ ├── anchor-sections-1.1.0
│ │ ├── anchor-sections-hash.css
│ │ ├── anchor-sections.css
│ │ └── anchor-sections.js
│ ├── gitbook-2.6.7
│ │ ├── css
│ │ │ ├── fontawesome
│ │ │ │ └── fontawesome-webfont.ttf
│ │ │ ├── plugin-bookdown.css
│ │ │ ├── plugin-clipboard.css
│ │ │ ├── plugin-fontsettings.css
│ │ │ ├── plugin-highlight.css
│ │ │ ├── plugin-search.css
│ │ │ ├── plugin-table.css
│ │ │ └── style.css
│ │ └── js
│ │ │ ├── app.min.js
│ │ │ ├── clipboard.min.js
│ │ │ ├── jquery.highlight.js
│ │ │ ├── plugin-bookdown.js
│ │ │ ├── plugin-clipboard.js
│ │ │ ├── plugin-fontsettings.js
│ │ │ ├── plugin-search.js
│ │ │ └── plugin-sharing.js
│ ├── header-attrs-2.11
│ │ └── header-attrs.js
│ └── jquery-3.6.0
│ │ └── jquery-3.6.0.min.js
├── open_mlm_materials_files
│ └── figure-html
│ │ ├── graph-1.png
│ │ ├── unnamed-chunk-101-1.png
│ │ ├── unnamed-chunk-102-1.png
│ │ ├── unnamed-chunk-103-1.png
│ │ ├── unnamed-chunk-109-1.png
│ │ ├── unnamed-chunk-11-1.png
│ │ ├── unnamed-chunk-110-1.png
│ │ ├── unnamed-chunk-111-1.png
│ │ ├── unnamed-chunk-112-1.png
│ │ ├── unnamed-chunk-113-1.png
│ │ ├── unnamed-chunk-114-1.png
│ │ ├── unnamed-chunk-115-1.png
│ │ ├── unnamed-chunk-115-2.png
│ │ ├── unnamed-chunk-115-3.png
│ │ ├── unnamed-chunk-115-4.png
│ │ ├── unnamed-chunk-115-5.png
│ │ ├── unnamed-chunk-116-1.png
│ │ ├── unnamed-chunk-116-2.png
│ │ ├── unnamed-chunk-116-3.png
│ │ ├── unnamed-chunk-116-4.png
│ │ ├── unnamed-chunk-116-5.png
│ │ ├── unnamed-chunk-117-1.png
│ │ ├── unnamed-chunk-117-2.png
│ │ ├── unnamed-chunk-117-3.png
│ │ ├── unnamed-chunk-117-4.png
│ │ ├── unnamed-chunk-117-5.png
│ │ ├── unnamed-chunk-119-1.png
│ │ ├── unnamed-chunk-120-1.png
│ │ ├── unnamed-chunk-121-1.png
│ │ ├── unnamed-chunk-122-1.png
│ │ ├── unnamed-chunk-123-1.png
│ │ ├── unnamed-chunk-124-1.png
│ │ ├── unnamed-chunk-125-1.png
│ │ ├── unnamed-chunk-126-1.png
│ │ ├── unnamed-chunk-127-1.png
│ │ ├── unnamed-chunk-128-1.png
│ │ ├── unnamed-chunk-129-1.png
│ │ ├── unnamed-chunk-13-1.png
│ │ ├── unnamed-chunk-130-1.png
│ │ ├── unnamed-chunk-131-1.png
│ │ ├── unnamed-chunk-132-1.png
│ │ ├── unnamed-chunk-133-1.png
│ │ ├── unnamed-chunk-134-1.png
│ │ ├── unnamed-chunk-135-1.png
│ │ ├── unnamed-chunk-136-1.png
│ │ ├── unnamed-chunk-137-1.png
│ │ ├── unnamed-chunk-137-2.png
│ │ ├── unnamed-chunk-138-1.png
│ │ ├── unnamed-chunk-138-2.png
│ │ ├── unnamed-chunk-139-1.png
│ │ ├── unnamed-chunk-139-2.png
│ │ ├── unnamed-chunk-14-1.png
│ │ ├── unnamed-chunk-140-1.png
│ │ ├── unnamed-chunk-140-2.png
│ │ ├── unnamed-chunk-141-1.png
│ │ ├── unnamed-chunk-141-2.png
│ │ ├── unnamed-chunk-142-1.png
│ │ ├── unnamed-chunk-143-1.png
│ │ ├── unnamed-chunk-144-1.png
│ │ ├── unnamed-chunk-145-1.png
│ │ ├── unnamed-chunk-146-1.png
│ │ ├── unnamed-chunk-147-1.png
│ │ ├── unnamed-chunk-148-1.png
│ │ ├── unnamed-chunk-15-1.png
│ │ ├── unnamed-chunk-16-1.png
│ │ ├── unnamed-chunk-17-1.png
│ │ ├── unnamed-chunk-18-1.png
│ │ ├── unnamed-chunk-19-1.png
│ │ ├── unnamed-chunk-20-1.png
│ │ ├── unnamed-chunk-21-1.png
│ │ ├── unnamed-chunk-22-1.png
│ │ ├── unnamed-chunk-22-2.png
│ │ ├── unnamed-chunk-23-1.png
│ │ ├── unnamed-chunk-23-2.png
│ │ ├── unnamed-chunk-25-1.png
│ │ ├── unnamed-chunk-26-1.png
│ │ ├── unnamed-chunk-27-1.png
│ │ ├── unnamed-chunk-28-1.png
│ │ ├── unnamed-chunk-29-1.png
│ │ ├── unnamed-chunk-30-1.png
│ │ ├── unnamed-chunk-4-1.png
│ │ ├── unnamed-chunk-42-1.png
│ │ ├── unnamed-chunk-43-1.png
│ │ ├── unnamed-chunk-44-1.png
│ │ ├── unnamed-chunk-46-1.png
│ │ ├── unnamed-chunk-47-1.png
│ │ ├── unnamed-chunk-48-1.png
│ │ ├── unnamed-chunk-5-1.png
│ │ ├── unnamed-chunk-53-1.png
│ │ ├── unnamed-chunk-54-1.png
│ │ ├── unnamed-chunk-55-1.png
│ │ ├── unnamed-chunk-6-1.png
│ │ ├── unnamed-chunk-7-1.png
│ │ ├── unnamed-chunk-8-1.png
│ │ ├── unnamed-chunk-92-1.png
│ │ ├── unnamed-chunk-93-1.png
│ │ ├── unnamed-chunk-94-1.png
│ │ ├── unnamed-chunk-95-1.png
│ │ └── unnamed-chunk-96-1.png
├── reference-keys.txt
├── search_index.json
└── style.css
├── img
├── .DS_Store
└── effect_types.png
├── index.Rmd
├── open_mlm_materials.Rproj
├── open_mlm_materials_files
└── figure-html
│ ├── graph-1.png
│ ├── unnamed-chunk-102-1.png
│ ├── unnamed-chunk-110-1.png
│ ├── unnamed-chunk-113-1.png
│ ├── unnamed-chunk-114-1.png
│ ├── unnamed-chunk-116-1.png
│ ├── unnamed-chunk-116-2.png
│ ├── unnamed-chunk-116-3.png
│ ├── unnamed-chunk-116-4.png
│ ├── unnamed-chunk-116-5.png
│ ├── unnamed-chunk-120-1.png
│ ├── unnamed-chunk-121-1.png
│ ├── unnamed-chunk-123-1.png
│ ├── unnamed-chunk-124-1.png
│ ├── unnamed-chunk-127-1.png
│ ├── unnamed-chunk-129-1.png
│ ├── unnamed-chunk-130-1.png
│ ├── unnamed-chunk-131-1.png
│ ├── unnamed-chunk-134-1.png
│ ├── unnamed-chunk-136-1.png
│ ├── unnamed-chunk-137-1.png
│ ├── unnamed-chunk-138-1.png
│ ├── unnamed-chunk-138-2.png
│ ├── unnamed-chunk-139-1.png
│ ├── unnamed-chunk-139-2.png
│ ├── unnamed-chunk-142-1.png
│ ├── unnamed-chunk-143-1.png
│ ├── unnamed-chunk-144-1.png
│ ├── unnamed-chunk-145-1.png
│ ├── unnamed-chunk-146-1.png
│ ├── unnamed-chunk-16-1.png
│ ├── unnamed-chunk-17-1.png
│ ├── unnamed-chunk-26-1.png
│ ├── unnamed-chunk-43-1.png
│ ├── unnamed-chunk-47-1.png
│ ├── unnamed-chunk-54-1.png
│ ├── unnamed-chunk-6-1.png
│ ├── unnamed-chunk-93-1.png
│ ├── unnamed-chunk-94-1.png
│ └── unnamed-chunk-95-1.png
├── packages.bib
├── preamble.tex
├── style.css
└── worksheets
├── module10.docx
├── module11.docx
├── module12.docx
├── module2.docx
├── module3.docx
├── module4.docx
├── module5.docx
├── module6.docx
├── module7.docx
├── module8.docx
└── module9.docx
/.Rbuildignore:
--------------------------------------------------------------------------------
1 | ^\.github$
2 |
--------------------------------------------------------------------------------
/.github/.gitignore:
--------------------------------------------------------------------------------
1 | *.html
2 |
--------------------------------------------------------------------------------
/.github/workflows/deploy_bookdown.yml:
--------------------------------------------------------------------------------
1 | on:
2 | push:
3 | branches:
4 | - master
5 |
6 |
7 |
8 | name: renderbook
9 |
10 | jobs:
11 | bookdown:
12 | name: Render-Book
13 | runs-on: macOS-latest
14 | steps:
15 | - uses: actions/checkout@v1
16 | - uses: r-lib/actions/setup-r@v1
17 | - uses: r-lib/actions/setup-pandoc@v1
18 | - name: Install rmarkdown
19 | run: Rscript -e 'install.packages(c("rmarkdown","bookdown"))'
20 | - name: Render Book
21 | run: Rscript -e 'bookdown::render_book("index.Rmd")'
22 | - uses: actions/upload-artifact@v1
23 | with:
24 | name: _book
25 | path: _book/
26 |
27 | # Need to first create an empty gh-pages branch
28 | # see https://pkgdown.r-lib.org/reference/deploy_site_github.html
29 | # and also add secrets for a GH_PAT and EMAIL to the repository
30 | # gh-action from Cecilapp/GitHub-Pages-deploy
31 | checkout-and-deploy:
32 | runs-on: ubuntu-latest
33 | needs: bookdown
34 | steps:
35 | - name: Checkout
36 | uses: actions/checkout@master
37 | - name: Download artifact
38 | uses: actions/download-artifact@v1.0.0
39 | with:
40 | # Artifact name
41 | name: _book # optional
42 | # Destination path
43 | path: _book # optional
44 | - name: Deploy to GitHub Pages
45 | uses: Cecilapp/GitHub-Pages-deploy@master
46 | env:
47 | EMAIL: ${{ secrets.EMAIL }} # must be a verified email
48 | GH_TOKEN: ${{ secrets.GH_PAT }} # https://github.com/settings/tokens
49 | BUILD_DIR: _book/ # "_site/" by default
50 |
51 |
52 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | _book/
2 | _bookdown_files/
3 | .Rproj.user
4 | .Rhistory
5 | .RData
6 | .Ruserdata
7 | .DS_Store
8 | data/mathmot*
9 |
--------------------------------------------------------------------------------
/.nojekyll:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/.nojekyll
--------------------------------------------------------------------------------
/01-module-1.Rmd:
--------------------------------------------------------------------------------
1 | # Introduction {#introduction}
2 |
3 | ## Overview
4 |
5 | These materials focus on the conceptual foundations of multilevel models (MLMs), specifying them, and interpreting the results. Topics include multilevel data and approaches to dependence, specifying and interpreting fixed and random effects, model estimation, centering, repeated measures and longitudinal models, assumptions testing, and effect sizes in MLMs.
6 |
7 | ## Goals
8 |
9 | These materials are intended for students and instructors.
10 |
11 | By the end of this course, students will be able to:
12 |
13 | 1. Estimate variance components and interpret the intraclass correlation coefficient;
14 | 2. Decide if and when a multilevel model is needed;
15 | 3. Specify and build multilevel models with covariates at levels 1 and 2 for both cross-sectional and repeated measures designs;
16 | 4. Interpret regression coefficients and variance components from multilevel models;
17 | 5. Assess the assumptions of multilevel models;
18 | 6. Calculate effect sizes for multilevel models.
19 |
20 | ## Prerequisites
21 |
22 | Readers should be comfortable with multiple linear regression, including building regression models, interpreting regression output, and testing for and interpreting regression coefficients including interactions. The first module reviews multiple regression and can be used to gauge your preparedness for continuing. For those wishing to brush up their regression skills before working through these materials, we recommend UCLA's Statistical Methods and Data Analytics resources and online seminars: https://stats.oarc.ucla.edu/other/mult-pkg/seminars/
23 |
24 | The worked examples will be conducted using `lme4` in R. For interested readers, the `lme4` documentation provides details of how the package works.
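To give a flavour of what that looks like, here is a minimal, hypothetical sketch of the `lme4` syntax used throughout these materials (the variable and data names below are placeholders, not variables from the course datasets):

```{r lme4-sketch, eval=FALSE}
library(lme4)

# a random-intercept model: the outcome is predicted by one fixed effect,
# and intercepts are allowed to vary across clusters via (1 | cluster_id)
model <- lmer(outcome ~ 1 + predictor + (1 | cluster_id), data = my_data)
summary(model)
```

Later modules unpack each piece of this formula, so it is fine if it does not fully make sense yet.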
25 |
26 | ## Materials
27 |
28 | All materials are available for download in the appendix. The following are available for download:
29 |
30 | * Data: the data used in each chapter
31 | * R Script: an R script of the code used in each chapter
32 | * Worksheet: a worksheet with questions that follows a similar structure to each chapter, but without answers provided
33 |
34 | We recommend that readers who are self-studying download the data and R script and follow along with the code and output interpretations in each chapter. Instructors can benefit from downloading the data, code, and worksheets for use in a lab portion of their classes.
35 |
--------------------------------------------------------------------------------
/02-multiple-regression.Rmd:
--------------------------------------------------------------------------------
1 | # Multiple Regression Review {#module-2}
2 |
3 | ## Learning Objectives
4 |
5 | In this module, we will review simple and multiple linear regression to establish a strong foundation before moving on to multilevel models. Note that this is intended more as review than a comprehensive guide to regression; for the latter, we recommend https://stats.oarc.ucla.edu/other/mult-pkg/seminars/.
6 |
7 | All materials for this chapter are available for download [here](https://www.learn-mlms.com/13-appendix.html).
8 |
9 | The learning objectives for this chapter are:
10 |
11 | 1. Understand how to use file paths for project management and loading data;
12 | 2. Review using simple and multiple linear regression to analyze data.
13 |
14 | ## Data Demonstration
15 |
16 | In this data demo, we will first review setting up an R session, then simple and multiple linear regression.
17 |
18 | The data for this chapter were taken from chapter 3 of Heck, R. H., Thomas, S. L., & Tabata, L. N. (2011). *Multilevel and Longitudinal Modeling with IBM SPSS*: Taylor & Francis. These data have a multilevel structure, which we will work with in chapter 3, but for this chapter we will ignore the clustering structure and conduct regular regression. The following variables are in this data set:
19 |
20 | |Variable|Level|Description|Values|Measurement|
21 | |:-------|:----|:----------|:-----|:----------|
22 | |schcode|School|School identifier (419 schools)|Integer|Ordinal|
23 | |Rid|Individual|A within-group level identifier representing a sequential identifier for each student within 419 schools.|1 to 37|Ordinal|
24 | |id|Individual|Student identifier (6,871 students)|Integer|Ordinal|
25 | |female|Individual|Student sex|0 = Male, 1 = Female|Scale|
26 | |ses|Individual|Z-score measuring student socioeconomic status composition within the schools|-2.41 to 1.87|Scale|
27 | |femses|Individual|Grand-mean-centered variable measuring student socioeconomic status by gender (female)|-2.41 to 1.85|Scale|
28 | |math|Individual|Student math achievement test score|27.42 to 99.98|Scale|
29 | |ses_mean|School|Grand-mean-centered variable measuring student socioeconomic status|-1.30 to 1.44|Scale|
30 | |pro4yrc|School|Aggregate proportion of students who intend to study at 4-year universities|0.00 to 1.00|Scale|
31 | |public|School|Dichotomous variable identifying school type|0 = Other, 1 = Public School|Scale|
32 |
33 | ### Creating R Projects
34 |
35 | Before we get into analyzing the data, let's start by creating a new project file for this module. R project files help you keep all of the files associated with your project -- data, R scripts, and output (including figures) -- in one location so you can easily navigate everything related to your project.
36 |
37 | To create a project, open R, click "File" and "New Project...". If you have already created a folder for this chapter, you can add an R Project to that folder by clicking "Existing Directory"; the R project file will take on the name of that folder. If you do not already have a folder, click "New Directory," then choose where you want to put your new folder and what you want to call it. The R Project file will again take on the name of your new folder.
38 |
39 | ### Loading Data and Dependencies
40 |
41 | Next, let's load in the data and packages we'll be using for this demo. We'll be using the following packages:
42 |
43 | ```{r data-dependencies, message=FALSE, warning=FALSE}
44 | library(ggplot2) # for data visualization
45 | library(magrittr) # for pipe, %>%
46 | ```
47 |
48 | You must install a given package before you can use it. For example: `install.packages("ggplot2")`. Once you have installed a package, you can load it into any future sessions with `library(package_name)`.
49 |
50 | Next, let's read in the data. If you have your code and data in the same directory, you can read the data in as follows:
51 |
52 | ```{r data, eval=FALSE}
53 | data <- read.csv('heck2011.csv')
54 | ```
55 |
56 | ```{r, echo = FALSE}
57 | # this actually loads my code, but will be hidden
58 | data <- read.csv('data/heck2011.csv')
59 | ```
60 |
61 | This is called a *relative* file path, because you're telling your computer where to find the data relative to your current folder (a folder can also be called a "directory"). You could also use an absolute file path that fully states where your files are located, like:
62 |
63 | ```{r, eval=FALSE}
64 | read.csv('/Users/maireadshaw/open_mlm_materials/heck2011.csv')
65 | ```
66 |
67 | Let's calculate some descriptive statistics and compare them to the above table to make sure we read our data in correctly.
68 |
69 | ```{r}
70 | summary(data)
71 | ```
72 |
73 | That looks good, so let's proceed to conducting regressions.
74 |
75 | ### Simple Linear Regression
76 |
77 | Let's run a simple linear regression predicting math achievement (`math`) from socioeconomic status (`ses`). The syntax for the `lm()` (linear model) function in R is `lm(DV ~ IV1 + IV2 + ... + IVn, data = dataframe)`.
78 |
79 | ```{r simple-regression}
80 | model1 <- lm(math ~ ses, data = data)
81 | summary(model1)
82 | ```
83 |
84 | The intercept from this regression is 57.60, indicating that students at the mean level of SES within a school (i.e., when SES = 0, given that SES is z-scored) have an average math achievement score of 57.6 out of 100. This score is significantly different from 0, per the p-value.
85 |
86 | Per the coefficient for SES, a one-unit increase in SES is associated with a 4.25-point increase in student math achievement on average, also significant.
87 |
88 | The adjusted R-squared value is 14.3%, indicating that 14.3% of the variance in math achievement is explained by socioeconomic status.
89 |
90 | We can visualize this relationship by graphing a scatter plot.
91 |
92 | ```{r graph}
93 | ggplot(data = data, mapping = aes(x = ses, y = math)) +
94 | geom_point()
95 | ```
96 |
97 | Our graph reflects the positive relationship between SES and math achievement (and also shows a lot of math scores collecting around the 60 mark).
98 |
99 | ### Multiple Regression
100 |
101 | Next, let's add the sex variable `female` (0 = male, 1 = female) as a predictor in our regression and interpret the coefficients and R-squared value.
102 |
103 | ```{r}
104 | model2 <- lm(math ~ ses + female, data = data)
105 | summary(model2)
106 | ```
107 |
108 | The intercept of 58.13 reflects the average math achievement score (out of 100) for male students (`female` = 0) at their class average SES (`ses` = 0). For a one-unit increase in SES, math achievement increases by 4.23 points, controlling for sex. Female students had a math achievement score lower by 1.06 points on average, controlling for SES. SES and sex together explain 14.6% of the variance in math achievement.
109 |
110 | ### Interaction Terms
111 |
112 | In the previous model, we assumed that the relationship between SES and math achievement was constant for both sexes (the homogeneity of regression slopes assumption, as in an ANCOVA model). As a final exercise, let's add an interaction term between sex and SES to our regression.
113 |
114 | ```{r}
115 | model3 <- lm(math ~ ses + female + ses:female, data = data)
116 | summary(model3)
117 |
118 | # Could also succinctly code it as follows:
119 | # lm(math ~ ses*female, data = data)
120 | ```
121 |
122 | An interaction captures that the relationship between two variables may differ based on the level of another variable (i.e., different slopes for different folks). An interaction term, A:B, has two possible interpretations:
123 |
124 | 1. The effect of A on the effect of B on your outcome Y.
125 | 2. The effect of B on the effect of A on your outcome Y.
126 |
127 | The `ses:female` interaction term, .34, represents the effect of being female on the relationship between SES and math achievement. Alternatively, it could represent the effect of SES on the relationship between being female and math achievement. In this case, the latter is a more intuitive interpretation: female students from higher socioeconomic statuses are slightly insulated from the negative relationship between female and math achievement in this sample. As SES increases by one point, the relationship between being female and math achievement becomes less negative, from -1.07 to -.73 (-1.07 + .34). However, this interaction term is not statistically significantly different from zero per the p-value.
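We can check that arithmetic directly from the fitted coefficients (a small sanity check on `model3`, not part of the original demo):

```{r}
# difference in math achievement between female and male students
# at mean SES (ses = 0) and at one SD above the mean (ses = 1)
coef(model3)["female"]                                    # about -1.07
coef(model3)["female"] + 1 * coef(model3)["ses:female"]   # about -0.73
```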
128 |
129 | We can see this graphically using the `sjPlot` package:
130 |
131 | ```{r}
132 | sjPlot::plot_model(model3, type = "pred", terms = c("ses", "female"))
133 | ```
134 |
135 | As we can see, math scores for males (`female` = 0, the red line) are higher than those for females (`female` = 1, the blue line) at all levels of SES. However, the difference between males and females shrinks with increasing SES, as indicated by the two lines being closer together at higher levels of SES.
136 |
137 | The other coefficients have the same interpretations as before. The R-squared indicates that SES and sex account for 14.7% of the variance in math achievement.
138 |
139 | ## Conclusion
140 |
141 | If you feel comfortable with the material presented in this data demonstration, then you have a sufficiently strong baseline to move forward with the materials. In this chapter, we ignored that students were clustered into schools; in the next chapter, we'll examine that clustering, consider its implications for our analyses, and introduce one non-multilevel-model method for handling clustered data.
--------------------------------------------------------------------------------
/03-module-3.Rmd:
--------------------------------------------------------------------------------
1 | # Approaches to Multilevel Data {#module-3}
2 |
3 | ## Learning Objectives
4 |
5 | In this chapter, we will discuss implications of clustered data and review non-multilevel-modelling options for handling that clustering.
6 |
7 | The learning objectives for this chapter are:
8 |
9 | 1. Understand the implications of treating clustered data as unclustered;
10 | 2. Use cluster-robust standard errors to account for clustering;
11 | 3. Compare results between regular and cluster-robust regression.
12 |
13 | All materials for this chapter are available for download [here](https://www.learn-mlms.com/13-appendix.html).
14 |
15 | ## Data Demonstration
16 |
17 | The data for this chapter were taken from chapter 3 of Heck, R. H., Thomas, S. L., & Tabata, L. N. (2011). *Multilevel and Longitudinal Modeling with IBM SPSS*: Taylor & Francis. Students are clustered within schools in the dataset.
18 |
19 | ### Load Data and Dependencies
20 |
21 | First, let's load in the data and packages we'll be using for this data demo. We will use the following packages:
22 |
23 | ```{r message=FALSE}
24 | library(dplyr) # for data processing
25 | library(lmtest) # for cluster-robust standard errors
26 | library(sandwich) # for cluster-robust standard errors
27 | ```
28 |
29 | We'll store the data in an object called `data`. If you want to read the data in with the relative filepath (i.e., just referencing "heck2011.csv"), make sure the file is in the same folder as your R script. If the file is in a different folder, tell your computer exactly where to find it with an absolute file path.
30 |
31 | ```{r, eval=FALSE}
32 | data <- read.csv('heck2011.csv')
33 | ```
34 |
35 | ```{r, echo = FALSE}
36 | # this actually loads my code, but will be hidden
37 | data <- read.csv('data/heck2011.csv', fileEncoding = "UTF-8-BOM")
38 | ```
39 |
40 | ### Dealing with Dependence
41 |
42 | Our dataset is clustered (students within schools), but in Chapter 2 we treated it as if it were not clustered, i.e., as if each student was randomly selected from a population of students, regardless of school. When we treat clustered data as unclustered, we bias the significance testing for our models such that we are more likely to make a Type I error. A *t* value is based on dividing a regression coefficient by the standard error: $t = \frac{b}{SE}$. The standard error is the standard deviation divided by the square root of the sample size *n*: $SE = \frac{\sigma}{\sqrt n}$. When we assume that data are unclustered, we act like we have a larger sample size than we do (e.g., 100 independent observations rather than 10 classes of 10 students), which reduces the standard error and inflates the *t* value, making it more likely that our coefficients will be significant.
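To make the consequence of that formula concrete, here is a small illustration with made-up numbers (not from the Heck data, and a deliberately extreme simplification): holding the coefficient and standard deviation fixed, pretending that 100 clustered observations are 100 independent observations shrinks the standard error and inflates *t*.

```{r}
b <- 4   # a hypothetical regression coefficient
s <- 10  # a hypothetical standard deviation

se_wrong <- s / sqrt(100) # treating 100 students as 100 independent observations
se_right <- s / sqrt(10)  # treating the 10 classes as the effective sample size

c(t_ignoring_clustering = b / se_wrong, t_respecting_clustering = b / se_right)
```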
43 |
44 | We have multiple options for dealing with dependence. Multilevel models are one option, accounting for the clustered data structure by quantifying how the clusters vary across the entire sample. For example, looking at student math achievement, we can move beyond having a single intercept to having a mean intercept across all schools *and* a term representing how schools' mean math achievements vary around the mean intercept. Multilevel models are a powerful tool! But in providing more information to the researcher (e.g., how schools vary around the grand mean intercept), they also require more input from the researcher: do we expect our intercepts to vary? Do we expect our slopes to vary? If so, which slopes?
45 |
46 | Sometimes, we don't need an MLM (and the assumptions that come along with it) because we don't want to ask questions at multiple levels, like "how does student SES and teacher years of experience affect student math achievement?" We just want to know "how does student SES affect student math achievement?" In this case, we might think of the clustering in our data as a nuisance, something to be handled so that our standard errors aren't biased, but not theoretically investigated. In such a case where we want to run a single-level regression that controls bias in the standard errors, we can use cluster-robust standard errors.
47 |
48 | ### Cluster-Robust Standard Errors
49 |
50 | Cluster-robust standard errors account for clustering but retain the interpretation of regular regression models. That is, the coefficients do not delineate within and between effects, but provide average effects pooled across the whole dataset and unbiased standard errors that take into account the clustering. For more information on clustered standard errors, see this helpful overview: https://mldscenter.maryland.gov/egov/Publications/ResearchSeries/Clustered%20Data,%20Are%20Multilevel%20Models%20Really%20Necessary.pdf. Let's look at cluster-robust standard errors in action!
51 |
52 | In chapter 2, we conducted a linear regression predicting math achievement from socioeconomic status and sex. Let's run that same model.
53 |
54 | ```{r}
55 | model <- lm(math ~ ses + female, data = data)
56 | summary(model)
57 | ```
58 |
59 | Now, let's run the same model with cluster-robust standard errors and compare the coefficients and their significance between the regular and cluster-robust models.
60 |
61 | ```{r}
62 | model_crse <- coeftest(model, vcov = vcovCL, cluster = ~ schcode)
63 | model_crse
64 | ```
65 |
66 | As expected, the coefficients are the same between the two models, but the significance levels differ. In this case, the differences are trivial, and all coefficients retain their significance. But in your case, correcting for clustering might make the difference between significant and non-significant results.
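If you want to see the two sets of standard errors side by side, something like the following works (using the `model` and `model_crse` objects from above):

```{r}
# regular vs. cluster-robust standard errors for the same coefficients
cbind(
  regular        = summary(model)$coefficients[, "Std. Error"],
  cluster_robust = model_crse[, "Std. Error"]
)
```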
67 |
68 | ## Conclusion
69 |
70 | In this chapter, we discussed why we need to account for clustering in our analyses. We then demonstrated an alternative to MLMs, cluster-robust standard errors, which can be used when you're asking questions at a single level and want to run a single-level regression while still adjusting the standard errors for clustering.
71 |
72 | In chapter 4, we will look at our first multilevel models for handling clustered data structures and asking multilevel questions.
73 |
74 | ## Further Reading
75 |
76 | McCoach, D. B. (2010). Dealing With Dependence (Part II): A Gentle Introduction to Hierarchical Linear Modeling. Gifted Child Quarterly, 54(3), 252–256. https://doi.org/10.1177/0016986210373475
77 |
78 | McCoach, D. B., & Adelson, J. L. (2010). Dealing with dependence (Part 1): Understanding the effects of clustered data. Gifted Child Quarterly, 54(2), 152–155. https://doi.org/10.1177/0016986210363076
79 |
80 | McNeish, D., Stapleton, L. M., & Silverman, R. D. (2017). On the unnecessary ubiquity of hierarchical linear modeling. Psychological Methods, 22(1), 114–140. https://doi.org/10.1037/met0000078
81 |
82 |
--------------------------------------------------------------------------------
/05-module-5.Rmd:
--------------------------------------------------------------------------------
1 | # Adding Fixed Predictors to MLMs {#module-5}
2 |
3 | ## Learning Objectives
4 |
5 | In this chapter, we will introduce fixed predictors at both level-1 and level-2.
6 |
7 | The learning objectives for this chapter are:
8 |
9 | 1. Code and interpret fixed effects in multilevel models;
10 | 2. Explain the difference between conditional and unconditional effects;
11 | 3. Evaluate the utility of predictors in a model by considering the information from regression coefficients and variance reduced.
12 |
13 | All materials for this chapter are available for download [here](https://www.learn-mlms.com/13-appendix.html).
14 |
15 | ## Data Demonstration
16 |
17 | The data for this chapter were taken from chapter 3 of Heck, R. H., Thomas, S. L., & Tabata, L. N. (2011). *Multilevel and Longitudinal Modeling with IBM SPSS*: Taylor & Francis. Students are clustered within schools in the data.
18 |
19 | ### Load Data and Dependencies
20 |
21 | For this data demo, we will use the following packages:
22 |
23 | ```{r message=FALSE, warning=FALSE}
24 | library(dplyr) # for data manipulation
25 | library(ggplot2) # for visualizations
26 | library(lme4) # for multilevel models
27 | library(lmerTest) # for p-values
28 | library(performance) # for intraclass correlation
29 | ```
30 |
31 | And the same dataset of students' math achievement:
32 |
33 | ```{r, eval=FALSE}
34 | data <- read.csv('heck2011.csv')
35 | ```
36 |
37 | ```{r, echo = FALSE}
38 | # this actually loads my data, but will be hidden
39 | data <- read.csv('data/heck2011.csv', fileEncoding = "UTF-8-BOM")
40 | ```
41 |
42 | ### MLM with Level-1 Predictor
43 |
44 | As a reminder, in Chapter 4 we estimated the random-intercept-only model, also called the null model:
45 |
46 | ```{r}
47 | null_model <- lmer(math ~ 1 + (1|schcode), data = data)
48 | summary(null_model)
49 | ```
50 |
51 | Now that we've explored the null model and variance decomposition it gives us access to, let's practice adding a level-1 predictor to our model. Level-1 predictors vary at level-1, which in our example is the student level, meaning that students have different values for a variable. In our data, socioeconomic status (`ses`) and sex (`female`) vary across students, at level-1. Let's add a fixed effect for `ses` as a predictor to our model.
52 |
53 | The following equations describe this model:
54 |
55 | | Level | Equation |
56 | |:-------|:---------|
57 | |Level 1 | $math_{ij} = \beta_{0j} + \beta_{1j}ses_{ij} + R_{ij}$|
58 | |Level 2 | $\beta_{0j} = \gamma_{00} + U_{0j}$|
59 | | | $\beta_{1j} = \gamma_{10}$|
60 | |Combined| $math_{ij} = \gamma_{00} + \gamma_{10}ses_{ij} + U_{0j} + R_{ij}$|
61 |
62 | We'll be estimating four parameters:
63 |
64 | 1. $\gamma_{00}$: the fixed effect for the intercept, controlling for `ses`;
65 | 2. $\gamma_{10}$: the fixed effect for the slope of `ses`;
66 | 3. $\tau_0^2$: a random effect variance for the intercept capturing the variance of schools around the intercept, controlling for `ses`;
67 | 4. $\sigma^2$: a random effect variance capturing the variance of students around their school mean math achievement, controlling for `ses`.
68 |
69 | Notice that the parameters are now conditional on `ses`. The intercept is no longer interpreted as the intercept across all schools; it's the intercept across all schools *conditional on `ses` being equal to 0*, or at the mean `ses` level for the sample given that `ses` is z-scored in these data. Additionally, note that there is no $U_j$ term associated with the coefficient for `ses`; that's because we're only adding a fixed effect for `ses` right now. This implies that the relationship between `ses` and math achievement is the same across all schools (i.e., the slope is fixed, not randomly varying). We'll look at adding random slope effects in the next chapter. For now, let's run our model.
70 |
71 | ```{r}
72 | ses_l1 <- lmer(math ~ ses + (1|schcode), data = data, REML = TRUE)
73 | summary(ses_l1)
74 | ```
75 |
76 | Per the intercept, the average math achievement across all schools at mean `ses` is 57.596. A one-standard-deviation increase in `ses` is associated with a 3.87-point increase in math achievement. The variance term describing how schools vary around the intercept is 3.469, whereas the variance term describing how the students vary within schools, about their schools' mean, is 62.807. These variance terms are different from our null model that had no predictors; we can quantify that difference in at least two ways.
77 |
78 | One option is to calculate how much level-1 variance was reduced by adding `ses` as a level-1 predictor. If we divide the difference between our null model's level-1 variance and this new model's (l1) level-1 variance by the null model variance, we can see what proportion of variance was reduced.
79 |
80 | ```{r}
81 | null <- sigma(null_model)^2
82 | l1 <- sigma(ses_l1)^2
83 |
84 | (null - l1) / null
85 | ```
86 |
87 | So we reduced about 5.6% of level-1 variance by adding `ses` as a level-1 predictor. Another way of stating this is that we reduced the unexplained within school variance by 5.6%.
88 |
89 | Another option is to calculate the conditional ICC, or the proportion of variance explained by clustering after we account for `ses`. Recall from last chapter that the adjusted ICC accounts only for the random effect variances, while the conditional ICC accounts for the variance of both the random and the fixed effects. With the null model, the adjusted and conditional ICC values from `performance` are the same because there are no predictors in the model, but with a fixed level-1 predictor in the model, we should reference the conditional ICC.
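Roughly, in the notation used above, the adjusted ICC uses only the random-effect variances, while the conditional ICC also counts the variance explained by the fixed effects (written here as $\sigma^2_{fixed}$, our own label) in the total:

$$ICC_{adjusted} = \frac{\tau_0^2}{\tau_0^2 + \sigma^2}, \qquad ICC_{conditional} = \frac{\tau_0^2}{\tau_0^2 + \sigma^2_{fixed} + \sigma^2}$$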
90 |
91 | ```{r}
92 | performance::icc(ses_l1)
93 | ```
94 |
95 | After accounting for the effect of socioeconomic status, 4.6% of the variance in math achievement is accounted for by school membership.
96 |
97 | ### Compare Regular and Multilevel Regression
98 |
99 | In Chapter 3, we compared a regular regression to a cluster-robust standard error regression. Now, let's compare those two approaches with a multilevel model, this time using `ses` as the only predictor.
100 |
101 | The regular regression:
102 |
103 | ```{r}
104 | model <- lm(math ~ ses, data = data)
105 | summary(model)
106 | ```
107 |
108 | The cluster-robust standard error regression:
109 |
110 | ```{r}
111 | model_crse <- lmtest::coeftest(model, vcov = sandwich::vcovCL, cluster = ~ schcode)
112 | model_crse
113 | ```
114 |
115 | These two models had the same coefficients, with different significance values.
116 |
117 | This is our multilevel model:
118 |
119 | ```{r}
120 | summary(ses_l1)
121 | ```
122 |
123 | The intercepts are the same between the MLM and regular regressions, but the coefficient for `ses` is not. Why? In the MLM, the coefficient for `ses` represents the mean relationship between SES and math achievement across all schools, weighted by the reliability of each cluster. That weighting reflects cluster-level sample size, so the estimate differs from the regular regression estimate, which treats all observations equally.
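One quick way to see how unbalanced the clusters are (a small check, not part of the original demonstration) is to count students per school:

```{r}
# number of students per school: sizes vary, so schools contribute
# to the pooled MLM estimate with different reliabilities
data %>%
  count(schcode) %>%
  summarize(smallest = min(n), largest = max(n), median = median(n))
```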
124 |
125 | ### MLM with Level-2 Predictor
126 |
127 | We added `ses` as a level-1 predictor to explain some of the student-level variance in math achievement. Now, let's add a predictor that varies at level-2, meaning that the value is different across level 2 units, which is the school level. Level-2 predictors are different across schools but the same for all students within a school. There are three possible level-2 predictors:
128 |
129 | * `ses_mean`: the mean SES per school (this variable is centered; we'll discuss centering more in Chapter 8)
130 | * `pro4yrc`: the proportion of students at a school who intend to study at a 4-year college/university
131 | * `public`: whether the school is private (0) or public (1)
132 |
133 | This is where we begin to unlock the potential of MLMs, to ask questions about both individual differences (level-1 variables) and school differences (level-2 variables) at the same time while accounting for clustered data structures. Let's consider the role of school type in our model by adding a fixed effect for `public` as a predictor of our intercept.
134 |
135 | The following equations describe this model:
136 |
137 | | Level | Equation |
138 | |:-------|:---------|
139 | |Level 1 | $math_{ij} = \beta_{0j} + \beta_{1j}ses_{ij} + R_{ij}$|
140 | |Level 2 | $\beta_{0j} = \gamma_{00} + \gamma_{01}public_j + U_{0j}$|
141 | | | $\beta_{1j} = \gamma_{10}$|
142 | |Combined| $math_{ij} = \gamma_{00} + \gamma_{01}public_{j} + \gamma_{10}ses_{ij} + U_{0j} + R_{ij}$|
143 |
144 | A few things to note here: first, `public_j` only has a j subscript because only different schools (j's) have different values of `public`. All students (i's) within a school have the same value. Second, `public` is currently only a predictor for the intercept. In Chapter 6 we'll look at using level-2 variables as predictors of level-1 slopes and the cross-level interactions that result.
145 |
146 | We'll be estimating five parameters:
147 |
148 | 1. $\gamma_{00}$: the fixed effect for the intercept, controlling for `ses` and `public`;
149 | 2. $\gamma_{01}$: the fixed effect for the slope of `public` controlling for `ses`
150 | 3. $\gamma_{10}$: the fixed effect for the slope of `ses` controlling for `public`;
151 | 4. $\tau_0^2$: a random effect variance for the intercept capturing the variance of schools around the intercept, controlling for `ses` and `public`;
152 | 5. $\sigma^2$: a random effect variance capturing the variance of students around their school mean math achievement, controlling for `ses` and `public`.
153 |
154 | Notice that the parameters are conditional on both `ses` and on `public` now. Let's run our model.
155 |
156 | ```{r}
157 | ses_l1_public_l2 <- lmer(math ~ 1 + ses + public + (1|schcode), data = data, REML = TRUE)
158 | summary(ses_l1_public_l2)
159 | ```
160 |
161 | Let's look at our fixed effects, which describe the conditional mean effect of each variable on the outcome across all schools. Per the intercept, the average math achievement across all private schools (`public` = 0) at mean SES (`ses` = 0) is 57.70. A one-standard-deviation increase in `ses`, controlling for school type, is associated with a 3.87-point increase in math achievement. Students in public schools at mean `ses` score 0.14 points lower in math achievement on average than students in private schools.
162 |
163 | From our random effect variances, the variance term describing how schools vary around the intercept (at mean SES at private schools) is 3.48, and the variance term describing how students vary around their school means is 62.81.
164 |
165 | Let's calculate variance reduced at level 1 and level 2 by adding school type as a predictor.
166 |
167 | ```{r}
168 | # level-1 variance reduced
169 | sigma2_null <- sigma(null_model)^2
170 | sigma2_public <- sigma(ses_l1_public_l2)^2
171 | (sigma2_null - sigma2_public) / sigma2_null
172 | ```
173 |
174 | ```{r}
175 | # level-2 variance reduced
176 | tau2_null <- VarCorr(null_model)$schcode[1]
177 | tau2_public <- VarCorr(ses_l1_public_l2)$schcode[1]
178 | (tau2_null - tau2_public) / tau2_null
179 | ```
180 |
181 | Relative to the null model, we reduced around 5.6% of the variance in math achievement at level-1 and 67.2% of the variance at level-2 by adding `ses` and `public` as predictors. It makes sense that the variance at level-2 was reduced by so much more once `public` entered the model, because `public` is a level-2 predictor that varies at level-2.
182 |
183 | So, does it seem like school type is related to math achievement? We have two sources of information to consider so far: the regression coefficient and the variance reduced. While the regression coefficient is relatively small, the intercept variance reduced at level-2 is quite large (67%!), so it seems like school type is a valuable predictor in our model.
184 |
185 | ## Conclusion
186 |
187 | In this chapter, we added level-1 and level-2 fixed effects to our models, considered the difference between conditional and unconditional effects, and used regression coefficients and variance reduced to make a decision about retaining model parameters. In Chapter 6, we'll work with random slopes and explain cross-level interactions.
--------------------------------------------------------------------------------
/13-appendix.Rmd:
--------------------------------------------------------------------------------
1 | # (APPENDIX) Appendix {-}
2 |
3 | # Download Materials
4 |
5 | |Chapter|Data|R Script|Worksheet|
6 | |:-----:|:--:|:------:|:-------:|
7 | |[2](#module-2)|`r xfun::embed_file('data/heck2011.csv', text = "heck2011.csv")`|`r xfun::embed_file('Rscripts/02-multiple-regression.R', text = "02-multiple-regression.R")`|`r xfun::embed_file('worksheets/module2.docx', text = "Linear Regression Review")`|
8 | |[3](#module-3)|`r xfun::embed_file('data/heck2011.csv', text = "heck2011.csv")`|`r xfun::embed_file('Rscripts/03-module-3.R', text = "03-module-3.R")`|`r xfun::embed_file('worksheets/module3.docx', text = "Approaches to Multilevel Data")`|
9 | |[4](#module-4)|`r xfun::embed_file('data/heck2011.csv', text = "heck2011.csv")`|`r xfun::embed_file('Rscripts/04-module-4.R', text = "04-module-4.R")`|`r xfun::embed_file('worksheets/module4.docx', text = "Our First MLM: The Null Model")`|
10 | |[5](#module-5)|`r xfun::embed_file('data/heck2011.csv', text = "heck2011.csv")`|`r xfun::embed_file('Rscripts/05-module-5.R', text = "05-module-5.R")`|`r xfun::embed_file('worksheets/module5.docx', text = "Adding Fixed Predictors")`|
11 | |[6](#module-6)|`r xfun::embed_file('data/heck2011.csv', text = "heck2011.csv")`|`r xfun::embed_file('Rscripts/06-module-6.R', text = "06-module-6.R")`|`r xfun::embed_file('worksheets/module6.docx', text = "Random Effects and Cross-level Interactions")`|
12 | |[7](#module-7)|`r xfun::embed_file('data/heck2011.csv', text = "heck2011.csv")`|`r xfun::embed_file('Rscripts/07-module-7.R', text = "07-module-7.R")`|`r xfun::embed_file('worksheets/module7.docx', text = "Estimation Options and Troubleshooting")`|
13 | |[8](#module-8)|`r xfun::embed_file('data/heck2011.csv', text = "heck2011.csv")`|`r xfun::embed_file('Rscripts/08-module-8.R', text = "08-module-8.R")`|`r xfun::embed_file('worksheets/module8.docx', text = "Centering")`|
14 | |[9](#module-9)|`r xfun::embed_file('data/hoffman2007.csv', text = "hoffman2007.csv")`|`r xfun::embed_file('Rscripts/09-module-9.R', text = "09-module-9.R")`|`r xfun::embed_file('worksheets/module9.docx', text = "Repeated Measures")`|
15 | |[10](#module-10)|`r xfun::embed_file('data/casto2016.csv', text = "casto2016.csv")`|`r xfun::embed_file('Rscripts/10-module-10.R', text = "10-module-10.R")`|`r xfun::embed_file('worksheets/module10.docx', text = "Longitudinal Measures")`|
16 | |[11](#module-11)|`r xfun::embed_file('data/teachsat.csv', text = "teachsat.csv")`|`r xfun::embed_file('Rscripts/11-module-11.R', text = "11-module-11.R")`|`r xfun::embed_file('worksheets/module11.docx', text = "Effect Sizes in MLMs")`|
17 | |[12](#module-12)|`r xfun::embed_file('data/rb2002.csv', text = "rb2002.csv")`|`r xfun::embed_file('Rscripts/12-module-12.R', text = "12-module-12.R")`|`r xfun::embed_file('worksheets/module12.docx', text = "Assumptions")`|
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Introduction to Multilevel Modelling
2 |
3 | learn-mlms.com focuses on the conceptual foundations of multilevel models (MLMs), specifying them, and interpreting the results. Topics include multilevel data and approaches to dependence, specifying and interpreting fixed and random effects, model estimation, centering, repeated measures and longitudinal models, assumptions testing, and effect sizes in MLMs.
4 |
5 | The chapters are:
6 |
7 | 1. Introduction
8 | 2. Multiple Regression Review
9 | 3. Approaches to Multilevel Data
10 | 4. Our First Multilevel Models
11 | 5. Adding Fixed Predictors to MLMs
12 | 6. Random Effects and Cross-level Interactions
13 | 7. Model Estimation Options, Problems, and Troubleshooting
14 | 8. Centering Options and Interpretations
15 | 9. Multilevel Modelling with Repeated Measures Data
16 | 10. Multilevel Modelling with Longitudinal Data
17 | 11. Effect Sizes in Multilevel Models
18 | 12. Assumptions
19 |
20 | ## About the Authors
21 |
22 | Mairead Shaw is a graduate student in the Quantitative Psychology and Modelling area at McGill University. Her research interests center around effect sizes in multilevel models and measurement considerations for multi-group and replication research.
23 |
24 | Dr. Jessica Flake is an Assistant Professor of Quantitative Psychology and Modelling at McGill University. She received an MA in quantitative psychology from James Madison University and a PhD in Measurement, Evaluation, and Assessment from the University of Connecticut. Her work focuses on technical and applied aspects of psychological measurement including scale development, psychometric modelling, and scale use and replicability.
25 |
26 | ## Funding
27 |
28 | These materials were made possible by funding from the APS Fund for Teaching and Public Understanding of Psychological Science. You can read more about the fund here.
29 |
30 | ## Contributions
31 |
32 | If you have any feedback or suggested changes, please file an issue.
33 |
--------------------------------------------------------------------------------
/Rscripts/02-multiple-regression.R:
--------------------------------------------------------------------------------
1 | # Load Data and Dependencies ----------------------------------------------
2 |
3 | library(ggplot2) # for data visualization
4 | library(magrittr) # for pipe, %>%
5 |
6 | data <- read.csv('heck2011.csv')
7 | summary(data)
8 |
9 | # Simple Linear Regression ------------------------------------------------
10 |
11 | model1 <- lm(math ~ ses, data = data)
12 | summary(model1)
13 |
14 | ggplot(data = data, mapping = aes(x = ses, y = math)) +
15 | geom_point()
16 |
17 | # Multiple Regression -----------------------------------------------------
18 |
19 | model2 <- lm(math ~ ses + female, data = data)
20 | summary(model2)
21 |
22 | # Interaction Terms -------------------------------------------------------
23 |
24 | model3 <- lm(math ~ ses + female + ses:female, data = data)
25 | summary(model3)
26 |
27 | # Could also succinctly code model3 as lm(math ~ ses*female, data = data)
28 |
29 | sjPlot::plot_model(model3, type = "pred", terms = c("ses", "female"))
30 |
31 |
--------------------------------------------------------------------------------
/Rscripts/03-module-3.R:
--------------------------------------------------------------------------------
1 | # Load Data and Dependencies ----------------------------------------------
2 |
3 | library(dplyr) # for data processing
4 | library(lmtest) # for cluster-robust standard errors
5 | library(sandwich) # for cluster-robust standard errors
6 |
7 | data <- read.csv('data/heck2011.csv', fileEncoding = "UTF-8-BOM")
8 |
9 | # Cluster-Robust Standard Errors ------------------------------------------
10 |
11 | model <- lm(math ~ ses + female, data = data)
12 | summary(model)
13 |
14 | model_crse <- coeftest(model, vcov = vcovCL, cluster = ~ schcode)
15 | model_crse
--------------------------------------------------------------------------------
/Rscripts/04-module-4.R:
--------------------------------------------------------------------------------
1 | # Load Data and Dependencies ----------------------------------------------
2 |
3 | library(dplyr) # for data manipulation
4 | library(ggplot2) # for visualizations
5 | library(lme4) # for multilevel models
6 | library(lmerTest) # for p-values
7 | library(performance) # for intraclass correlation
8 |
9 | data <- read.csv('data/heck2011.csv', fileEncoding = "UTF-8-BOM")
10 |
11 | # Why Multilevel Models? --------------------------------------------------
12 |
13 | data_sub <- data %>%
14 | filter(schcode <= 10)
15 |
16 | data_sub %>%
17 | ggplot(mapping = aes(x = ses, y = math)) +
18 | geom_point() +
19 | geom_smooth(method = "lm", se = FALSE, fullrange = TRUE)
20 |
21 | data_sub %>%
22 | ggplot(mapping = aes(x = ses, y = math, colour = factor(schcode))) +
23 | geom_point() +
24 | geom_smooth(mapping = aes(group = schcode), method = "lm", se = FALSE, fullrange = TRUE) +
25 | labs(colour = "schcode")
26 |
27 | # The Null Model ----------------------------------------------------------
28 |
29 | null_model <- lmer(math ~ 1 + (1|schcode), data = data)
30 | summary(null_model)
31 |
32 | # Understanding Variance --------------------------------------------------
33 |
34 | performance::icc(null_model)
35 |
36 | Tau0 <- VarCorr(null_model)$schcode[1]
37 |
38 | lower_bound <- null_model@beta - 1.96*sqrt(Tau0)
39 | upper_bound <- null_model@beta + 1.96*sqrt(Tau0)
40 |
41 | lower_bound
42 | upper_bound
43 |
44 | # Empirical Bayes Estimates -----------------------------------------------
45 |
46 | data %>%
47 | filter(schcode == 1) %>% # select only school code 1
48 | summarize(
49 | mean(math)
50 | )
51 |
52 | data %>%
53 | filter(schcode == 1) %>%
54 | count()
55 |
56 | empirical_bayes_data <- as_tibble(ranef(null_model))
57 |
58 | head(empirical_bayes_data, 1)
59 |
60 | ggplot(data = empirical_bayes_data, mapping = aes(x = condval)) + # "condval" is the name of the EB estimates returned by the ranef function above
61 | geom_histogram() +
62 | labs(x = "EB estimate of U0j")
63 |
64 |
--------------------------------------------------------------------------------
/Rscripts/05-module-5.R:
--------------------------------------------------------------------------------
1 | # Load Data and Dependencies ----------------------------------------------
2 |
3 | library(dplyr) # for data manipulation
4 | library(ggplot2) # for visualizations
5 | library(lme4) # for multilevel models
6 | library(lmerTest) # for p-values
7 | library(performance) # for intraclass correlation
8 |
9 | data <- read.csv('data/heck2011.csv', fileEncoding = "UTF-8-BOM")
10 |
11 | # MLM with Level-1 Predictor ----------------------------------------------
12 |
13 | null_model <- lmer(math ~ 1 + (1|schcode), data = data)
14 | summary(null_model)
15 |
16 | ses_l1 <- lmer(math ~ ses + (1|schcode), data = data, REML = TRUE)
17 | summary(ses_l1)
18 |
19 | null <- sigma(null_model)^2
20 | l1 <- sigma(ses_l1)^2
21 |
22 | (null - l1) / null
23 |
24 | performance::icc(ses_l1)
25 |
26 | # Compare Regular and Multilevel Regression -------------------------------
27 |
28 | model <- lm(math ~ ses, data = data)
29 | summary(model)
30 |
31 | model_crse <- lmtest::coeftest(model, vcov = sandwich::vcovCL, cluster = ~ schcode)
32 | model_crse
33 |
34 | summary(ses_l1)
35 |
36 | # MLM with Level-2 Predictor ----------------------------------------------
37 |
38 | ses_l1_public_l2 <- lmer(math ~ 1 + ses + public + (1|schcode), data = data, REML = TRUE)
39 | summary(ses_l1_public_l2)
40 |
41 | # level-1 variance reduced
42 | sigma2_null <- sigma(null_model)^2
43 | sigma2_public <- sigma(ses_l1_public_l2)^2
44 | (sigma2_null - sigma2_public) / sigma2_null
45 |
46 | # level-2 variance reduced
47 | tau2_null <- VarCorr(null_model)$schcode[1]
48 | tau2_public <- VarCorr(ses_l1_public_l2)$schcode[1]
49 | (tau2_null - tau2_public) / tau2_null
50 |
--------------------------------------------------------------------------------
/Rscripts/06-module-6.R:
--------------------------------------------------------------------------------
1 | # Load Data and Dependencies ----------------------------------------------
2 |
3 | library(dplyr) # for data manipulation
4 | library(ggplot2) # for visualizations
5 | library(lme4) # for multilevel models
6 | library(lmerTest) # for p-values
7 |
8 | data <- read.csv('data/heck2011.csv', fileEncoding = "UTF-8-BOM")
9 |
10 | # MLM with Random Slope Effect --------------------------------------------
11 |
12 | data %>%
13 | filter(schcode <= 10) %>%
14 | ggplot(mapping = aes(x = ses, y = math, colour = factor(schcode))) +
15 | geom_point() +
16 | geom_smooth(mapping = aes(group = schcode), method = "lm", se = FALSE, fullrange = TRUE) +
17 | labs(colour = "schcode")
18 |
19 | ses_l1_random <- lmer(math ~ ses + (1 + ses|schcode), data = data, REML = TRUE)
20 | summary(ses_l1_random)
21 |
22 | Matrix::bdiag(VarCorr(ses_l1_random))
23 |
24 | -1.58/(1.79*0.88) # intercept-slope correlation: covariance divided by the product of the intercept SD and slope SD
25 |
26 | empirical_bayes_data <- ranef(ses_l1_random) # extract random effects for each school
27 |
28 | empirical_bayes_intercepts <- empirical_bayes_data$schcode["(Intercept)"]
29 |
30 | empirical_bayes_slopes <- empirical_bayes_data$schcode["ses"] # extracts the SES/slope EB estimates from the list
31 |
32 | bind_cols(empirical_bayes_intercepts, empirical_bayes_slopes) %>% # combine EB slopes and intercepts into a useable dataframe for graphing
33 | ggplot(mapping = aes(x = ses, y = `(Intercept)`)) +
34 | geom_point()
35 |
36 | # MLM with Crosslevel Effect ----------------------------------------------
37 |
38 | crosslevel_model <- lmer(math ~ 1 + ses + public + ses:public + (1 + ses|schcode), data = data, REML = TRUE)
39 | summary(crosslevel_model)
40 |
41 | Matrix::bdiag(VarCorr(crosslevel_model))
42 |
43 |
--------------------------------------------------------------------------------
/Rscripts/07-module-7.R:
--------------------------------------------------------------------------------
1 | # Load Data and Dependencies ----------------------------------------------
2 |
3 | library(dplyr) # for data manipulation
4 | library(ggplot2) # for graphing
5 | library(lme4) # for multilevel models
6 | library(lmerTest) # for p-values
7 |
8 | data <- read.csv('data/heck2011.csv', fileEncoding = "UTF-8-BOM")
9 |
10 | # Introduction to Estimation Problems -------------------------------------
11 |
12 | ses_l1_random <- lmer(math ~ 1 + ses + (1 + ses|schcode), data = data, REML = TRUE)
13 |
14 | # Estimation and Optimizers -----------------------------------------------
15 |
16 | data %>%
17 | filter(schcode <= 10) %>% # subset data to make it easier to see
18 | ggplot(mapping = aes(x = ses, y = math)) +
19 | geom_point() +
20 | geom_smooth(method = "lm", se = FALSE, fullrange = TRUE)
21 |
22 | # Singularity -------------------------------------------------------------
23 |
24 | ses_l1_random <- lmer(math ~ 1 + ses + (1 + ses|schcode), data = data, REML = TRUE)
25 |
26 | Matrix::bdiag(VarCorr(ses_l1_random))
27 |
28 | summary(ses_l1_random)
29 |
30 | confint(ses_l1_random, oldNames = FALSE)
31 |
32 | ses_l1_random_cov0 <- lmer(math ~ 1 + ses + (1|schcode) + (0 + ses|schcode), data = data, REML = TRUE)
33 | summary(ses_l1_random_cov0)
34 |
35 | Matrix::bdiag(VarCorr(ses_l1_random_cov0))
36 |
37 | # Deviance Testing for Model Comparison -----------------------------------
38 |
39 | ses_l1 <- lmer(math ~ 1 + ses + (1|schcode), data = data, REML = TRUE)
40 | ses_l1_random_cov0 <- lmer(math ~ 1 + ses + (1|schcode) + (0 + ses|schcode), data = data, REML = TRUE)
41 |
42 | # deviance test to compare model fit
43 | anova(ses_l1, ses_l1_random_cov0, refit = FALSE)
44 |
45 |
--------------------------------------------------------------------------------
/Rscripts/08-module-8.R:
--------------------------------------------------------------------------------
1 | # Load Data and Dependencies ----------------------------------------------
2 |
3 | library(dplyr) # for data manipulation
4 | library(magrittr) # for assignment pipe %<>%
5 | library(lme4) # for multilevel models
6 | library(lmerTest) # for p-values
7 |
8 | data <- read.csv('data/heck2011.csv', fileEncoding = "UTF-8-BOM")
9 |
10 | # Options for Centering in MLMs -------------------------------------------
11 |
12 | model <- lmer(math ~ 1 + ses + (1|schcode), data = data, REML = TRUE)
13 | summary(model)
14 |
15 | # Centering Within Cluster (CWC) ------------------------------------------
16 |
17 | data %<>% # this symbol is an assignment operator and pipe, equivalent to data <- data %>%
18 | group_by(schcode) %>%
19 | mutate(ses_mean = mean(ses))
20 |
21 | data %<>%
22 | mutate(ses_cwc = ses - ses_mean)
23 |
24 | data %>% # check: ses_cwc should have a mean of (approximately) zero within each school
25 | group_by(schcode) %>%
26 | summarize(
27 | mean(ses_cwc)
28 | )
29 |
30 | model_cwc <- lmer(math ~ 1 + ses_cwc + (1|schcode), data = data, REML = TRUE)
31 | summary(model_cwc)
32 |
33 | model_cwc_l2 <- lmer(math ~ 1 + ses_cwc + ses_mean + (1|schcode), data = data, REML = TRUE)
34 | summary(model_cwc_l2)
35 |
36 | # Centering Grand Mean (CGM) ----------------------------------------------
37 |
38 | data %<>%
39 | ungroup() %>% # remove the grouping by school that we added in the CWC section
40 | mutate(ses_grand_mean = mean(ses))
41 |
42 | data %<>%
43 | mutate(ses_cgm = ses - ses_grand_mean)
44 |
45 | data %>% # check: ses_cgm should have an overall mean of (approximately) zero
46 | summarize(
47 | mean(ses_cgm)
48 | )
49 |
50 | cgm_model <- lmer(math ~ 1 + ses_cgm + ses_mean + (1|schcode), data = data, REML = TRUE)
51 | summary(cgm_model)
52 |
53 |
--------------------------------------------------------------------------------
/Rscripts/09-module-9.R:
--------------------------------------------------------------------------------
1 | # Load Data and Dependencies ----------------------------------------------
2 |
3 | library(lme4) # for multilevel models
4 | library(lmerTest) # for p-values
5 | library(performance) # for ICC
6 |
7 | data <- read.csv('data/hoffman2007.csv', fileEncoding = "UTF-8-BOM")
8 | head(data)
9 |
10 | # Random-Intercept-Only/Null Model ----------------------------------------
11 |
12 | null_model <- lmer(lg_rt ~ 1 + (1|id), data = data, REML = FALSE) # note that REML = FALSE
13 | performance::icc(null_model)
14 |
15 | # Adding Level-1 Fixed Effects --------------------------------------------
16 |
17 | l1_model <- lmer(lg_rt ~ 1 + c_mean + c_sal + (1|id), data = data, REML = FALSE)
18 | summary(l1_model)
19 |
20 | anova(null_model, l1_model)
21 |
22 | # Adding Random Slopes ----------------------------------------------------
23 |
24 | l1_random <- lmer(lg_rt ~ 1 + c_mean + c_sal + (1|id) + (0 + c_mean|id) + (0 + c_sal|id), data = data, REML = FALSE)
25 | summary(l1_random)
26 |
27 | l1_random_without_cmean <- lmer(lg_rt ~ 1 + c_mean + c_sal + (1|id) + (0 + c_sal|id), data = data, REML = FALSE)
28 | summary(l1_random_without_cmean)
29 |
30 | anova(l1_random, l1_random_without_cmean)
31 |
32 | # Adding Level-2 Fixed Effects --------------------------------------------
33 |
34 | l2_model <- lmer(lg_rt ~ 1 + c_mean + c_sal + oldage + sex + (1|id), data = data, REML = FALSE)
35 | summary(l2_model)
36 |
37 | # reduced model without the sex predictor
38 | l2_model_no_sex <- lmer(lg_rt ~ 1 + c_mean + c_sal + oldage + (1|id), data = data, REML = FALSE)
39 |
40 | # deviance test
41 | anova(l2_model, l2_model_no_sex)
42 |
43 | # Adding Cross-Level Interactions -----------------------------------------
44 |
45 | crosslevel_model <- lmer(lg_rt ~ 1 + c_mean + c_sal + oldage + oldage:c_mean + (1|id), data = data, REML = FALSE)
46 | summary(crosslevel_model)
47 |
48 |
--------------------------------------------------------------------------------
/Rscripts/10-module-10.R:
--------------------------------------------------------------------------------
1 | # Load Data and Dependencies ----------------------------------------------
2 |
3 | library(dplyr) # for data manipulation
4 | library(ggplot2) # for graphing
5 | library(lme4) # for multilevel models
6 | library(lmerTest) # for p-values
7 | library(performance) # for ICC
8 |
9 | data <- read.csv('data/casto2016.csv', fileEncoding = "UTF-8-BOM")
10 | head(data)
11 |
12 | # Visualizing Testosterone Levels Over Time -------------------------------
13 |
14 | data %>%
15 | group_by(time0) %>%
16 | mutate(tmean = mean(Testosterone)) %>% # mean testosterone per timepoint
17 | ggplot(mapping = aes(x = time0, y = tmean)) +
18 | geom_line() +
19 | labs(title = "Testosterone Over Time for Entire Sample")
20 |
21 | data %>%
22 | group_by(time0, Played) %>% # group by timepoint and played
23 | mutate(tmean = mean(Testosterone)) %>%
24 | ggplot(mapping = aes(x = time0, y = tmean, colour = factor(Played))) +
25 | geom_line() +
26 | labs(title = "Testosterone Over Time, Played vs. Did Not Play")
27 |
28 | data %>%
29 | group_by(time0, HormonCont) %>% # group by timepoint and birth control
30 | mutate(tmean = mean(Testosterone)) %>%
31 | ggplot(mapping = aes(x = time0, y = tmean, colour = factor(HormonCont))) +
32 | geom_line() +
33 | labs(title = "Testosterone Over Time, Birth Control or Not")
34 |
35 | # Random-Intercept-Only/Null Model ----------------------------------------
36 |
37 | null_model <- lmer(Testosterone ~ 1 + (1|Code), data = data, REML = FALSE)
38 | summary(null_model)
39 |
40 | performance::icc(null_model)
41 |
42 |
43 | # Adding Level-1 Fixed and Random Effects ---------------------------------
44 |
45 | l1_model <- lmer(Testosterone ~ 1 + time0 + (time0|Code), data = data, REML = FALSE)
46 | summary(l1_model)
47 |
48 | as.matrix(Matrix::bdiag(VarCorr(l1_model)))
49 |
50 | # Evidence for Retaining Effects ------------------------------------------
51 |
52 | l1_model_no_U1j <- lmer(Testosterone ~ 1 + time0 + (1|Code), data = data, REML = FALSE) # same model without the random slope for time (U1j)
53 |
54 | anova(l1_model, l1_model_no_U1j)
55 |
56 | confint(l1_model, oldNames = FALSE)
57 |
58 | l1_model_cov0 <- lmer(Testosterone ~ 1 + time0 + (1|Code) + (0 + time0|Code), data = data, REML = FALSE) # random intercept and slope with their covariance constrained to 0
59 | anova(l1_model, l1_model_cov0)
60 |
61 | confint(l1_model_cov0, oldNames = FALSE)
62 |
63 | # Extract Empirical Bayes estimates and graph them
64 | as_tibble(coef(l1_model)$Code) %>%
65 | ggplot(mapping = aes(x = time0)) +
66 | geom_histogram(bins = 5)
67 |
68 | # Adding Level-2 Fixed Effects --------------------------------------------
69 |
70 | l2_played <- lmer(Testosterone ~ 1 + time0 + Played + Played:time0 + (1|Code) + (0 + time0|Code), data = data, REML = FALSE)
71 | summary(l2_played)
72 |
73 | anova(l1_model_cov0, l2_played)
74 |
75 | l2_played_birthcontrol <- lmer(Testosterone ~ 1 + time0 + Played + time0:Played + HormonCont + time0:HormonCont + (1|Code) + (0 + time0|Code), data = data, REML = FALSE)
76 | summary(l2_played_birthcontrol)
77 | Matrix::bdiag(VarCorr(l2_played_birthcontrol))
78 |
79 | anova(l2_played, l2_played_birthcontrol)
80 |
81 |
--------------------------------------------------------------------------------
/Rscripts/11-module-11.R:
--------------------------------------------------------------------------------
1 | # Load Data and Dependencies ----------------------------------------------
2 |
3 | library(r2mlm) # for R-squared values
4 | library(lme4) # for multilevel models
5 | library(lmerTest) # for p-values
6 | library(performance) # for ICC
7 |
8 | data(teachsat)
9 |
10 | # Single Model, Automatic Entry -------------------------------------------
11 |
12 | null_model <- lmer(satisfaction ~ 1 + (1|schoolID), data = teachsat, REML = TRUE)
13 | summary(null_model)
14 |
15 | r2mlm(null_model)
16 |
17 | performance::icc(null_model)
18 |
19 | full_model <- lmer(satisfaction ~ 1 + control_c + salary_c + s_t_ratio + (1 + control_c | schoolID),
20 | data = teachsat,
21 | REML = TRUE)
22 | summary(full_model)
23 | Matrix::bdiag(VarCorr(full_model))
24 |
25 | r2mlm(full_model)
26 |
27 | # Single Model, Manual Entry ----------------------------------------------
28 |
29 | r2mlm_manual(data = teachsat,
30 | within_covs = c(4, 5),
31 | between_covs = c(8),
32 | random_covs = c(4),
33 | gamma_w = c(0.311, 0.074),
34 | gamma_b = c(7.186, -0.037),
35 | Tau = matrix(c(0.575, 0.009, 0.009, 0.028), 2, 2),
36 | sigma2 = 0.766,
37 | has_intercept = TRUE,
38 | clustermeancentered = TRUE)
39 |
40 | # Model Comparison --------------------------------------------------------
41 |
42 | reduced_model <- lmer(satisfaction ~ 1 + control_c + salary_c + (1 + control_c | schoolID),
43 | data = teachsat,
44 | REML = TRUE)
45 |
46 | r2mlm_comp(full_model, reduced_model)
47 |
48 |
--------------------------------------------------------------------------------
/Rscripts/12-module-12.R:
--------------------------------------------------------------------------------
1 | ## ----message=FALSE, warning=FALSE-----------------------------------------------------
2 | library(lme4) # for multilevel models
3 | library(lmerTest) # for p-values
4 | library(dplyr) # for data manipulation
5 | library(ggplot2) # for graphing
6 |
7 |
8 | ## ---- eval=FALSE----------------------------------------------------------------------
9 | ## data <- read.csv('rb2002.csv')
10 |
11 |
12 | ## ---- echo = FALSE--------------------------------------------------------------------
13 | # this chunk actually loads the data; it is hidden (echo = FALSE) in the rendered book
14 | data <- read.csv('data/rb2002.csv', fileEncoding = "UTF-8-BOM")
15 |
16 |
17 | ## -------------------------------------------------------------------------------------
18 | data %>%
19 | ggplot(mapping = aes(x = ses, y = mathach)) +
20 | geom_point()
21 |
22 |
23 | ## -------------------------------------------------------------------------------------
24 | data %>%
25 | ggplot(mapping = aes(x = ses, y = mathach)) +
26 | geom_point(alpha = .2)
27 |
28 |
29 | ## -------------------------------------------------------------------------------------
30 | data <- data %>%
31 |   filter(SCHOOL %in% head(unique(SCHOOL), n = 30)) # keep the first 30 schools
32 |
33 |
34 | ## -------------------------------------------------------------------------------------
35 | data %>%
36 | ggplot() +
37 | geom_point(mapping = aes(x = ses, y = mathach)) +
38 | facet_wrap(~ SCHOOL)
39 |
40 |
41 | ## -------------------------------------------------------------------------------------
42 | data %>%
43 | group_by(SCHOOL) %>%
44 | ggplot(mapping = aes(x = ses, y = mathach, colour = factor(SCHOOL))) +
45 | geom_point(show.legend = FALSE) +
46 | geom_smooth(method = lm, se = FALSE, show.legend = FALSE, fullrange = TRUE)
47 |
48 |
49 | ## -------------------------------------------------------------------------------------
50 | model <- lmer(mathach ~ CWCses*ses_mean + (1|SCHOOL) + (0 + CWCses|SCHOOL), data = data, REML = TRUE)
51 | summary(model)
52 |
53 |
54 | ## -------------------------------------------------------------------------------------
55 | data$l1resid <- residuals(model)
56 | head(data$l1resid)
57 |
58 |
59 | ## -------------------------------------------------------------------------------------
60 | data %>%
61 | ggplot(mapping = aes(x = CWCses, y = l1resid)) +
62 | geom_point() +
63 | labs(x = "CWCses", y = "residuals")
64 |
65 |
66 | ## -------------------------------------------------------------------------------------
67 | cor.test(data$l1resid, data$CWCses)
68 |
69 |
70 | ## -------------------------------------------------------------------------------------
71 | data %>%
72 | ggplot(mapping = aes(x = l1resid)) +
73 | geom_histogram()
74 |
75 |
76 | ## -------------------------------------------------------------------------------------
77 | data %>%
78 | ggplot(mapping = aes(x = l1resid)) +
79 | geom_histogram(bins = 15)
80 |
81 |
82 | ## -------------------------------------------------------------------------------------
83 | data %>%
84 | ggplot(mapping = aes(sample = l1resid)) +
85 | stat_qq()
86 |
87 |
88 | ## -------------------------------------------------------------------------------------
89 | l2_data <- data %>%
90 | group_by(SCHOOL) %>% # group data by clustering variable, school
91 | mutate(
92 | mathach_mean = mean(mathach) # create mean math achievement per school
93 | ) %>%
94 | select(SCHOOL, ses_mean, mathach_mean) %>%
95 | unique() # select unique rows (rather than having school, ses_mean, and mathach_mean repeating over and over again)
96 |
97 |
98 | ## -------------------------------------------------------------------------------------
99 | l2_data$intercept_resid <- ranef(model)$SCHOOL[, 1] # level-2 intercept residuals
100 | l2_data$slope_resid <- ranef(model)$SCHOOL[, 2] # level-2 slope residuals
101 |
102 |
103 | ## -------------------------------------------------------------------------------------
104 | l2_data %>%
105 | ggplot(mapping = aes(x = intercept_resid, y = ses_mean)) +
106 | geom_point()
107 |
108 |
109 | ## -------------------------------------------------------------------------------------
110 | cor.test(l2_data$ses_mean, l2_data$intercept_resid)
111 |
112 |
113 | ## -------------------------------------------------------------------------------------
114 | l2_data %>%
115 |   ggplot(mapping = aes(x = slope_resid, y = ses_mean)) +
116 | geom_point()
117 |
118 | cor.test(l2_data$ses_mean, l2_data$slope_resid)
119 |
120 |
121 | ## -------------------------------------------------------------------------------------
122 | l2_data %>%
123 | ggplot(mapping = aes(x = slope_resid, y = intercept_resid)) +
124 | geom_point()
125 |
126 | cor.test(l2_data$intercept_resid, l2_data$slope_resid)
127 |
128 |
129 | ## -------------------------------------------------------------------------------------
130 | l2_data %>%
131 | ggplot(mapping = aes(x = intercept_resid)) +
132 | geom_histogram(binwidth = .75)
133 |
134 | l2_data %>%
135 | ggplot(mapping = aes(sample = intercept_resid)) +
136 | stat_qq()
137 |
138 |
139 | ## -------------------------------------------------------------------------------------
140 | l2_data %>%
141 | ggplot(mapping = aes(x = slope_resid)) +
142 | geom_histogram(binwidth = .50)
143 |
144 | l2_data %>%
145 | ggplot(mapping = aes(sample = slope_resid)) +
146 | stat_qq()
147 |
148 |
149 | ## -------------------------------------------------------------------------------------
150 | n_per_school <- data %>%
151 | group_by(SCHOOL) %>% # group by school
152 |   select(SCHOOL) %>% # keep only the school ID; we want the number of observations per school
153 | count() %>%
154 | ungroup() %>%
155 | select(n) %>%
156 | unlist()
157 |
158 |
159 | ## -------------------------------------------------------------------------------------
160 | data$intercept_resid <- rep(l2_data$intercept_resid, times = n_per_school)
161 | data$slope_resid <- rep(l2_data$slope_resid, times = n_per_school)
162 |
163 |
164 | ## -------------------------------------------------------------------------------------
165 | data %>%
166 | ggplot(mapping = aes(x = l1resid, y = intercept_resid)) +
167 | geom_point()
168 |
169 | cor.test(data$l1resid, data$intercept_resid)
170 |
171 |
172 | ## -------------------------------------------------------------------------------------
173 | data %>%
174 | ggplot(mapping = aes(x = l1resid, y = slope_resid)) +
175 | geom_point()
176 |
177 | cor.test(data$l1resid, data$slope_resid)
178 |
179 |
180 | ## -------------------------------------------------------------------------------------
181 | data %>%
182 | ggplot(mapping = aes(x = l1resid, y = ses_mean)) +
183 | geom_point()
184 |
185 | cor.test(data$l1resid, data$ses_mean)
186 |
187 |
188 | ## -------------------------------------------------------------------------------------
189 | data %>%
190 | ggplot(mapping = aes(x = intercept_resid, y = CWCses)) +
191 | geom_point()
192 |
193 | cor.test(data$intercept_resid, data$CWCses)
194 |
195 |
196 | ## -------------------------------------------------------------------------------------
197 | data %>%
198 | ggplot(mapping = aes(x = slope_resid, y = CWCses)) +
199 | geom_point()
200 |
201 | cor.test(data$slope_resid, data$CWCses)
202 |
203 |
--------------------------------------------------------------------------------
/_book/img/effect_types.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/img/effect_types.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials.epub:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials.epub
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/graph-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/graph-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-101-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-101-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-109-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-109-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-112-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-112-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-113-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-113-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-115-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-115-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-115-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-115-2.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-115-3.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-115-3.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-115-4.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-115-4.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-115-5.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-115-5.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-120-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-120-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-121-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-121-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-123-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-123-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-124-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-124-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-127-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-127-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-129-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-129-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-130-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-130-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-131-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-131-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-134-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-134-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-136-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-136-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-137-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-137-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-138-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-138-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-138-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-138-2.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-139-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-139-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-139-2.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-139-2.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-142-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-142-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-143-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-143-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-144-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-144-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-145-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-145-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-146-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-146-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-16-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-16-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-17-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-17-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-25-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-25-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-42-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-42-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-46-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-46-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-53-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-53-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-6-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-6-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-92-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-92-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-93-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-93-1.png
--------------------------------------------------------------------------------
/_book/open_mlm_materials_files/figure-html/unnamed-chunk-94-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/mkshaw/learn-mlms/4d81d95b41bc52533027339772c92d48aa0686ef/_book/open_mlm_materials_files/figure-html/unnamed-chunk-94-1.png
--------------------------------------------------------------------------------
/_book/reference-keys.txt:
--------------------------------------------------------------------------------
1 | introduction
2 | overview
3 | goals
4 | prerequisites
5 | materials
6 | multiple-regression
7 | learning-objectives
8 | data-demonstration
9 | creating-r-projects
10 | loading-data-and-dependencies
11 | simple-linear-regression
12 | multiple-regression-1
13 | interaction-terms
14 | conclusion
15 | module-3
16 | learning-objectives-1
17 | data-demonstration-1
18 | load-data-and-dependencies
19 | dealing-with-dependence
20 | cluster-robust-standard-errors
21 | conclusion-1
22 | further-reading
23 | module-4
24 | learning-objectives-2
25 | data-demonstration-2
26 | load-data-and-dependencies-1
27 | why-multilevel-models
28 | fixed-vs-random-effects
29 | the-null-model
30 | understanding-variance
31 | intraclass-correlation-coefficient-icc
32 | plausible-values-range
33 | empirical-bayes-estimates
34 | conclusion-2
35 | module-5
36 | learning-objectives-3
37 | data-demonstration-3
38 | load-data-and-dependencies-2
39 | mlm-with-level-1-predictor
40 | compare-regular-and-multilevel-regression
41 | mlm-with-level-2-predictor
42 | conclusion-3
43 | module-6
44 | learning-objectives-4
45 | data-demonstration-4
46 | load-data-and-dependencies-3
47 | mlm-with-random-slope-effect
48 | mlm-with-crosslevel-effect
49 | conclusion-4
50 | module-7
51 | learning-objectives-5
52 | data-demonstration-5
53 | load-data-and-dependencies-4
54 | introduction-to-estimation-problems
55 | estimation-and-optimizers
56 | non-convergence
57 | singularity
58 | deviance-testing-for-model-comparison
59 | conclusion-5
60 | further-reading-1
61 | module-8
62 | learning-objectives-6
63 | data-demonstration-6
64 | load-data-and-dependencies-5
65 | why-center-variables
66 | within-between-and-contextual-effects
67 | options-for-centering-in-mlms
68 | centering-within-cluster-cwc
69 | centering-grand-mean-cgm
70 | what-kind-of-centering-should-you-use
71 | conclusion-6
72 | further-reading-2
73 | module-9
74 | learning-objectives-7
75 | data-demonstration-7
76 | load-dependencies
77 | review-of-multilevel-modelling-procedure
78 | multilevel-models-for-repeated-measures
79 | data-structures-long-vs-wide
80 | our-data-reaction-time
81 | random-intercept-onlynull-model
82 | adding-level-1-fixed-effects
83 | adding-random-slopes
84 | adding-level-2-fixed-effects
85 | adding-cross-level-interactions
86 | conclusion-7
87 | module-10
88 | learning-objectives-8
89 | data-demonstration-8
90 | load-dependencies-1
91 | multilevel-models-for-longitudinal-data
92 | data-structures-long-vs-wide-1
93 | visualizing-testosterone-levels-over-time
94 | random-intercept-onlynull-model-1
95 | adding-level-1-fixed-and-random-effects
96 | evidence-for-retaining-effects
97 | adding-level-2-fixed-effects-1
98 | conclusion-8
99 | module-11
100 | learning-objectives-9
101 | data-demonstration-9
102 | load-data-and-dependencies-6
103 | defining-effect-sizes
104 | r-squared-in-multilevel-models
105 | within-variance-explained
106 | between-variance-explained
107 | total-variance-explained
108 | single-model-automatic-entry
109 | single-model-manual-entry
110 | model-comparison
111 | conclusion-9
112 | additional-reading
113 | module-12
114 | learning-objectives-10
115 | data-demonstration-10
116 | load-data-and-dependencies-7
117 | assumptions-of-mlms
118 | assumption-1-model-specification
119 | assumption-2-functional-form-is-correct
120 | an-aside-extracting-residuals
121 | assumption-3-level-1-residuals-are-independent-and-normally-distributed
122 | independent
123 | normally-distributed
124 | assumption-4-level-2-residuals-are-independent-and-multivariate-normal
125 | independent-1
126 | multivariate-normal
127 | assumption-5-residuals-at-level-1-and-level-2-are-independent
128 | assumption-6-level-1-residuals-independent-of-level-2-predictors-level-2-residuals-independent-of-level-1-predictors
129 | conclusion-10
130 | date-2022-06-07
131 | date-2022-06-08
132 | defining-effect-sizes-1
133 | r-squared-in-multilevel-models-1
134 | within-variance-explained-1
135 | between-variance-explained-1
136 | total-variance-explained-1
137 | single-model-automatic-entry-1
138 | single-model-manual-entry-1
139 | model-comparison-1
140 | additional-reading-1
141 | learning-objectives-11
142 | data-demonstration-11
143 | load-data-and-dependencies-8
144 | conclusion-11
145 | learning-objectives-12
146 | data-demonstration-12
147 | load-data-and-dependencies-9
148 | assumptions-of-mlms-1
149 | assumption-1-model-specification-1
150 | assumption-2-functional-form-is-correct-1
151 | an-aside-extracting-residuals-1
152 | assumption-3-level-1-residuals-are-independent-and-normally-distributed-1
153 | independent-2
154 | normally-distributed-1
155 | assumption-4-level-2-residuals-are-independent-and-multivariate-normal-1
156 | independent-3
157 | multivariate-normal-1
158 | assumption-5-residuals-at-level-1-and-level-2-are-independent-1
159 | assumption-6-level-1-residuals-independent-of-level-2-predictors-level-2-residuals-independent-of-level-1-predictors-1
160 | conclusion-12
161 | module-2
162 | creating-r-projects-1
163 | loading-data-and-dependencies-1
164 | simple-linear-regression-1
165 | multiple-regression-2
166 | interaction-terms-1
167 | download-materials
168 |
--------------------------------------------------------------------------------
/_book/search_index.json:
--------------------------------------------------------------------------------
1 | [["13-appendix.html", "A Download Materials", " A Download Materials Chapter Data R Script Worksheet 2 heck2011.csv filler2 filler3 3 heck2011.csv filler2 filler3 4 heck2011.csv filler2 filler3 5 heck2011.csv filler2 filler3 6 heck2011.csv filler2 filler3 7 heck2011.csv filler2 filler3 8 heck2011.csv filler2 filler3 9 hoffman2007.csv filler2 filler3 10 casto2016.csv filler2 filler3 11 teachsat included in r2mlm filler2 filler3 12 rb2002.csv filler2 filler3 "],["404.html", "Page not found", " Page not found The page you requested cannot be found (perhaps it was moved or renamed). You may want to try searching to find the page's new location, or use the table of contents to find the page you are looking for. "]]
2 |
--------------------------------------------------------------------------------
/_book/style.css:
--------------------------------------------------------------------------------
1 | p.caption {
2 | color: #777;
3 | margin-top: 10px;
4 | }
5 | p code {
6 | white-space: inherit;
7 | }
8 | pre {
9 | word-break: normal;
10 | word-wrap: normal;
11 | }
12 | pre code {
13 | white-space: inherit;
14 | }
15 |
--------------------------------------------------------------------------------
/_bookdown.yml:
--------------------------------------------------------------------------------
1 | book_filename: "open_mlm_materials"
2 | delete_merged_file: true
3 | language:
4 | ui:
5 | chapter_name: "Chapter "
6 | output_dir: "docs"
7 |
--------------------------------------------------------------------------------
/_output.yml:
--------------------------------------------------------------------------------
1 | bookdown::gitbook:
2 | css: style.css
3 | split_by: rmd
4 | config:
5 | toc:
6 | depth: 2
7 | collapse: section
8 | scroll_highlight: yes
9 | before: |
10 |