├── .Rbuildignore
├── .github
├── .gitignore
├── CODE_OF_CONDUCT.md
├── CONTRIBUTING.md
├── FUNDING.yml
├── SUPPORT.md
├── dependabot.yaml
└── workflows
│ ├── R-CMD-check-hard.yaml
│ ├── R-CMD-check-main.yaml
│ ├── R-CMD-check-weekly.yaml
│ ├── R-CMD-check.yaml
│ ├── check-all-examples.yaml
│ ├── check-link-rot.yaml
│ ├── check-random-test-order.yaml
│ ├── check-readme.yaml
│ ├── check-spelling.yaml
│ ├── check-styling.yaml
│ ├── check-test-warnings.yaml
│ ├── check-vignette-warnings.yaml
│ ├── html-5-check.yaml
│ ├── lint-changed-files.yaml
│ ├── lint.yaml
│ ├── pkgdown-no-suggests.yaml
│ ├── pkgdown.yaml
│ ├── revdepcheck.yaml
│ ├── test-coverage-examples.yaml
│ ├── test-coverage.yaml
│ └── update-to-latest-easystats.yaml
├── .gitignore
├── .lintr
├── DESCRIPTION
├── LICENSE
├── NAMESPACE
├── NEWS.md
├── R
├── backtransform_predictions.R
├── clean_names.R
├── data.R
├── describe_nonlinear.R
├── estimate_contrast_methods.R
├── estimate_contrasts.R
├── estimate_contrasts_effectsize.R
├── estimate_grouplevel.R
├── estimate_means.R
├── estimate_predicted.R
├── estimate_slopes.R
├── format.R
├── get_emcontrasts.R
├── get_emmeans.R
├── get_emtrends.R
├── get_marginalcontrasts.R
├── get_marginaleffects_type.R
├── get_marginalmeans.R
├── get_marginaltrends.R
├── joint_test.R
├── modelbased-package.R
├── options.R
├── p_adjust.R
├── plot.R
├── pool.R
├── print.R
├── print_html.R
├── print_md.R
├── reexports.R
├── reshape_grouplevel.R
├── smoothing.R
├── standardize_methods.R
├── summary.R
├── table_footer.R
├── utils.R
├── visualisation_recipe.R
├── visualisation_recipe_internal.R
├── zero_crossings.R
└── zzz.R
├── README.Rmd
├── README.md
├── air.toml
├── cran-comments.md
├── data
├── coffee_data.rda
├── efc.rda
├── fish.rda
└── puppy_love.rda
├── inst
├── CITATION
└── WORDLIST
├── man
├── coffee_data.Rd
├── describe_nonlinear.Rd
├── dot-uniroot.all.Rd
├── efc.Rd
├── estimate_contrasts.Rd
├── estimate_expectation.Rd
├── estimate_grouplevel.Rd
├── estimate_means.Rd
├── estimate_slopes.Rd
├── figures
│ ├── derivative.png
│ ├── logo.png
│ ├── unnamed-chunk-10-1.png
│ ├── unnamed-chunk-11-1.png
│ ├── unnamed-chunk-12-1.png
│ ├── unnamed-chunk-13-1.png
│ ├── unnamed-chunk-14-1.png
│ ├── unnamed-chunk-15-1.png
│ ├── unnamed-chunk-16-1.png
│ ├── unnamed-chunk-17-1.png
│ ├── unnamed-chunk-3-1.png
│ ├── unnamed-chunk-4-1.png
│ ├── unnamed-chunk-5-1.png
│ ├── unnamed-chunk-6-1.png
│ ├── unnamed-chunk-7-1.png
│ ├── unnamed-chunk-8-1.png
│ └── unnamed-chunk-9-1.png
├── fish.Rd
├── get_emmeans.Rd
├── modelbased-options.Rd
├── modelbased-package.Rd
├── pool_contrasts.Rd
├── pool_predictions.Rd
├── print.estimate_contrasts.Rd
├── puppy_love.Rd
├── reexports.Rd
├── smoothing.Rd
├── visualisation_recipe.estimate_predicted.Rd
└── zero_crossings.Rd
├── modelbased.Rproj
├── modelbased.code-workspace
├── paper
├── .gitignore
├── apa.csl
├── example.R
├── paper.Rmd
├── paper.bib
├── paper.log
├── paper.md
├── paper.pdf
└── paper_files
│ └── figure-latex
│ └── fig1-1.pdf
├── pkgdown
├── _pkgdown.yml
└── favicon
│ ├── apple-touch-icon-120x120.png
│ ├── apple-touch-icon-152x152.png
│ ├── apple-touch-icon-180x180.png
│ ├── apple-touch-icon-60x60.png
│ ├── apple-touch-icon-76x76.png
│ ├── apple-touch-icon.png
│ ├── favicon-16x16.png
│ ├── favicon-32x32.png
│ └── favicon.ico
├── tests
├── testthat.R
└── testthat
│ ├── _snaps
│ ├── betareg.md
│ ├── brms.md
│ ├── estimate_contrasts.md
│ ├── estimate_contrasts_methods.md
│ ├── estimate_means_mixed.md
│ ├── joint_test.md
│ ├── plot-facet
│ │ ├── plot-interaction-facets-1.svg
│ │ ├── plot-interaction-facets-10.svg
│ │ ├── plot-interaction-facets-2.svg
│ │ ├── plot-interaction-facets-3.svg
│ │ ├── plot-interaction-facets-4.svg
│ │ ├── plot-interaction-facets-5.svg
│ │ ├── plot-interaction-facets-6.svg
│ │ ├── plot-interaction-facets-7.svg
│ │ ├── plot-interaction-facets-8.svg
│ │ ├── plot-interaction-facets-9.svg
│ │ ├── plot-interaction-facets-cat-1.svg
│ │ ├── plot-interaction-facets-cat-2.svg
│ │ ├── plot-interaction-facets-cat-3.svg
│ │ ├── plot-interaction-facets-cat-4emmeans.svg
│ │ ├── plot-interaction-facets-cat-5emmeans.svg
│ │ └── plot-interaction-facets-cat-6emmeans.svg
│ ├── plot-flexible_numeric
│ │ ├── plot-auto-numeric-by-1.svg
│ │ └── plot-auto-numeric-by-2.svg
│ ├── plot-grouplevel
│ │ ├── plot-grouplevel-bayes-1.svg
│ │ ├── plot-grouplevel-bayes-2.svg
│ │ ├── plot-grouplevel-bayes-3.svg
│ │ ├── plot-grouplevel-bayes-4.svg
│ │ ├── plot-grouplevel-bayes-5.svg
│ │ ├── plot-grouplevel-freq-1.svg
│ │ └── plot-grouplevel-freq-2.svg
│ ├── plot-ordinal
│ │ ├── plot-ordinal-1.svg
│ │ ├── plot-ordinal-2.svg
│ │ ├── plot-ordinal-3.svg
│ │ └── plot-ordinal-4.svg
│ ├── plot-slopes
│ │ ├── plot-slopes-y-axis-labels-1.svg
│ │ └── plot-slopes-y-axis-labels-2.svg
│ ├── plot
│ │ ├── plot-4way-numeric-1.svg
│ │ ├── plot-cat-num-predictor-1.svg
│ │ ├── plot-cat-num-predictor-2.svg
│ │ ├── plot-cat-num-predictor-3.svg
│ │ ├── plot-cat-num-predictor-4.svg
│ │ ├── plot-cat-num-predictor-5.svg
│ │ ├── plot-cat-num-predictor-6.svg
│ │ ├── plot-cat-num-predictor-7.svg
│ │ ├── plot-expectation-fivenum-2.svg
│ │ ├── plot-expectation-fivenum.svg
│ │ ├── plot-glm-logistic-1.svg
│ │ ├── plot-glm-logistic-2.svg
│ │ ├── plot-grouplevel-lme4-1.svg
│ │ ├── plot-grouplevel-lme4-2.svg
│ │ ├── plot-join-dots-1.svg
│ │ ├── plot-join-dots-2.svg
│ │ ├── plot-jonhson-neyman-1.svg
│ │ ├── plot-logistic-bounds-1.svg
│ │ ├── plot-logistic-bounds-2.svg
│ │ ├── plot-me-means-1.svg
│ │ ├── plot-me-means-2.svg
│ │ ├── plot-me-means-3.svg
│ │ ├── plot-me-means-4.svg
│ │ ├── plot-me-means-showdata-1.svg
│ │ ├── plot-me-means-showdata-2.svg
│ │ ├── plot-me-means-showdata-3.svg
│ │ ├── plot-me-means-showdata-4.svg
│ │ ├── plot-means-1.svg
│ │ ├── plot-means-2.svg
│ │ ├── plot-means-3.svg
│ │ ├── plot-means-4.svg
│ │ ├── plot-means-no-ci-1.svg
│ │ ├── plot-means-no-ci-2.svg
│ │ ├── plot-means-no-ci-3.svg
│ │ ├── plot-means-poisson-1.svg
│ │ ├── plot-means-poisson-2.svg
│ │ ├── plot-means-predictor-order-1.svg
│ │ ├── plot-means-predictor-order-2.svg
│ │ ├── plot-means-predictor-order-3.svg
│ │ ├── plot-means-predictor-order-4.svg
│ │ ├── plot-means-showdata-1.svg
│ │ ├── plot-means-showdata-2.svg
│ │ ├── plot-means-showdata-3.svg
│ │ ├── plot-means-showdata-4.svg
│ │ ├── plot-relation-1.svg
│ │ ├── plot-relation-2.svg
│ │ ├── plot-relation-3.svg
│ │ ├── plot-relation-4.svg
│ │ ├── plot-relation-multiple-ci-1.svg
│ │ ├── plot-relation-showdata-1.svg
│ │ ├── plot-relation-showdata-2.svg
│ │ ├── plot-relation-showdata-3.svg
│ │ ├── plot-relation-showdata-4.svg
│ │ ├── plot-slopes-1.svg
│ │ ├── plot-slopes-2.svg
│ │ ├── plot-slopes-3.svg
│ │ ├── plot-slopes-4.svg
│ │ └── plot-slopes-5.svg
│ ├── print.md
│ ├── summary_estimate_slopes.md
│ ├── transform_response.md
│ └── windows
│ │ ├── estimate_contrasts.md
│ │ ├── estimate_contrasts_effectsize.md
│ │ ├── estimate_means_counterfactuals.md
│ │ ├── ordinal.md
│ │ └── print.md
│ ├── test-attributes_estimatefun.R
│ ├── test-attributes_visualisation.R
│ ├── test-backtransform_invlink.R
│ ├── test-betareg.R
│ ├── test-bias_correction.R
│ ├── test-brms-marginaleffects.R
│ ├── test-brms.R
│ ├── test-describe_nonlinear.R
│ ├── test-estimate_contrasts-average.R
│ ├── test-estimate_contrasts.R
│ ├── test-estimate_contrasts_bookexamples.R
│ ├── test-estimate_contrasts_effectsize.R
│ ├── test-estimate_contrasts_methods.R
│ ├── test-estimate_expectation.R
│ ├── test-estimate_filter.R
│ ├── test-estimate_grouplevel.R
│ ├── test-estimate_means-average.R
│ ├── test-estimate_means.R
│ ├── test-estimate_means_ci.R
│ ├── test-estimate_means_counterfactuals.R
│ ├── test-estimate_means_dotargs.R
│ ├── test-estimate_means_marginalization.R
│ ├── test-estimate_means_mixed.R
│ ├── test-estimate_predicted.R
│ ├── test-estimate_slopes.R
│ ├── test-g_computation.R
│ ├── test-get_marginaltrends.R
│ ├── test-glmmTMB.R
│ ├── test-joint_test.R
│ ├── test-keep_iterations.R
│ ├── test-maihda.R
│ ├── test-mgcv.R
│ ├── test-mice.R
│ ├── test-multivariate_response.R
│ ├── test-offset.R
│ ├── test-ordinal.R
│ ├── test-plot-facet.R
│ ├── test-plot-flexible_numeric.R
│ ├── test-plot-grouplevel.R
│ ├── test-plot-ordinal.R
│ ├── test-plot-slopes.R
│ ├── test-plot.R
│ ├── test-predict-dpar.R
│ ├── test-print.R
│ ├── test-scoping_issues.R
│ ├── test-signal.R
│ ├── test-standardize.R
│ ├── test-summary_estimate_slopes.R
│ ├── test-table_footer.R
│ ├── test-transform_response.R
│ ├── test-vcov.R
│ ├── test-verbose.R
│ ├── test-visualisation_recipe.R
│ ├── test-zero_crossings.R
│ └── test-zeroinfl.R
└── vignettes
├── .gitignore
├── bibliography.bib
├── derivatives.Rmd
├── estimate_contrasts.Rmd
├── estimate_grouplevel.Rmd
├── estimate_means.Rmd
├── estimate_relation.Rmd
├── estimate_response.Rmd
├── estimate_slopes.Rmd
├── introduction_comparisons_1.Rmd
├── introduction_comparisons_2.Rmd
├── introduction_comparisons_3.Rmd
├── introduction_comparisons_4.Rmd
├── introduction_comparisons_5.Rmd
├── mixed_models.Rmd
├── modelisation_approach.Rmd
├── overview_of_vignettes.Rmd
├── plotting.Rmd
├── practical_causality.Rmd
├── practical_intersectionality.Rmd
├── visualisation_matrix.Rmd
└── workflow_modelbased.Rmd
/.Rbuildignore:
--------------------------------------------------------------------------------
1 | ^\.Rprofile$
2 | ^.github/.
3 | ^.*\.Rproj$
4 | ^\.Rproj\.user$
5 | ^\.travis.yml
6 | ^\_pkgdown.yml
7 | ^LICENSE
8 | ^docs/.
9 | ^paper/.
10 | ^vignettes/d.
11 | ^vignettes/e.
12 | ^vignettes/i.
13 | ^vignettes/m.
14 | ^vignettes/p.
15 | ^vignettes/t.
16 | ^vignettes/v.
17 | ^vignettes/w.
18 | ^logo.png
19 | ^README.Rmd
20 | ^air.toml
21 | ^logo.png
22 | ^pkgdown/.
23 | ^\.github$
24 | ^CODE_OF_CONDUCT\.md$
25 | ^WIP/.
26 | ^cran-comments\.md$
27 | ^CRAN-SUBMISSION$
28 | \.code-workspace$
29 | ^\.lintr$
30 | ^tests/testthat/_snaps/.
31 |
--------------------------------------------------------------------------------
/.github/.gitignore:
--------------------------------------------------------------------------------
1 | *.html
2 |
--------------------------------------------------------------------------------
/.github/CONTRIBUTING.md:
--------------------------------------------------------------------------------
1 | # Contribution Guidelines
2 |
3 | easystats guidelines 0.1.0
4 |
5 | **Everyone is very welcome to contribute to code, documentation, testing and suggestions.**
6 |
7 | This package aims to be beginner-friendly. Even if you're new to open source, to coding, or to the whole GitHub workflow, we encourage you to try submitting pull requests (PRs).
8 |
9 | - **"I'd like to help, but I'm not good enough with programming yet"**
10 |
11 | That's alright, don't worry! You can always dig into the code, the documentation, or the tests. There are always typos to fix, docs to improve, details to add, code lines to document, tests to write... **Even the smallest PRs are appreciated**.
12 |
13 | - **"I'd like to help, but I don't know where to start"**
14 |
15 | You can browse the **issues section** to find features, ideas, or bugs to start working on. You can also open a new issue **just to say that you're there, interested in helping out**. We might have some ideas suited to your skills.
16 |
17 | - **"I'm not sure if my suggestion or idea is worthwhile"**
18 |
19 | Enough with the impostor syndrome! All suggestions and opinions are welcome, and even if it's just a passing thought, it's always good to receive feedback.
20 |
21 | - **"Why should I waste my time with this? Do I get any credit?"**
22 |
23 | Software contributions are increasingly valued in the academic world, so it is a good time to collaborate with us! Authors of substantial contributions will be added to the **authors** list. We're also very keen to include them in eventual academic publications.
24 |
25 |
26 | **Anyway, the most important thing is to get started! You will then enter a *whole new world, a new fantastic point of view*... So fork this repo, make some changes, and submit them. We will then work together to make the best of it :)**
27 |
28 |
29 | ## Code
30 |
31 | - Please document and comment your code, so that the purpose of each step (or line of code) is clear and understandable.
32 | - Before submitting a change, please read the [**R style guide**](https://style.tidyverse.org/) and in particular our [**easystats convention of code-style**](https://github.com/easystats/easystats#convention-of-code-style) to keep some consistency in code formatting.
33 | - Regarding the style guide, note this exception: we put readability and clarity before everything. Thus, we like underscores and full names (prefer `model_performance` over `modelperf` and `interpret_odds_logistic` over `intoddslog`).
34 | - Before you start to code, make sure you're on the `dev` branch (the most "advanced" one). Then, create a new branch named after your feature (e.g., `feature_lightsaber`) and make your changes. Finally, submit your branch to be merged into the `dev` branch. Every now and then, the `dev` branch is merged into `master` as a new package version.
35 |
36 | ## Checks to do before submission
37 |
38 | - Make sure the **documentation** (roxygen) is complete and up to date
39 | - Make sure to add **tests** for any new functions
40 | - Run:
41 |
42 | - `styler::style_pkg()`: Automatic style formatting
43 | - `lintr::lint_package()`: Style checks
44 | - `devtools::check()`: General checks
45 |
46 |
47 |
48 | ## Useful Materials
49 |
50 | - [Understanding the GitHub flow](https://guides.github.com/introduction/flow/)
51 |
52 |
53 |
--------------------------------------------------------------------------------
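As a convenience, the pre-submission checks listed in CONTRIBUTING.md can be run in one go. This is only a suggested sketch, assuming the {styler}, {lintr}, and {devtools} packages are installed; the `document()` and `test()` steps are additions beyond the three commands listed above.

```r
# Suggested pre-submission routine (assumes styler, lintr, and devtools are installed)
styler::style_pkg()    # automatic style formatting
lintr::lint_package()  # style checks
devtools::document()   # rebuild roxygen documentation (assumed extra step)
devtools::test()       # run the test suite (assumed extra step)
devtools::check()      # general checks (R CMD check)
```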
/.github/FUNDING.yml:
--------------------------------------------------------------------------------
1 | # These are supported funding model platforms
2 |
3 | github: easystats
4 |
--------------------------------------------------------------------------------
/.github/SUPPORT.md:
--------------------------------------------------------------------------------
1 | # Getting help with `{modelbased}`
2 |
3 | Thanks for using `{modelbased}`. Before filing an issue, there are a few places
4 | to explore and pieces to put together to make the process as smooth as possible.
5 |
6 | Start by making a minimal **repr**oducible **ex**ample using the
7 | [reprex](http://reprex.tidyverse.org/) package. If you haven't heard of or used
8 | reprex before, you're in for a treat! Seriously, reprex will make all of your
9 | R-question-asking endeavors easier (which is a pretty insane ROI for the five to
10 | ten minutes it'll take you to learn what it's all about). For additional reprex
11 | pointers, check out the [Get help!](https://www.tidyverse.org/help/) resource
12 | used by the tidyverse team.
13 |
14 | Armed with your reprex, the next step is to figure out where to ask:
15 |
16 | * If it's a question: start with StackOverflow. There are more people there to answer questions.
17 | * If it's a bug: you're in the right place, file an issue.
18 | * If you're not sure: let's [discuss](https://github.com/easystats/modelbased/discussions) it and try to figure it out! If your
19 | problem _is_ a bug or a feature request, you can easily return here and
20 | report it.
21 |
22 | Before opening a new issue, be sure to [search issues and pull requests](https://github.com/easystats/modelbased/issues) to make sure the
23 | bug hasn't been reported and/or already fixed in the development version. By
24 | default, the search will be pre-populated with `is:issue is:open`. You can
25 | [edit the qualifiers](https://help.github.com/articles/searching-issues-and-pull-requests/)
26 | (e.g. `is:pr`, `is:closed`) as needed. For example, you'd simply
27 | remove `is:open` to search _all_ issues in the repo, open or closed.
28 |
29 | Thanks for your help!
--------------------------------------------------------------------------------
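For illustration, a minimal sketch of the reprex workflow described in SUPPORT.md; the model and data are placeholders rather than a real issue, and the {reprex} and {modelbased} packages are assumed to be installed.

```r
# Wrap a small, self-contained example that reproduces the problem in reprex();
# the rendered output can be pasted directly into an issue or discussion.
reprex::reprex({
  library(modelbased)
  model <- lm(Sepal.Width ~ Species, data = iris)
  estimate_means(model, by = "Species")
})
```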
/.github/dependabot.yaml:
--------------------------------------------------------------------------------
1 | version: 2
2 |
3 | updates:
4 | # Keep dependencies for GitHub Actions up-to-date
5 | - package-ecosystem: "github-actions"
6 | directory: "/"
7 | schedule:
8 | interval: "weekly"
9 |
--------------------------------------------------------------------------------
/.github/workflows/R-CMD-check-hard.yaml:
--------------------------------------------------------------------------------
1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples
2 | # Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help
3 | #
4 | # NOTE: This workflow only directly installs "hard" dependencies, i.e. Depends,
5 | # Imports, and LinkingTo dependencies. Notably, Suggests dependencies are never
6 | # installed, with the exception of testthat, knitr, and rmarkdown. The cache is
7 | # never used to avoid accidentally restoring a cache containing a suggested
8 | # dependency.
9 | on:
10 | pull_request:
11 | branches: [main, master]
12 |
13 | name: R-CMD-check-hard
14 |
15 | jobs:
16 | R-CMD-check-hard:
17 | uses: easystats/workflows/.github/workflows/R-CMD-check-hard.yaml@main
18 |
--------------------------------------------------------------------------------
/.github/workflows/R-CMD-check-main.yaml:
--------------------------------------------------------------------------------
1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples
2 | # Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help
3 | #
4 | # NOTE: This workflow is overkill for most R packages and
5 | # check-standard.yaml is likely a better choice.
6 | # usethis::use_github_action("check-standard") will install it.
7 | on:
8 | push:
9 | branches: [main, master]
10 |
11 | name: R-CMD-check
12 |
13 | jobs:
14 | R-CMD-check:
15 | uses: easystats/workflows/.github/workflows/R-CMD-check-main.yaml@main
16 |
--------------------------------------------------------------------------------
/.github/workflows/R-CMD-check-weekly.yaml:
--------------------------------------------------------------------------------
1 | on:
2 | schedule:
3 | # * is a special character in YAML so you have to quote this string
4 | # Trigger once a week at 00:00 on Sunday
5 | - cron: "0 0 * * SUN"
6 |
7 | name: R-CMD-check-weekly
8 |
9 | jobs:
10 | R-CMD-check:
11 | uses: easystats/workflows/.github/workflows/R-CMD-check-main.yaml@main
12 |
--------------------------------------------------------------------------------
/.github/workflows/R-CMD-check.yaml:
--------------------------------------------------------------------------------
1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples
2 | # Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help
3 | #
4 | # NOTE: This workflow is overkill for most R packages and
5 | # check-standard.yaml is likely a better choice.
6 | # usethis::use_github_action("check-standard") will install it.
7 | on:
8 | pull_request:
9 | branches: [main, master]
10 |
11 | name: R-CMD-check
12 |
13 | jobs:
14 | R-CMD-check:
15 | uses: easystats/workflows/.github/workflows/R-CMD-check.yaml@main
16 |
--------------------------------------------------------------------------------
/.github/workflows/check-all-examples.yaml:
--------------------------------------------------------------------------------
1 | # Make sure all examples run successfully, even the ones that are not supposed
2 | # to be run or tested on CRAN machines by default.
3 | #
4 | # The examples that fail should use
5 | # - `if (FALSE) { ... }` (if example is included only for illustrative purposes)
6 | # - `try({ ... })` (if the intent is to show the error)
7 | #
8 | # This workflow helps find such failing examples that need to be modified.
9 | on:
10 | pull_request:
11 | branches: [main, master]
12 |
13 | name: check-all-examples
14 |
15 | jobs:
16 | check-all-examples:
17 | uses: easystats/workflows/.github/workflows/check-all-examples.yaml@main
18 |
--------------------------------------------------------------------------------
/.github/workflows/check-link-rot.yaml:
--------------------------------------------------------------------------------
1 | on:
2 | pull_request:
3 | branches: [main, master]
4 |
5 | name: check-link-rot
6 |
7 | jobs:
8 | check-link-rot:
9 | uses: easystats/workflows/.github/workflows/check-link-rot.yaml@main
10 |
--------------------------------------------------------------------------------
/.github/workflows/check-random-test-order.yaml:
--------------------------------------------------------------------------------
1 | # Run tests in random order
2 | on:
3 | pull_request:
4 | branches: [main, master]
5 |
6 | name: check-random-test-order
7 |
8 | jobs:
9 | check-random-test-order:
10 | uses: easystats/workflows/.github/workflows/check-random-test-order.yaml@main
11 |
--------------------------------------------------------------------------------
/.github/workflows/check-readme.yaml:
--------------------------------------------------------------------------------
1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples
2 | # Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help
3 |
4 | on:
5 | push:
6 | branches: [main, master]
7 | pull_request:
8 | branches: [main, master]
9 |
10 | name: check-readme
11 |
12 | jobs:
13 | check-readme:
14 | uses: easystats/workflows/.github/workflows/check-readme.yaml@main
15 |
--------------------------------------------------------------------------------
/.github/workflows/check-spelling.yaml:
--------------------------------------------------------------------------------
1 | on:
2 | pull_request:
3 | branches: [main, master]
4 |
5 | name: check-spelling
6 |
7 | jobs:
8 | check-spelling:
9 | uses: easystats/workflows/.github/workflows/check-spelling.yaml@main
10 |
--------------------------------------------------------------------------------
/.github/workflows/check-styling.yaml:
--------------------------------------------------------------------------------
1 | on:
2 | pull_request:
3 | branches: [main, master]
4 |
5 | name: check-styling
6 |
7 | jobs:
8 | check-styling:
9 | uses: easystats/workflows/.github/workflows/check-styling.yaml@main
10 |
--------------------------------------------------------------------------------
/.github/workflows/check-test-warnings.yaml:
--------------------------------------------------------------------------------
1 | # Running tests with options(warn = 2) to fail on test warnings
2 | on:
3 | pull_request:
4 | branches: [main, master]
5 |
6 | name: check-test-warnings
7 |
8 | jobs:
9 | check-test-warnings:
10 | uses: easystats/workflows/.github/workflows/check-test-warnings.yaml@main
11 |
--------------------------------------------------------------------------------
/.github/workflows/check-vignette-warnings.yaml:
--------------------------------------------------------------------------------
1 | # Render vignettes with options(warn = 2) to fail on vignette warnings
2 | on:
3 | pull_request:
4 | branches: [main, master]
5 |
6 | name: check-vignette-warnings
7 |
8 | jobs:
9 | check-vignette-warnings:
10 | uses: easystats/workflows/.github/workflows/check-vignette-warnings.yaml@main
11 |
--------------------------------------------------------------------------------
/.github/workflows/html-5-check.yaml:
--------------------------------------------------------------------------------
1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples
2 | # Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help
3 | on:
4 | pull_request:
5 | branches: [main, master]
6 |
7 | name: html-5-check
8 |
9 | jobs:
10 | html-5-check:
11 | uses: easystats/workflows/.github/workflows/html-5-check.yaml@main
12 |
--------------------------------------------------------------------------------
/.github/workflows/lint-changed-files.yaml:
--------------------------------------------------------------------------------
1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples
2 | # Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help
3 | on:
4 | pull_request:
5 | branches: [main, master]
6 |
7 | name: lint-changed-files
8 |
9 | jobs:
10 | lint-changed-files:
11 | uses: easystats/workflows/.github/workflows/lint-changed-files.yaml@main
12 |
--------------------------------------------------------------------------------
/.github/workflows/lint.yaml:
--------------------------------------------------------------------------------
1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples
2 | # Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help
3 | on:
4 | pull_request:
5 | branches: [main, master]
6 |
7 | name: lint
8 |
9 | jobs:
10 | lint:
11 | uses: easystats/workflows/.github/workflows/lint.yaml@main
12 |
--------------------------------------------------------------------------------
/.github/workflows/pkgdown-no-suggests.yaml:
--------------------------------------------------------------------------------
1 | on:
2 | pull_request:
3 | branches: [main, master]
4 |
5 | name: pkgdown-no-suggests
6 |
7 | jobs:
8 | pkgdown-no-suggests:
9 | uses: easystats/workflows/.github/workflows/pkgdown-no-suggests.yaml@main
10 |
--------------------------------------------------------------------------------
/.github/workflows/pkgdown.yaml:
--------------------------------------------------------------------------------
1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples
2 | # Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help
3 | on:
4 | push:
5 | branches: [main, master]
6 | pull_request:
7 | branches: [main, master]
8 | release:
9 | types: [published]
10 | workflow_dispatch:
11 |
12 | name: pkgdown
13 |
14 | jobs:
15 | pkgdown:
16 | uses: easystats/workflows/.github/workflows/pkgdown.yaml@main
17 |
--------------------------------------------------------------------------------
/.github/workflows/revdepcheck.yaml:
--------------------------------------------------------------------------------
1 | on:
2 | pull_request:
3 | branches: [main, master]
4 |
5 | name: revdepcheck
6 |
7 | jobs:
8 | revdepcheck:
9 | uses: easystats/workflows/.github/workflows/revdepcheck.yaml@main
10 |
--------------------------------------------------------------------------------
/.github/workflows/test-coverage-examples.yaml:
--------------------------------------------------------------------------------
1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples
2 | # Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help
3 | on:
4 | pull_request:
5 | branches: [main, master]
6 |
7 | name: test-coverage-examples
8 |
9 | jobs:
10 | test-coverage-examples:
11 | uses: easystats/workflows/.github/workflows/test-coverage-examples.yaml@main
12 |
--------------------------------------------------------------------------------
/.github/workflows/test-coverage.yaml:
--------------------------------------------------------------------------------
1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples
2 | # Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help
3 | on:
4 | pull_request:
5 | branches: [main, master]
6 |
7 | name: test-coverage
8 |
9 | jobs:
10 | test-coverage:
11 | uses: easystats/workflows/.github/workflows/test-coverage.yaml@main
12 |
--------------------------------------------------------------------------------
/.github/workflows/update-to-latest-easystats.yaml:
--------------------------------------------------------------------------------
1 | on:
2 | schedule:
3 | # Check for dependency updates once a month
4 | - cron: "0 0 1 * *"
5 |
6 | name: update-to-latest-easystats
7 |
8 | jobs:
9 | update-to-latest-easystats:
10 | uses: easystats/workflows/.github/workflows/update-to-latest-easystats.yaml@main
11 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # History files
2 | .Rhistory
3 | .Rapp.history
4 |
5 | # Session Data files
6 | .RData
7 |
8 | # Example code in package build process
9 | *-Ex.R
10 |
11 | # Output files from R CMD build
12 | /*.tar.gz
13 |
14 | # Output files from R CMD check
15 | /*.Rcheck/
16 |
17 | # RStudio files
18 | .Rproj.user/
19 | *.Rproj$
20 |
21 | # produced vignettes
22 | vignettes/*.html
23 | vignettes/*.pdf
24 |
25 | # OAuth2 token, see https://github.com/hadley/httr/releases/tag/v0.3
26 | .httr-oauth
27 |
28 | # knitr and R markdown default cache directories
29 | /*_cache/
30 | /cache/
31 |
32 | # Temporary files created by R markdown
33 | *.utf8.md
34 | *.knit.md
35 |
36 | # Shiny token, see https://shiny.rstudio.com/articles/shinyapps.html
37 | rsconnect/
38 |
39 | # =========================
40 | # Operating System Files
41 | # OSX
42 | .DS_Store
43 | .AppleDouble
44 | .LSOverride
45 | # Thumbnails
46 | ._*
47 | # Files that might appear in the root of a volume
48 | .DocumentRevisions-V100
49 | .fseventsd
50 | .Spotlight-V100
51 | .TemporaryItems
52 | .Trashes
53 | .VolumeIcon.icns
54 | # Directories potentially created on remote AFP share
55 | .AppleDB
56 | .AppleDesktop
57 | Network Trash Folder
58 | Temporary Items
59 | .apdisk
60 | /.quarto/
61 |
--------------------------------------------------------------------------------
/.lintr:
--------------------------------------------------------------------------------
1 | linters: linters_with_defaults(
2 | absolute_path_linter = NULL,
3 | commented_code_linter = NULL,
4 | cyclocomp_linter = cyclocomp_linter(25),
5 | extraction_operator_linter = NULL,
6 | implicit_integer_linter = NULL,
7 | line_length_linter(120),
8 | namespace_linter = NULL,
9 | nonportable_path_linter = NULL,
10 | object_name_linter = NULL,
11 | object_length_linter(50),
12 | object_usage_linter = NULL,
13 | todo_comment_linter = NULL,
14 | undesirable_function_linter(c("mapply" = NA, "sapply" = NA, "setwd" = NA)),
15 | undesirable_operator_linter = NULL,
16 | unnecessary_concatenation_linter(allow_single_expression = FALSE),
17 | defaults = linters_with_tags(tags = NULL)
18 | )
19 |
--------------------------------------------------------------------------------
/DESCRIPTION:
--------------------------------------------------------------------------------
1 | Type: Package
2 | Package: modelbased
3 | Title: Estimation of Model-Based Predictions, Contrasts and Means
4 | Version: 0.11.2
5 | Authors@R:
6 | c(person(given = "Dominique",
7 | family = "Makowski",
8 | role = c("aut", "cre"),
9 | email = "officialeasystats@gmail.com",
10 | comment = c(ORCID = "0000-0001-5375-9967")),
11 | person(given = "Daniel",
12 | family = "Lüdecke",
13 | role = "aut",
14 | email = "d.luedecke@uke.de",
15 | comment = c(ORCID = "0000-0002-8895-3206")),
16 | person(given = "Mattan S.",
17 | family = "Ben-Shachar",
18 | role = "aut",
19 | email = "matanshm@post.bgu.ac.il",
20 | comment = c(ORCID = "0000-0002-4287-4801")),
21 | person(given = "Indrajeet",
22 | family = "Patil",
23 | role = "aut",
24 | email = "patilindrajeet.science@gmail.com",
25 | comment = c(ORCID = "0000-0003-1995-6531")),
26 | person(given = "Rémi",
27 | family = "Thériault",
28 | role = "aut",
29 | email = "remi.theriault@mail.mcgill.ca",
30 | comment = c(ORCID = "0000-0003-4315-6788")))
31 | Maintainer: Dominique Makowski <officialeasystats@gmail.com>
32 | Description: Implements a general interface for model-based estimations
33 | for a wide variety of models, used in the computation of
34 | marginal means, contrast analysis and predictions. For a list of supported models,
35 | see 'insight::supported_models()'.
36 | License: GPL-3
37 | URL: https://easystats.github.io/modelbased/
38 | BugReports: https://github.com/easystats/modelbased/issues
39 | Depends:
40 | R (>= 3.6)
41 | Imports:
42 | bayestestR (>= 0.16.0),
43 | datawizard (>= 1.1.0),
44 | insight (>= 1.3.0),
45 | parameters (>= 0.26.0),
46 | graphics,
47 | stats,
48 | tools,
49 | utils
50 | Suggests:
51 | BH,
52 | betareg,
53 | boot,
54 | bootES,
55 | brms,
56 | coda,
57 | collapse,
58 | correlation,
59 | curl,
60 | easystats,
61 | effectsize (>= 1.0.0),
62 | emmeans (>= 1.10.2),
63 | Formula,
64 | gamm4,
65 | gganimate,
66 | ggplot2,
67 | glmmTMB,
68 | httr2,
69 | knitr,
70 | lme4,
71 | lmerTest,
72 | logspline,
73 | MASS,
74 | Matrix,
75 | marginaleffects (>= 0.26.0),
76 | mice,
77 | mgcv,
78 | nanoparquet,
79 | ordinal,
80 | palmerpenguins,
81 | performance (>= 0.13.0),
82 | patchwork,
83 | pbkrtest,
84 | poorman,
85 | pscl,
86 | RcppEigen,
87 | report,
88 | rmarkdown,
89 | rstanarm,
90 | rtdists,
91 | RWiener,
92 | sandwich,
93 | see (>= 0.11.0),
94 | survival,
95 | testthat (>= 3.2.1),
96 | vdiffr,
97 | withr
98 | VignetteBuilder:
99 | knitr
100 | Encoding: UTF-8
101 | Language: en-US
102 | RoxygenNote: 7.3.2
103 | Config/testthat/edition: 3
104 | Config/testthat/parallel: true
105 | Roxygen: list(markdown = TRUE)
106 | Config/Needs/check: stan-dev/cmdstanr
107 | Config/Needs/website: easystats/easystatstemplate
108 | LazyData: true
109 |
--------------------------------------------------------------------------------
/NAMESPACE:
--------------------------------------------------------------------------------
1 | # Generated by roxygen2: do not edit by hand
2 |
3 | S3method(describe_nonlinear,data.frame)
4 | S3method(describe_nonlinear,estimate_predicted)
5 | S3method(describe_nonlinear,numeric)
6 | S3method(estimate_contrasts,default)
7 | S3method(estimate_contrasts,estimate_predicted)
8 | S3method(estimate_grouplevel,brmsfit)
9 | S3method(estimate_grouplevel,default)
10 | S3method(estimate_grouplevel,stanreg)
11 | S3method(format,estimate_contrasts)
12 | S3method(format,estimate_grouplevel)
13 | S3method(format,estimate_means)
14 | S3method(format,estimate_predicted)
15 | S3method(format,estimate_slopes)
16 | S3method(format,estimate_smooth)
17 | S3method(format,marginaleffects_contrasts)
18 | S3method(format,marginaleffects_means)
19 | S3method(format,marginaleffects_slopes)
20 | S3method(format,summary_estimate_slopes)
21 | S3method(format,visualisation_matrix)
22 | S3method(plot,estimate_contrasts)
23 | S3method(plot,estimate_grouplevel)
24 | S3method(plot,estimate_means)
25 | S3method(plot,estimate_predicted)
26 | S3method(plot,estimate_slopes)
27 | S3method(print,estimate_contrasts)
28 | S3method(print,estimate_grouplevel)
29 | S3method(print,estimate_means)
30 | S3method(print,estimate_predicted)
31 | S3method(print,estimate_slopes)
32 | S3method(print,estimate_smooth)
33 | S3method(print,summary_estimate_slopes)
34 | S3method(print,visualisation_matrix)
35 | S3method(print_html,estimate_contrasts)
36 | S3method(print_html,estimate_grouplevel)
37 | S3method(print_html,estimate_means)
38 | S3method(print_html,estimate_predicted)
39 | S3method(print_html,estimate_slopes)
40 | S3method(print_html,estimate_smooth)
41 | S3method(print_html,visualisation_matrix)
42 | S3method(print_md,estimate_contrasts)
43 | S3method(print_md,estimate_grouplevel)
44 | S3method(print_md,estimate_means)
45 | S3method(print_md,estimate_predicted)
46 | S3method(print_md,estimate_slopes)
47 | S3method(print_md,estimate_smooth)
48 | S3method(print_md,visualisation_matrix)
49 | S3method(reshape_grouplevel,data.frame)
50 | S3method(reshape_grouplevel,default)
51 | S3method(reshape_grouplevel,estimate_grouplevel)
52 | S3method(smoothing,data.frame)
53 | S3method(smoothing,numeric)
54 | S3method(standardize,estimate_contrasts)
55 | S3method(standardize,estimate_means)
56 | S3method(standardize,estimate_predicted)
57 | S3method(standardize,estimate_slopes)
58 | S3method(summary,estimate_slopes)
59 | S3method(summary,reshape_grouplevel)
60 | S3method(unstandardize,estimate_contrasts)
61 | S3method(unstandardize,estimate_means)
62 | S3method(unstandardize,estimate_predicted)
63 | S3method(visualisation_recipe,estimate_grouplevel)
64 | S3method(visualisation_recipe,estimate_means)
65 | S3method(visualisation_recipe,estimate_predicted)
66 | S3method(visualisation_recipe,estimate_slopes)
67 | export(describe_nonlinear)
68 | export(estimate_contrasts)
69 | export(estimate_expectation)
70 | export(estimate_grouplevel)
71 | export(estimate_link)
72 | export(estimate_means)
73 | export(estimate_prediction)
74 | export(estimate_relation)
75 | export(estimate_slopes)
76 | export(estimate_smooth)
77 | export(find_inversions)
78 | export(get_emcontrasts)
79 | export(get_emmeans)
80 | export(get_emtrends)
81 | export(get_marginalcontrasts)
82 | export(get_marginalmeans)
83 | export(get_marginaltrends)
84 | export(pool_contrasts)
85 | export(pool_predictions)
86 | export(pool_slopes)
87 | export(print_html)
88 | export(print_md)
89 | export(reshape_grouplevel)
90 | export(smoothing)
91 | export(standardize)
92 | export(unstandardize)
93 | export(visualisation_recipe)
94 | export(zero_crossings)
95 | importFrom(datawizard,standardize)
96 | importFrom(datawizard,unstandardize)
97 | importFrom(datawizard,visualisation_recipe)
98 | importFrom(insight,print_html)
99 | importFrom(insight,print_md)
100 |
--------------------------------------------------------------------------------
/R/backtransform_predictions.R:
--------------------------------------------------------------------------------
1 | # back-transform from link-scale? this function is...
2 | # - only called for means, not contrasts, because for contrasts we rely on
3 | # the delta-method for SEs on the response scale
4 | # - only called when `type` (i.e. `predict`) is "response" AND the model class
5 | # has a "link" prediction type
6 | .backtransform_predictions <- function(means, model, predict_args, ci, df = Inf) {
7 | # In `.get_marginaleffects_type_argument()`, where we set the `predict`
8 | # aergument, we check whether response-scale is requested - if so, we set
9 | # predict-type to "link" and now backtransform predictions and CIs
10 | if (isTRUE(predict_args$backtransform) && all(c("conf.low", "conf.high", "estimate") %in% colnames(means))) {
11 | # extract link-inverse
12 | linv <- predict_args$link_inverse
13 | # if we have a standard error column, we need to back-transform this as well
14 | if ("std.error" %in% colnames(means)) {
15 | # define factor for multiplying SE
16 | alpha <- (1 + ci) / 2
17 | t_crit <- stats::qt(alpha, df = df)
18 | # first transform CI, then SE and finally estimates. this order is
19 | # required - e.g., SE would be wrong if we backtransform estimate first
20 | means$conf.low <- linv(means$estimate - t_crit * means$std.error)
21 | means$conf.high <- linv(means$estimate + t_crit * means$std.error)
22 | means$std.error <- exp(means$estimate) * means$std.error
23 | } else {
24 | # for some models, we have no std.error, so just transform CIs
25 | means$conf.low <- linv(means$conf.low)
26 | means$conf.high <- linv(means$conf.high)
27 | }
28 | means$estimate <- linv(means$estimate)
29 | }
30 |
31 | means
32 | }
33 |
34 |
35 | # internal to return a possibly bias-corrected link function
36 | .link_inverse <- function(model = NULL,
37 | bias_correction = FALSE,
38 | residual_variance = NULL,
39 | verbose = TRUE,
40 | ...) {
41 | if (isTRUE(bias_correction)) {
42 | dots <- list(...)
43 | if (!is.null(dots$sigma) && !is.na(dots$sigma)) {
44 | residual_variance <- dots$sigma^2
45 | }
46 | l <- .bias_correction(model, residual_variance, verbose, ...)$linkinv
47 | if (is.null(l)) {
48 | l <- insight::link_inverse(model)
49 | }
50 | } else {
51 | l <- insight::link_inverse(model)
52 | }
53 | l
54 | }
55 |
56 |
57 | # apply bias-correction for back-transformation of predictions on the link-scale
58 | # we want sigma^2 (residual_variance) here to calculate the correction
59 | .bias_correction <- function(model = NULL, residual_variance = NULL, verbose = TRUE, ...) {
60 | # we need a model object
61 | if (is.null(model)) {
62 | return(NULL)
63 | }
64 | # extract residual variance, if not provided
65 | if (is.null(residual_variance)) {
66 | residual_variance <- .get_residual_variance(model, ...) # returns sigma^2
67 | }
68 | # we need residual variance
69 | if (is.null(residual_variance)) {
70 | if (verbose) {
71 | insight::format_alert("Could not extract residual variance to apply bias correction. No bias adjustment carried out.") # nolint
72 | }
73 | return(NULL)
74 | }
75 |
76 | # extract current link function
77 | link <- .safe(insight::get_family(model))
78 | # we need a link function
79 | if (is.null(link)) {
80 | if (verbose) {
81 | insight::format_alert("Could not extract information about the model's link-function to apply bias correction. No bias adjustment carried out.") # nolint
82 | }
83 | return(NULL)
84 | }
85 |
86 | link$inv <- link$linkinv
87 | link$der <- link$mu.eta
88 | link$residual_variance <- residual_variance / 2
89 |
90 | link$der2 <- function(eta) {
91 | with(link, 1000 * (der(eta + 5e-4) - der(eta - 5e-4)))
92 | }
93 | link$linkinv <- function(eta) {
94 | with(link, inv(eta) + residual_variance * der2(eta))
95 | }
96 | link$mu.eta <- function(eta) {
97 | with(link, der(eta) + 1000 * residual_variance * (der2(eta + 5e-4) - der2(eta - 5e-4)))
98 | }
99 | link
100 | }
101 |
102 |
103 | .get_residual_variance <- function(x, tolerance = 1e-10, ...) {
104 | if (insight::is_mixed_model(x)) {
105 | out <- .safe(insight::get_variance_residual(x, tolerance = tolerance))
106 | } else {
107 | out <- .safe(insight::get_sigma(x, ci = NULL, no_recursion = TRUE, verbose = FALSE)^2, 0)
108 | if (!length(out)) {
109 | out <- 0
110 | }
111 | }
112 | out
113 | }
114 |
--------------------------------------------------------------------------------
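To make the order of operations in `.backtransform_predictions()` concrete, here is a small standalone sketch for a log-link (e.g., Poisson) model. The numbers are made up; the point is that the confidence limits are back-transformed from the link scale before the estimate, and the standard error uses the delta method (for a log link, the derivative of `exp()` is `exp()` itself, matching the `exp(means$estimate) * means$std.error` line above).

```r
# Illustrative back-transformation for a log-link model (made-up values)
linv <- exp                        # inverse link for log-link models
estimate <- 1.2                    # estimate on the link scale
std_error <- 0.15                  # standard error on the link scale
ci <- 0.95
t_crit <- qt((1 + ci) / 2, df = Inf)

conf_low <- linv(estimate - t_crit * std_error)   # CIs first ...
conf_high <- linv(estimate + t_crit * std_error)
se_response <- linv(estimate) * std_error         # ... then SE (delta method)
estimate_response <- linv(estimate)               # ... and the estimate last
```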
/R/clean_names.R:
--------------------------------------------------------------------------------
1 | # Clean names -------------------------------------------------------------
2 |
3 |
4 | #' @keywords internal
5 | .clean_names_frequentist <- function(means, predict = NULL, info = NULL) {
6 | names(means)[names(means) == "emmean"] <- .guess_estimate_name(predict, info)
7 | names(means)[names(means) == "response"] <- .guess_estimate_name(predict, info)
8 | names(means)[names(means) == "prob"] <- "Probability"
9 | names(means)[names(means) == "estimate"] <- "Difference"
10 | names(means)[names(means) == "odds.ratio"] <- "Odds_ratio"
11 | names(means)[names(means) == "ratio"] <- "Ratio"
12 | names(means)[names(means) == "rate"] <- "Rate"
13 | names(means)[names(means) == "t.ratio"] <- "t"
14 | names(means)[names(means) == "z.ratio"] <- "z"
15 | names(means)[names(means) == "p.value"] <- "p"
16 | names(means)[names(means) == "lower.CL"] <- "CI_low"
17 | names(means)[names(means) == "upper.CL"] <- "CI_high"
18 | names(means)[names(means) == "asymp.LCL"] <- "CI_low"
19 | names(means)[names(means) == "asymp.UCL"] <- "CI_high"
20 | means
21 | }
22 |
23 |
24 | #' @keywords internal
25 | .clean_names_bayesian <- function(means, model, predict, type = "mean") {
26 | vars <- names(means)[names(means) %in% c("Median", "Mean", "MAP")]
27 | minfo <- insight::model_info(model, response = 1)
28 |
29 | if (length(vars) == 1) {
30 | if (type == "contrast") {
31 | names(means)[names(means) == vars] <- "Difference"
32 | } else if (type == "mean") {
33 | if (minfo$is_logit && predict == "response") {
34 | names(means)[names(means) == vars] <- "Probability"
35 | } else if (!is.null(predict) && predict %in% .brms_aux_elements(model)) {
36 | names(means)[names(means) == vars] <- tools::toTitleCase(predict)
37 | } else {
38 | names(means)[names(means) == vars] <- "Mean"
39 | }
40 | } else if (predict %in% .brms_aux_elements(model)) {
41 | names(means)[names(means) == vars] <- tools::toTitleCase(predict)
42 | } else {
43 | names(means)[names(means) == vars] <- "Coefficient"
44 | }
45 | }
46 |
47 | means$CI <- NULL
48 | means$ROPE_CI <- NULL
49 | means$ROPE_low <- NULL
50 | means$ROPE_high <- NULL
51 | means$Parameter <- NULL
52 | means
53 | }
54 |
--------------------------------------------------------------------------------
/R/data.R:
--------------------------------------------------------------------------------
1 | #' @docType data
2 | #' @title Sample data set
3 | #' @name fish
4 | #' @keywords data
5 | #'
6 | #' @description A sample data set, used in tests and some examples. Useful for
7 | #' demonstrating count models (with or without zero-inflation component). It
8 | #' consists of nine variables from 250 observations.
9 | NULL
10 |
11 |
12 | #' @docType data
13 | #' @title Sample dataset from the EFC Survey
14 | #' @name efc
15 | #' @keywords data
16 | #'
17 | #' @description Selected variables from the EUROFAMCARE survey. Useful when
18 | #' testing on "real-life" data sets, including random missing values. This
19 | #' data set also has value and variable label attributes.
20 | NULL
21 |
22 |
23 | #' @docType data
24 | #' @title More puppy therapy data
25 | #' @name puppy_love
26 | #' @keywords data
27 | #'
28 | #' @description Fictitious data related to whether puppy therapy works when you
29 | #' adjust for a person’s love of puppies, taken from the `{discovr}` package
30 | #' (Field 2025)
31 | #'
32 | #' @details Following variables are included in the dataset:
33 | #' - `id`: Participant id
34 | #' - `dose`: Treatment group to which the participant was randomly assigned (No
35 | #' puppies (control), 15 minutes of puppy therapy, 30 minutes of puppy
36 | #' therapy)
37 | #' - `happiness`: Self-reported happiness from 0 (as unhappy as I can possibly
38 | #' imagine being) to 10 (as happy as I can possibly imagine being)
39 | #' - `puppy_love`: Self-reported love of puppies from 0 (I am a weird person who
40 | #' hates puppies, please be deeply suspicious of me) to 7 (puppies are the
41 | #' best thing ever, one day I might marry one)
42 | #'
43 | #' For further details, see `?discovr::puppy_love`.
44 | #'
45 | #' @references Field, A. P. (2025). Discovering statistics using R and RStudio
46 | #' (2nd ed.). London: Sage.
47 | NULL
48 |
49 |
50 | #' @docType data
51 | #' @title Sample dataset from a course about analysis of factorial designs
52 | #' @name coffee_data
53 | #' @keywords data
54 | #'
55 | #' @description A sample data set from a course about the analysis of factorial
56 | #' designs, by Mattan S. Ben-Shachar. See following link for more information:
57 | #' https://github.com/mattansb/Analysis-of-Factorial-Designs-foR-Psychologists
58 | #'
59 | #' The data consists of five variables from 120 observations:
60 | #'
61 | #' - `ID`: A unique identifier for each participant
62 | #' - `sex`: The participant's sex
63 | #' - `time`: The time of day the participant was tested (morning, noon, or afternoon)
64 | #' - `coffee`: Group indicator, whether participant drank coffee or not
65 | #' (`"coffee"` or `"control"`).
66 | #' - `alertness`: The participant's alertness score.
67 | NULL
68 |
--------------------------------------------------------------------------------
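As a quick illustration of how one of these bundled data sets can be used with the package (the model formula is a plausible example, not one prescribed by the documentation; `estimate_means()` additionally requires the {marginaleffects} or {emmeans} package):

```r
library(modelbased)

# coffee_data: alertness as a function of coffee group and time of day
data(coffee_data, package = "modelbased")
model <- lm(alertness ~ coffee * time, data = coffee_data)
estimate_means(model, by = c("coffee", "time"))
```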
/R/joint_test.R:
--------------------------------------------------------------------------------
1 | .joint_test <- function(means, ...) {
2 | UseMethod(".joint_test")
3 | }
4 |
5 |
6 | # marginaleffects
7 |
8 | .joint_test.predictions <- function(means, my_args, test = "f", ...) {
9 | cnames <- colnames(means)
10 | # we need to separate the "by" argument, to find out which variables
11 | # were used as contrasts, and which for grouping
12 | by_vars <- intersect(cnames, my_args$by)
13 | contrast_vars <- setdiff(my_args$by, by_vars)
14 |
15 | # if we have no grouping variable, joint test simplifies to an anova-table
16 | # tell user to use `anova()` then.
17 | if (!length(by_vars)) {
18 | insight::format_error("Joint tests using `comparison = \"joint\"` only work when `by` is specified. If this stratification is not desired, please use `anova()` on your model object instead.") # nolint
19 | }
20 |
21 | # get column names. We need to have the column "hypothesis", else,
22 | # no test can be performed
23 | if (!"hypothesis" %in% cnames) {
24 | insight::format_error("Can't perform joint test. Data frame needs a column \"hypothesis\".")
25 | }
26 |
27 | # check how many comparisons we have. If only one,
28 | # we jointly test all rows at once
29 | n_hypothesis <- prod(insight::n_unique(means[by_vars]))
30 |
31 | # determine number of rows to test, and which rows
32 | if (n_hypothesis == 1) {
33 | test_rows <- as.list(1:nrow(means))
34 | } else {
35 | test_rows <- split(
36 | seq_len(nrow(means)),
37 | cut(seq_len(nrow(means)), n_hypothesis, labels = FALSE)
38 | )
39 | }
40 |
41 | # sanity check
42 | if (is.null(test)) {
43 | test <- "f"
44 | } else {
45 | test <- tolower(insight::compact_character(test)[1])
46 | }
47 |
48 | # handle aliases
49 | test <- switch(tolower(test),
50 | chi2 = "chisq",
51 | test
52 | )
53 |
54 | # joint test for all test rows
55 | out <- lapply(test_rows, function(x) {
56 | marginaleffects::hypotheses(means, joint = x, joint_test = test)
57 | })
58 |
59 | # bind results
60 | result <- do.call(rbind, out)
61 |
62 | # add variable names and levels
63 | result <- cbind(
64 | contrast_vars,
65 | unique(means[by_vars]),
66 | estimate = NA,
67 | result
68 | )
69 |
70 | # proper column names
71 | if (test == "f") {
72 | colnames(result) <- c("Contrast", by_vars, "estimate", "F", "p", "df1", "df2")
73 | # these are special columns, not yet covered by "insight::format_table()"
74 | result$df1 <- insight::format_value(result$df1, protect_integers = TRUE)
75 | result$df2 <- insight::format_value(result$df2, protect_integers = TRUE)
76 | } else {
77 | colnames(result) <- c("Contrast", by_vars, "estimate", "Chi2", "p", "df")
78 | }
79 | class(result) <- unique(c(class(means), "marginal_jointtest", "data.frame"))
80 |
81 | result
82 | }
83 |
84 |
85 | # emmeans
86 |
87 | .joint_test.emmGrid <- function(means, my_args, ...) {
88 | by_arg <- attributes(means)$misc$by.vars
89 | result <- try(as.data.frame(emmeans::joint_tests(means, by = by_arg)), silent = TRUE)
90 |
91 | # check if everything was ok
92 | if (inherits(result, "try-error")) {
93 | insight::format_error(
94 | "Could not compute joint test. This error occurred:",
95 | attributes(result)$condition$message,
96 | "\nYou may try to set `backend = \"marginaleffects\"` in your call to `estimate_contrasts()`."
97 | )
98 | }
99 |
100 | # these are special columns, not yet covered by "insight::format_table()"
101 | result$df1 <- insight::format_value(result$df1, protect_integers = TRUE)
102 | result$df2 <- insight::format_value(result$df2, protect_integers = TRUE)
103 |
104 | # rename statistic column
105 | colnames(result)[colnames(result) == "F.ratio"] <- "F"
106 | colnames(result)[1] <- "Contrast"
107 | result[[1]] <- my_args$contrast
108 |
109 | result
110 | }
111 |
--------------------------------------------------------------------------------
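For context, a sketch of how these internal helpers are reached from the user-facing API, based on the `comparison = "joint"` and `by` requirements mentioned in the error messages above. The model is a made-up example, and the {marginaleffects} package is assumed to be installed.

```r
library(modelbased)

data(mtcars)
mtcars$cyl <- factor(mtcars$cyl)
mtcars$am <- factor(mtcars$am)
model <- lm(mpg ~ cyl * am, data = mtcars)

# jointly test the cyl contrasts within each level of am
estimate_contrasts(model, contrast = "cyl", by = "am", comparison = "joint")
```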
/R/modelbased-package.R:
--------------------------------------------------------------------------------
1 | #' \code{modelbased}
2 | #'
3 | #' @title modelbased: Estimation of Model-Based Predictions, Contrasts and Means
4 | #'
5 | #' @description
6 | #'
7 | #' `modelbased` is a package helping with model-based estimation, making it
8 | #' easy to compute marginal means, contrast analyses and model predictions.
9 | #'
10 | #' @docType package
11 | #' @aliases modelbased modelbased-package
12 | #' @name modelbased-package
13 | #' @keywords internal
14 | "_PACKAGE"
15 |
--------------------------------------------------------------------------------
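A brief usage sketch of the package's core functions (illustrative only; exact arguments and additional options are documented on the respective help pages):

```r
library(modelbased)

model <- lm(Petal.Length ~ Sepal.Width * Species, data = iris)

estimate_means(model, by = "Species")                          # marginal means
estimate_contrasts(model, contrast = "Species")                # pairwise contrasts
estimate_slopes(model, trend = "Sepal.Width", by = "Species")  # marginal slopes
estimate_relation(model)                                       # model-based predictions
```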
/R/options.R:
--------------------------------------------------------------------------------
1 | #' @title Global options from the modelbased package
2 | #' @name modelbased-options
3 | #'
4 | #' @section Global options to set defaults for function arguments:
5 | #'
6 | #' **For calculating marginal means**
7 | #'
8 | #' - `options(modelbased_backend = )` will set a default value for the
9 | #' `backend` argument and can be used to set the package used by default to
10 | #' calculate marginal means. Can be `"marginaleffects"` or `"emmeans"`.
11 | #'
12 | #' - `options(modelbased_estimate = )` will set a default value for the
13 | #' `estimate` argument, which modulates the type of target population
14 | #' predictions refer to.
15 | #'
16 | #' **For printing**
17 | #'
18 | #' - `options(modelbased_select = )` will set a default value for the
19 | #' `select` argument and can be used to define a custom default layout for
20 | #' printing.
21 | #'
22 | #' - `options(modelbased_include_grid = TRUE)` will set a default value for the
23 | #' `include_grid` argument and can be used to include data grids in the output
24 | #' by default or not.
25 | #'
26 | #' - `options(modelbased_full_labels = FALSE)` will remove redundant
27 | #' (duplicated) labels from rows.
28 | #'
29 | #' **For plotting**
30 | #'
31 | #' - `options(modelbased_join_dots = )` will set a default value for
32 | #' the `join_dots` argument.
33 | #'
34 | #' - `options(modelbased_numeric_as_discrete = )` will set a default
35 | #' value for the `numeric_as_discrete` argument. Can also be
36 | #' `FALSE`.
37 | NULL
38 |
--------------------------------------------------------------------------------
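A short sketch of how these global options might be set, e.g. at the top of a script or in an `.Rprofile`; the chosen values are illustrative.

```r
# Defaults for modelbased function arguments (values are examples)
options(
  modelbased_backend = "marginaleffects", # or "emmeans"
  modelbased_include_grid = TRUE,         # print the data grid alongside results
  modelbased_full_labels = FALSE          # drop redundant (duplicated) row labels
)
```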
/R/plot.R:
--------------------------------------------------------------------------------
1 | #' @export
2 | plot.estimate_contrasts <- function(x, ...) {
3 | insight::check_if_installed("see")
4 | NextMethod()
5 | }
6 |
7 | #' @rdname visualisation_recipe.estimate_predicted
8 | #' @export
9 | plot.estimate_predicted <- function(x, ...) {
10 | layers <- visualisation_recipe(x, ...)
11 | graphics::plot(layers)
12 | }
13 |
14 | #' @rdname visualisation_recipe.estimate_predicted
15 | #' @export
16 | plot.estimate_means <- plot.estimate_predicted
17 |
18 | #' @export
19 | plot.estimate_grouplevel <- plot.estimate_predicted
20 |
21 | #' @export
22 | plot.estimate_slopes <- plot.estimate_predicted
23 |
--------------------------------------------------------------------------------
/R/print_html.R:
--------------------------------------------------------------------------------
1 | #' @export
2 | print_html.estimate_contrasts <- function(x,
3 | select = getOption("modelbased_select", NULL),
4 | include_grid = getOption("modelbased_include_grid", FALSE),
5 | full_labels = TRUE,
6 | ...) {
7 | # copy original
8 | out <- x
9 | # get attributes, but remove some of them - else, matching attribute fails
10 | attr <- attributes(x)
11 | attr <- attr[setdiff(names(attr), c("names", "row.names"))]
12 |
13 | # format table
14 | formatted_table <- format(out, select = select, format = "html", include_grid = include_grid, ...)
15 | attributes(formatted_table) <- utils::modifyList(attributes(formatted_table), attr)
16 |
17 | # remove redundant labels, for "by" variables
18 | formatted_table <- .remove_redundant_labels(x, formatted_table, full_labels)
19 |
20 | # set alignment, left-align first and non-numerics
21 | align <- .align_columns(x, formatted_table)
22 |
23 | # update footer
24 | table_footer <- attributes(formatted_table)$table_footer
25 | if (!is.null(table_footer)) {
26 | table_footer <- insight::compact_character(strsplit(table_footer, "\\n")[[1]])
27 | attr(formatted_table, "table_footer") <- paste(table_footer, collapse = "<br>")
28 | }
29 |
30 | insight::export_table(
31 | formatted_table,
32 | format = .check_format_backend(...),
33 | align = align,
34 | ...
35 | )
36 | }
37 |
38 |
39 | #' @export
40 | print_html.estimate_means <- print_html.estimate_contrasts
41 |
42 | #' @export
43 | print_html.estimate_slopes <- print_html.estimate_contrasts
44 |
45 | #' @export
46 | print_html.estimate_smooth <- print_html.estimate_contrasts
47 |
48 | #' @export
49 | print_html.estimate_predicted <- print_html.estimate_contrasts
50 |
51 | #' @export
52 | print_html.visualisation_matrix <- print_html.estimate_contrasts
53 |
54 | #' @export
55 | print_html.estimate_grouplevel <- print_html.estimate_contrasts
56 |
57 |
58 | # we allow exporting HTML format based on "gt" or "tinytable"
59 | .check_format_backend <- function(...) {
60 | dots <- list(...)
61 | if (is.null(dots) || !identical(dots$backend, "tt")) {
62 | "html"
63 | } else {
64 | "tt"
65 | }
66 | }
67 |
--------------------------------------------------------------------------------
/R/print_md.R:
--------------------------------------------------------------------------------
1 | #' @export
2 | print_md.estimate_contrasts <- function(x,
3 | select = getOption("modelbased_select", NULL),
4 | include_grid = getOption("modelbased_include_grid", FALSE),
5 | full_labels = TRUE,
6 | ...) {
7 | # copy original
8 | out <- x
9 | # get attributes, but remove some of them - else, matching attribute fails
10 | attr <- attributes(x)
11 | attr <- attr[setdiff(names(attr), c("names", "row.names"))]
12 |
13 | formatted_table <- format(out, select = select, format = "markdown", include_grid = include_grid, ...)
14 | attributes(formatted_table) <- utils::modifyList(attributes(formatted_table), attr)
15 |
16 | # remove redundant labels, for "by" variables
17 | formatted_table <- .remove_redundant_labels(x, formatted_table, full_labels)
18 |
19 | # set alignment, left-align first and non-numerics
20 | align <- .align_columns(x, formatted_table)
21 |
22 | # update footer
23 | table_footer <- attributes(formatted_table)$table_footer
24 | if (!is.null(table_footer)) {
25 | table_footer <- insight::compact_character(strsplit(table_footer, "\\n")[[1]])
26 | attr(formatted_table, "table_footer") <- paste0("*", paste(table_footer, collapse = "; "), "*")
27 | }
28 |
29 | insight::export_table(
30 | formatted_table,
31 | format = "markdown",
32 | align = align,
33 | ...
34 | )
35 | }
36 |
37 |
38 | #' @export
39 | print_md.estimate_means <- print_md.estimate_contrasts
40 |
41 | #' @export
42 | print_md.estimate_slopes <- print_md.estimate_contrasts
43 |
44 | #' @export
45 | print_md.estimate_smooth <- print_md.estimate_contrasts
46 |
47 | #' @export
48 | print_md.estimate_predicted <- print_md.estimate_contrasts
49 |
50 | #' @export
51 | print_md.visualisation_matrix <- print_md.estimate_contrasts
52 |
53 | #' @export
54 | print_md.estimate_grouplevel <- print_md.estimate_contrasts
55 |
--------------------------------------------------------------------------------
/R/reexports.R:
--------------------------------------------------------------------------------
1 | #' @export
2 | #' @importFrom datawizard visualisation_recipe
3 | datawizard::visualisation_recipe
4 |
5 | #' @export
6 | #' @importFrom datawizard standardize
7 | datawizard::standardize
8 |
9 | #' @export
10 | #' @importFrom datawizard unstandardize
11 | datawizard::unstandardize
12 |
13 | #' @importFrom insight print_md
14 | #' @export
15 | insight::print_md
16 |
17 | #' @importFrom insight print_html
18 | #' @export
19 | insight::print_html
20 |
--------------------------------------------------------------------------------
/R/reshape_grouplevel.R:
--------------------------------------------------------------------------------
1 | #' @rdname estimate_grouplevel
2 | #' @param x The output of `estimate_grouplevel()`.
3 | #' @param indices A character vector containing the indices (i.e., which
4 | #' columns) to extract (e.g., "Coefficient", "Median").
5 | #' @param group The name of the random factor to select as string value (e.g.,
6 | #'   `"Participant"`, if the model was `y ~ x + (1|Participant)`).
7 | #'
8 | #' @export
9 | reshape_grouplevel <- function(x, ...) {
10 | UseMethod("reshape_grouplevel")
11 | }
12 |
13 | #' @export
14 | reshape_grouplevel.default <- function(x, ...) {
15 | insight::format_error(paste0(
16 | "`reshape_grouplevel()` not implemented yet for objects of class `",
17 | class(x)[1],
18 | "`."
19 | ))
20 | }
21 |
22 | #' @rdname estimate_grouplevel
23 | #' @export
24 | reshape_grouplevel.estimate_grouplevel <- function(x, indices = "all", group = NULL, ...) {
25 | # Find indices
26 | if (any(indices == "all")) {
27 | indices <- names(x)[!names(x) %in% c("Group", "Level", "Parameter", "CI")]
28 | }
29 |
30 | # Accommodate Bayesian
31 | if ("Coefficient" %in% indices) {
32 | indices <- c(indices, "Median", "Mean", "MAP")
33 | }
34 | if ("SE" %in% indices) {
35 | indices <- c(indices, "SD", "MAD")
36 | }
37 | indices <- intersect(colnames(x), unique(indices))
38 |
39 | # Random parameters
40 | if (is.null(group)) {
41 | group <- unique(x$Group)
42 | }
43 |
44 | if (length(group) > 1) {
45 | insight::format_alert(paste0(
46 | "Multiple groups are present (", toString(group),
47 | "). Selecting the first (", group[1], ")."
48 | ))
49 | group <- group[1]
50 | }
51 | x <- x[x$Group %in% group, ]
52 |
53 | # Create a new column for the parameter name
54 | x$.param <- x$Parameter
55 | if ("Component" %in% names(x)) {
56 | x$.param <- paste0(x$Component, "_", x$.param)
57 | }
58 | x$.param <- gsub("conditional_", "", x$.param)
59 |
60 | # Reshape
61 | data_wide <- datawizard::data_to_wide(
62 | x,
63 | id_cols = "Level",
64 | values_from = indices,
65 | names_from = ".param",
66 | names_sep = "_"
67 | )
68 |
69 | # Rename level to group
70 | names(data_wide)[names(data_wide) == "Level"] <- group
71 |
72 | # rename "intercept" column
73 | names(data_wide) <- gsub("(Intercept)", "Intercept", names(data_wide), fixed = TRUE)
74 |
75 | class(data_wide) <- c("reshape_grouplevel", class(data_wide))
76 | data_wide
77 | }
78 |
79 | #' @export
80 | reshape_grouplevel.data.frame <- reshape_grouplevel.estimate_grouplevel
81 |
--------------------------------------------------------------------------------
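Note: `reshape_grouplevel()` pivots the long output of `estimate_grouplevel()` to one row per level of the random factor, so the group-level estimates can be merged back into the raw data. A minimal sketch, mirroring the documented example (assuming `lme4` is installed):

model <- lme4::lmer(mpg ~ hp + (1 | carb), data = mtcars)
random <- estimate_grouplevel(model)

# one row per level of "carb", one column per index/parameter combination
wide <- reshape_grouplevel(random, group = "carb", indices = c("Coefficient", "SE"))
alldata <- merge(mtcars, wide)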
/R/smoothing.R:
--------------------------------------------------------------------------------
1 | #' Smoothing a vector or a time series
2 | #'
3 | #' Smoothing a vector or a time series. For data.frames, the function will
4 | #' smooth all numeric variables stratified by factor levels (i.e., will smooth
5 | #' within each factor level combination).
6 | #'
7 | #' @param x A numeric vector.
8 | #' @param method Can be ["loess"][loess] (default) or
9 | #'   ["smooth"][smooth]. Loess smoothing can be slow.
10 | #' @param strength This argument only applies when `method = "loess"`.
11 | #' Degree of smoothing passed to `span` (see [loess()]).
12 | #' @param ... Arguments passed to or from other methods.
13 | #'
14 | #'
15 | #' @examples
16 | #' x <- sin(seq(0, 4 * pi, length.out = 100)) + rnorm(100, 0, 0.2)
17 | #' plot(x, type = "l")
18 | #' lines(smoothing(x, method = "smooth"), type = "l", col = "blue")
19 | #' lines(smoothing(x, method = "loess"), type = "l", col = "red")
20 | #'
21 | #' x <- sin(seq(0, 4 * pi, length.out = 10000)) + rnorm(10000, 0, 0.2)
22 | #' plot(x, type = "l")
23 | #' lines(smoothing(x, method = "smooth"), type = "l", col = "blue")
24 | #' lines(smoothing(x, method = "loess"), type = "l", col = "red")
25 | #' @return A smoothed vector or data frame.
26 | #' @export
27 | smoothing <- function(x, method = "loess", strength = 0.25, ...) {
28 | UseMethod("smoothing")
29 | }
30 |
31 |
32 | #' @export
33 | smoothing.numeric <- function(x, method = "loess", strength = 0.25, ...) {
34 | if (strength == 0 || isFALSE(strength) || is.null(method)) {
35 | return(x)
36 | }
37 |
38 | method <- match.arg(method, c("loess", "smooth"))
39 | if (method == "loess") {
40 | smoothed <- tryCatch(
41 | {
42 | stats::predict(stats::loess(
43 |         "y ~ x",
44 | data = data.frame(y = x, x = seq_along(x)),
45 | span = strength
46 | ))
47 | },
48 | warning = function(w) {
49 | insight::format_warning(
50 | paste0(
51 | "Smoothing had some difficulties. Try tweaking the smoothing strength (currently at ",
52 | strength,
53 | ")."
54 | )
55 | )
56 | stats::predict(stats::loess(
57 |         "y ~ x",
58 | data = data.frame(y = x, x = seq_along(x)),
59 | span = strength
60 | ))
61 | }
62 | )
63 | } else if (method == "smooth") {
64 | smoothed <- stats::smooth(x, ...)
65 | } else {
66 | insight::format_error("`method` must be one of c(\"loess\", \"smooth\")")
67 | }
68 | smoothed
69 | }
70 |
71 |
72 | #' @export
73 | smoothing.data.frame <- function(x, method = "loess", strength = 0.25, ...) {
74 | nums <- names(x)[vapply(x, is.numeric, TRUE)]
75 |
76 | # Stratify by factor levels
77 | factors <- names(x)[vapply(x, is.factor, TRUE)]
78 |
79 | if (length(factors) > 0) {
80 | combinations <- unique(x[factors])
81 | row.names(combinations) <- NULL
82 | x$temp <- apply(x[names(combinations)], 1, paste, collapse = "_")
83 |
84 | for (i in seq_len(nrow(combinations))) {
85 |       current_row <- paste(t(combinations[i, , drop = FALSE]), collapse = "_")
86 | x[x$temp == current_row, nums] <- sapply(x[x$temp == current_row, nums], smoothing, method = method, strength = strength, ...)
87 | }
88 |
89 | x$temp <- NULL
90 | } else {
91 | x[nums] <- sapply(x[nums], smoothing, method = method, strength = strength, ...)
92 | }
93 |
94 | x
95 | }
96 |
--------------------------------------------------------------------------------
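Note: for data frames, `smoothing()` smooths every numeric column separately within each combination of factor levels. A small illustrative sketch of the data.frame method:

d <- data.frame(
  group = factor(rep(c("a", "b"), each = 100)),
  value = rep(sin(seq(0, 4 * pi, length.out = 100)), 2) + rnorm(200, 0, 0.2)
)

# "value" is smoothed within group "a" and within group "b" separately
smoothed <- smoothing(d, method = "loess", strength = 0.3)
head(smoothed)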
/R/standardize_methods.R:
--------------------------------------------------------------------------------
1 | # Standardize -------------------------------------------------------------
2 |
3 | #' @export
4 | standardize.estimate_predicted <- function(x, include_response = TRUE, ...) {
5 | # Get data of predictors
6 | data <- insight::get_data(attributes(x)$model, verbose = FALSE, ...)
7 | data[[attributes(x)$response]] <- NULL # Remove resp from data
8 |
9 | # Standardize predictors
10 | x[names(data)] <- datawizard::standardize(as.data.frame(x)[names(data)], reference = data, ...)
11 |
12 | # Standardize response
13 | if (include_response && insight::model_info(attributes(x)$model, response = 1)$is_linear) {
14 | resp <- insight::get_response(attributes(x)$model)
15 | disp <- attributes(datawizard::standardize(resp, ...))$scale
16 |
17 | for (col in c("Predicted", "Mean", "CI_low", "CI_high")) {
18 | if (col %in% names(x)) {
19 | x[col] <- datawizard::standardize(x[[col]], reference = resp, ...)
20 | }
21 | }
22 |
23 | for (col in c("SE", "MAD")) {
24 | if (col %in% names(x)) {
25 | x[col] <- x[[col]] / disp
26 | }
27 | }
28 | }
29 |
30 |   attr(x, "table_title") <- c(paste0(attributes(x)$table_title[1], " (standardized)"), "blue")
31 | x
32 | }
33 |
34 |
35 | #' @export
36 | standardize.estimate_means <- standardize.estimate_predicted
37 |
38 |
39 | #' @export
40 | standardize.estimate_contrasts <- function(x, robust = FALSE, ...) {
41 | model <- attributes(x)$model
42 |
43 | if (insight::model_info(model, response = 1)$is_linear) {
44 | # Get dispersion scaling factor
45 | if (robust) {
46 | disp <- stats::mad(insight::get_response(model), na.rm = TRUE)
47 | } else {
48 | disp <- stats::sd(insight::get_response(model), na.rm = TRUE)
49 | }
50 |
51 | # Standardize relevant cols
52 | for (col in c("Difference", "Ratio", "Coefficient", "SE", "MAD", "CI_low", "CI_high")) {
53 | if (col %in% names(x)) {
54 | x[col] <- x[[col]] / disp
55 | }
56 | }
57 | }
58 |
59 |   attr(x, "table_title") <- c(paste0(attributes(x)$table_title[1], " (standardized)"), "blue")
60 | x
61 | }
62 |
63 | #' @export
64 | standardize.estimate_slopes <- standardize.estimate_contrasts
65 |
66 |
67 | # Unstandardize -------------------------------------------------------------
68 |
69 | #' @method unstandardize estimate_predicted
70 | #' @export
71 | unstandardize.estimate_predicted <- function(x, include_response = TRUE, ...) {
72 | model <- attributes(x)$model
73 |
74 | # Get data of predictors
75 | data <- insight::get_data(model, verbose = FALSE, ...)
76 | data[[attributes(x)$response]] <- NULL # Remove resp from data
77 |
78 |   # Unstandardize predictors
79 | x[names(data)] <- datawizard::unstandardize(as.data.frame(x)[names(data)], reference = data, ...)
80 |
81 |   # Unstandardize response
82 |   if (include_response && insight::model_info(model, response = 1)$is_linear) {
83 | resp <- insight::get_response(model)
84 | disp <- attributes(datawizard::standardize(resp, ...))$scale
85 |
86 | for (col in c("Predicted", "Mean", "CI_low", "CI_high")) {
87 | if (col %in% names(x)) {
88 | x[col] <- datawizard::unstandardize(x[[col]], reference = resp, ...)
89 | }
90 | }
91 |
92 | for (col in c("SE", "MAD")) {
93 | if (col %in% names(x)) {
94 | x[col] <- x[[col]] * disp
95 | }
96 | }
97 | }
98 | x
99 | }
100 |
101 |
102 | #' @export
103 | unstandardize.estimate_means <- unstandardize.estimate_predicted
104 |
105 |
106 | #' @export
107 | unstandardize.estimate_contrasts <- function(x, robust = FALSE, ...) {
108 | model <- attributes(x)$model
109 |
110 | if (insight::model_info(model, response = 1)$is_linear) {
111 | # Get dispersion scaling factor
112 | if (robust) {
113 | disp <- stats::mad(insight::get_response(model), na.rm = TRUE)
114 | } else {
115 | disp <- stats::sd(insight::get_response(model), na.rm = TRUE)
116 | }
117 |
118 |     # Unstandardize relevant cols
119 | for (col in c("Difference", "Ratio", "Coefficient", "SE", "MAD", "CI_low", "CI_high")) {
120 | if (col %in% names(x)) {
121 | x[col] <- x[[col]] * disp
122 | }
123 | }
124 | }
125 |
126 | x
127 | }
128 |
--------------------------------------------------------------------------------
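Note: these methods rescale already-computed estimates rather than refitting the model: predictors are standardized against the model's data, and for linear models the outcome columns (and standard errors) are divided by the SD of the response (or its MAD when `robust = TRUE`). A minimal sketch (assuming `marginaleffects` is installed for the default backend):

model <- lm(Sepal.Length ~ Species, data = iris)

standardize(estimate_means(model, by = "Species"))
standardize(estimate_contrasts(model, contrast = "Species"), robust = TRUE)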
/R/summary.R:
--------------------------------------------------------------------------------
1 | #' @export
2 | summary.estimate_slopes <- function(object, verbose = TRUE, ...) {
3 | out <- as.data.frame(object)
4 | by <- attributes(object)$by
5 |
6 | if (verbose && nrow(out) < 50) {
7 |     insight::format_alert("There might be too few data points to accurately determine intervals. Consider setting `length = 100` (or larger) in your call to `estimate_slopes()`.") # nolint
8 | }
9 |
10 | # Add "Confidence" col based on the sig index present in the data
11 | out$Confidence <- .estimate_slopes_significance(out, ...)
12 | out$Direction <- .estimate_slopes_direction(out, ...)
13 |
14 | # if we have more than one variable in `by`, group result table and
15 | # add group name as separate column
16 | if (length(by) > 1) {
17 | parts <- split(out, out[[by[2]]])
18 | out <- do.call(rbind, lapply(parts, .estimate_slope_parts, by = by[1]))
19 | out <- datawizard::rownames_as_column(out, "Group")
20 | out$Group <- gsub("\\.\\d+$", "", out$Group)
21 | } else {
22 | out <- .estimate_slope_parts(out, by)
23 | }
24 |
25 | attributes(out) <- utils::modifyList(attributes(object), attributes(out))
26 | class(out) <- c("summary_estimate_slopes", "data.frame")
27 |   attr(out, "table_title") <- c("Johnson-Neyman Intervals", "blue")
28 |
29 | out
30 | }
31 |
32 |
33 | #' @export
34 | summary.reshape_grouplevel <- function(object, ...) {
35 | x <- object[!duplicated(object), ]
36 | row.names(x) <- NULL
37 | x
38 | }
39 |
40 |
41 | # Utilities ===============================================================
42 |
43 |
44 | .estimate_slope_parts <- function(out, by) {
45 | # mark all "changes" from negative to positive and vice versa
46 | index <- 1
47 | out$switch <- index
48 | index <- index + 1
49 |
50 | for (i in 2:nrow(out)) {
51 | if (out$Direction[i] != out$Direction[i - 1] || out$Confidence[i] != out$Confidence[i - 1]) {
52 | out$switch[i:nrow(out)] <- index # styler: off
53 | index <- index + 1
54 | }
55 | }
56 |
57 | # split into "switches"
58 | parts <- split(out, out$switch)
59 |
60 | do.call(rbind, lapply(parts, function(i) {
61 | data.frame(
62 | Start = i[[by]][1],
63 | End = i[[by]][nrow(i)],
64 | Direction = i$Direction[1],
65 | Confidence = i$Confidence[1]
66 | )
67 | }))
68 | }
69 |
70 |
71 | .estimate_slopes_direction <- function(data, ...) {
72 | centrality_columns <- datawizard::extract_column_names(
73 | data,
74 | c("Coefficient", "Slope", "Median", "Mean", "MAP_Estimate"),
75 | verbose = FALSE
76 | )
77 | ifelse(data[[centrality_columns]] < 0, "negative", "positive")
78 | }
79 |
80 |
81 | .estimate_slopes_significance <- function(x, confidence = "auto", ...) {
82 | insight::check_if_installed("effectsize")
83 |
84 | if (confidence == "auto") {
85 | # TODO: make sure all of these work
86 | if ("BF" %in% names(x)) confidence <- "BF"
87 | if ("p" %in% names(x)) confidence <- "p"
88 | if ("pd" %in% names(x)) confidence <- "pd"
89 | }
90 |
91 | switch(confidence,
92 | p = tools::toTitleCase(effectsize::interpret_p(x$p, ...)),
93 | BF = tools::toTitleCase(effectsize::interpret_bf(x$BF, ...)),
94 | pd = tools::toTitleCase(effectsize::interpret_pd(x$pd, ...)),
95 | {
96 | # Based on CI
97 | out <- ifelse((x$CI_high < 0 & x$CI_low < 0) | (x$CI_high > 0 & x$CI_low > 0), "Significant", "Uncertain")
98 | factor(out, levels = c("Uncertain", "Significant"))
99 | }
100 | )
101 | }
102 |
--------------------------------------------------------------------------------
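Note: `summary.estimate_slopes()` condenses a fine grid of simple slopes into Johnson-Neyman-style intervals: consecutive rows with the same direction and the same confidence status are collapsed into one segment with a start and end value of the moderator. A minimal sketch (assuming `marginaleffects` is installed):

model <- lm(Sepal.Width ~ Petal.Length * Petal.Width, data = iris)
slopes <- estimate_slopes(model, trend = "Petal.Length", by = "Petal.Width", length = 100)

# ranges of Petal.Width where the slope of Petal.Length is positive or negative,
# and significant or uncertain
summary(slopes)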
/R/utils.R:
--------------------------------------------------------------------------------
1 | #' @keywords internal
2 | #' @noRd
3 | .brms_aux_elements <- function(model = NULL) {
4 | out <- c(
5 | "sigma", "mu", "nu", "shape", "beta", "phi", "hu", "ndt", "zoi", "coi",
6 | "kappa", "bias", "bs", "zi", "alpha", "xi", "delta", "k"
7 | )
8 | unique(c(out, insight::find_auxiliary(model, verbose = FALSE)))
9 | }
10 |
11 |
12 | #' @keywords internal
13 | #' @noRd
14 | .valid_coefficient_names <- function(model = NULL) {
15 | out <- c(
16 | "Mean", "Probability", "Difference", "Ratio", "Rate", "ZI-Probability",
17 | "Proportion", "Median", "MAP", "Coefficient", "Odds_ratio"
18 | )
19 | dpars <- insight::find_auxiliary(model, verbose = FALSE)
20 | if (!is.null(dpars)) {
21 | out <- unique(c(out, tools::toTitleCase(dpars)))
22 | }
23 | out
24 | }
25 |
26 |
27 | #' @keywords internal
28 | #' @noRd
29 | .check_standard_errors <- function(out,
30 | by = NULL,
31 | contrast = NULL,
32 | model = NULL,
33 | model_name = "model",
34 | verbose = TRUE,
35 | ...) {
36 | if (!verbose || is.null(out$SE)) {
37 | return(NULL)
38 | }
39 |
40 | if (all(is.na(out$SE))) {
41 |     # we show example code for how to resolve the problem. this example
42 |     # code only works when we have at least `by` or `contrast`. If both
43 |     # are NULL, we omit the example code (see below)
44 | code_snippet <- paste0("\n\nestim <- estimate_relation(\n ", model_name)
45 | by_vars <- c(by, contrast)
46 | if (!is.null(by_vars)) {
47 | code_snippet <- paste0(
48 | code_snippet,
49 | ",\n by = ",
50 | ifelse(length(by_vars) > 1, "c(", ""),
51 | paste0("\"", by_vars, "\"", collapse = ", "),
52 | ifelse(length(by_vars) > 1, ")", "")
53 | )
54 | }
55 | code_snippet <- paste0(code_snippet, "\n)\nestimate_contrasts(\n estim")
56 | if (!is.null(contrast)) {
57 | code_snippet <- paste0(
58 | code_snippet,
59 | ",\n contrast = ",
60 | ifelse(length(contrast) > 1, "c(", ""),
61 | paste0("\"", contrast, "\"", collapse = ", "),
62 | ifelse(length(contrast) > 1, ")", "")
63 | )
64 | }
65 | code_snippet <- paste0(code_snippet, "\n)")
66 | # setup message
67 | msg <- insight::format_message(
68 |       "Could not calculate standard errors for contrasts. This can happen when random effects are involved. You may try the following:"
69 | )
70 | # add example code, if valid
71 | if (!is.null(by_vars)) {
72 | msg <- c(msg, insight::color_text(code_snippet, "green"), "\n")
73 | }
74 | message(msg)
75 | } else if (isTRUE(all(out$SE == out$SE[1])) && insight::is_mixed_model(model)) {
76 | msg <- "Standard errors are probably not reliable. This can happen when random effects are involved. You may try `estimate_relation()` instead." # nolint
77 | if (!inherits(model, "glmmTMB")) {
78 | msg <- paste(msg, "You may also try package {.pkg glmmTMB} to produce valid standard errors.")
79 | }
80 | insight::format_alert(msg)
81 | }
82 | }
83 |
84 |
85 | #' @keywords internal
86 | #' @noRd
87 | .safe <- function(code, on_error = NULL) {
88 | if (isTRUE(getOption("easystats_errors", FALSE)) && is.null(on_error)) {
89 | code
90 | } else {
91 | tryCatch(code, error = function(e) on_error)
92 | }
93 | }
94 |
95 |
96 | #' @keywords internal
97 | #' @noRd
98 | .is_integer <- function(x) {
99 | tryCatch(
100 | expr = {
101 | ifelse(is.infinite(x), FALSE, x %% 1 == 0)
102 | },
103 | warning = function(w) {
104 | is.integer(x)
105 | },
106 | error = function(e) {
107 | FALSE
108 | }
109 | )
110 | }
111 |
112 |
113 | #' @keywords internal
114 | #' @noRd
115 | .is_likert <- function(x, integer_as_numeric = 5, ...) {
116 | if (is.null(integer_as_numeric) || is.na(integer_as_numeric)) {
117 | return(FALSE)
118 | }
119 | all(.is_integer(x)) && insight::n_unique(x) <= integer_as_numeric
120 | }
121 |
--------------------------------------------------------------------------------
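Note: `.is_integer()` and `.is_likert()` are internal heuristics, presumably used when deciding whether an integer-valued numeric variable should be treated as discrete; they are not exported, so the purely illustrative calls below access them via `:::`.

modelbased:::.is_integer(c(1, 2, 3))      # TRUE TRUE TRUE
modelbased:::.is_integer(c(1.5, Inf))     # FALSE FALSE
modelbased:::.is_likert(c(1, 2, 3, 2, 1)) # TRUE: integer-valued, at most 5 unique values
modelbased:::.is_likert(1:10)             # FALSE: more than 5 unique values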
/R/zero_crossings.R:
--------------------------------------------------------------------------------
1 | #' Find zero-crossings and inversion points
2 | #'
3 | #' Find zero crossings of a vector, i.e., indices when the numeric variable
4 | #' crosses 0. It is useful for finding the points where a function changes by
5 | #' looking at the zero crossings of its derivative.
6 | #'
7 | #' @param x A numeric vector.
8 | #'
9 | #' @return Vector of zero crossings or points of inversion.
10 | #' @seealso Based on the `uniroot.all` function from the rootSolve package.
11 | #'
12 | #' @examples
13 | #' x <- sin(seq(0, 4 * pi, length.out = 100))
14 | #' # plot(x, type = "b")
15 | #'
16 | #' modelbased::zero_crossings(x)
17 | #' modelbased::find_inversions(x)
18 | #' @export
19 | zero_crossings <- function(x) {
20 |   # Find zero crossings of the linearly interpolated vector
21 | zerocrossings <- .uniroot.all(stats::approxfun(seq_along(x), x), interval = range(seq_along(x)))
22 | if (length(zerocrossings) == 0) {
23 | return(NA)
24 | }
25 |
26 | zerocrossings
27 | }
28 |
29 |
30 | #' Copied from rootSolve package
31 | #' @keywords internal
32 | .uniroot.all <- function(f,
33 | interval,
34 | lower = min(interval),
35 | upper = max(interval),
36 | tol = .Machine$double.eps^0.2,
37 | maxiter = 1000,
38 | n = 100,
39 | ...) {
40 | ## error checking as in uniroot...
41 | if (!missing(interval) && length(interval) != 2) {
42 | insight::format_error("`interval` must be a vector of length two.")
43 | }
44 |
45 | if (!is.numeric(lower) || !is.numeric(upper) || lower >= upper) {
46 | insight::format_error("`lower` is not smaller than `upper`.")
47 | }
48 |
49 | ## subdivide interval in n subintervals and estimate the function values
50 | xseq <- seq(lower, upper, len = n + 1)
51 | mod <- f(xseq, ...)
52 |
53 | ## some function values may already be 0
54 | Equi <- xseq[which(mod == 0)]
55 |
56 |   ss <- mod[1:n] * mod[2:(n + 1)] # intervals where function values change sign
57 | ii <- which(ss < 0)
58 |
59 | for (i in ii) {
60 | Equi <- c(Equi, stats::uniroot(f, lower = xseq[i], upper = xseq[i + 1], ...)$root)
61 | }
62 |
63 | Equi
64 | }
65 |
66 |
67 | #' @rdname zero_crossings
68 | #'
69 | #' @export
70 | find_inversions <- function(x) {
71 | zero_crossings(diff(x))
72 | }
73 |
--------------------------------------------------------------------------------
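Note: `.uniroot.all()` subdivides the interval into `n` segments, keeps grid points where the function is exactly zero, and runs `stats::uniroot()` on every segment whose endpoints differ in sign. A small sketch of the documented use case, locating the turning point of a fitted curve via the zero crossing of its discrete derivative:

data <- data.frame(x = rnorm(200))
data$y <- -data$x^2 + rnorm(200, 0, 0.3)

model <- lm(y ~ poly(x, 2), data = data)
pred <- estimate_relation(model, by = "x", length = 100)

# position on the 100-point grid where the predicted curve changes direction;
# map it back to the predictor scale via pred$x
find_inversions(pred$Predicted)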
/R/zzz.R:
--------------------------------------------------------------------------------
1 | .onLoad <- function(libname, pkgname) {
2 | # CRAN OMP THREAD LIMIT
3 | Sys.setenv(OMP_THREAD_LIMIT = 2)
4 | }
5 |
--------------------------------------------------------------------------------
/air.toml:
--------------------------------------------------------------------------------
1 | [format]
2 | line-width = 100
3 | indent-width = 2
4 | indent-style = "space"
5 | line-ending = "auto"
6 | persistent-line-breaks = false
7 | exclude = []
8 | default-exclude = true
9 |
--------------------------------------------------------------------------------
/cran-comments.md:
--------------------------------------------------------------------------------
1 | This release fixes CRAN check failures.
2 |
--------------------------------------------------------------------------------
/data/coffee_data.rda:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/data/coffee_data.rda
--------------------------------------------------------------------------------
/data/efc.rda:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/data/efc.rda
--------------------------------------------------------------------------------
/data/fish.rda:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/data/fish.rda
--------------------------------------------------------------------------------
/data/puppy_love.rda:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/data/puppy_love.rda
--------------------------------------------------------------------------------
/inst/CITATION:
--------------------------------------------------------------------------------
1 | bibentry(
2 | bibtype = "article",
3 | key = "modelbasedPackage",
4 | title = "{modelbased}: An {R} package to make the most out of your statistical models through marginal means, marginal effects, and model predictions.",
5 | volume = "10",
6 | doi = "10.21105/joss.07969",
7 | number = "109",
8 | journal = "Journal of Open Source Software",
9 | author = c(person("Dominique", "Makowski"), person("Mattan S.", "Ben-Shachar"), person("Brenton M.", "Wiernik"), person("Indrajeet", "Patil"), person("Rémi", "Thériault"), person("Daniel", "Lüdecke")),
10 | year = "2025",
11 | pages = "7969",
12 | url="https://joss.theoj.org/papers/10.21105/joss.07969",
13 | textVersion = "Makowski, D., Ben-Shachar, M. S., Wiernik, B. M., Patil, I., Thériault, R., & Lüdecke, D. (2025). modelbased: An R package to make the most out of your statistical models through marginal means, marginal effects, and model predictions. Journal of Open Source Software, 10(109), 7969. https://doi.org/10.21105/joss.07969"
14 | )
15 |
--------------------------------------------------------------------------------
/inst/WORDLIST:
--------------------------------------------------------------------------------
1 | ’s
2 | ANCOVAs
3 | Arel
4 | ATE
5 | ATEs
6 | ATT
7 | ATU
8 | Axelsson
9 | BH
10 | BLUPs
11 | Bundock
12 | cdot
13 | Chatton
14 | CJ
15 | coefficients’
16 | diag
17 | Dickerman
18 | Dom
19 | DS
20 | EFC
21 | EMM
22 | EMMs
23 | EUROFAMCARE
24 | Fitzsimons
25 | GAMM
26 | GLMM
27 | GLMMs
28 | GLM's
29 | Heiss
30 | Hernan
31 | Hernán
32 | ICC's
33 | ICCs
34 | IPW
35 | Intersectional
36 | Intersectionality
37 | Leckie
38 | LM
39 | MAIHDA
40 | Mattan
41 | McCabe
42 | McClelland
43 | McKoon
44 | Merlo
45 | Mmmh
46 | Modelisation
47 | Modelling
48 | Mulinari
49 | Neyman
50 | Nonresponse
51 | ORCID
52 | Owww
53 | PCVs
54 | PCV
55 | QoL
56 | Psychometrika
57 | RCTs
58 | README
59 | Rohrer
60 | RStudio
61 | RTs
62 | Ratcliff
63 | Reproducibility
64 | Rescaling
65 | Setosa
66 | Shachar
67 | Spiller
68 | SSM
69 | Subramanian
70 | SV
71 | Versicolor
72 | Virginica
73 | Visualisation
74 | Visualising
75 | Wagenmakers
76 | Wemrell
77 | Wiernik
78 | al
79 | analysing
80 | bc
81 | behaviour
82 | blogpost
83 | bocode
84 | bonferroni
85 | bootES
86 | brms
87 | caregiving
88 | ception
89 | codecov
90 | computable
91 | conditionspeed
92 | counterfactuals
93 | confounder
94 | confounders
95 | dat
96 | datagrid
97 | datawizard
98 | doi
99 | dpar
100 | easystats
101 | edu
102 | effectsize
103 | emmeans
104 | emtrends
105 | et
106 | exchangeability
107 | favour
108 | fdr
109 | fmwww
110 | foR
111 | generalizability
112 | geoms
113 | ggplot
114 | github
115 | glmmTMB
116 | grano
117 | grey
118 | hochberg
119 | holm
120 | hommel
121 | http
122 | https
123 | individuals'
124 | interpretable
125 | intersectional
126 | intersectionality
127 | intra
128 | io
129 | jmr
130 | joss
131 | labelled
132 | lifecycle
133 | lme
134 | lme4
135 | loess
136 | marginaleffects
137 | marginalizations
138 | mattansb
139 | modelisation
140 | modelled
141 | modelling
142 | natively
143 | nd
144 | ol
145 | patilindrajeets
146 | pscl
147 | pre
148 | quartiles
149 | rOpenSci
150 | Remotes
151 | recoding
152 | recodings
153 | repec
154 | reproducibility
155 | rescaled
156 | rescales
157 | rootSolve
158 | rstanarm
159 | salis
160 | spinoff
161 | ssmph
162 | strengejacke
163 | summarised
164 | summarises
165 | terciles
166 | transint
167 | tukey
168 | ultron
169 | uninformativeness
170 | unidimensional
171 | unstandardizing
172 | usecases
173 | versicolor
174 | virginica
175 | visualisation
176 | visualise
177 | visualising
178 | walkthrough
179 |
--------------------------------------------------------------------------------
/man/coffee_data.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/data.R
3 | \docType{data}
4 | \name{coffee_data}
5 | \alias{coffee_data}
6 | \title{Sample dataset from a course about analysis of factorial designs}
7 | \description{
8 | A sample data set from a course about the analysis of factorial
9 | designs, by Mattan S. Ben-Shachar. See the following link for more information:
10 | https://github.com/mattansb/Analysis-of-Factorial-Designs-foR-Psychologists
11 |
12 | The data consists of five variables from 120 observations:
13 | \itemize{
14 | \item \code{ID}: A unique identifier for each participant
15 | \item \code{sex}: The participant's sex
16 | \item \code{time}: The time of day the participant was tested (morning, noon, or afternoon)
17 | \item \code{coffee}: Group indicator, whether participant drank coffee or not
18 | (\code{"coffee"} or \code{"control"}).
19 | \item \code{alertness}: The participant's alertness score.
20 | }
21 | }
22 | \keyword{data}
23 |
--------------------------------------------------------------------------------
/man/describe_nonlinear.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/describe_nonlinear.R
3 | \name{describe_nonlinear}
4 | \alias{describe_nonlinear}
5 | \alias{describe_nonlinear.data.frame}
6 | \alias{estimate_smooth}
7 | \title{Describe the smooth term (for GAMs) or non-linear predictors}
8 | \usage{
9 | describe_nonlinear(data, ...)
10 |
11 | \method{describe_nonlinear}{data.frame}(data, x = NULL, y = NULL, ...)
12 |
13 | estimate_smooth(data, ...)
14 | }
15 | \arguments{
16 | \item{data}{The data containing the link, as for instance obtained by
17 | \code{\link[=estimate_relation]{estimate_relation()}}.}
18 |
19 | \item{...}{Other arguments to be passed to or from.}
20 |
21 | \item{x, y}{The names of the response variable (\code{y}) and the predicting
22 | variable (\code{x}).}
23 | }
24 | \value{
25 | A data frame of linear description of non-linear terms.
26 | }
27 | \description{
28 | This function summarises the smooth term trend in terms of linear segments.
29 | Using the approximate derivative, it separates a non-linear vector into
30 | quasi-linear segments (in which the trend is either positive or negative).
31 | Each of these segments is characterized by its beginning, end, size (in
32 | proportion, relative to the total size), trend (the linear regression
33 | coefficient), and linearity (the R2 of the linear regression).
34 | }
35 | \examples{
36 | \dontshow{if (insight::check_if_installed("performance", quietly = TRUE)) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
37 | # Create data
38 | data <- data.frame(x = rnorm(200))
39 | data$y <- data$x^2 + rnorm(200, 0, 0.5)
40 |
41 | model <<- lm(y ~ poly(x, 2), data = data)
42 | link_data <- estimate_relation(model, length = 100)
43 |
44 | describe_nonlinear(link_data, x = "x")
45 | \dontshow{\}) # examplesIf}
46 | }
47 |
--------------------------------------------------------------------------------
/man/dot-uniroot.all.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/zero_crossings.R
3 | \name{.uniroot.all}
4 | \alias{.uniroot.all}
5 | \title{Copied from rootSolve package}
6 | \usage{
7 | .uniroot.all(
8 | f,
9 | interval,
10 | lower = min(interval),
11 | upper = max(interval),
12 | tol = .Machine$double.eps^0.2,
13 | maxiter = 1000,
14 | n = 100,
15 | ...
16 | )
17 | }
18 | \description{
19 | Copied from rootSolve package
20 | }
21 | \keyword{internal}
22 |
--------------------------------------------------------------------------------
/man/efc.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/data.R
3 | \docType{data}
4 | \name{efc}
5 | \alias{efc}
6 | \title{Sample dataset from the EFC Survey}
7 | \description{
8 | Selected variables from the EUROFAMCARE survey. Useful when
9 | testing on "real-life" data sets, including random missing values. This
10 | data set also has value and variable label attributes.
11 | }
12 | \keyword{data}
13 |
--------------------------------------------------------------------------------
/man/estimate_grouplevel.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/estimate_grouplevel.R, R/reshape_grouplevel.R
3 | \name{estimate_grouplevel}
4 | \alias{estimate_grouplevel}
5 | \alias{estimate_grouplevel.default}
6 | \alias{estimate_grouplevel.brmsfit}
7 | \alias{reshape_grouplevel}
8 | \alias{reshape_grouplevel.estimate_grouplevel}
9 | \title{Group-specific parameters of mixed models random effects}
10 | \usage{
11 | estimate_grouplevel(model, ...)
12 |
13 | \method{estimate_grouplevel}{default}(model, type = "random", ...)
14 |
15 | \method{estimate_grouplevel}{brmsfit}(
16 | model,
17 | type = "random",
18 | dispersion = TRUE,
19 | test = NULL,
20 | diagnostic = NULL,
21 | ...
22 | )
23 |
24 | reshape_grouplevel(x, ...)
25 |
26 | \method{reshape_grouplevel}{estimate_grouplevel}(x, indices = "all", group = NULL, ...)
27 | }
28 | \arguments{
29 | \item{model}{A mixed model with random effects.}
30 |
31 | \item{...}{Other arguments passed to \code{\link[parameters:model_parameters]{parameters::model_parameters()}}.}
32 |
33 | \item{type}{\code{"random"} or \code{"total"}. If \code{"random"} (default), the
34 | coefficients correspond to the conditional estimates of the random effects
35 | (as they are returned by \code{lme4::ranef()}). They typically correspond to the
36 | deviation of each individual group from their fixed effect (assuming the
37 | random effect is also included as a fixed effect). As such, a coefficient
38 | close to 0 means that the participants' effect is the same as the
39 | population-level effect (in other words, it is "in the norm"). If
40 | \code{"total"}, it will return the sum of the random effect and its
41 | corresponding fixed effects, which internally relies on the \code{coef()} method
42 | (see \code{?coef.merMod}). Note that \code{type = "total"} does not yet return
43 | uncertainty indices (such as SE and CI) for models from \emph{lme4} or
44 | \emph{glmmTMB}, as the necessary information to compute them is not yet
45 | available. However, for Bayesian models, it is possible to compute them.}
46 |
47 | \item{dispersion, test, diagnostic}{Arguments passed to
48 | \code{\link[parameters:model_parameters]{parameters::model_parameters()}} for Bayesian models. By default, it won't
49 | return significance or diagnostic indices (as they are not typically very
50 | useful).}
51 |
52 | \item{x}{The output of \code{estimate_grouplevel()}.}
53 |
54 | \item{indices}{A character vector containing the indices (i.e., which
55 | columns) to extract (e.g., "Coefficient", "Median").}
56 |
57 | \item{group}{The name of the random factor to select as string value (e.g.,
58 | \code{"Participant"}, if the model was \code{y ~ x + (1|Participant)}).}
59 | }
60 | \description{
61 | Extract random parameters of each individual group in the context of mixed
62 | models, commonly referred to as BLUPs (Best Linear Unbiased Predictors).
63 | Can be reshaped to be of the same dimensions as the original data,
64 | which can be useful to add the random effects to the original data.
65 | }
66 | \details{
67 | Unlike raw group means, BLUPs apply shrinkage: they are a compromise between
68 | the group estimate and the population estimate. This improves generalizability
69 | and prevents overfitting.
70 | }
71 | \examples{
72 | \dontshow{if (all(insight::check_if_installed(c("see", "lme4"), quietly = TRUE)) && packageVersion("insight") > "1.1.0" && packageVersion("parameters") > "0.24.1") (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
73 | # lme4 model
74 | data(mtcars)
75 | model <- lme4::lmer(mpg ~ hp + (1 | carb), data = mtcars)
76 | random <- estimate_grouplevel(model)
77 |
78 | # Show group-specific effects
79 | random
80 |
81 | # Visualize random effects
82 | plot(random)
83 |
84 | # Reshape to wide data...
85 | reshaped <- reshape_grouplevel(random, group = "carb", indices = c("Coefficient", "SE"))
86 |
87 | # ...and can be easily combined with the original data
88 | alldata <- merge(mtcars, reshaped)
89 |
90 | # overall coefficients
91 | estimate_grouplevel(model, type = "total")
92 | \dontshow{\}) # examplesIf}
93 | }
94 |
--------------------------------------------------------------------------------
/man/figures/derivative.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/man/figures/derivative.png
--------------------------------------------------------------------------------
/man/figures/logo.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/man/figures/logo.png
--------------------------------------------------------------------------------
/man/figures/unnamed-chunk-10-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/man/figures/unnamed-chunk-10-1.png
--------------------------------------------------------------------------------
/man/figures/unnamed-chunk-11-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/man/figures/unnamed-chunk-11-1.png
--------------------------------------------------------------------------------
/man/figures/unnamed-chunk-12-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/man/figures/unnamed-chunk-12-1.png
--------------------------------------------------------------------------------
/man/figures/unnamed-chunk-13-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/man/figures/unnamed-chunk-13-1.png
--------------------------------------------------------------------------------
/man/figures/unnamed-chunk-14-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/man/figures/unnamed-chunk-14-1.png
--------------------------------------------------------------------------------
/man/figures/unnamed-chunk-15-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/man/figures/unnamed-chunk-15-1.png
--------------------------------------------------------------------------------
/man/figures/unnamed-chunk-16-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/man/figures/unnamed-chunk-16-1.png
--------------------------------------------------------------------------------
/man/figures/unnamed-chunk-17-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/man/figures/unnamed-chunk-17-1.png
--------------------------------------------------------------------------------
/man/figures/unnamed-chunk-3-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/man/figures/unnamed-chunk-3-1.png
--------------------------------------------------------------------------------
/man/figures/unnamed-chunk-4-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/man/figures/unnamed-chunk-4-1.png
--------------------------------------------------------------------------------
/man/figures/unnamed-chunk-5-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/man/figures/unnamed-chunk-5-1.png
--------------------------------------------------------------------------------
/man/figures/unnamed-chunk-6-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/man/figures/unnamed-chunk-6-1.png
--------------------------------------------------------------------------------
/man/figures/unnamed-chunk-7-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/man/figures/unnamed-chunk-7-1.png
--------------------------------------------------------------------------------
/man/figures/unnamed-chunk-8-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/man/figures/unnamed-chunk-8-1.png
--------------------------------------------------------------------------------
/man/figures/unnamed-chunk-9-1.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/man/figures/unnamed-chunk-9-1.png
--------------------------------------------------------------------------------
/man/fish.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/data.R
3 | \docType{data}
4 | \name{fish}
5 | \alias{fish}
6 | \title{Sample data set}
7 | \description{
8 | A sample data set, used in tests and some examples. Useful for
9 | demonstrating count models (with or without zero-inflation component). It
10 | consists of nine variables from 250 observations.
11 | }
12 | \keyword{data}
13 |
--------------------------------------------------------------------------------
/man/modelbased-options.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/options.R
3 | \name{modelbased-options}
4 | \alias{modelbased-options}
5 | \title{Global options from the modelbased package}
6 | \description{
7 | Global options from the modelbased package
8 | }
9 | \section{Global options to set defaults for function arguments}{
10 |
11 |
12 | \strong{For calculating marginal means}
13 | \itemize{
14 | \item \verb{options(modelbased_backend = )} will set a default value for the
15 | \code{backend} argument and can be used to set the package used by default to
16 | calculate marginal means. Can be \code{"marginaleffects"} or \code{"emmeans"}.
17 | \item \verb{options(modelbased_estimate = )} will set a default value for the
18 | \code{estimate} argument, which modulates the type of target population
19 | the predictions refer to.
20 | }
21 |
22 | \strong{For printing}
23 | \itemize{
24 | \item \verb{options(modelbased_select = )} will set a default value for the
25 | \code{select} argument and can be used to define a custom default layout for
26 | printing.
27 | \item \code{options(modelbased_include_grid = TRUE)} will set a default value for the
28 | \code{include_grid} argument and can be used to include data grids in the output
29 | by default or not.
30 | \item \code{options(modelbased_full_labels = FALSE)} will remove redundant
31 | (duplicated) labels from rows.
32 | }
33 |
34 | \strong{For plotting}
35 | \itemize{
36 | \item \verb{options(modelbased_join_dots = )} will set a default value for
37 | the \code{join_dots} argument.
38 | \item \verb{options(modelbased_numeric_as_discrete = )} will set a default
39 | value for the \code{numeric_as_discrete} argument. Can also be
40 | \code{FALSE}.
41 | }
42 | }
43 |
44 |
--------------------------------------------------------------------------------
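Note: these options are read via `getOption()` at call time, so they can be set once per session (for instance in an `.Rprofile`). The values below are purely illustrative:

# use emmeans as the default backend and trim printed output
options(
  modelbased_backend = "emmeans",
  modelbased_include_grid = FALSE,
  modelbased_full_labels = FALSE
)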
/man/modelbased-package.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/modelbased-package.R
3 | \docType{package}
4 | \name{modelbased-package}
5 | \alias{modelbased-package}
6 | \alias{modelbased}
7 | \title{modelbased: Estimation of Model-Based Predictions, Contrasts and Means}
8 | \description{
9 | \code{modelbased} is a package that helps with model-based estimation, making
10 | it easy to compute marginal means, contrast analyses, and model predictions.
11 | }
12 | \details{
13 | \code{modelbased}
14 | }
15 | \seealso{
16 | Useful links:
17 | \itemize{
18 | \item \url{https://easystats.github.io/modelbased/}
19 | \item Report bugs at \url{https://github.com/easystats/modelbased/issues}
20 | }
21 |
22 | }
23 | \author{
24 | \strong{Maintainer}: Dominique Makowski \email{officialeasystats@gmail.com} (\href{https://orcid.org/0000-0001-5375-9967}{ORCID})
25 |
26 | Authors:
27 | \itemize{
28 | \item Daniel Lüdecke \email{d.luedecke@uke.de} (\href{https://orcid.org/0000-0002-8895-3206}{ORCID})
29 | \item Mattan S. Ben-Shachar \email{matanshm@post.bgu.ac.il} (\href{https://orcid.org/0000-0002-4287-4801}{ORCID})
30 | \item Indrajeet Patil \email{patilindrajeet.science@gmail.com} (\href{https://orcid.org/0000-0003-1995-6531}{ORCID})
31 | \item Rémi Thériault \email{remi.theriault@mail.mcgill.ca} (\href{https://orcid.org/0000-0003-4315-6788}{ORCID})
32 | }
33 |
34 | }
35 | \keyword{internal}
36 |
--------------------------------------------------------------------------------
/man/pool_contrasts.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/pool.R
3 | \name{pool_contrasts}
4 | \alias{pool_contrasts}
5 | \title{Pool contrasts and comparisons from \code{estimate_contrasts()}}
6 | \usage{
7 | pool_contrasts(x, ...)
8 | }
9 | \arguments{
10 | \item{x}{A list of \code{estimate_contrasts} objects, as returned by
11 | \code{estimate_contrasts()}.}
12 |
13 | \item{...}{Currently not used.}
14 | }
15 | \value{
16 | A data frame with pooled comparisons or contrasts of predictions.
17 | }
18 | \description{
19 | This function "pools" (i.e. combines) multiple \code{estimate_contrasts} objects,
20 | returned by \code{\link[=estimate_contrasts]{estimate_contrasts()}}, in a similar fashion as \code{\link[mice:pool]{mice::pool()}}.
21 | }
22 | \details{
23 | Averaging of parameters follows Rubin's rules (\emph{Rubin, 1987, p. 76}).
24 | }
25 | \examples{
26 | \dontshow{if (require("mice")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
27 | data("nhanes2", package = "mice")
28 | imp <- mice::mice(nhanes2, printFlag = FALSE)
29 | comparisons <- lapply(1:5, function(i) {
30 | m <- lm(bmi ~ age + hyp + chl, data = mice::complete(imp, action = i))
31 | estimate_contrasts(m, "age")
32 | })
33 | pool_contrasts(comparisons)
34 | \dontshow{\}) # examplesIf}
35 | }
36 | \references{
37 | Rubin, D.B. (1987). Multiple Imputation for Nonresponse in Surveys. New York:
38 | John Wiley and Sons.
39 | }
40 |
--------------------------------------------------------------------------------
/man/pool_predictions.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/pool.R
3 | \name{pool_predictions}
4 | \alias{pool_predictions}
5 | \alias{pool_slopes}
6 | \title{Pool Predictions and Estimated Marginal Means}
7 | \usage{
8 | pool_predictions(x, transform = NULL, ...)
9 |
10 | pool_slopes(x, transform = NULL, ...)
11 | }
12 | \arguments{
13 | \item{x}{A list of \code{estimate_means} objects, as returned by
14 | \code{\link[=estimate_means]{estimate_means()}}, or \code{estimate_predicted} objects, as returned by
15 | \code{\link[=estimate_relation]{estimate_relation()}} and related functions. For \code{pool_slopes()}, must be
16 | a list of \code{estimate_slopes} objects, as returned by \code{\link[=estimate_slopes]{estimate_slopes()}}.}
17 |
18 | \item{transform}{A function applied to predictions and confidence intervals
19 | to (back-) transform results, which can be useful in case the regression
20 | model has a transformed response variable (e.g., \code{lm(log(y) ~ x)}). For
21 | Bayesian models, this function is applied to individual draws from the
22 | posterior distribution, before computing summaries. Can also be \code{TRUE}, in
23 | which case \code{insight::get_transformation()} is called to determine the
24 | appropriate transformation-function. Note that no standard errors are returned
25 | when transformations are applied.}
26 |
27 | \item{...}{Currently not used.}
28 | }
29 | \value{
30 | A data frame with pooled predictions.
31 | }
32 | \description{
33 | This function "pools" (i.e. combines) multiple \code{estimate_means} objects, in
34 | a similar fashion as \code{\link[mice:pool]{mice::pool()}}.
35 | }
36 | \details{
37 | Averaging of parameters follows Rubin's rules (\emph{Rubin, 1987, p. 76}).
38 | Pooling is applied to the predicted values and based on the standard errors
39 | as they are calculated in the \code{estimate_means} or \code{estimate_predicted}
40 | objects provided in \code{x}. For objects of class \code{estimate_means}, the predicted
41 | values are on the response scale by default, and standard errors are
42 | calculated using the delta method. Pooled estimates and their standard
43 | errors are then computed based on Rubin's rules. There is no
44 | back-transformation to the link-scale of predicted values
45 | before applying Rubin's rule.
46 | }
47 | \examples{
48 | \dontshow{if (require("mice") && requireNamespace("marginaleffects")) (if (getRversion() >= "3.4") withAutoprint else force)(\{ # examplesIf}
49 | # example for multiple imputed datasets
50 | data("nhanes2", package = "mice")
51 | imp <- mice::mice(nhanes2, printFlag = FALSE)
52 |
53 | # estimated marginal means
54 | predictions <- lapply(1:5, function(i) {
55 | m <- lm(bmi ~ age + hyp + chl, data = mice::complete(imp, action = i))
56 | estimate_means(m, "age")
57 | })
58 | pool_predictions(predictions)
59 |
60 | # estimated slopes (marginal effects)
61 | slopes <- lapply(1:5, function(i) {
62 | m <- lm(bmi ~ age + hyp + chl, data = mice::complete(imp, action = i))
63 | estimate_slopes(m, "chl")
64 | })
65 | pool_slopes(slopes)
66 | \dontshow{\}) # examplesIf}
67 | }
68 | \references{
69 | Rubin, D.B. (1987). Multiple Imputation for Nonresponse in Surveys. New York:
70 | John Wiley and Sons.
71 | }
72 |
--------------------------------------------------------------------------------
/man/puppy_love.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/data.R
3 | \docType{data}
4 | \name{puppy_love}
5 | \alias{puppy_love}
6 | \title{More puppy therapy data}
7 | \description{
8 | Fictitious data related to whether puppy therapy works when you
9 | adjust for a person’s love of puppies, taken from the \code{{discovr}} package
10 | (Field 2025).
11 | }
12 | \details{
13 | Following variables are included in the dataset:
14 | \itemize{
15 | \item \code{id}: Participant id
16 | \item \code{dose}: Treatment group to which the participant was randomly assigned (No
17 | puppies (control), 15 minutes of puppy therapy, 30 minutes of puppy
18 | therapy)
19 | \item \code{happiness}: Self-reported happiness from 0 (as unhappy as I can possibly
20 | imagine being) to 10 (as happy as I can possibly imagine being)
21 | \item \code{puppy_love}: Self-reported love of puppies from 0 (I am a weird person who
22 | hates puppies, please be deeply suspicious of me) to 7 (puppies are the
23 | best thing ever, one day I might marry one)
24 | }
25 |
26 | For further details, see \code{?discovr::puppy_love}.
27 | }
28 | \references{
29 | Field, A. P. (2025). Discovering statistics using R and RStudio
30 | (2nd ed.). London: Sage.
31 | }
32 | \keyword{data}
33 |
--------------------------------------------------------------------------------
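Note: the dataset lends itself to a simple covariate-adjusted comparison of the dose groups; a minimal sketch (assuming `marginaleffects` is installed for the default backend):

data(puppy_love, package = "modelbased")

model <- lm(happiness ~ puppy_love + dose, data = puppy_love)
estimate_means(model, by = "dose")
estimate_contrasts(model, contrast = "dose")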
/man/reexports.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/reexports.R
3 | \docType{import}
4 | \name{reexports}
5 | \alias{reexports}
6 | \alias{visualisation_recipe}
7 | \alias{standardize}
8 | \alias{unstandardize}
9 | \alias{print_md}
10 | \alias{print_html}
11 | \title{Objects exported from other packages}
12 | \keyword{internal}
13 | \description{
14 | These objects are imported from other packages. Follow the links
15 | below to see their documentation.
16 |
17 | \describe{
18 | \item{datawizard}{\code{\link[datawizard]{standardize}}, \code{\link[datawizard:standardize]{unstandardize}}, \code{\link[datawizard]{visualisation_recipe}}}
19 |
20 | \item{insight}{\code{\link[insight:display]{print_html}}, \code{\link[insight:display]{print_md}}}
21 | }}
22 |
23 |
--------------------------------------------------------------------------------
/man/smoothing.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/smoothing.R
3 | \name{smoothing}
4 | \alias{smoothing}
5 | \title{Smoothing a vector or a time series}
6 | \usage{
7 | smoothing(x, method = "loess", strength = 0.25, ...)
8 | }
9 | \arguments{
10 | \item{x}{A numeric vector.}
11 |
12 | \item{method}{Can be \link[=loess]{"loess"} (default) or
13 | \link[=smooth]{"smooth"}. Loess smoothing can be slow.}
14 |
15 | \item{strength}{This argument only applies when \code{method = "loess"}.
16 | Degree of smoothing passed to \code{span} (see \code{\link[=loess]{loess()}}).}
17 |
18 | \item{...}{Arguments passed to or from other methods.}
19 | }
20 | \value{
21 | A smoothed vector or data frame.
22 | }
23 | \description{
24 | Smoothing a vector or a time series. For data.frames, the function will
25 | smooth all numeric variables stratified by factor levels (i.e., will smooth
26 | within each factor level combination).
27 | }
28 | \examples{
29 | x <- sin(seq(0, 4 * pi, length.out = 100)) + rnorm(100, 0, 0.2)
30 | plot(x, type = "l")
31 | lines(smoothing(x, method = "smooth"), type = "l", col = "blue")
32 | lines(smoothing(x, method = "loess"), type = "l", col = "red")
33 |
34 | x <- sin(seq(0, 4 * pi, length.out = 10000)) + rnorm(10000, 0, 0.2)
35 | plot(x, type = "l")
36 | lines(smoothing(x, method = "smooth"), type = "l", col = "blue")
37 | lines(smoothing(x, method = "loess"), type = "l", col = "red")
38 | }
39 |
--------------------------------------------------------------------------------
/man/zero_crossings.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/zero_crossings.R
3 | \name{zero_crossings}
4 | \alias{zero_crossings}
5 | \alias{find_inversions}
6 | \title{Find zero-crossings and inversion points}
7 | \usage{
8 | zero_crossings(x)
9 |
10 | find_inversions(x)
11 | }
12 | \arguments{
13 | \item{x}{A numeric vector.}
14 | }
15 | \value{
16 | Vector of zero crossings or points of inversion.
17 | }
18 | \description{
19 | Find zero crossings of a vector, i.e., indices when the numeric variable
20 | crosses 0. It is useful for finding the points where a function changes by
21 | looking at the zero crossings of its derivative.
22 | }
23 | \examples{
24 | x <- sin(seq(0, 4 * pi, length.out = 100))
25 | # plot(x, type = "b")
26 |
27 | modelbased::zero_crossings(x)
28 | modelbased::find_inversions(x)
29 | }
30 | \seealso{
31 | Based on the \code{uniroot.all} function from the rootSolve package.
32 | }
33 |
--------------------------------------------------------------------------------
/modelbased.Rproj:
--------------------------------------------------------------------------------
1 | Version: 1.0
2 | ProjectId: 4f80d7d0-9b96-4a0d-a188-9b6b7596a118
3 |
4 | RestoreWorkspace: No
5 | SaveWorkspace: No
6 | AlwaysSaveHistory: No
7 |
8 | EnableCodeIndexing: Yes
9 | UseSpacesForTab: Yes
10 | NumSpacesForTab: 2
11 | Encoding: UTF-8
12 |
13 | RnwWeave: Sweave
14 | LaTeX: pdfLaTeX
15 |
16 | StripTrailingWhitespace: Yes
17 |
18 | BuildType: Package
19 | PackageUseDevtools: Yes
20 | PackageInstallArgs: --with-keep.source
21 | PackageCheckArgs: --as-cran
22 | PackageRoxygenize: rd,collate,namespace
23 |
24 | QuitChildProcessesOnExit: Yes
25 | DisableExecuteRprofile: Yes
26 |
--------------------------------------------------------------------------------
/modelbased.code-workspace:
--------------------------------------------------------------------------------
1 | {
2 | "folders": [
3 | {
4 | "path": "."
5 | }
6 | ],
7 | "launch": {
8 | "version": "0.2.0",
9 | "configurations": [
10 | {
11 | "type": "R-Debugger",
12 | "name": "Launch R-Workspace",
13 | "request": "launch",
14 | "debugMode": "workspace",
15 | "workingDirectory": ""
16 | }
17 | ]
18 | }
19 | }
--------------------------------------------------------------------------------
/paper/.gitignore:
--------------------------------------------------------------------------------
1 | /.quarto/
2 |
--------------------------------------------------------------------------------
/paper/example.R:
--------------------------------------------------------------------------------
1 | library(easystats)
2 |
3 | model <- lm(Petal.Width ~ Petal.Length * Species, data = iris)
4 |
5 | parameters::parameters(model)
6 |
7 |
8 | estimate_relation(model, by = c("Petal.Length", "Species"), length = 100) |>
9 |   plot()
10 |
11 |
12 | estimate_means(model, by = "Species")
13 |
14 | estimate_contrasts(model, contrast = "Species")
15 |
16 | estimate_slopes(model, trend = "Petal.Length", by = "Species")
17 |
18 | estimate_contrasts(model, contrast = "Petal.Length", by = "Species", backend = "marginaleffects")
19 | # estimate_contrasts(model, contrast = "Petal.Length", by = "Species")
20 |
21 |
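22 | # Editorial addition (illustrative sketch, not part of the original script):
23 | # the slope estimates can also be visualised directly, assuming the plot()
24 | # method for estimate_slopes objects is available.
25 | estimate_slopes(model, trend = "Petal.Length", by = "Species") |>
26 |   plot()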
--------------------------------------------------------------------------------
/paper/paper.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/paper/paper.pdf
--------------------------------------------------------------------------------
/paper/paper_files/figure-latex/fig1-1.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/paper/paper_files/figure-latex/fig1-1.pdf
--------------------------------------------------------------------------------
/pkgdown/_pkgdown.yml:
--------------------------------------------------------------------------------
1 | url: https://easystats.github.io/modelbased/
2 |
3 | template:
4 | bootstrap: 5
5 | package: easystatstemplate
6 |
7 | reference:
8 | - title: "Model-based Estimations"
9 | contents:
10 | - starts_with("estimate_")
11 | - title: "Data grids and Visualisation"
12 | contents:
13 | - starts_with("visualisation_")
14 | - title: "Missing Data"
15 | contents:
16 | - starts_with("pool_")
17 |   - title: "Miscellaneous"
18 | contents:
19 | - print.estimate_contrasts
20 | - describe_nonlinear
21 | - get_emmeans
22 | - get_marginalmeans
23 | - find_inversions
24 | - zero_crossings
25 | - smoothing
26 | - title: "Global options"
27 | contents:
28 | - modelbased-options
29 | - title: "Sample datasets"
30 | contents:
31 | - coffee_data
32 | - puppy_love
33 | - efc
34 | - fish
35 |
36 | # Keep articles organized
37 | navbar:
38 | left:
39 | - icon: fa fa-file-code
40 | text: Reference
41 | href: reference/index.html
42 | - text: "Introductions"
43 | menu:
44 | - text: "Overview of Vignettes"
45 | href: articles/overview_of_vignettes.html
46 | - text: -------
47 | - text: "Basics"
48 | - text: "Data grids"
49 | href: articles/visualisation_matrix.html
50 | - text: "Marginal means"
51 | href: articles/estimate_means.html
52 | - text: "Contrast analysis"
53 | href: articles/estimate_contrasts.html
54 | - text: "Marginal effects and derivatives"
55 | href: articles/estimate_slopes.html
56 | - text: "Mixed effects models"
57 | href: articles/mixed_models.html
58 | - text: -------
59 | - text: "Interpretation"
60 | - text: "Use a model to make predictions"
61 | href: articles/estimate_response.html
62 | - text: "Interpret models using Effect Derivatives"
63 | href: articles/derivatives.html
64 | - text: "Estimate and re-use Random Effects"
65 | href: articles/estimate_grouplevel.html
66 | - text: -------
67 | - text: "Visualization"
68 | - text: "Plotting estimated marginal means"
69 | href: articles/plotting.html
70 | - text: "Visualize effects and interactions"
71 | href: articles/estimate_relation.html
72 | - text: "The modelisation approach"
73 | href: articles/modelisation_approach.html
74 | - text: "Case Studies"
75 | menu:
76 | - text: "Workflows"
77 | - text: "Understanding your models"
78 | href: articles/workflow_modelbased.html
79 | - text: "Causal inference for observational data"
80 | href: articles/practical_causality.html
81 | - text: "Intersectionality analysis using the MAIHDA framework"
82 | href: articles/practical_intersectionality.html
83 | - text: -------
84 | - text: "Contrasts"
85 | - text: "Contrasts and comparisons"
86 | href: articles/introduction_comparisons_1.html
87 | - text: "User Defined Contrasts and Joint Tests"
88 | href: articles/introduction_comparisons_2.html
89 | - text: "Slopes, floodlight and spotlight analysis (Johnson-Neyman intervals)"
90 | href: articles/introduction_comparisons_3.html
91 | - text: "Contrasts and comparisons for generalized linear models"
92 | href: articles/introduction_comparisons_4.html
93 | - text: "Contrasts and comparisons for zero-inflation models"
94 | href: articles/introduction_comparisons_5.html
95 | - icon: fa fa-newspaper
96 | text: News
97 | href: news/index.html
98 |
--------------------------------------------------------------------------------
/pkgdown/favicon/apple-touch-icon-120x120.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/pkgdown/favicon/apple-touch-icon-120x120.png
--------------------------------------------------------------------------------
/pkgdown/favicon/apple-touch-icon-152x152.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/pkgdown/favicon/apple-touch-icon-152x152.png
--------------------------------------------------------------------------------
/pkgdown/favicon/apple-touch-icon-180x180.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/pkgdown/favicon/apple-touch-icon-180x180.png
--------------------------------------------------------------------------------
/pkgdown/favicon/apple-touch-icon-60x60.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/pkgdown/favicon/apple-touch-icon-60x60.png
--------------------------------------------------------------------------------
/pkgdown/favicon/apple-touch-icon-76x76.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/pkgdown/favicon/apple-touch-icon-76x76.png
--------------------------------------------------------------------------------
/pkgdown/favicon/apple-touch-icon.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/pkgdown/favicon/apple-touch-icon.png
--------------------------------------------------------------------------------
/pkgdown/favicon/favicon-16x16.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/pkgdown/favicon/favicon-16x16.png
--------------------------------------------------------------------------------
/pkgdown/favicon/favicon-32x32.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/pkgdown/favicon/favicon-32x32.png
--------------------------------------------------------------------------------
/pkgdown/favicon/favicon.ico:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/easystats/modelbased/e4b6ee841e9ea4011ea829f3ed1f3a7d731262f5/pkgdown/favicon/favicon.ico
--------------------------------------------------------------------------------
/tests/testthat.R:
--------------------------------------------------------------------------------
1 | # This file is part of the standard setup for testthat.
2 | # It is recommended that you do not modify it.
3 | #
4 | # Where should you do additional test configuration?
5 | #
6 | # * https://r-pkgs.org/tests.html
7 | # * https://testthat.r-lib.org/reference/test_package.html#special-files
8 | library(testthat)
9 | library(modelbased)
10 |
11 | test_check("modelbased")
12 |
--------------------------------------------------------------------------------
/tests/testthat/_snaps/betareg.md:
--------------------------------------------------------------------------------
1 | # estimate_means for betareg
2 |
3 | Code
4 | print(out, zap_small = TRUE)
5 | Output
6 | Estimated Marginal Means
7 |
8 | batch | Proportion | SE | 95% CI | z
9 | ------------------------------------------------
10 | 1 | 0.31 | 0.01 | [0.29, 0.34] | 26.00
11 | 2 | 0.23 | 0.01 | [0.21, 0.26] | 16.76
12 | 3 | 0.28 | 0.01 | [0.25, 0.31] | 18.78
13 | 4 | 0.19 | 0.01 | [0.17, 0.21] | 19.96
14 | 5 | 0.20 | 0.01 | [0.18, 0.22] | 19.16
15 | 6 | 0.19 | 0.01 | [0.17, 0.21] | 17.83
16 | 7 | 0.12 | 0.01 | [0.11, 0.14] | 15.57
17 | 8 | 0.12 | 0.01 | [0.10, 0.13] | 14.56
18 | 9 | 0.11 | 0.01 | [0.09, 0.12] | 12.25
19 | 10 | 0.07 | 0.01 | [0.06, 0.09] | 12.78
20 |
21 | Variable predicted: yield
22 | Predictors modulated: batch
23 | Predictors averaged: temp (3.3e+02)
24 | Predictions are on the response-scale.
25 |
26 | # estimate_relation for betareg
27 |
28 | Code
29 | print(out, zap_small = TRUE)
30 | Output
31 | Model-based Predictions
32 |
33 | batch | Predicted | 95% CI
34 | --------------------------------
35 | 1 | 0.31 | [0.28, 0.35]
36 | 2 | 0.23 | [0.21, 0.25]
37 | 3 | 0.28 | [0.25, 0.31]
38 | 4 | 0.19 | [0.17, 0.21]
39 | 5 | 0.20 | [0.18, 0.22]
40 | 6 | 0.19 | [0.17, 0.21]
41 | 7 | 0.12 | [0.11, 0.14]
42 | 8 | 0.12 | [0.10, 0.13]
43 | 9 | 0.11 | [0.09, 0.12]
44 | 10 | 0.07 | [0.06, 0.09]
45 |
46 | Variable predicted: yield
47 | Predictors modulated: batch
48 | Predictors controlled: temp (3.3e+02)
49 | Predictions are on the response-scale.
50 |
51 |
--------------------------------------------------------------------------------
/tests/testthat/_snaps/brms.md:
--------------------------------------------------------------------------------
1 | # estimate_means - brms, Wiener
2 |
3 | Code
4 | print(out)
5 | Output
6 | Model-based Predictions: rt
7 |
8 | Row | rt | Predicted | SE | 95% CI
9 | --------------------------------------------
10 | 1 | 0.96 | 1.20 | 0.90 | [0.32, 3.70]
11 | 2 | 0.69 | 1.17 | 0.80 | [0.31, 3.26]
12 | 3 | 1.18 | 1.21 | 0.89 | [0.32, 3.85]
13 | 4 | 1.54 | 1.20 | 0.85 | [0.32, 3.37]
14 | 5 | 1.48 | 1.19 | 0.83 | [0.33, 3.38]
15 |
16 | Model-based Predictions: response
17 |
18 | Row | response | Predicted | SE | 95% CI
19 | ------------------------------------------------
20 | 1 | 1.00 | 0.41 | 0.49 | [0.00, 1.00]
21 | 2 | 0.00 | 0.41 | 0.49 | [0.00, 1.00]
22 | 3 | 0.00 | 0.43 | 0.49 | [0.00, 1.00]
23 | 4 | 0.00 | 0.42 | 0.49 | [0.00, 1.00]
24 | 5 | 0.00 | 0.44 | 0.50 | [0.00, 1.00]
25 |
26 | Variable predicted: rt, response
27 | Predictions are on the response-scale.
28 |
29 | ---
30 |
31 | Code
32 | print(out, table_width = Inf)
33 | Output
34 | Model-based Predictions: rt
35 |
36 | Row | rt | Predicted | SE | 95% CI | iter_1 | iter_2 | iter_3
37 | -----------------------------------------------------------------------
38 | 1 | 0.96 | 1.20 | 0.90 | [0.32, 3.70] | 1.77 | 1.26 | 1.70
39 | 2 | 0.69 | 1.17 | 0.80 | [0.31, 3.26] | 0.37 | 0.83 | 0.59
40 | 3 | 1.18 | 1.21 | 0.89 | [0.32, 3.85] | 0.64 | 2.14 | 1.43
41 | 4 | 1.54 | 1.20 | 0.85 | [0.32, 3.37] | 0.42 | 1.29 | 1.28
42 | 5 | 1.48 | 1.19 | 0.83 | [0.33, 3.38] | 2.15 | 0.51 | 1.38
43 |
44 | Model-based Predictions: response
45 |
46 | Row | response | Predicted | SE | 95% CI | iter_1 | iter_2 | iter_3
47 | ---------------------------------------------------------------------------
48 | 1 | 1.00 | 0.41 | 0.49 | [0.00, 1.00] | 1.00 | 0.00 | 0.00
49 | 2 | 0.00 | 0.41 | 0.49 | [0.00, 1.00] | 0.00 | 0.00 | 1.00
50 | 3 | 0.00 | 0.43 | 0.49 | [0.00, 1.00] | 1.00 | 1.00 | 1.00
51 | 4 | 0.00 | 0.42 | 0.49 | [0.00, 1.00] | 1.00 | 0.00 | 1.00
52 | 5 | 0.00 | 0.44 | 0.50 | [0.00, 1.00] | 1.00 | 0.00 | 0.00
53 |
54 | Variable predicted: rt, response
55 | Predictions are on the response-scale.
56 |
57 |
--------------------------------------------------------------------------------
/tests/testthat/_snaps/estimate_means_mixed.md:
--------------------------------------------------------------------------------
1 | # estimate_means() - mixed models
2 |
3 | Code
4 | estimate_means(m, c("mined", "spp"), backend = "marginaleffects", predict = "inverse_link")
5 | Output
6 | Estimated Marginal Means
7 |
8 | mined | spp | Mean | SE | 95% CI | z
9 | --------------------------------------------------
10 | yes | GP | 0.12 | 0.09 | [0.03, 0.55] | -2.71
11 | no | GP | 2.63 | 0.38 | [1.98, 3.48] | 6.75
12 | yes | PR | 0.30 | 0.17 | [0.10, 0.90] | -2.15
13 | no | PR | 0.63 | 0.16 | [0.38, 1.05] | -1.76
14 | yes | DM | 1.21 | 0.44 | [0.59, 2.48] | 0.51
15 | no | DM | 3.16 | 0.44 | [2.40, 4.16] | 8.22
16 | yes | EC-A | 0.23 | 0.14 | [0.07, 0.75] | -2.43
17 | no | EC-A | 1.49 | 0.32 | [0.97, 2.27] | 1.83
18 | yes | EC-L | 0.56 | 0.22 | [0.26, 1.22] | -1.47
19 | no | EC-L | 5.11 | 0.64 | [3.99, 6.53] | 12.96
20 | yes | DES-L | 1.26 | 0.43 | [0.64, 2.47] | 0.66
21 | no | DES-L | 4.68 | 0.58 | [3.66, 5.97] | 12.34
22 | yes | DF | 1.22 | 0.38 | [0.67, 2.23] | 0.65
23 | no | DF | 2.53 | 0.39 | [1.87, 3.41] | 6.04
24 |
25 | Variable predicted: count
26 | Predictors modulated: mined, spp
27 | Predictors averaged: site
28 | Predictions are on the response-scale.
29 |
30 | ---
31 |
32 | Code
33 | estimate_means(m, c("mined", "spp"), backend = "marginaleffects")
34 | Output
35 | Estimated Marginal Means
36 |
37 | mined | spp | Mean | SE | 95% CI | z
38 | --------------------------------------------------
39 | yes | GP | 0.04 | 0.03 | [-0.02, 0.11] | 1.33
40 | no | GP | 2.03 | 0.30 | [ 1.44, 2.61] | 6.80
41 | yes | PR | 0.11 | 0.06 | [ 0.00, 0.23] | 1.95
42 | no | PR | 0.49 | 0.12 | [ 0.24, 0.73] | 3.93
43 | yes | DM | 0.45 | 0.15 | [ 0.17, 0.74] | 3.12
44 | no | DM | 2.44 | 0.35 | [ 1.75, 3.13] | 6.92
45 | yes | EC-A | 0.09 | 0.05 | [-0.01, 0.18] | 1.80
46 | no | EC-A | 1.15 | 0.24 | [ 0.68, 1.62] | 4.79
47 | yes | EC-L | 0.21 | 0.08 | [ 0.06, 0.36] | 2.74
48 | no | EC-L | 3.95 | 0.52 | [ 2.93, 4.96] | 7.60
49 | yes | DES-L | 0.47 | 0.14 | [ 0.20, 0.75] | 3.36
50 | no | DES-L | 3.61 | 0.47 | [ 2.68, 4.54] | 7.62
51 | yes | DF | 0.46 | 0.13 | [ 0.21, 0.71] | 3.58
52 | no | DF | 1.95 | 0.30 | [ 1.36, 2.55] | 6.43
53 |
54 | Variable predicted: count
55 | Predictors modulated: mined, spp
56 | Predictors averaged: site
57 | Predictions are on the response-scale.
58 |
59 | ---
60 |
61 | Code
62 | estimate_means(m, c("mined", "spp"), backend = "marginaleffects")
63 | Output
64 | Estimated Marginal Means
65 |
66 | mined | spp | Mean | 95% CI
67 | -----------------------------------
68 | yes | GP | 0.26 | [0.20, 0.34]
69 | no | GP | 2.01 | [1.66, 2.43]
70 | yes | PR | 0.07 | [0.04, 0.10]
71 | no | PR | 0.50 | [0.34, 0.73]
72 | yes | DM | 0.33 | [0.26, 0.43]
73 | no | DM | 2.53 | [2.14, 3.00]
74 | yes | EC-A | 0.12 | [0.09, 0.17]
75 | no | EC-A | 0.93 | [0.70, 1.23]
76 | yes | EC-L | 0.49 | [0.38, 0.62]
77 | no | EC-L | 3.74 | [3.25, 4.30]
78 | yes | DES-L | 0.52 | [0.41, 0.65]
79 | no | DES-L | 3.96 | [3.46, 4.54]
80 | yes | DF | 0.28 | [0.22, 0.37]
81 | no | DF | 2.18 | [1.81, 2.61]
82 |
83 | Variable predicted: count
84 | Predictors modulated: mined, spp
85 | Predictions are on the response-scale.
86 |
87 |
--------------------------------------------------------------------------------
/tests/testthat/_snaps/joint_test.md:
--------------------------------------------------------------------------------
1 | # estimate_contrasts - joint test, works with select printing
2 |
3 | Code
4 | estimate_contrasts(m, contrast = "time", by = "coffee", comparison = "joint")
5 | Output
6 | Marginal Joint Test
7 |
8 | Contrast | coffee | df1 | df2 | F | p
9 | ---------------------------------------------
10 | time | coffee | 2 | 114 | 1.76 | 0.176
11 | time | control | 2 | 114 | 5.29 | 0.006
12 |
13 | p-values are uncorrected.
14 |
15 |
--------------------------------------------------------------------------------
/tests/testthat/_snaps/plot/plot-cat-num-predictor-1.svg:
--------------------------------------------------------------------------------
1 |
2 |
57 |
--------------------------------------------------------------------------------
/tests/testthat/_snaps/plot/plot-cat-num-predictor-2.svg:
--------------------------------------------------------------------------------
1 |
2 |
57 |
--------------------------------------------------------------------------------
/tests/testthat/_snaps/plot/plot-cat-num-predictor-4.svg:
--------------------------------------------------------------------------------
1 |
2 |
59 |
--------------------------------------------------------------------------------
/tests/testthat/_snaps/plot/plot-means-no-ci-2.svg:
--------------------------------------------------------------------------------
1 |
2 |
57 |
--------------------------------------------------------------------------------
/tests/testthat/_snaps/summary_estimate_slopes.md:
--------------------------------------------------------------------------------
1 | # summary.estimate_slopes
2 |
3 | Code
4 | summary(slopes)
5 | Message
6 | There might be too few data to accurately determine intervals. Consider
7 | setting `length = 100` (or larger) in your call to `estimate_slopes()`.
8 | Output
9 | Johnson-Neymann Intervals
10 |
11 | Group | Start | End | Direction | Confidence
12 | -------------------------------------------------------
13 | setosa | 1.00 | 1.62 | positive | Not Significant
14 | versicolor | 3.17 | 5.04 | positive | Significant
15 | virginica | 4.73 | 4.73 | positive | Not Significant
16 | virginica | 5.04 | 5.66 | positive | Significant
17 | virginica | 5.97 | 6.90 | positive | Not Significant
18 |
19 | Marginal effects estimated for Petal.Length
20 | Type of slope was dY/dX
21 |
22 | ---
23 |
24 | Code
25 | summary(slopes)
26 | Output
27 | Johnson-Neymann Intervals
28 |
29 | Group | Start | End | Direction | Confidence
30 | -------------------------------------------------------
31 | setosa | 1.00 | 1.89 | positive | Not Significant
32 | versicolor | 3.03 | 5.05 | positive | Significant
33 | virginica | 4.52 | 4.70 | positive | Not Significant
34 | virginica | 4.75 | 5.77 | positive | Significant
35 | virginica | 5.83 | 6.90 | positive | Not Significant
36 |
37 | Marginal effects estimated for Petal.Length
38 | Type of slope was dY/dX
39 |
40 |
--------------------------------------------------------------------------------
/tests/testthat/_snaps/transform_response.md:
--------------------------------------------------------------------------------
1 | # estimate_means, transform
2 |
3 | Code
4 | out
5 | Output
6 | Estimated Marginal Means
7 |
8 | speed | Mean | SE | 95% CI | t(48)
9 | ------------------------------------------
10 | 4.00 | 2.16 | 0.15 | [1.86, 2.46] | 14.27
11 | 6.33 | 2.44 | 0.13 | [2.19, 2.69] | 19.33
12 | 8.67 | 2.72 | 0.10 | [2.52, 2.93] | 26.48
13 | 11.00 | 3.00 | 0.08 | [2.84, 3.17] | 36.44
14 | 13.33 | 3.29 | 0.07 | [3.15, 3.42] | 48.42
15 | 15.67 | 3.57 | 0.06 | [3.44, 3.70] | 56.46
16 | 18.00 | 3.85 | 0.07 | [3.71, 3.99] | 54.63
17 | 20.33 | 4.13 | 0.09 | [3.96, 4.31] | 47.64
18 | 22.67 | 4.41 | 0.11 | [4.20, 4.63] | 40.87
19 | 25.00 | 4.70 | 0.13 | [4.43, 4.96] | 35.61
20 |
21 | Variable predicted: dist
22 | Predictors modulated: speed
23 | Predictions are on the log-scale (consider `transform=TRUE`).
24 |
25 | ---
26 |
27 | Code
28 | out1
29 | Output
30 | Estimated Marginal Means
31 |
32 | speed | Mean | 95% CI | df
33 | -------------------------------------
34 | 4.00 | 8.66 | [ 6.39, 11.74] | 48
35 | 6.33 | 11.48 | [ 8.91, 14.80] | 48
36 | 8.67 | 15.22 | [12.38, 18.72] | 48
37 | 11.00 | 20.18 | [17.09, 23.82] | 48
38 | 13.33 | 26.74 | [23.33, 30.65] | 48
39 | 15.67 | 35.45 | [31.22, 40.25] | 48
40 | 18.00 | 46.99 | [40.78, 54.14] | 48
41 | 20.33 | 62.28 | [52.31, 74.15] | 48
42 | 22.67 | 82.56 | [66.44, 102.58] | 48
43 | 25.00 | 109.43 | [83.94, 142.65] | 48
44 |
45 | Variable predicted: dist
46 | Predictors modulated: speed
47 |
48 |
--------------------------------------------------------------------------------
/tests/testthat/_snaps/windows/estimate_contrasts.md:
--------------------------------------------------------------------------------
1 | # estimate_contrasts - Frequentist, duplicated levels
2 |
3 | Code
4 | print(estimate_contrasts(model, contrast = c("three", "vs", "am"), backend = "marginaleffects"),
5 | digits = 1, zap_small = TRUE, table_width = Inf)
6 | Output
7 | Marginal Contrasts Analysis
8 |
9 | Level1 | Level2 | Difference | SE | 95% CI | t(24) | p
10 | ----------------------------------------------------------------------------------------------
11 | three 0, vs 0, am 1 | three 0, vs 0, am 0 | 2.9 | 2.2 | [ -1.8, 7.5] | 1.3 | 0.213
12 | three 0, vs 1, am 0 | three 0, vs 0, am 0 | 5.8 | 2.5 | [ 0.8, 10.9] | 2.4 | 0.026
13 | three 0, vs 1, am 1 | three 0, vs 0, am 0 | 14.0 | 2.1 | [ 9.7, 18.4] | 6.7 | < .001
14 | three 1, vs 0, am 0 | three 0, vs 0, am 0 | -0.6 | 2.0 | [ -4.7, 3.6] | -0.3 | 0.780
15 | three 1, vs 0, am 1 | three 0, vs 0, am 0 | 7.5 | 2.8 | [ 1.7, 13.4] | 2.7 | 0.014
16 | three 1, vs 1, am 0 | three 0, vs 0, am 0 | 5.1 | 2.2 | [ 0.5, 9.7] | 2.3 | 0.032
17 | three 1, vs 1, am 1 | three 0, vs 0, am 0 | 10.6 | 2.8 | [ 4.7, 16.4] | 3.7 | 0.001
18 | three 0, vs 1, am 0 | three 0, vs 0, am 1 | 3.0 | 2.7 | [ -2.5, 8.4] | 1.1 | 0.275
19 | three 0, vs 1, am 1 | three 0, vs 0, am 1 | 11.2 | 2.3 | [ 6.4, 16.0] | 4.8 | < .001
20 | three 1, vs 0, am 0 | three 0, vs 0, am 1 | -3.4 | 2.2 | [ -8.1, 1.2] | -1.5 | 0.139
21 | three 1, vs 0, am 1 | three 0, vs 0, am 1 | 4.7 | 3.0 | [ -1.6, 10.9] | 1.5 | 0.135
22 | three 1, vs 1, am 0 | three 0, vs 0, am 1 | 2.2 | 2.5 | [ -2.8, 7.3] | 0.9 | 0.374
23 | three 1, vs 1, am 1 | three 0, vs 0, am 1 | 7.7 | 3.0 | [ 1.5, 13.9] | 2.6 | 0.017
24 | three 0, vs 1, am 1 | three 0, vs 1, am 0 | 8.2 | 2.5 | [ 3.0, 13.4] | 3.2 | 0.004
25 | three 1, vs 0, am 0 | three 0, vs 1, am 0 | -6.4 | 2.5 | [-11.5, -1.3] | -2.6 | 0.016
26 | three 1, vs 0, am 1 | three 0, vs 1, am 0 | 1.7 | 3.2 | [ -4.9, 8.2] | 0.5 | 0.600
27 | three 1, vs 1, am 0 | three 0, vs 1, am 0 | -0.7 | 2.7 | [ -6.2, 4.7] | -0.3 | 0.782
28 | three 1, vs 1, am 1 | three 0, vs 1, am 0 | 4.7 | 3.2 | [ -1.8, 11.3] | 1.5 | 0.149
29 | three 1, vs 0, am 0 | three 0, vs 1, am 1 | -14.6 | 2.1 | [-18.9, -10.3] | -6.9 | < .001
30 | three 1, vs 0, am 1 | three 0, vs 1, am 1 | -6.5 | 2.9 | [-12.5, -0.5] | -2.2 | 0.035
31 | three 1, vs 1, am 0 | three 0, vs 1, am 1 | -8.9 | 2.3 | [-13.7, -4.1] | -3.8 | < .001
32 | three 1, vs 1, am 1 | three 0, vs 1, am 1 | -3.5 | 2.9 | [ -9.5, 2.5] | -1.2 | 0.246
33 | three 1, vs 0, am 1 | three 1, vs 0, am 0 | 8.1 | 2.8 | [ 2.2, 13.9] | 2.8 | 0.009
34 | three 1, vs 1, am 0 | three 1, vs 0, am 0 | 5.7 | 2.2 | [ 1.0, 10.3] | 2.5 | 0.019
35 | three 1, vs 1, am 1 | three 1, vs 0, am 0 | 11.1 | 2.8 | [ 5.3, 17.0] | 3.9 | < .001
36 | three 1, vs 1, am 0 | three 1, vs 0, am 1 | -2.4 | 3.0 | [ -8.6, 3.8] | -0.8 | 0.428
37 | three 1, vs 1, am 1 | three 1, vs 0, am 1 | 3.1 | 3.5 | [ -4.1, 10.2] | 0.9 | 0.389
38 | three 1, vs 1, am 1 | three 1, vs 1, am 0 | 5.5 | 3.0 | [ -0.7, 11.7] | 1.8 | 0.081
39 |
40 | Variable predicted: mpg
41 | Predictors contrasted: three, vs, am
42 | p-values are uncorrected.
43 |
44 | ---
45 |
46 | Code
47 | print(estimate_contrasts(model, contrast = "am", backend = "marginaleffects"),
48 | zap_small = TRUE, table_width = Inf)
49 | Output
50 | Marginal Contrasts Analysis
51 |
52 | Level1 | Level2 | Difference | SE | 95% CI | t(24) | p
53 | -------------------------------------------------------------------
54 | 1 | 0 | 6.15 | 1.34 | [3.40, 8.91] | 4.61 | < .001
55 |
56 | Variable predicted: mpg
57 | Predictors contrasted: am
58 | Predictors averaged: three, vs
59 | p-values are uncorrected.
60 |
61 |
--------------------------------------------------------------------------------
/tests/testthat/test-attributes_visualisation.R:
--------------------------------------------------------------------------------
1 | test_that("attributes_means", {
2 | skip_if_not_installed("emmeans")
3 | model <- lm(Sepal.Length ~ Species * Sepal.Width, data = iris)
4 |
5 | estim <- suppressMessages(estimate_means(model, backend = "emmeans"))
6 | expect_identical(attributes(estim)$by, "Species")
7 |
8 | estim <- suppressMessages(estimate_means(model, by = "all", backend = "emmeans"))
9 | expect_identical(attributes(estim)$by, c("Species", "Sepal.Width"))
10 | })
11 |
12 |
13 | test_that("attributes_contrasts", {
14 | skip_if_not_installed("emmeans")
15 | model <- lm(Sepal.Length ~ Species * Sepal.Width, data = iris)
16 |
17 | estim <- suppressMessages(estimate_contrasts(model, backend = "emmeans"))
18 | expect_identical(attributes(estim)$contrast, "Species")
19 | expect_null(attributes(estim)$by)
20 | })
21 |
22 |
23 | test_that("attributes_link", {
24 | skip_if_not_installed("emmeans")
25 | model <- lm(Sepal.Length ~ Species * Sepal.Width, data = iris)
26 |
27 | estim <- estimate_link(model)
28 | expect_identical(attributes(estim)$response, "Sepal.Length")
29 | })
30 |
--------------------------------------------------------------------------------
/tests/testthat/test-backtransform_invlink.R:
--------------------------------------------------------------------------------
1 | skip_on_cran()
2 | skip_if_not_installed("marginaleffects")
3 | skip_if_not_installed("glmmTMB")
4 |
5 | test_that("estimate_means correct inverse link for glmmTMB", {
6 | data(mtcars)
7 | d <- mtcars
8 | d$count <- rep_len(c(0, 0, 0, 0, 1, 2, 4), nrow(mtcars))
9 | m <- glmmTMB::glmmTMB(
10 | count ~ cyl,
11 | data = datawizard::data_modify(d, cyl = as.factor(cyl)),
12 | family = "poisson"
13 | )
14 | out1 <- estimate_means(m, backend = "marginaleffects", predict = "inverse_link")
15 | out2 <- estimate_means(m, backend = "emmeans")
16 | expect_equal(out1$Mean, out2$Rate, tolerance = 1e-4)
17 | expect_equal(out1$CI_low, out2$CI_low, tolerance = 1e-4)
18 | expect_equal(out1$CI_high, out2$CI_high, tolerance = 1e-4)
19 | })
20 |
21 |
22 | test_that("estimate_means correct inverse link for glm", {
23 | set.seed(5)
24 | data <- data.frame(
25 | outcome = rbinom(100, 1, 0.5),
26 | var1 = rbinom(100, 1, 0.1),
27 | var2 = rnorm(100, 10, 7)
28 | )
29 | m <- glm(
30 | outcome ~ var1 * var2,
31 | data = data,
32 | family = binomial(link = "logit")
33 | )
34 |
35 | out <- estimate_relation(m, by = c("var1", "var2"))
36 | expect_true(all(out$CI_low >= 0 & out$CI_low <= 1))
37 | expect_true(all(out$CI_high >= 0 & out$CI_high <= 1))
38 |
39 | out <- estimate_means(m, by = c("var1", "var2"))
40 | expect_true(all(out$CI_low >= 0 & out$CI_low <= 1))
41 | expect_true(all(out$CI_high >= 0 & out$CI_high <= 1))
42 |
43 | out <- estimate_means(m, by = c("var1", "var2"), estimate = "population")
44 | expect_true(all(out$CI_low >= 0 & out$CI_low <= 1))
45 | expect_true(all(out$CI_high >= 0 & out$CI_high <= 1))
46 | })
47 |
48 |
49 | test_that("estimate_means correct inverse link for glmer", {
50 | data(efc, package = "modelbased")
51 |
52 | x <- which(efc$negc7d == 1 & efc$c172code == 3)
53 | efc$negc7d[x[sample.int(length(x), round(length(x) / 1.1))]] <- 0
54 | efc$c172code <- as.factor(efc$c172code)
55 | fit <- lme4::glmer(
56 | negc7d ~ c12hour + e42dep + c161sex + c172code + (1 | grp),
57 | data = efc,
58 | family = binomial(link = "logit")
59 | )
60 | out1 <- estimate_means(fit, "c172code", predict = "inverse_link")
61 | out2 <- estimate_relation(fit, by = "c172code", verbose = FALSE)
62 | expect_equal(out1$Probability, out2$Predicted, tolerance = 1e-2)
63 | expect_equal(out1$CI_low, out2$CI_low, tolerance = 1e-2)
64 | expect_true(all(out1$CI_low >= 0 & out1$CI_low <= 1))
65 | expect_true(all(out1$CI_high >= 0 & out1$CI_high <= 1))
66 | })
67 |
68 |
69 | test_that("estimate_means correct inverse link for stan-glm", {
70 | skip_if_not_installed("curl")
71 | skip_if_offline()
72 | skip_if_not_installed("httr2")
73 | skip_if_not_installed("rstanarm")
74 |
75 | m <- insight::download_model("stanreg_glm_1")
76 | skip_if(is.null(m))
77 |
78 | out <- estimate_means(m, "wt = [sd]")
79 | expect_equal(out$Median, c(0.81144, 0.38844, 0.08599), tolerance = 1e-4)
80 | expect_equal(out$CI_low, c(0.54837, 0.2029, 0.01342), tolerance = 1e-4)
81 | })
82 |
--------------------------------------------------------------------------------
/tests/testthat/test-betareg.R:
--------------------------------------------------------------------------------
1 | skip_on_cran()
2 | skip_if_not_installed("curl")
3 | skip_if_offline()
4 | skip_if_not_installed("marginaleffects")
5 | skip_if_not_installed("betareg")
6 |
7 | test_that("estimate_means for betareg", {
8 | data("GasolineYield", package = "betareg")
9 | m1 <- betareg::betareg(yield ~ batch + temp, data = GasolineYield)
10 | out <- estimate_means(m1, "batch")
11 | expect_snapshot(print(out, zap_small = TRUE))
12 | })
13 |
14 |
15 | test_that("estimate_relation for betareg", {
16 | data("GasolineYield", package = "betareg")
17 | m1 <- betareg::betareg(yield ~ batch + temp, data = GasolineYield)
18 | out <- estimate_relation(m1, by = "batch", verbose = FALSE)
19 | expect_snapshot(print(out, zap_small = TRUE))
20 | })
21 |
--------------------------------------------------------------------------------
/tests/testthat/test-bias_correction.R:
--------------------------------------------------------------------------------
1 | skip_on_cran()
2 | skip_if_not_installed("marginaleffects")
3 | skip_if_not_installed("emmeans")
4 | skip_if_not_installed("lme4")
5 |
6 | test_that("estimate_means bias_correction", {
7 | set.seed(123)
8 | dat <- data.frame(
9 | outcome = rbinom(n = 100, size = 1, prob = 0.35),
10 | var_binom = as.factor(rbinom(n = 100, size = 1, prob = 0.7)),
11 | var_cont = rnorm(n = 100, mean = 10, sd = 7),
12 | grp = as.factor(sample(letters[1:4], size = 100, replace = TRUE))
13 | )
14 | m1 <- lme4::glmer(
15 | outcome ~ var_binom + var_cont + (1 | grp),
16 | data = dat,
17 | family = binomial(link = "logit")
18 | )
19 | set.seed(123)
20 | out <- estimate_means(m1, "var_binom", predict = "inverse_link")
21 | expect_equal(out$Probability, c(0.38508, 0.36696), tolerance = 1e-4)
22 | out <- estimate_means(m1, "var_binom", bias_correction = TRUE)
23 | expect_equal(out$Probability, c(0.4746, 0.46863), tolerance = 1e-4)
24 | out <- estimate_means(m1, "var_binom", bias_correction = TRUE, sigma = 2.5)
25 | expect_equal(out$Probability, c(0.55516, 0.56012), tolerance = 1e-4)
26 | out2 <- as.data.frame(emmeans::emmeans(m1, "var_binom", bias.adj = TRUE, type = "response", sigma = 2.5))
27 | expect_equal(out$Probability, out2$prob, tolerance = 1e-2)
28 | })
29 |
--------------------------------------------------------------------------------
/tests/testthat/test-brms.R:
--------------------------------------------------------------------------------
1 | skip_on_cran()
2 | # skip_on_os("windows")
3 | skip_if_not_installed("brms")
4 | skip_if_not_installed("BH")
5 | skip_if_not_installed("RcppEigen")
6 | skip_if_not_installed("emmeans")
7 | skip_if_not_installed("marginaleffects")
8 |
9 | test_that("estimate_means - brms", {
10 | model <- brms::brm(Sepal.Length ~ Species * Sepal.Width, data = iris, refresh = 0, iter = 1000)
11 | estim <- estimate_means(model, backend = "emmeans")
12 | expect_identical(dim(estim), c(3L, 5L))
13 | })
14 |
15 | test_that("estimate_relation - brms", {
16 | model <- brms::brm(Sepal.Length ~ Species * Sepal.Width, data = iris, refresh = 0, iter = 1000)
17 | estim <- estimate_relation(model, preserve_range = FALSE)
18 | expect_identical(dim(estim), c(30L, 6L))
19 |
20 | # estim <- estimate_relation(model, preserve_range=FALSE, iterations = 10)
21 | # expect_equal(dim(estim), c(30, 6))
22 | })
23 |
24 | test_that("estimate_slopes - brms", {
25 | model <- brms::brm(Sepal.Length ~ Species * Sepal.Width, data = iris, refresh = 0, iter = 1000)
26 | estim <- estimate_slopes(model, by = "Species", backend = "emmeans")
27 | expect_identical(dim(estim), c(3L, 5L))
28 | })
29 |
30 | test_that("estimate_means - brms, multivariate", {
31 | skip_if_not_installed("curl")
32 | skip_if_offline()
33 | skip_if_not_installed("httr2")
34 |
35 | m <- insight::download_model("brms_mv_1")
36 | skip_if(is.null(m))
37 | estim <- estimate_means(m, "wt")
38 | expect_identical(dim(estim), c(30L, 10L))
39 | expect_named(
40 | estim,
41 | c(
42 | "wt", "ROPE_CI", "Response", "Median", "CI_low", "CI_high",
43 | "pd", "ROPE_low", "ROPE_high", "ROPE_Percentage"
44 | )
45 | )
46 | })
47 |
48 |
49 | test_that("estimate_means - brms, Wiener", {
50 | skip_if_not_installed("curl")
51 | skip_if_offline()
52 | skip_if_not_installed("httr2")
53 | skip_if_not_installed("RWiener")
54 |
55 | m <- insight::download_model("m_ddm_1")
56 | skip_if(is.null(m))
57 | d <- insight::get_data(m)[1:5, ]
58 |
59 | set.seed(123)
60 | out <- estimate_prediction(m, data = d)
61 | expect_snapshot(print(out))
62 |
63 | set.seed(123)
64 | out <- estimate_prediction(m, data = d, keep_iterations = 3)
65 | expect_snapshot(print(out, table_width = Inf))
66 | })
67 |
--------------------------------------------------------------------------------
/tests/testthat/test-describe_nonlinear.R:
--------------------------------------------------------------------------------
1 | skip_if_not_installed("performance")
2 |
3 | test_that("describe_nonlinear", {
4 | set.seed(123)
5 | d <- data.frame(x = rnorm(200))
6 | d$y <- d$x^2 + rnorm(200, 0, 0.5)
7 | model <- lm(y ~ poly(x, 2), data = d)
8 | link_data <- estimate_relation(model, length = 100)
9 | out <- describe_nonlinear(link_data, x = "x")
10 |
11 | expect_equal(out$Start, c(-2.309, -0.011), tolerance = 1e-4)
12 | expect_equal(out$End, c(-0.011, 3.241), tolerance = 1e-4)
13 | expect_error(describe_nonlinear(link_data), regex = "The name of the predictor")
14 | expect_error(describe_nonlinear(link_data, x = "x", y = "test"), regex = "The name of the response")
15 | })
16 |
17 | # test_that("estimate_smooth", {
18 | # skip_on_cran()
19 | # skip_if_not_installed("rstanarm")
20 | # set.seed(333)
21 | #
22 | # model <-
23 | # suppressWarnings(
24 | # rstanarm::stan_gamm4(
25 | # Sepal.Width ~ s(Petal.Length),
26 | # data = iris,
27 | # refresh = 0,
28 | # iter = 200,
29 | # chains = 2,
30 | # seed = 333
31 | # )
32 | # )
33 | # estim <- estimate_smooth(model)
34 | # expect_equal(ncol(estim), 6)
35 | #
36 | # model <-
37 | # suppressWarnings(
38 | # rstanarm::stan_glm(
39 | # Sepal.Width ~ poly(Petal.Length, 2),
40 | # data = iris,
41 | # refresh = 0,
42 | # iter = 200,
43 | # chains = 2,
44 | # seed = 333
45 | # )
46 | # )
47 | # estim <- estimate_smooth(model)
48 | # expect_equal(c(nrow(estim), ncol(estim)), c(2, 6))
49 | #
50 | # model <-
51 | # suppressWarnings(
52 | # rstanarm::stan_glm(
53 | # Sepal.Width ~ Species * poly(Petal.Length, 2),
54 | # data = iris,
55 | # refresh = 0,
56 | # iter = 200,
57 | # chains = 2,
58 | # seed = 333
59 | # )
60 | # )
61 | # estim <- estimate_smooth(model)
62 | # expect_equal(ncol(estim), 6)
63 | # estim <- estimate_smooth(model, levels = "Species")
64 | # expect_equal(ncol(estim), 7)
65 | # })
66 | #
67 |
--------------------------------------------------------------------------------
/tests/testthat/test-estimate_contrasts-average.R:
--------------------------------------------------------------------------------
1 | skip_on_cran()
2 | skip_if_not_installed("emmeans")
3 | skip_if_not_installed("marginaleffects")
4 | skip_on_os("mac")
5 |
6 |
7 | test_that("estimate_contrast, filter by numeric values", {
8 | skip_if_not_installed("lme4")
9 | data(iris)
10 | mod <- lm(Sepal.Length ~ Petal.Width * Species, data = iris)
11 | out1 <- estimate_contrasts(mod, contrast = "Species=c('versicolor','setosa')", by = "Petal.Width", estimate = "average")
12 | expect_identical(dim(out1), c(5L, 10L))
13 | expect_equal(out1$Difference, c(0.13903, 0.06148, -0.01608, -0.09363, -0.17118), tolerance = 1e-4)
14 |
15 | expect_error(
16 | estimate_contrasts(mod, contrast = "Species=c('versicolor','setosa')", by = "Petal.Width=c(2,3)", estimate = "average"),
17 | regex = "None of the values"
18 | )
19 |
20 | data(CO2)
21 | mod <- suppressWarnings(lme4::lmer(uptake ~ conc * Plant + (1 | Type), data = CO2))
22 | out1 <- estimate_contrasts(mod, contrast = "Plant", by = "conc", estimate = "average")
23 | expect_identical(dim(out1), c(462L, 10L))
24 |
25 | out1 <- estimate_contrasts(mod, contrast = "Plant=c('Qn1','Qn2','Qn3')", by = "conc", estimate = "average")
26 | expect_identical(dim(out1), c(21L, 10L))
27 |
28 | out1 <- estimate_contrasts(mod, contrast = "Plant=c('Qn1','Qn2','Qn3')", estimate = "average")
29 | expect_identical(dim(out1), c(3L, 9L))
30 | expect_equal(out1$Difference, c(1.92857, 4.38571, 2.45714), tolerance = 1e-4)
31 |
32 | out <- estimate_contrasts(mod, contrast = "conc", by = "Plant", comparison = "b1=b2", estimate = "average")
33 | expect_equal(out$Difference, -0.007061251, tolerance = 1e-4)
34 | })
35 |
36 |
37 | test_that("estimate_contrast, filterin in `by` and `contrast`", {
38 | data(efc, package = "modelbased")
39 | efc <- datawizard::to_factor(efc, c("c161sex", "c172code", "e16sex", "e42dep"))
40 | levels(efc$c172code) <- c("low", "mid", "high")
41 | m <- lm(neg_c_7 ~ barthtot + c172code * e42dep + c161sex, data = efc)
42 |
43 | out <- estimate_contrasts(m, c("e42dep", "c172code"), estimate = "average")
44 | expect_identical(dim(out), c(66L, 9L))
45 |
46 | out <- estimate_contrasts(
47 | m,
48 | "e42dep=c('independent','slightly dependent','moderately dependent')",
49 | by = "c172code",
50 | estimate = "average"
51 | )
52 | expect_identical(dim(out), c(9L, 10L))
53 | expect_equal(
54 | out$Difference,
55 | c(
56 | -0.56667, 0.87147, 1.43814, 1.30144, 3.00341, 1.70197, 2.78974,
57 | 3.11667, 0.32692
58 | ),
59 | tolerance = 1e-4
60 | )
61 |
62 | out <- estimate_contrasts(
63 | m,
64 | "e42dep=c('independent','slightly dependent','moderately dependent')",
65 | by = "c172code",
66 | comparison = "b1=b4",
67 | estimate = "average"
68 | )
69 | expect_equal(out$Difference, 1.507576, tolerance = 1e-4)
70 |
71 | out <- estimate_contrasts(m, "e42dep", by = "c172code=c('low','mid')", estimate = "average")
72 | expect_identical(dim(out), c(12L, 10L))
73 |
74 | out <- estimate_contrasts(m, "e42dep=c('independent','slightly dependent')", by = "c172code=c('low','mid')", estimate = "average")
75 | expect_identical(dim(out), c(2L, 10L))
76 | })
77 |
--------------------------------------------------------------------------------
/tests/testthat/test-estimate_contrasts_bookexamples.R:
--------------------------------------------------------------------------------
1 | skip_on_cran()
2 | skip_if_not_installed("marginaleffects")
3 | skip_on_os("mac")
4 |
5 | test_that("estimate_contrasts - book examples 1", {
6 | data(puppy_love, package = "modelbased")
7 |
8 | # set contrasts.
9 | treat_vs_none <- c(-2 / 3, 1 / 3, 1 / 3)
10 | short_vs_long <- c(0, -1 / 2, 1 / 2)
11 |
12 | puppy_love$dose_original <- puppy_love$dose
13 | contrasts(puppy_love$dose) <- cbind(treat_vs_none, short_vs_long)
14 |
15 | puppy_love$treat_vs_none <- as.factor(
16 | ifelse(puppy_love$dose == "no treatment", "no treatment", "Puppies")
17 | )
18 |
19 | puppy_love$short_vs_long <- factor(
20 | short_vs_long,
21 | levels = c("short", "long", "no treatment")
22 | )
23 |
24 | puppy_love$puppy_love <- puppy_love$puppy_love - mean(puppy_love$puppy_love)
25 |
26 | # fit model
27 | m1 <- lm(happiness ~ puppy_love * dose, data = puppy_love)
28 | m2 <- lm(happiness ~ puppy_love * dose_original, data = puppy_love)
29 |
30 | expect_equal(
31 | coef(m1)["dosetreat_vs_none"],
32 | estimate_contrasts(m2, "dose_original", comparison = "((b2+b3)/2) = b1")$Difference,
33 | tolerance = 1e-4,
34 | ignore_attr = TRUE
35 | )
36 | expect_equal(
37 | coef(m1)["doseshort_vs_long"],
38 | estimate_contrasts(m2, "dose_original", comparison = "b3 = b2")$Difference,
39 | tolerance = 1e-4,
40 | ignore_attr = TRUE
41 | )
42 | })
43 |
44 |
45 | test_that("estimate_contrasts - book examples 2", {
46 | data(puppy_love, package = "modelbased")
47 | cond_tx <- cbind("no treatment" = c(1, 0, 0), "treatment" = c(0, 0.5, 0.5))
48 |
49 | m1 <- lm(happiness ~ puppy_love * dose, data = puppy_love)
50 |
51 | out1 <- marginaleffects::avg_slopes(m1, variables = "puppy_love", by = "dose", hypothesis = cond_tx)
52 | out2 <- estimate_slopes(m1, "puppy_love", by = "dose", hypothesis = cond_tx)
53 | expect_equal(out1$estimate, out2$Slope, tolerance = 1e-4)
54 | # we don't officially have this argument for slopes, but we simply pass
55 | # it on to "hypothesis"
56 | out3 <- estimate_slopes(m1, "puppy_love", by = "dose", comparison = cond_tx)
57 | expect_equal(out3$Slope, out2$Slope, tolerance = 1e-4)
58 | })
59 |
60 |
61 | skip_if_not_installed("withr")
62 |
63 | withr::with_environment(
64 | new.env(),
65 | test_that("estimate_contrasts - book examples 3", {
66 | data(puppy_love, package = "modelbased")
67 | cond_tx_foo <<- function(x) {
68 | drop(x %*% cbind("no treatment" = c(1, 0, 0), "treatment" = c(0, 0.5, 0.5)))
69 | }
70 | m1 <- lm(happiness ~ puppy_love * dose, data = puppy_love)
71 |
72 | out1 <- marginaleffects::avg_predictions(
73 | m1,
74 | variables = c("puppy_love", "dose"),
75 | hypothesis = ~ I(cond_tx_foo(x)) | puppy_love
76 | )
77 |
78 | out2 <- estimate_contrasts(
79 | m1,
80 | c("puppy_love=c(0, 1, 2.5, 4, 7)"),
81 | by = "dose",
82 | comparison = ~ I(cond_tx_foo(x)) | puppy_love
83 | )
84 |
85 | expect_equal(out1$estimate, out2$Difference, tolerance = 1e-4)
86 | })
87 | )
88 |
89 |
90 | withr::with_environment(
91 | new.env(),
92 | test_that("estimate_contrasts - custom function in 'comparison'", {
93 | dat <- expand.grid(
94 | treatment = 0:1,
95 | week = 1:52
96 | )
97 | set.seed(123)
98 | dat$y <- rpois(nrow(dat), 5)
99 | mod <- glm(y ~ treatment * week, data = dat, family = poisson)
100 | hyp <<- function(x) {
101 | sum(x$estimate[x$treatment == 1]) - sum(x$estimate[x$treatment == 0])
102 | }
103 | out1 <- marginaleffects::predictions(mod, type = "response", hypothesis = hyp)
104 | # we need to set `estimate = "average"`, because the function "hyp()"
105 | # requires all predicted values, not a data grid
106 | out2 <- estimate_contrasts(
107 | mod,
108 | c("treatment", "week"),
109 | comparison = hyp,
110 | estimate = "average"
111 | )
112 | expect_equal(out1$estimate, out2$Difference, tolerance = 1e-4)
113 | })
114 | )
115 |
--------------------------------------------------------------------------------
/tests/testthat/test-estimate_contrasts_effectsize.R:
--------------------------------------------------------------------------------
1 | skip_on_cran()
2 | skip_if_not_installed("emmeans")
3 | skip_if_not_installed("marginaleffects")
4 | skip_if_not_installed("bootES")
5 | skip_if_not_installed("lme4")
6 | skip_on_os("mac")
7 | skip_if(packageVersion("insight") <= "1.1.0")
8 |
9 | data(iris)
10 | model <- lm(Sepal.Width ~ Species, data = iris)
11 |
12 | test_that("estimate_contrasts - emmeans backend", {
13 | expect_snapshot(estimate_contrasts(model, backend = "emmeans"),
14 | variant = "windows"
15 | )
16 | expect_snapshot(estimate_contrasts(model, effectsize = "none", backend = "emmeans"),
17 | variant = "windows"
18 | )
19 | expect_snapshot(estimate_contrasts(model, effectsize = "emmeans", backend = "emmeans"),
20 | variant = "windows"
21 | )
22 | expect_snapshot(estimate_contrasts(model, effectsize = "marginal", backend = "emmeans"),
23 | variant = "windows"
24 | )
25 | set.seed(100)
26 | expect_snapshot(estimate_contrasts(model, effectsize = "boot", backend = "emmeans"),
27 | variant = "windows"
28 | )
29 | set.seed(100)
30 | expect_snapshot(estimate_contrasts(model,
31 | effectsize = "boot",
32 | es_type = "akp.robust.d",
33 | backend = "emmeans"
34 | ), variant = "windows")
35 | set.seed(100)
36 | expect_snapshot(estimate_contrasts(
37 | model,
38 | effectsize = "boot",
39 | es_type = "hedges.g",
40 | backend = "emmeans"
41 | ), variant = "windows")
42 | })
43 |
44 | test_that("estimate_contrasts - marginaleffects backend", {
45 | expect_snapshot(estimate_contrasts(model, backend = "marginaleffects"), variant = "windows")
46 | expect_snapshot(estimate_contrasts(model, effectsize = "none", backend = "marginaleffects"),
47 | variant = "windows"
48 | )
49 | expect_error(
50 | estimate_contrasts(model, effectsize = "emmeans", backend = "marginaleffects"),
51 | "only possible with"
52 | )
53 | expect_snapshot(estimate_contrasts(model, effectsize = "marginal", backend = "marginaleffects"),
54 | variant = "windows"
55 | )
56 | })
57 |
58 | test_that("estimate_contrasts - random effects", {
59 | sleepstudy <- lme4::sleepstudy
60 | sleepstudy$Days_factor <- cut(sleepstudy$Days, breaks = 3, labels = c("Low", "Medium", "High"))
61 | model_random_effects <- lme4::lmer(Reaction ~ Days_factor + (1 | Subject), data = sleepstudy)
62 |
63 | expect_error(
64 | estimate_contrasts(model_random_effects, effectsize = "emmeans", backend = "emmeans"),
65 | "We strongly recommend not using"
66 | )
67 | })
68 |
--------------------------------------------------------------------------------
/tests/testthat/test-estimate_expectation.R:
--------------------------------------------------------------------------------
1 | skip_if_not_installed("insight", minimum_version = "1.1.0")
2 |
3 | test_that("estimate_expectation", {
4 | skip_if_not_installed("lme4")
5 | model <- lme4::lmer(mpg ~ wt + factor(am) + (1 | cyl), data = mtcars)
6 | estim <- estimate_expectation(model)
7 | expect_identical(dim(estim), c(32L, 8L))
8 | expect_named(
9 | estim,
10 | c("wt", "am", "cyl", "Predicted", "SE", "CI_low", "CI_high", "Residuals")
11 | )
12 |
13 | m <- lm(mpg ~ 1, data = mtcars)
14 | estim <- estimate_expectation(m)
15 | expect_identical(dim(estim), c(32L, 5L))
16 | expect_named(
17 | estim,
18 | c("Predicted", "SE", "CI_low", "CI_high", "Residuals")
19 | )
20 |
21 | m <- lm(mpg ~ am, data = mtcars)
22 | estim <- estimate_expectation(m)
23 | expect_identical(dim(estim), c(32L, 6L))
24 | expect_named(
25 | estim,
26 | c("am", "Predicted", "SE", "CI_low", "CI_high", "Residuals")
27 | )
28 | })
29 |
30 |
31 | test_that("estimate_expectation - data-grid", {
32 | skip_if_not_installed("lme4")
33 | model <- lme4::lmer(mpg ~ wt + factor(am) + (1 | cyl), data = mtcars)
34 | estim <- estimate_expectation(model, data = "grid")
35 | expect_identical(dim(estim), c(12L, 7L))
36 | expect_named(
37 | estim,
38 | c("wt", "am", "cyl", "Predicted", "SE", "CI_low", "CI_high")
39 | )
40 |
41 | m <- lm(mpg ~ 1, data = mtcars)
42 | estim <- estimate_expectation(m, data = "grid")
43 | expect_identical(dim(estim), c(10L, 5L))
44 | expect_named(
45 | estim,
46 | c("Predicted", "SE", "CI_low", "CI_high", "Residuals")
47 | )
48 |
49 | m <- lm(mpg ~ cyl, data = mtcars)
50 | estim <- estimate_expectation(m, data = "grid")
51 | expect_identical(dim(estim), c(3L, 5L))
52 | expect_named(
53 | estim,
54 | c("cyl", "Predicted", "SE", "CI_low", "CI_high")
55 | )
56 | estim2 <- estimate_expectation(m, by = "cyl")
57 | expect_equal(estim$cyl, estim2$cyl, tolerance = 1e-4)
58 | expect_equal(estim$Predicted, estim2$Predicted, tolerance = 1e-4)
59 |
60 | m <- lm(mpg ~ factor(cyl), data = mtcars)
61 | estim <- estimate_expectation(m, data = "grid")
62 | expect_identical(dim(estim), c(3L, 5L))
63 | expect_named(
64 | estim,
65 | c("cyl", "Predicted", "SE", "CI_low", "CI_high")
66 | )
67 | })
68 |
69 |
70 | test_that("estimate_expectation - error", {
71 | m <- lm(mpg ~ cyl, data = mtcars)
72 | expect_error(
73 | estimate_expectation(m, data = mtcars, by = "cyl"),
74 | regex = "You can only"
75 | )
76 | })
77 |
78 |
79 | test_that("estimate_relation and estimate specific", {
80 | skip_if_not_installed("marginaleffects")
81 | data(efc, package = "modelbased")
82 | efc <- datawizard::to_factor(efc, c("c161sex", "c172code", "e16sex", "e42dep"))
83 | fit <- lm(neg_c_7 ~ c12hour + barthtot + c161sex + e42dep + c172code, data = efc)
84 | out1 <- estimate_means(fit, "e42dep", estimate = "specific", backend = "marginaleffects")
85 | out2 <- estimate_relation(fit, by = "e42dep")
86 | expect_equal(out1$Mean, out2$Predicted, tolerance = 1e-4)
87 | })
88 |
--------------------------------------------------------------------------------
/tests/testthat/test-estimate_filter.R:
--------------------------------------------------------------------------------
1 | skip_on_cran()
2 | skip_if_not_installed("marginaleffects")
3 | skip_on_os("mac")
4 |
5 | test_that("filtering for by and contrast works for different estimate options", {
6 | data(efc, package = "modelbased")
7 | efc <- datawizard::to_factor(efc, c("c172code", "c161sex", "e16sex", "e42dep"))
8 | levels(efc$c172code) <- c("low", "mid", "high")
9 | m <- lm(barthtot ~ c161sex * c172code + neg_c_7, data = efc)
10 |
11 | out <- estimate_means(m, c("c172code=c('low','mid')", "c161sex"), estimate = "specific")
12 | expect_identical(dim(out), c(4L, 8L))
13 | expect_identical(as.character(out$c172code), c("low", "low", "mid", "mid"))
14 | out <- estimate_means(m, c("c172code=c('low','mid')", "c161sex"), estimate = "typical")
15 | expect_identical(dim(out), c(4L, 8L))
16 | expect_identical(as.character(out$c172code), c("low", "low", "mid", "mid"))
17 | out <- estimate_means(m, c("c172code=c('low','mid')", "c161sex"), estimate = "average")
18 | expect_identical(dim(out), c(4L, 8L))
19 | expect_identical(as.character(out$c172code), c("low", "low", "mid", "mid"))
20 | out <- estimate_means(m, c("c172code=c('low','mid')", "c161sex"), estimate = "population")
21 | expect_identical(dim(out), c(4L, 8L))
22 | expect_identical(as.character(out$c172code), c("low", "low", "mid", "mid"))
23 |
24 | out <- estimate_contrasts(m, "c172code=c('low','mid')", by = "c161sex", estimate = "specific")
25 | expect_identical(dim(out), c(2L, 10L))
26 | expect_identical(as.character(out$Level1), c("mid", "mid"))
27 | out <- estimate_contrasts(m, "c172code=c('low','mid')", by = "c161sex", estimate = "typical")
28 | expect_identical(dim(out), c(2L, 10L))
29 | expect_identical(as.character(out$Level1), c("mid", "mid"))
30 | out <- estimate_contrasts(m, "c172code=c('low','mid')", by = "c161sex", estimate = "average")
31 | expect_identical(dim(out), c(2L, 10L))
32 | expect_identical(as.character(out$Level1), c("mid", "mid"))
33 | out <- estimate_contrasts(m, "c172code=c('low','mid')", by = "c161sex", estimate = "population")
34 | expect_identical(dim(out), c(2L, 10L))
35 | expect_identical(as.character(out$Level1), c("mid", "mid"))
36 | })
37 |
38 |
39 | test_that("special filtering for by and contrast works", {
40 | data(iris)
41 | model <- lm(Sepal.Width ~ Species * Petal.Width, data = iris)
42 | out <- estimate_contrasts(model, contrast = c("Species", "Petal.Width=c(1, 2)"))
43 | expect_identical(dim(out), c(15L, 9L))
44 | expect_identical(
45 | as.character(out$Level1),
46 | c(
47 | "setosa, 2", "versicolor, 1", "versicolor, 2", "virginica, 1",
48 | "virginica, 2", "versicolor, 1", "versicolor, 2", "virginica, 1",
49 | "virginica, 2", "versicolor, 2", "virginica, 1", "virginica, 2",
50 | "virginica, 1", "virginica, 2", "virginica, 2"
51 | )
52 | )
53 |
54 | ## FIXME: not working yet
55 |
56 | # out <- estimate_contrasts(model, contrast = c("Species", "Petal.Width=c(1, 2)"), estimate = "average")
57 | # expect_identical(dim(out), c(15L, 9L))
58 | # expect_identical(
59 | # as.character(out$Level1),
60 | # c(
61 | # "setosa, 2", "versicolor, 1", "versicolor, 2", "virginica, 1",
62 | # "virginica, 2", "versicolor, 1", "versicolor, 2", "virginica, 1",
63 | # "virginica, 2", "versicolor, 2", "virginica, 1", "virginica, 2",
64 | # "virginica, 1", "virginica, 2", "virginica, 2"
65 | # )
66 | # )
67 | })
68 |
--------------------------------------------------------------------------------
/tests/testthat/test-estimate_means-average.R:
--------------------------------------------------------------------------------
1 | skip_on_cran()
2 | skip_if_not_installed("marginaleffects")
3 | skip_on_os("mac")
4 |
5 |
6 | test_that("estimate_means, filter by numeric values", {
7 | skip_if_not_installed("lme4")
8 | data(iris)
9 | mod <- lm(Sepal.Length ~ Petal.Width * Species, data = iris)
10 | out1 <- estimate_means(mod, c("Species=c('versicolor','setosa')", "Petal.Width"), estimate = "average")
11 | expect_identical(dim(out1), c(2L, 8L))
12 | expect_equal(out1$Mean, c(4.87019, 6.46946), tolerance = 1e-4)
13 | expect_equal(out1$Petal.Width, c(0.1, 1.7), tolerance = 1e-4)
14 |
15 | out1 <- estimate_means(mod, c("Species", "Petal.Width=c(1.2, 2.4)"), estimate = "average")
16 | expect_identical(dim(out1), c(2L, 8L))
17 | expect_equal(out1$Mean, c(5.75628, 6.83141), tolerance = 1e-4)
18 | expect_equal(out1$Petal.Width, c(1.2, 2.4), tolerance = 1e-4)
19 |
20 | expect_error(
21 | estimate_means(mod, c("Species=c('versicolor','setosa')", "Petal.Width=c(3,5)"), estimate = "average"),
22 | regex = "None of the values specified"
23 | )
24 | })
25 |
26 |
27 | test_that("estimate_contrast, filterin in `by` and `contrast`", {
28 | data(efc, package = "modelbased")
29 | efc <- datawizard::to_factor(efc, c("c161sex", "c172code", "e16sex", "e42dep"))
30 | levels(efc$c172code) <- c("low", "mid", "high")
31 | m <- lm(neg_c_7 ~ barthtot + c172code * e42dep + c161sex, data = efc)
32 |
33 | out <- estimate_means(m, c("e42dep", "c172code"), estimate = "average")
34 | expect_identical(dim(out), c(12L, 8L))
35 |
36 | out <- estimate_means(
37 | m,
38 | by = c(
39 | "e42dep=c('independent','slightly dependent','moderately dependent')",
40 | "c172code"
41 | ),
42 | estimate = "average"
43 | )
44 | expect_identical(dim(out), c(9L, 8L))
45 | expect_equal(
46 | out$Mean,
47 | c(
48 | 10.41667, 8.90909, 8.8, 9.85, 10.21053, 11.58974, 11.28814,
49 | 11.9125, 11.91667
50 | ),
51 | tolerance = 1e-4
52 | )
53 |
54 | out <- estimate_means(m, c("e42dep", "c172code=c('low','mid')"), estimate = "average")
55 | expect_identical(dim(out), c(8L, 8L))
56 |
57 | out <- estimate_means(m,
58 | c(
59 | "e42dep=c('independent','slightly dependent')",
60 | "c172code=c('low','mid')"
61 | ),
62 | estimate = "average"
63 | )
64 | expect_identical(dim(out), c(4L, 8L))
65 | })
66 |
--------------------------------------------------------------------------------
/tests/testthat/test-estimate_means_ci.R:
--------------------------------------------------------------------------------
1 | skip_on_cran()
2 | skip_if_not_installed("marginaleffects")
3 |
4 | test_that("estimate_means() - ci frequentist", {
5 | data(iris)
6 |
7 | mod2 <- lm(Sepal.Length ~ Sepal.Width * Species, data = iris)
8 |
9 | # Estimate means with different CIs
10 | out <- estimate_means(mod2, by = "Species", backend = "marginaleffects", ci = 0.95)
11 | x <- capture.output(out)
12 | expect_identical(x[5], "setosa | 4.75 | 0.09 | [4.58, 4.92] | 54.34")
13 |
14 | out <- estimate_means(mod2, by = "Species", backend = "marginaleffects", ci = 0.89)
15 | x <- capture.output(out)
16 | expect_identical(x[5], "setosa | 4.75 | 0.09 | [4.61, 4.89] | 54.34")
17 |
18 | out <- estimate_contrasts(mod2, contrast = "Species", backend = "marginaleffects", ci = 0.95)
19 | x <- capture.output(out)
20 | expect_identical(x[5], "versicolor | setosa | 1.43 | 0.12 | [1.19, 1.68] | 11.78 | < .001")
21 |
22 | out <- estimate_contrasts(mod2, contrast = "Species", backend = "marginaleffects", ci = 0.89)
23 | x <- capture.output(out)
24 | expect_identical(x[5], "versicolor | setosa | 1.43 | 0.12 | [1.24, 1.63] | 11.78 | < .001")
25 |
26 | # Estimate slopes with different CIs
27 | out <- estimate_slopes(mod2, trend = "Sepal.Width", by = "Species", backend = "marginaleffects", ci = 0.95)
28 | x <- capture.output(out)
29 | expect_identical(x[5], "setosa | 0.69 | 0.17 | [0.36, 1.02] | 4.17 | < .001")
30 |
31 | out <- estimate_slopes(mod2, trend = "Sepal.Width", by = "Species", backend = "marginaleffects", ci = 0.89)
32 | x <- capture.output(out)
33 | expect_identical(x[5], "setosa | 0.69 | 0.17 | [0.42, 0.96] | 4.17 | < .001")
34 | })
35 |
36 |
37 | skip_if_not_installed("curl")
38 | skip_if_offline()
39 | skip_if_not_installed("brms")
40 | skip_if_not_installed("BH")
41 | skip_if_not_installed("RcppEigen")
42 | skip_if_not_installed("httr2")
43 |
44 | test_that("estimate_means() - ci Bayesian", {
45 | mod2 <- insight::download_model("brms_4")
46 | skip_if(is.null(mod2))
47 |
48 | # Estimate means with different CIs
49 | out <- estimate_means(mod2, by = "Species", backend = "marginaleffects", ci = 0.95)
50 | x <- capture.output(out)
51 | expect_identical(x[5], "setosa | 4.76 | [4.59, 4.93] | 100% | [-0.10, 0.10] | 0%")
52 |
53 | out <- estimate_means(mod2, by = "Species", backend = "marginaleffects", ci = 0.89)
54 | x <- capture.output(out)
55 | expect_identical(x[5], "setosa | 4.76 | [4.62, 4.90] | 100% | [-0.10, 0.10] | 0%")
56 |
57 | out <- estimate_contrasts(mod2, contrast = "Species", backend = "marginaleffects", ci = 0.95)
58 | x <- capture.output(out)
59 | expect_identical(x[5], "versicolor | setosa | 1.43 | [1.18, 1.67] | 100% | [-0.10, 0.10] | 0%")
60 |
61 | out <- estimate_contrasts(mod2, contrast = "Species", backend = "marginaleffects", ci = 0.89)
62 | x <- capture.output(out)
63 | expect_identical(x[5], "versicolor | setosa | 1.43 | [1.23, 1.63] | 100% | [-0.10, 0.10] | 0%")
64 |
65 | # Estimate slopes with different CIs
66 | out <- estimate_slopes(mod2, trend = "Sepal.Width", by = "Species", backend = "marginaleffects", ci = 0.95)
67 | x <- capture.output(out)
68 | expect_identical(x[5], "setosa | 0.67 | [0.35, 0.99] | 100% | [-0.10, 0.10] | 0%")
69 |
70 | out <- estimate_slopes(mod2, trend = "Sepal.Width", by = "Species", backend = "marginaleffects", ci = 0.89)
71 | x <- capture.output(out)
72 | expect_identical(x[5], "setosa | 0.67 | [0.41, 0.93] | 100% | [-0.10, 0.10] | 0%")
73 | })
74 |
--------------------------------------------------------------------------------
/tests/testthat/test-estimate_means_counterfactuals.R:
--------------------------------------------------------------------------------
1 | skip_on_cran()
2 | skip_if_not_installed("emmeans")
3 | skip_if_not_installed("marginaleffects")
4 | skip_if_not_installed("glmmTMB")
5 | skip_if_not_installed("lme4")
6 |
7 | test_that("estimate_means() - counterfactuals", {
8 | data(Salamanders, package = "glmmTMB")
9 | m <- glmmTMB::glmmTMB(
10 | count ~ spp + mined + (1 | site),
11 | family = poisson(),
12 | data = Salamanders
13 | )
14 | expect_snapshot(print(estimate_means(m, "spp", backend = "marginaleffects"), zap_small = TRUE, table_width = Inf), variant = "windows") # nolint
15 | expect_snapshot(print(estimate_means(m, "spp", backend = "marginaleffects", estimate = "population"), zap_small = TRUE, table_width = Inf), variant = "windows") # nolint
16 |
17 | expect_snapshot(print(estimate_means(m, "spp", backend = "marginaleffects", predict = "inverse_link"), zap_small = TRUE, table_width = Inf), variant = "windows") # nolint
18 | expect_snapshot(print(estimate_means(m, "spp", backend = "marginaleffects", estimate = "population", predict = "inverse_link"), zap_small = TRUE, table_width = Inf), variant = "windows") # nolint
19 |
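  |   # predict = "inverse_link" should give results close to the emmeans backend's rates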
20 | out1 <- estimate_means(m, "spp", backend = "marginaleffects", predict = "inverse_link")
21 | out2 <- estimate_means(m, "spp", backend = "emmeans")
22 | expect_equal(out1$Mean, out2$Rate, tolerance = 1e-1)
23 |
24 | data(sleepstudy, package = "lme4")
25 | # create imbalanced data set
26 | set.seed(123)
27 | strapped <- sleepstudy[sample.int(nrow(sleepstudy), nrow(sleepstudy), replace = TRUE), ]
28 | m <- lme4::lmer(Reaction ~ Days + (1 + Days | Subject), data = strapped)
29 | expect_snapshot(print(estimate_means(m, "Days", backend = "marginaleffects"), zap_small = TRUE, table_width = Inf), variant = "windows") # nolint
30 | expect_snapshot(print(estimate_means(m, "Days", backend = "marginaleffects", estimate = "population"), zap_small = TRUE, table_width = Inf), variant = "windows") # nolint
31 | })
32 |
--------------------------------------------------------------------------------
/tests/testthat/test-estimate_means_dotargs.R:
--------------------------------------------------------------------------------
1 | skip_on_cran()
2 | skip_if_not_installed("marginaleffects")
3 |
4 | test_that("estimate_means() - estimate = average", {
5 | data(mtcars)
6 | set.seed(123)
7 | mtcars$weight <- abs(rnorm(nrow(mtcars), 1, 0.3))
8 | mtcars$cyl <- as.factor(mtcars$cyl)
9 | m <- lm(mpg ~ wt + cyl + vs, data = mtcars, weights = weight)
10 |
11 | expect_warning(
12 | estimate_means(m, "cyl", weights = "weight"),
13 | regex = "Using weights is not possible"
14 | )
15 |
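  |   # with estimate = "average", `weights` should be passed down to marginaleffects' `wts`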
16 | out1 <- estimate_means(m, "cyl", weights = "weight", estimate = "average")
17 | out2 <- marginaleffects::avg_predictions(m, by = "cyl", wts = "weight")
18 | expect_equal(out1$Mean, out2$estimate, tolerance = 1e-4)
19 | out1 <- estimate_means(m, "cyl", estimate = "average")
20 | out2 <- marginaleffects::avg_predictions(m, by = "cyl")
21 | expect_equal(out1$Mean, out2$estimate, tolerance = 1e-4)
22 | })
23 |
24 |
25 | test_that("estimate_means() - estimate = population", {
26 | data(mtcars)
27 | set.seed(123)
28 | mtcars$weight <- abs(rnorm(nrow(mtcars), 1, 0.3))
29 | mtcars$cyl <- as.factor(mtcars$cyl)
30 | m <- lm(mpg ~ wt + cyl + vs, data = mtcars, weights = weight)
31 | out1 <- estimate_means(m, "cyl", weights = "weight", estimate = "population")
32 | out2 <- marginaleffects::avg_predictions(m, variables = "cyl", wts = "weight")
33 | expect_equal(out1$Mean, out2$estimate, tolerance = 1e-4)
34 | out1 <- estimate_means(m, "cyl", estimate = "population")
35 | out2 <- marginaleffects::avg_predictions(m, variables = "cyl")
36 | expect_equal(out1$Mean, out2$estimate, tolerance = 1e-4)
37 | })
38 |
--------------------------------------------------------------------------------
/tests/testthat/test-estimate_means_marginalization.R:
--------------------------------------------------------------------------------
1 | skip_on_cran()
2 | skip_if_not_installed("marginaleffects")
3 |
4 | test_that("estimate_means() - estimate options", {
5 | data(efc, package = "modelbased")
6 | efc <- datawizard::to_factor(efc, c("c161sex", "c172code", "e16sex", "e42dep"))
7 | levels(efc$c172code) <- c("low", "mid", "high")
8 | m <- lm(barthtot ~ c161sex + c172code + neg_c_7, data = efc)
9 |
10 | out <- estimate_means(m, "c161sex", estimate = "specific")
11 | expect_equal(out$Mean, c(61.15226, 60.60773), tolerance = 1e-4)
12 | out <- estimate_means(m, "c161sex", estimate = "typical")
13 | expect_equal(out$Mean, c(64.61181, 64.06727), tolerance = 1e-4)
14 | out <- estimate_means(m, "c161sex", estimate = "average")
15 | expect_equal(out$Mean, c(67.05128, 64.03226), tolerance = 1e-4)
16 | out <- estimate_means(m, "c161sex", estimate = "population")
17 | expect_equal(out$Mean, c(65.16885, 64.62431), tolerance = 1e-4)
18 | })
19 |
--------------------------------------------------------------------------------
/tests/testthat/test-g_computation.R:
--------------------------------------------------------------------------------
1 | skip_on_cran()
2 | skip_if_not_installed("marginaleffects")
3 | skip_if_not_installed("nanoparquet")
4 | skip_on_os("mac")
5 |
6 | test_that("estimate_contrasts - ATT and ATU", {
7 | dat <- marginaleffects::get_dataset("lottery")
8 | dat <- subset(dat, win_big == 1 | win == 0)
9 | dat$win_big <- as.factor(dat$win_big)
10 |
11 | mod <- lm(
12 | earnings_post_avg ~ win_big * (
13 | tickets + man + work + age + education + college + year +
14 | earnings_pre_1 + earnings_pre_2 + earnings_pre_3),
15 | data = dat
16 | )
17 |
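  |   # estimates should match marginaleffects; ATT/ATU below restrict `newdata` to treated / untreated units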
18 | out1 <- marginaleffects::avg_predictions(mod, variables = "win_big", by = "win_big")
19 | out2 <- estimate_means(mod, "win_big")
20 | expect_equal(out1$estimate, out2$Mean, tolerance = 1e-4)
21 |
22 | # ATE
23 | out1 <- marginaleffects::avg_comparisons(mod, variables = "win_big", newdata = dat)
24 | out2 <- estimate_contrasts(mod, "win_big")
25 | expect_equal(out1$estimate, out2$Difference, tolerance = 1e-4)
26 |
27 | # ATT
28 | out1 <- marginaleffects::avg_comparisons(mod, variables = "win_big", newdata = subset(dat, win_big == 1))
29 | out2 <- estimate_contrasts(mod, "win_big", newdata = subset(dat, win_big == 1), estimate = "population")
30 | expect_equal(out1$estimate, out2$Difference, tolerance = 1e-4)
31 |
32 | # ATU
33 | out1 <- marginaleffects::avg_comparisons(mod, variables = "win_big", newdata = subset(dat, win_big == 0))
34 | out2 <- estimate_contrasts(mod, "win_big", newdata = subset(dat, win_big == 0), estimate = "population")
35 | expect_equal(out1$estimate, out2$Difference, tolerance = 1e-4)
36 |
37 | # error
38 | expect_error(
39 | estimate_contrasts(mod, "win_big", newdata = subset(dat, win_big == 1)),
40 | regex = "It seems that not all"
41 | )
42 | })
43 |
--------------------------------------------------------------------------------
/tests/testthat/test-get_marginaltrends.R:
--------------------------------------------------------------------------------
1 | skip_on_cran()
2 | skip_if_not_installed("marginaleffects")
3 | skip_if_not_installed("emmeans")
4 |
5 | test_that("get_marginaltrends", {
6 | model <- lm(Sepal.Width ~ Species * Petal.Length, data = iris)
7 | out <- get_marginaltrends(model, trend = "Petal.Length", by = "Species")
8 | expect_identical(nrow(out), 3L)
9 |
10 | out2 <- estimate_slopes(model, trend = "Petal.Length", by = "Species")
11 | expect_equal(out$estimate, out2$Slope, tolerance = 1e-3)
12 |
13 | e1 <- as.data.frame(get_emtrends(model, by = NULL, trend = "Petal.Length"))
14 | e2 <- as.data.frame(get_marginaltrends(model, by = NULL, trend = "Petal.Length"))
15 | expect_equal(e1$Petal.Length.trend, e2$estimate, tolerance = 1e-4)
16 |
17 | e1 <- as.data.frame(get_emtrends(model, by = "Species", trend = "Petal.Length"))
18 | e2 <- as.data.frame(get_marginaltrends(model, by = "Species", trend = "Petal.Length"))
19 | expect_equal(e1$Petal.Length.trend, e2$estimate, tolerance = 1e-4)
20 |
21 | ## TODO: find out why these two slightly differ
22 | e1 <- as.data.frame(get_emtrends(model, by = "Petal.Length", trend = "Petal.Length"))
23 | e2 <- as.data.frame(get_marginaltrends(model, by = "Petal.Length", trend = "Petal.Length"))
24 | expect_equal(e1$Petal.Length.trend, e2$estimate, tolerance = 0.2)
25 | })
26 |
27 | test_that("get_marginaltrends, warnings", {
28 | data(iris)
29 | model <- lm(Sepal.Width ~ Petal.Width * Petal.Length, data = iris)
30 | expect_message(
31 | get_marginaltrends(model, trend = c("Petal.Width", "Petal.Length")),
32 | regex = "More than one numeric variable"
33 | )
34 | })
35 |
--------------------------------------------------------------------------------
/tests/testthat/test-maihda.R:
--------------------------------------------------------------------------------
1 | skip_on_cran()
2 | skip_if_not_installed("glmmTMB")
3 | skip_if_not_installed("datawizard")
4 |
5 | test_that("maihda", {
6 | # sample data set
7 | data(efc, package = "modelbased")
8 |
9 | efc <- datawizard::to_factor(efc, select = c("c161sex", "c172code", "c175empl"))
10 | efc <- datawizard::recode_values(
11 | efc,
12 | select = "c160age",
13 | recode = list(`1` = "min:40", `2` = 41:64, `3` = "65:max")
14 | )
15 | efc <- datawizard::data_rename(
16 | efc,
17 | select = c("c161sex", "c160age", "quol_5", "c175empl"),
18 | replacement = c("gender", "age", "qol", "employed")
19 | )
20 | efc <- datawizard::data_modify(efc, age = factor(age, labels = c("-40", "41-64", "65+")))
21 |
22 | set.seed(1)
23 | efc$weights <- abs(rnorm(nrow(efc), mean = 1, sd = 0.1))
24 |
25 | m_null <- glmmTMB::glmmTMB(
26 | qol ~ 1 + (1 | gender:employed:age),
27 | data = efc,
28 | weights = weights
29 | )
30 |
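  |   # expected values for all 12 intersectional strata, then all 66 pairwise stratum contrasts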
31 | out <- estimate_relation(m_null, by = c("gender", "employed", "age"))
32 | expect_identical(dim(out), c(12L, 9L))
33 | out <- estimate_contrasts(out, contrast = c("gender", "employed", "age"))
34 | expect_identical(dim(out), c(66L, 8L))
35 | })
36 |
--------------------------------------------------------------------------------
/tests/testthat/test-mgcv.R:
--------------------------------------------------------------------------------
1 | skip_if_not_installed("emmeans")
2 | skip_if_not_installed("marginaleffects")
3 | skip_if_not_installed("mgcv")
4 | skip_if_not_installed("gamm4")
5 |
6 |
7 | test_that("estimate_means - mgcv gam", {
8 | model <- mgcv::gam(Sepal.Length ~ Species + s(Sepal.Width, by = Species), data = iris)
9 | estim <- suppressMessages(estimate_means(model, backend = "emmeans"))
10 | expect_identical(dim(estim), c(3L, 5L))
11 | estim2 <- suppressMessages(estimate_means(model, backend = "marginaleffects"))
12 | expect_identical(dim(estim2), c(3L, 7L))
13 | expect_named(estim2, c("Species", "Mean", "SE", "CI_low", "CI_high", "t", "df"))
14 | expect_equal(estim$Mean, estim2$Mean, tolerance = 1e-4)
15 | })
16 |
17 |
18 | test_that("estimate_contrasts - mgcv gam", {
19 | model <- mgcv::gam(Sepal.Length ~ Species + s(Sepal.Width, by = Species), data = iris)
20 | estim <- suppressMessages(estimate_contrasts(model, backend = "emmeans"))
21 | expect_identical(dim(estim), c(3L, 9L))
22 | estim2 <- suppressMessages(estimate_contrasts(model, backend = "marginaleffects"))
23 | expect_identical(dim(estim2), c(3L, 9L))
24 | expect_named(estim2, c("Level1", "Level2", "Difference", "SE", "CI_low", "CI_high", "t", "df", "p"))
25 | expect_equal(estim$Difference, estim2$Difference * -1, tolerance = 1e-4) # switched signs
26 | })
27 |
28 |
29 | test_that("estimate_expectation - mgcv gam", {
30 | model <- mgcv::gam(Sepal.Length ~ Species + s(Sepal.Width, by = Species), data = iris)
31 | estim <- suppressMessages(estimate_expectation(model))
32 | expect_identical(dim(estim), c(150L, 7L))
33 | })
34 |
35 |
36 | test_that("estimate_link - mgcv gam", {
37 | model <- mgcv::gam(Sepal.Length ~ Species + s(Sepal.Width, by = Species), data = iris)
38 | estim <- suppressMessages(estimate_link(model, preserve_range = FALSE))
39 | expect_equal(dim(estim), c(30, 6))
40 | })
41 |
42 |
43 | # model <- mgcv::gam(Sepal.Length ~ Petal.Length + s(Sepal.Width) + s(Species, bs = "fs"), data = iris)
44 | # estim <- estimate_link(model)
45 |
46 | test_that("estimate_expectation - mgcv gamm", {
47 | model <- mgcv::gamm(Sepal.Length ~ Petal.Length + s(Sepal.Width), random = list(Species = ~1), data = iris)
48 | estim <- suppressMessages(estimate_expectation(model))
49 | expect_equal(dim(estim), c(150, 8))
50 | })
51 |
52 |
53 | test_that("estimate_link - mgcv gamm", {
54 | skip_on_os("mac")
55 | model <- mgcv::gamm(Sepal.Length ~ Petal.Length + s(Sepal.Width), random = list(Species = ~1), data = iris)
56 | estim <- estimate_link(model, length = 4, verbose = FALSE)
57 | expect_identical(dim(estim), as.integer(c(16, 6)))
58 | })
59 |
60 |
61 | # Gamm4 -------------------------------------------------------------------
62 |
63 | # model <- gamm4::gamm4(Sepal.Length ~ Petal.Length + s(Sepal.Width), random=~(1|Species), data = iris)
64 | #
65 | # test_that("estimate_expectation - gamm4", {
66 | # estim <- estimate_expectation(model)
67 | # expect_equal(dim(estim), c(150, 5))
68 | # })
69 | #
70 | # test_that("estimate_link - gamm4", {
71 | # estim <- estimate_link(model, length=4)
72 | # expect_equal(dim(estim), c(16, 5))
73 | # })
74 |
--------------------------------------------------------------------------------
/tests/testthat/test-mice.R:
--------------------------------------------------------------------------------
1 | skip_on_cran()
2 | skip_if_not_installed("mice")
3 | skip_if_not_installed("marginaleffects")
4 |
5 | test_that("pool_predictions", {
6 | set.seed(123)
7 | data("nhanes2", package = "mice")
8 | imp <- mice::mice(nhanes2, printFlag = FALSE)
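  |   # fit the same model to each of the 5 imputed data sets, then pool the predictions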
9 | predictions <- lapply(1:5, function(i) {
10 | m <- lm(bmi ~ age + hyp + chl, data = mice::complete(imp, action = i))
11 | estimate_means(m, "age")
12 | })
13 | out <- pool_predictions(predictions)
14 | expect_equal(out$Mean, c(29.84661, 25.20021, 23.14022), tolerance = 1e-2)
15 | expect_equal(out$CI_low, c(2.10117, 3.44548, -5.79522), tolerance = 1e-2)
16 |
17 | # transformed response
18 | predictions <- lapply(1:5, function(i) {
19 | m <- lm(log1p(bmi) ~ age + hyp + chl, data = mice::complete(imp, action = i))
20 | estimate_means(m, "age")
21 | })
22 | out <- pool_predictions(predictions, transform = TRUE)
23 | expect_equal(out$Mean, c(29.67473, 24.99382, 23.19148), tolerance = 1e-2)
24 | expect_equal(out$CI_low, c(10.58962, 11.13011, 7.43196), tolerance = 1e-2)
25 |
26 | # glm, logistic
27 | predictions <- lapply(1:5, function(i) {
28 | m <- glm(hyp ~ age + chl, data = mice::complete(imp, action = i), family = binomial())
29 | estimate_means(m, "age")
30 | })
31 | expect_warning(pool_predictions(predictions), regex = "Could not extract")
32 |
33 | predictions <- lapply(1:5, function(i) {
34 | m <- glm(hyp ~ age + chl, data = mice::complete(imp, action = i), family = binomial())
35 | estimate_means(m, "age", type = "response")
36 | })
37 | expect_silent(pool_predictions(predictions))
38 | })
39 |
40 |
41 | test_that("pool_contrasts", {
42 | set.seed(123)
43 | data("nhanes2", package = "mice")
44 | imp <- mice::mice(nhanes2, printFlag = FALSE)
45 | comparisons <- lapply(1:5, function(i) {
46 | m <- lm(bmi ~ age + hyp + chl, data = mice::complete(imp, action = i))
47 | estimate_contrasts(m, "age")
48 | })
49 | out <- pool_contrasts(comparisons)
50 | expect_equal(out$Difference, c(-4.6464, -6.70639, -2.05999), tolerance = 1e-2)
51 | expect_equal(out$CI_low, c(-12.31066, -18.92406, -11.94194), tolerance = 1e-2)
52 | expect_equal(out$p, c(0.14926, 0.17899, 0.55449), tolerance = 1e-2)
53 | })
54 |
55 |
56 | test_that("pool_slopes", {
57 | set.seed(123)
58 | data("nhanes2", package = "mice")
59 | imp <- mice::mice(nhanes2, printFlag = FALSE)
60 | slopes <- lapply(1:5, function(i) {
61 | m <- lm(bmi ~ age + hyp + chl, data = mice::complete(imp, action = i))
62 | estimate_slopes(m, "chl")
63 | })
64 | out <- pool_slopes(slopes)
65 | expect_equal(out$Slope, 0.05666, tolerance = 1e-2)
66 | expect_equal(out$CI_low, 0.0005592606, tolerance = 1e-2)
67 | expect_named(
68 | out,
69 | c("Slope", "SE", "CI_low", "CI_high", "t", "df", "p")
70 | )
71 |
72 | slopes <- lapply(1:5, function(i) {
73 | m <- lm(bmi ~ age + hyp + chl, data = mice::complete(imp, action = i))
74 | estimate_slopes(m, "age")
75 | })
76 | out <- pool_slopes(slopes)
77 | expect_equal(out$Slope, c(-4.6464, -6.70639), tolerance = 1e-2)
78 | expect_equal(out$CI_low, c(-10.83711, -16.57506), tolerance = 1e-2)
79 | expect_named(
80 | out,
81 | c("Comparison", "Slope", "SE", "CI_low", "CI_high", "t", "df", "p")
82 | )
83 | })
84 |
--------------------------------------------------------------------------------
/tests/testthat/test-multivariate_response.R:
--------------------------------------------------------------------------------
1 | skip_if_not_installed("marginaleffects")
2 | skip_if_not_installed("lme4")
3 |
4 | test_that("multivariate response", {
5 | data(cbpp, package = "lme4")
6 | gm1 <- lme4::glmer(
7 | cbind(incidence, size - incidence) ~ period + (1 | herd),
8 | data = cbpp,
9 | family = "binomial"
10 | )
11 |
12 | out <- estimate_expectation(gm1, by = "period", include_random = TRUE)
13 | expect_identical(dim(out), c(4L, 6L))
14 |
15 | out <- estimate_expectation(gm1, include_random = TRUE)
16 | expect_identical(dim(out), c(56L, 6L))
17 |
18 | fm1 <- lm(cbind(mpg, wt) ~ cyl + disp, data = mtcars)
19 | out <- estimate_expectation(fm1)
20 | expect_identical(dim(out), c(64L, 5L))
21 |
22 | out <- estimate_expectation(fm1, by = "cyl")
23 | expect_identical(dim(out), c(6L, 5L))
24 | })
25 |
--------------------------------------------------------------------------------
/tests/testthat/test-offset.R:
--------------------------------------------------------------------------------
1 | skip_if_not_installed("marginaleffects")
2 | skip_if_not_installed("MASS")
3 |
4 | test_that("estimate_means, offset", {
5 | set.seed(1)
6 | newdata <- data.frame(
7 | y = c(602, 38, 616, 256, 21, 723, 245, 176, 89, 1614, 31, 27, 313, 251, 345),
8 | x = as.factor(sample(letters[1:3], 15, replace = TRUE)),
9 | offset_1 = c(72, 50, 31, 30, 16, 25, 75, 16, 78, 40, 68, 25, 71, 52, 17)
10 | )
11 |
12 | m <- MASS::glm.nb(y ~ x, data = newdata)
13 | out1 <- estimate_means(m, "x")
14 | out2 <- estimate_means(m, "x", estimate = "average")
15 | expect_equal(out1$Mean, out2$Mean, tolerance = 1e-3)
16 |
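  |   # with an offset, messages should mention the offset value in the data grid and averaging over observed offsets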
17 | moff <- MASS::glm.nb(y ~ x + offset(log(offset_1)), data = newdata)
18 | expect_message(
19 | {
20 | out1 <- estimate_means(moff, "x")
21 | },
22 | regex = "which is set to"
23 | )
24 | expect_message(
25 | {
26 | out2 <- estimate_means(moff, "x", estimate = "average")
27 | },
28 | regex = "and you average"
29 | )
30 | expect_equal(out1$Mean, c(295.12035, 454.3339, 654.64225), tolerance = 1e-3)
31 | expect_equal(out2$Mean, c(256.42016, 289.02697, 707.83022), tolerance = 1e-3)
32 |
33 | expect_message(
34 | {
35 | estimate_means(moff, "x")
36 | },
37 | regex = "We also found"
38 | )
39 |
40 | expect_silent({
41 | out1 <- estimate_means(moff, "x", offset = 100)
42 | })
43 | expect_message(
44 | {
45 | out2 <- estimate_means(moff, "x", estimate = "average", offset = 100)
46 | },
47 | regex = "For"
48 | )
49 | expect_equal(out1$Mean, c(664.68547, 1023.27456, 1474.41949), tolerance = 1e-3)
50 | expect_equal(out2$Mean, c(256.42016, 289.02697, 707.83022), tolerance = 1e-3)
51 |
52 | set.seed(1)
53 | newdata <- data.frame(
54 | y = c(602, 38, 616, 256, 21, 723, 245, 176, 89, 1614, 31, 27, 313, 251, 345),
55 | x = as.factor(sample(letters[1:3], 15, replace = TRUE)),
56 | offset_1 = rep_len(50, 15)
57 | )
58 | moff <- MASS::glm.nb(y ~ x + offset(log(offset_1)), data = newdata)
59 | expect_silent({
60 | out1 <- estimate_means(moff, "x", verbose = FALSE)
61 | })
62 | expect_silent({
63 | out2 <- estimate_means(moff, "x", estimate = "average", verbose = FALSE)
64 | })
65 | expect_equal(out1$Mean, out2$Mean, tolerance = 1e-3)
66 | })
67 |
68 |
69 | test_that("offset, estimate_relation", {
70 | set.seed(1)
71 | newdata <- data.frame(
72 | y = c(602, 38, 616, 256, 21, 723, 245, 176, 89, 1614, 31, 27, 313, 251, 345),
73 | x = as.factor(sample(letters[1:3], 15, replace = TRUE)),
74 | offset_1 = c(72, 50, 31, 30, 16, 25, 75, 16, 78, 40, 68, 25, 71, 52, 17)
75 | )
76 |
77 | moff <- MASS::glm.nb(y ~ x + offset(log(offset_1)), data = newdata)
78 | out <- estimate_relation(moff, by = "x")
79 | expect_equal(attributes(out)$datagrid$offset_1, c(44.4, 44.4, 44.4), tolerance = 1e-3)
80 | out <- estimate_relation(moff, by = "x", offset = 100)
81 | expect_equal(attributes(out)$datagrid$offset_1, c(100, 100, 100), tolerance = 1e-3)
82 | })
83 |
--------------------------------------------------------------------------------
/tests/testthat/test-ordinal.R:
--------------------------------------------------------------------------------
1 | skip_on_cran()
2 | skip_if_not_installed("curl")
3 | skip_if_offline()
4 | skip_if_not_installed("brms")
5 | skip_if_not_installed("BH")
6 | skip_if_not_installed("RcppEigen")
7 | skip_if_not_installed("marginaleffects")
8 | skip_if_not_installed("httr2")
9 | skip_if_not_installed("MASS")
10 |
11 | test_that("estimate_relation prints ordinal models correctly", {
12 | m <- suppressWarnings(insight::download_model("brms_categorical_2_num"))
13 | out <- suppressWarnings(estimate_relation(m))
14 | expect_snapshot(print(out, zap_small = TRUE), variant = "windows")
15 | out <- suppressWarnings(estimate_means(m, by = "Sepal.Width"))
16 | expect_snapshot(print(out, zap_small = TRUE), variant = "windows")
17 |
18 | m <- MASS::polr(Species ~ Sepal.Width, data = iris)
19 | out <- estimate_relation(m, verbose = FALSE)
20 | expect_snapshot(print(out, zap_small = TRUE), variant = "windows")
21 | out <- estimate_means(m, by = "Sepal.Width")
22 | expect_snapshot(print(out, zap_small = TRUE), variant = "windows")
23 |
24 | # keep row column
25 | out <- suppressWarnings(estimate_relation(m, data = iris[1:3, ], verbose = FALSE))
26 | expect_named(out, c("Row", "Response", "Sepal.Width", "Predicted", "CI_low", "CI_high", "Residuals")) # nolint
27 | expect_identical(dim(out), c(9L, 7L))
28 | })
29 |
--------------------------------------------------------------------------------
/tests/testthat/test-plot-flexible_numeric.R:
--------------------------------------------------------------------------------
1 | skip_on_os(c("mac", "solaris", "linux"))
2 | skip_if_not_installed("ggplot2")
3 | skip_if_not_installed("see")
4 | skip_if_not_installed("vdiffr")
5 | skip_if_not_installed("marginaleffects")
6 | skip_on_cran()
7 |
8 | test_that("plot 2nd by is numeric", {
9 | set.seed(1234)
10 | x1 <- rnorm(200)
11 | x2 <- rnorm(200)
12 | y <- 2 * x1 + x1^2 + 4 * x2 + rnorm(200)
13 |
14 | d <- data.frame(x1, x2, y)
15 | model <- lm(y ~ x1 + x2, data = d)
16 |
17 | pr <- estimate_means(model, c("x1", "x2"))
18 | vdiffr::expect_doppelganger(
19 | "plot-auto-numeric-by-1",
20 | plot(pr)
21 | )
22 |
23 | pr <- estimate_means(model, c("x1", "x2 = [sd]"))
24 | vdiffr::expect_doppelganger(
25 | "plot-auto-numeric-by-2",
26 | plot(pr)
27 | )
28 | })
29 |
--------------------------------------------------------------------------------
/tests/testthat/test-plot-grouplevel.R:
--------------------------------------------------------------------------------
1 | skip_on_cran()
2 | skip_on_os(c("mac", "solaris", "linux"))
3 | skip_if_not_installed("ggplot2")
4 | skip_if_not_installed("see")
5 | skip_if_not_installed("vdiffr")
6 |
7 | skip_if(utils::packageVersion("insight") <= "1.1.0")
8 | skip_if(utils::packageVersion("parameters") <= "0.24.1")
9 |
10 | test_that("plots grouplevel frequentist", {
11 | skip_if_not_installed("lme4")
12 |
13 | data(mtcars)
14 | model <- lme4::lmer(mpg ~ hp + (1 | carb), data = mtcars)
15 |
16 | out <- estimate_grouplevel(model)
17 | vdiffr::expect_doppelganger("plot-grouplevel-freq-1", plot(out))
18 |
19 | out <- estimate_grouplevel(model, type = "total")
20 | vdiffr::expect_doppelganger("plot-grouplevel-freq-2", plot(out))
21 | })
22 |
23 | test_that("plots grouplevel Bayesian", {
24 | skip_if_not_installed("curl")
25 | skip_if_offline()
26 | skip_if_not_installed("httr2")
27 | skip_if_not_installed("brms")
28 |
29 | model <- insight::download_model("brms_sigma_3")
30 |
31 | out <- estimate_grouplevel(model)
32 | vdiffr::expect_doppelganger("plot-grouplevel-Bayes-1", plot(out))
33 |
34 | out <- estimate_grouplevel(model, type = "total")
35 | vdiffr::expect_doppelganger("plot-grouplevel-Bayes-2", plot(out))
36 |
37 | model <- insight::download_model("brms_zi_4")
38 |
39 | out <- estimate_grouplevel(model)
40 | vdiffr::expect_doppelganger("plot-grouplevel-Bayes-3", plot(out))
41 |
42 | out <- estimate_grouplevel(model, type = "total")
43 | vdiffr::expect_doppelganger("plot-grouplevel-Bayes-4", plot(out))
44 |
45 | out <- estimate_grouplevel(model, type = "total")
46 | vr <- visualisation_recipe(out, pointrange = list(color = "black"))
47 | vdiffr::expect_doppelganger("plot-grouplevel-Bayes-5", plot(vr))
48 | })
49 |
--------------------------------------------------------------------------------
/tests/testthat/test-plot-ordinal.R:
--------------------------------------------------------------------------------
1 | skip_on_os(c("mac", "solaris", "linux"))
2 | skip_if_not_installed("ggplot2")
3 | skip_if_not_installed("see")
4 | skip_if_not_installed("vdiffr")
5 | skip_if_not_installed("marginaleffects")
6 | skip_if_not_installed("MASS")
7 | skip_if_not_installed("ordinal")
8 | skip_on_cran()
9 |
10 | test_that("plots ordinal", {
11 | m <- MASS::polr(Species ~ Sepal.Width, data = iris)
12 | out <- estimate_means(m, by = "Sepal.Width")
13 | set.seed(123)
14 | vdiffr::expect_doppelganger(
15 | "plot-ordinal-1",
16 | plot(out, show_data = FALSE)
17 | )
18 |
19 | out <- estimate_relation(m, by = "Sepal.Width", verbose = FALSE)
20 | set.seed(123)
21 | vdiffr::expect_doppelganger(
22 | "plot-ordinal-2",
23 | plot(out, show_data = FALSE)
24 | )
25 | })
26 |
27 |
28 | test_that("plots package ordinal", {
29 | data(wine, package = "ordinal")
30 | m1 <- ordinal::clm(rating ~ temp * contact, data = wine)
31 |
32 | out <- estimate_means(m1, by = "temp")
33 | set.seed(123)
34 | vdiffr::expect_doppelganger(
35 | "plot-ordinal-3",
36 | plot(out, show_data = FALSE)
37 | )
38 |
39 | out <- estimate_relation(m1, by = "temp", verbose = FALSE)
40 | set.seed(123)
41 | vdiffr::expect_doppelganger(
42 | "plot-ordinal-4",
43 | plot(out, show_data = FALSE)
44 | )
45 | })
46 |
--------------------------------------------------------------------------------
/tests/testthat/test-plot-slopes.R:
--------------------------------------------------------------------------------
1 | skip_on_os(c("mac", "solaris", "linux"))
2 | skip_if_not_installed("ggplot2")
3 | skip_if_not_installed("see")
4 | skip_if_not_installed("vdiffr")
5 | skip_if_not_installed("marginaleffects")
6 | skip_on_cran()
7 |
8 | test_that("plot slopes, correct y axis labels", {
9 | data(mtcars)
10 | mtcars$gear <- as.factor(mtcars$gear)
11 | model <- lm(mpg ~ hp * wt, data = mtcars)
12 | slopes <- estimate_slopes(model, trend = "hp", by = "wt")
13 |
14 | vdiffr::expect_doppelganger(
15 | "plot-slopes-y-axis-labels-1",
16 | plot(slopes)
17 | )
18 |
19 | model <- lm(mpg ~ hp * gear, data = mtcars)
20 | slopes <- estimate_slopes(model, trend = "hp", by = "gear")
21 |
22 | vdiffr::expect_doppelganger(
23 | "plot-slopes-y-axis-labels-2",
24 | plot(slopes)
25 | )
26 | })
27 |
--------------------------------------------------------------------------------
/tests/testthat/test-predict-dpar.R:
--------------------------------------------------------------------------------
1 | skip_on_cran()
2 |
3 | test_that("estimate_means and estimate_relation - dpar", {
4 | skip_if_not_installed("brms")
5 | skip_if_not_installed("marginaleffects")
6 | skip_if_not_installed("datawizard")
7 | skip_if_not_installed("httr2")
8 | skip_if_not_installed("collapse")
9 |
10 | m <- suppressWarnings(insight::download_model("brms_sigma_2"))
11 | skip_if(is.null(m))
12 |
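  |   # predict = "sigma" should target the distributional (sigma) parameter (dpar) of the model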
13 | out1 <- marginaleffects::predictions(
14 | m,
15 | newdata = insight::get_datagrid(m, c("Condition", "Participant")),
16 | by = c("Condition", "Participant"),
17 | dpar = "sigma"
18 | )
19 | out2 <- estimate_means(
20 | m,
21 | by = c("Condition", "Participant"),
22 | predict = "sigma",
23 | backend = "marginaleffects"
24 | )
25 | expect_equal(out1$estimate, out2$Sigma, tolerance = 1e-4)
26 |
27 | out1 <- estimate_relation(
28 | m,
29 | by = c("Condition", "Participant"),
30 | predict = "sigma"
31 | )
32 | out1 <- datawizard::data_arrange(out1, "Condition")
33 | dg <- insight::get_datagrid(m, c("Condition", "Participant"))
34 | out2 <- cbind(
35 | dg,
36 | data.frame(
37 | predicted = colMeans(brms::posterior_epred(
38 | m,
39 | newdata = dg,
40 | dpar = "sigma"
41 | ))
42 | )
43 | )
44 | out2 <- datawizard::data_arrange(out2, "Condition")
45 | expect_equal(out1$Predicted, out2$predicted, tolerance = 1e-4)
46 | })
47 |
--------------------------------------------------------------------------------
/tests/testthat/test-scoping_issues.R:
--------------------------------------------------------------------------------
1 | skip_on_os(c("mac", "linux"))
2 | skip_if_not_installed("marginaleffects")
3 | skip_if_not_installed("withr")
4 |
5 | withr::with_environment(
6 | new.env(),
7 | test_that("scoping issues", {
8 | data(iris)
9 | model <- lm(Sepal.Width ~ Species, data = iris)
10 |
11 | out1 <- estimate_contrasts(model, backend = "marginaleffects")
12 |
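  |     # a local `contrast` variable must not mask the `contrast` argument of estimate_contrasts()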
13 | contrast <- NULL
14 | out2 <- estimate_contrasts(model, backend = "marginaleffects")
15 |
16 |     expect_equal(out1$Difference, out2$Difference, tolerance = 1e-4)
17 | })
18 | )
19 |
--------------------------------------------------------------------------------
/tests/testthat/test-signal.R:
--------------------------------------------------------------------------------
1 | test_that("signal", {
2 | set.seed(333)
3 |
4 | x <- sin(seq(0, 4 * pi, length.out = 100)) + rnorm(100, 0, 0.2)
5 | s1 <- as.vector(smoothing(x, method = "loess"))
6 | s2 <- as.vector(smoothing(x, method = "smooth"))
7 |
8 | expect_true(as.numeric(datawizard::smoothness(s1)) > as.numeric(datawizard::smoothness(s2)))
9 | })
10 |
--------------------------------------------------------------------------------
/tests/testthat/test-standardize.R:
--------------------------------------------------------------------------------
1 | skip_on_cran()
2 | skip_if_not_installed("emmeans")
3 | skip_if_not_installed("marginaleffects")
4 |
5 | test_that("standardize() - estimate_means()", {
6 | data(mtcars)
7 |
8 | dat <- mtcars
9 | dat$gear <- as.factor(dat$gear)
10 | dat$cyl <- as.factor(dat$cyl)
11 |
12 | # Simple
13 | model <- lm(mpg ~ cyl, data = dat)
14 | estim <- estimate_means(model, "cyl", backend = "marginaleffects")
15 | out1 <- standardize(estim)
16 | out2 <- unstandardize(out1)
17 | expect_equal(as.vector(out1$Mean), c(1.0906, -0.0577, -0.82805), tolerance = 1e-4)
18 | expect_equal(as.vector(out2$Mean), estim$Mean, tolerance = 1e-4)
19 | })
20 |
21 |
22 | test_that("standardize() - estimate_predicted", {
23 | data(mtcars)
24 |
25 | dat <- mtcars
26 | dat$gear <- as.factor(dat$gear)
27 | dat$cyl <- as.factor(dat$cyl)
28 |
29 | # Simple
30 | model <- lm(mpg ~ cyl, data = dat)
31 | estim <- estimate_relation(model, by = "cyl")
32 | out1 <- standardize(estim)
33 | out2 <- unstandardize(out1)
34 | expect_equal(as.vector(out1$Predicted), c(1.0906, -0.0577, -0.82805), tolerance = 1e-4)
35 | expect_equal(as.vector(out2$Predicted), estim$Predicted, tolerance = 1e-4)
36 | })
37 |
38 |
39 | test_that("standardize() - estimate_contrasts()", {
40 | data(mtcars)
41 |
42 | dat <- mtcars
43 | dat$gear <- as.factor(dat$gear)
44 | dat$cyl <- as.factor(dat$cyl)
45 |
46 | # Simple
47 | model <- lm(mpg ~ cyl, data = dat)
48 | estim <- estimate_contrasts(model, "cyl", backend = "marginaleffects")
49 | out1 <- standardize(estim)
50 | out2 <- unstandardize(out1)
51 | expect_equal(as.vector(out1$Difference), c(-1.14831, -1.91866, -0.77035), tolerance = 1e-4)
52 | expect_equal(as.vector(out2$Difference), estim$Difference, tolerance = 1e-4)
53 | })
54 |
--------------------------------------------------------------------------------
/tests/testthat/test-summary_estimate_slopes.R:
--------------------------------------------------------------------------------
1 | skip_if_not_installed("marginaleffects")
2 | skip_if_not_installed("mgcv")
3 | skip_on_cran()
4 |
5 | test_that("summary.estimate_slopes", {
6 | set.seed(333)
7 | model <- mgcv::gam(Sepal.Width ~ s(Petal.Length, by = Species), data = iris)
8 | slopes <- estimate_slopes(model,
9 | trend = "Petal.Length",
10 | by = c("Petal.Length", "Species"), length = 20
11 | )
12 | expect_snapshot(summary(slopes))
13 |
14 | set.seed(333)
15 | model <- mgcv::gam(Sepal.Width ~ s(Petal.Length, by = Species), data = iris)
16 | slopes <- estimate_slopes(model,
17 | trend = "Petal.Length",
18 | by = c("Petal.Length", "Species"), length = 100
19 | )
20 | expect_snapshot(summary(slopes))
21 | })
22 |
--------------------------------------------------------------------------------
/tests/testthat/test-table_footer.R:
--------------------------------------------------------------------------------
1 | skip_if_not_installed("marginaleffects")
2 | skip_if_not_installed("emmeans")
3 |
4 | test_that("table_footer", {
5 | data(efc, package = "modelbased")
6 | efc <- datawizard::to_factor(efc, c("c161sex", "c172code", "e16sex", "e42dep"))
7 | m <- lm(neg_c_7 ~ c12hour + barthtot + c161sex + e42dep + c172code, data = efc)
8 | out <- utils::capture.output(estimate_means(m, "c172code", estimate = "specific", verbose = FALSE))
9 | expect_identical(
10 | out[11],
11 | "Predictors controlled: c12hour (42), barthtot (65), c161sex (Male), e42dep (independent)"
12 | )
13 | out <- utils::capture.output(estimate_means(m, "c172code", estimate = "typical", verbose = FALSE))
14 | expect_identical(
15 | out[11],
16 | "Predictors averaged: c12hour (42), barthtot (65), c161sex, e42dep"
17 | )
18 | out <- utils::capture.output(estimate_means(m, "c172code", estimate = "average", verbose = FALSE))
19 | expect_length(out, 10)
20 | })
21 |
--------------------------------------------------------------------------------
/tests/testthat/test-transform_response.R:
--------------------------------------------------------------------------------
1 | skip_if_not_installed("marginaleffects")
2 |
3 | test_that("estimate_means, transform", {
4 | data(cars)
5 | m <- lm(log(dist) ~ speed, data = cars)
6 | out <- estimate_means(m, "speed")
7 | expect_named(out, c("speed", "Mean", "SE", "CI_low", "CI_high", "t", "df"))
8 | expect_equal(
9 | out$Mean,
10 | c(
11 | 2.15918, 2.44097, 2.72276, 3.00454, 3.28633, 3.56811, 3.8499,
12 | 4.13168, 4.41347, 4.69525
13 | ),
14 | tolerance = 1e-4
15 | )
16 | expect_snapshot(out)
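  |   # transform = TRUE should back-transform the log() response (same as transform = exp below)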
17 | out1 <- estimate_means(m, "speed", transform = TRUE)
18 | expect_named(out1, c("speed", "Mean", "CI_low", "CI_high", "df"))
19 | expect_equal(
20 | out1$Mean,
21 | c(
22 | 8.66407, 11.48417, 15.2222, 20.17694, 26.74442, 35.44958, 46.98822,
23 | 62.28261, 82.55525, 109.42651
24 | ),
25 | tolerance = 1e-4
26 | )
27 | expect_snapshot(out1)
28 | out2 <- estimate_means(m, "speed", transform = exp)
29 | expect_equal(out1$Mean, out2$Mean, tolerance = 1e-4)
30 | })
31 |
32 |
33 | test_that("estimate_expectation, transform", {
34 | data(cars)
35 | m <- lm(log(dist) ~ speed, data = cars)
36 | out <- estimate_expectation(m, by = "speed")
37 | expect_named(out, c("speed", "Predicted", "SE", "CI_low", "CI_high"))
38 | expect_equal(
39 | out$Predicted,
40 | c(
41 | 2.15918, 2.44097, 2.72276, 3.00454, 3.28633, 3.56811, 3.8499,
42 | 4.13168, 4.41347, 4.69525
43 | ),
44 | tolerance = 1e-4
45 | )
46 | out1 <- estimate_expectation(m, by = "speed", transform = TRUE)
47 | expect_named(out1, c("speed", "Predicted", "CI_low", "CI_high"))
48 | expect_equal(
49 | out1$Predicted,
50 | c(
51 | 8.66407, 11.48417, 15.2222, 20.17694, 26.74442, 35.44958, 46.98822,
52 | 62.28261, 82.55525, 109.42651
53 | ),
54 | tolerance = 1e-4
55 | )
56 | out2 <- estimate_expectation(m, by = "speed", transform = exp)
57 | expect_equal(out1$Predicted, out2$Predicted, tolerance = 1e-4)
58 | })
59 |
60 |
61 | test_that("estimate_slopes, transform", {
62 | data(iris)
63 | mod <- lm(log(Sepal.Length) ~ Sepal.Width * Species, data = iris)
64 |
65 | out <- estimate_slopes(mod, trend = "Sepal.Width", by = "Species")
66 | expect_identical(dim(out), c(3L, 8L))
67 | expect_equal(out$Slope, c(0.13752, 0.14779, 0.13957), tolerance = 1e-3)
68 |
69 | out <- estimate_contrasts(mod, "Sepal.Width", by = "Species")
70 | expect_identical(dim(out), c(3L, 9L))
71 | expect_equal(out$Difference, c(0.01027, 0.00205, -0.00822), tolerance = 1e-3)
72 |
73 | out <- estimate_slopes(mod, trend = "Sepal.Width", by = "Species", transform = TRUE)
74 | expect_identical(dim(out), c(3L, 7L))
75 | expect_equal(out$Slope, c(1.14743, 1.15927, 1.14978), tolerance = 1e-3)
76 |
77 | out <- estimate_contrasts(mod, "Sepal.Width", by = "Species", transform = TRUE)
78 | expect_identical(dim(out), c(3L, 8L))
79 | expect_equal(out$Difference, c(1.01032, 1.00206, 0.99182), tolerance = 1e-3)
80 | })
81 |
--------------------------------------------------------------------------------
/tests/testthat/test-vcov.R:
--------------------------------------------------------------------------------
1 | skip_on_cran()
2 | skip_if_not_installed("marginaleffects")
3 | skip_if_not_installed("nanoparquet")
4 | skip_if_not_installed("sandwich")
5 | skip_on_os("mac")
6 |
7 | test_that("estimate_contrasts - vcov", {
8 | dat <- marginaleffects::get_dataset("lottery")
9 | dat <- subset(dat, win_big == 1 | win == 0)
10 | dat$win_big <- as.factor(dat$win_big)
11 |
12 | mod <- lm(
13 | earnings_post_avg ~ win_big * (
14 | tickets + man + work + age + education + college + year +
15 | earnings_pre_1 + earnings_pre_2 + earnings_pre_3),
16 | data = dat
17 | )
18 |
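  |   # `vcov = "HC3"` should yield the same robust standard errors as marginaleffects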
19 | out1 <- marginaleffects::avg_predictions(mod, variables = "win_big", by = "win_big", vcov = "HC3")
20 | out2 <- estimate_means(mod, "win_big", vcov = "HC3")
21 | expect_equal(out1$std.error, out2$SE, tolerance = 1e-4)
22 | })
23 |
--------------------------------------------------------------------------------
/tests/testthat/test-verbose.R:
--------------------------------------------------------------------------------
1 | skip_if_not_installed("marginaleffects")
2 | skip_if_not_installed("emmeans")
3 |
4 | test_that("verbose", {
5 | data(iris)
6 | model <- lm(Sepal.Width ~ Species, data = iris)
7 | expect_message(estimate_contrasts(model, backend = "emmeans"), regex = "No variable was specified")
8 | expect_silent(estimate_contrasts(model, backend = "emmeans", verbose = FALSE))
9 | expect_message(estimate_means(model, backend = "emmeans"), regex = "We selected")
10 | expect_silent(estimate_means(model, backend = "emmeans", verbose = FALSE))
11 | expect_silent(estimate_contrasts(model, backend = "marginaleffects", verbose = FALSE))
12 | expect_message(estimate_means(model, backend = "marginaleffects"), regex = "We selected")
13 | expect_silent(estimate_means(model, backend = "marginaleffects", verbose = FALSE))
14 | })
15 |
--------------------------------------------------------------------------------
/tests/testthat/test-zero_crossings.R:
--------------------------------------------------------------------------------
1 | test_that("zero_crossings", {
2 | x <- sin(seq(0, 4 * pi, length.out = 100))
3 | out <- zero_crossings(x)
4 | expect_equal(out, c(1, 25.74975, 50.5, 75.25025), tolerance = 1e-4)
5 | out <- find_inversions(x)
6 | expect_equal(out, c(12.87478, 37.62484, 62.37516, 87.12522), tolerance = 1e-4)
7 | expect_true(is.na(zero_crossings(c(1, 2))))
8 | expect_error(zero_crossings(1), regex = "is not smaller")
9 | })
10 |
--------------------------------------------------------------------------------
/vignettes/.gitignore:
--------------------------------------------------------------------------------
1 | /.quarto/
2 |
--------------------------------------------------------------------------------
/vignettes/estimate_relation.Rmd:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Visualize effects and interactions"
3 | output:
4 | rmarkdown::html_vignette:
5 | toc: true
6 | fig_width: 10.08
7 | fig_height: 6
8 | tags: [r, estimate, estimate link, estimate relation, predictions]
9 | vignette: >
10 | %\VignetteIndexEntry{Visualize effects and interactions}
11 | \usepackage[utf8]{inputenc}
12 | %\VignetteEngine{knitr::rmarkdown}
13 | editor_options:
14 | chunk_output_type: console
15 | bibliography: bibliography.bib
16 | ---
17 |
18 | ```{r message=FALSE, warning=FALSE, include=FALSE}
19 | library(knitr)
20 | options(knitr.kable.NA = "")
21 | knitr::opts_chunk$set(comment = ">", dpi = 300)
22 | options(digits = 2)
23 |
24 | if (!requireNamespace("ggplot2", quietly = TRUE) ||
25 | !requireNamespace("mgcv", quietly = TRUE) ||
26 | !requireNamespace("see", quietly = TRUE)) {
27 | knitr::opts_chunk$set(eval = FALSE)
28 | }
29 |
30 | set.seed(333)
31 | ```
32 |
33 | This vignette shows how to visualize effects and interactions using
34 | `estimate_relation()`.
35 |
36 | Note that the statistically correct name of `estimate_relation()` is `estimate_expectation()` (which can be used as an alias), because the function returns the model's *expected* predictions (read [more](https://easystats.github.io/insight/reference/get_predicted.html)).
37 |
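  | As a minimal sketch (not evaluated here), either name can be used on a fitted model; note that the two functions may differ in their default data grid, so this only illustrates the alias:
  | 
  | ```{r eval=FALSE}
  | m <- lm(Sepal.Length ~ Sepal.Width, data = iris)
  | 
  | estimate_relation(m)    # name used throughout this vignette
  | estimate_expectation(m) # the statistically correct alias
  | ```
  | 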
38 | ## Simple regression
39 |
40 | ### Linear relationship
41 |
42 | ```{r}
43 | library(modelbased)
44 |
45 | model <- lm(Sepal.Length ~ Sepal.Width, data = iris)
46 |
47 | visualization_data <- estimate_relation(model)
48 | head(visualization_data)
49 | ```
50 |
51 |
52 | ```{r}
53 | library(ggplot2)
54 | plot(visualization_data, line = list(color = "red")) +
55 | theme_minimal()
56 | ```
57 |
58 | ## More complex regressions
59 |
60 | ### Polynomial
61 |
62 | ```{r}
63 | lm(Sepal.Length ~ poly(Sepal.Width, 2), data = iris) |>
64 | modelbased::estimate_relation(length = 50) |>
65 | plot()
66 | ```
67 |
68 | ### Additive Models
69 |
70 | ```{r}
71 | library(mgcv)
72 |
73 | mgcv::gam(Sepal.Length ~ s(Sepal.Width), data = iris) |>
74 | modelbased::estimate_relation(length = 50) |>
75 | plot()
76 | ```
77 |
78 |
79 | ## References
80 |
--------------------------------------------------------------------------------
/vignettes/introduction_comparisons_4.Rmd:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Contrasts and comparisons for generalized linear models"
3 | output: rmarkdown::html_vignette
4 | vignette: >
5 | %\VignetteIndexEntry{Contrasts and comparisons for generalized linear models}
6 | %\VignetteEngine{knitr::rmarkdown}
7 | %\VignetteEncoding{UTF-8}
8 | ---
9 |
10 | ```{r set-options, echo = FALSE}
11 | knitr::opts_chunk$set(
12 | collapse = TRUE,
13 | comment = "#>",
14 | dev = "png",
15 | fig.width = 7,
16 | fig.height = 3.5,
17 | message = FALSE, warning = FALSE
18 | )
19 | options(width = 800)
20 | arrow_color <- "#FF00cc"
21 | p <- ht8 <- NULL
22 |
23 | pkgs <- c("ggplot2", "see", "marginaleffects")
24 |
25 | if (!all(vapply(pkgs, requireNamespace, quietly = TRUE, FUN.VALUE = logical(1L)))) {
26 | knitr::opts_chunk$set(eval = FALSE)
27 | }
28 | ```
29 |
30 |
31 | This vignette is the fourth in a 5-part series:
32 |
33 | 1. [**Contrasts and Pairwise Comparisons**](https://easystats.github.io/modelbased/articles/introduction_comparisons_1.html)
34 |
35 | 2. [**User Defined Contrasts and Joint Tests**](https://easystats.github.io/modelbased/articles/introduction_comparisons_2.html)
36 |
37 |
38 | 3. [**Comparisons of Slopes, Floodlight and Spotlight Analysis (Johnson-Neyman Intervals)**](https://easystats.github.io/modelbased/articles/introduction_comparisons_3.html)
39 |
40 | 4. **Contrasts and Comparisons for Generalized Linear Models**
41 |
42 | 5. [**Contrasts and Comparisons for Zero-Inflation Models**](https://easystats.github.io/modelbased/articles/introduction_comparisons_5.html)
43 |
44 |
45 | # Contrasts and comparisons for GLMs: a logistic regression example
46 |
47 | We will now show an example with non-Gaussian models. For GLMs (generalized linear models) with (non-Gaussian) link functions, `estimate_means()` by default returns predicted values on the *response* scale. For example, predicted values for logistic regression models are shown as *probabilities*.
48 |
49 | Let's look at a simple example.
50 |
51 | ```{r}
52 | library(modelbased)
53 | set.seed(1234)
54 | dat <- data.frame(
55 | outcome = rbinom(n = 100, size = 1, prob = 0.35),
56 | x1 = as.factor(sample(1:3, size = 100, TRUE, prob = c(0.5, 0.2, 0.3))),
57 | x2 = rnorm(n = 100, mean = 10, sd = 7),
58 | x3 = as.factor(sample(1:4, size = 100, TRUE, prob = c(0.1, 0.4, 0.2, 0.3)))
59 | )
60 |
61 | m <- glm(outcome ~ x1 + x2 + x3, data = dat, family = binomial())
62 | estimate_means(m, "x1")
63 | ```
64 |
65 | ## Contrasts and comparisons for categorical focal terms
66 |
67 | Contrasts or comparisons, like predictions (see above), are by default on the *response* scale, i.e. they are represented as differences between probabilities (in percentage points).
68 |
69 | ```{r message=TRUE}
70 | estimate_contrasts(m, "x1")
71 | ```
72 |
73 | ```{r echo=FALSE}
74 | p <- estimate_means(m, "x1")
75 | ht8 <- estimate_contrasts(m, "x1")
76 | ```
77 |
78 | The difference between the predicted probability of `x1 = 1` (21.2%) and `x1 = 2` (13.9%) is roughly 7.3 percentage points. This difference is not statistically significant (p = 0.417).
79 |
80 | Contrasts or comparisons can also be represented on the link scale, in this case as _log-odds_. To do so, use `predict = "link"`.
81 |
82 | ```{r message=TRUE}
83 | estimate_contrasts(m, "x1", predict = "link")
84 | ```
85 |
86 | The `transform` argument in `estimate_contrasts()` can be used to transform comparisons. For example, to transform contrasts to _odds ratios_, we can use `transform = exp` in combination with `predict = "link"`.
87 |
88 | ```{r message=TRUE}
89 | estimate_contrasts(m, "x1", predict = "link", transform = exp)
90 | ```
91 |
92 | [Go to next vignette: **Contrasts and Comparisons for Zero-Inflation Models**](https://easystats.github.io/modelbased/articles/introduction_comparisons_5.html)
93 |
--------------------------------------------------------------------------------
/vignettes/overview_of_vignettes.Rmd:
--------------------------------------------------------------------------------
1 | ---
2 | title: "Overview of Vignettes"
3 | output:
4 | rmarkdown::html_vignette:
5 | vignette: >
6 | %\VignetteIndexEntry{Overview of Vignettes}
7 | \usepackage[utf8]{inputenc}
8 | %\VignetteEngine{knitr::rmarkdown}
9 | editor_options:
10 | chunk_output_type: console
11 | ---
12 |
13 | ```{r message=FALSE, warning=FALSE, include=FALSE}
14 | library(knitr)
15 | knitr::opts_chunk$set(
16 | echo = TRUE,
17 | collapse = TRUE,
18 | warning = FALSE,
19 | message = FALSE,
20 | comment = "#>",
21 | eval = TRUE
22 | )
23 | ```
24 |
25 | All package vignettes are available at [https://easystats.github.io/modelbased/](https://easystats.github.io/modelbased/).
26 |
27 | ## Function Overview
28 |
29 | * [Function Documentation](https://easystats.github.io/modelbased/reference/index.html)
30 |
31 | ## Introductions
32 |
33 | ### Basics
34 |
35 | * [Data grids](https://easystats.github.io/modelbased/articles/visualisation_matrix.html)
36 | * [What are, why use and how to get marginal means](https://easystats.github.io/modelbased/articles/estimate_means.html)
37 | * [Contrast analysis](https://easystats.github.io/modelbased/articles/estimate_contrasts.html)
38 | * [Marginal effects and derivatives](https://easystats.github.io/modelbased/articles/estimate_slopes.html)
39 | * [Mixed effects models](https://easystats.github.io/modelbased/articles/mixed_models.html)
40 |
41 | ### Interpretation
42 |
43 | * [Use a model to make predictions](https://easystats.github.io/modelbased/articles/estimate_response.html)
44 | * [Interpret simple and complex models using the power of Effect Derivatives](https://easystats.github.io/modelbased/articles/derivatives.html)
45 | * [How to use Mixed models to Estimate Individuals' Scores](https://easystats.github.io/modelbased/articles/estimate_grouplevel.html)
46 |
47 | ### Visualization
48 |
49 | * [Plotting estimated marginal means](https://easystats.github.io/modelbased/articles/plotting.html)
50 | * [Visualize effects and interactions](https://easystats.github.io/modelbased/articles/estimate_relation.html)
51 | * [The Modelisation Approach to Statistics](https://easystats.github.io/modelbased/articles/modelisation_approach.html)
52 |
53 |
54 | ## Case Studies
55 |
56 | ### Workflows
57 |
58 | * [Understanding your models](https://easystats.github.io/modelbased/articles/workflow_modelbased.html)
59 | * [Causal inference for observational data](https://easystats.github.io/modelbased/articles/practical_causality.html)
60 | * [Intersectionality analysis using the MAIHDA framework](https://easystats.github.io/modelbased/articles/practical_intersectionality.html)
61 |
62 | ### Contrasts
63 |
64 | * [Contrasts and pairwise comparisons](https://easystats.github.io/modelbased/articles/introduction_comparisons_1.html)
65 | * [User Defined Contrasts and Joint Tests](https://easystats.github.io/modelbased/articles/introduction_comparisons_2.html)
66 | * [Slopes, floodlight and spotlight analysis (Johnson-Neyman intervals)](https://easystats.github.io/modelbased/articles/introduction_comparisons_3.html)
67 | * [Contrasts and comparisons for generalized linear models](https://easystats.github.io/modelbased/articles/introduction_comparisons_4.html)
68 | * [Contrasts and comparisons for zero-inflation models](https://easystats.github.io/modelbased/articles/introduction_comparisons_5.html)
69 |
--------------------------------------------------------------------------------