├── .Rbuildignore ├── .gitattributes ├── .github ├── .gitignore ├── CODE_OF_CONDUCT.md └── workflows │ ├── R-CMD-check.yaml │ ├── check-hard.yaml │ ├── lock.yaml │ ├── pkgdown.yaml │ ├── pr-commands.yaml │ └── test-coverage.yaml ├── .gitignore ├── .vscode ├── extensions.json └── settings.json ├── CONTRIBUTING.md ├── DESCRIPTION ├── LICENSE ├── LICENSE.md ├── NAMESPACE ├── NEWS.md ├── R ├── assume.R ├── calculate.R ├── deprecated.R ├── fit.R ├── generate.R ├── get_confidence_interval.R ├── get_p_value.R ├── gss.R ├── hypothesize.R ├── infer.R ├── observe.R ├── pipe.R ├── print_methods.R ├── rep_sample_n.R ├── set_params.R ├── shade_confidence_interval.R ├── shade_p_value.R ├── specify.R ├── utils.R ├── visualize.R └── wrappers.R ├── README.Rmd ├── README.md ├── README_files └── figure-gfm │ └── viz-1.png ├── _pkgdown.yml ├── air.toml ├── codecov.yml ├── cran-comments.md ├── data-raw └── save_gss.R ├── data └── gss.rda ├── figs ├── ht-diagram.png ├── infer.svg ├── infer_gnome.png ├── master.svg ├── paper │ ├── apa.csl │ ├── columns.tex │ ├── paper.Rmd │ ├── paper.bib │ ├── paper.log │ ├── paper.md │ └── paper.pdf └── rethinking-inference.key ├── infer.Rproj ├── inst └── CITATION ├── man-roxygen └── seeds.Rmd ├── man ├── assume.Rd ├── calculate.Rd ├── chisq_stat.Rd ├── chisq_test.Rd ├── deprecated.Rd ├── figures │ ├── lifecycle-archived.svg │ ├── lifecycle-defunct.svg │ ├── lifecycle-deprecated.svg │ ├── lifecycle-experimental.svg │ ├── lifecycle-maturing.svg │ ├── lifecycle-questioning.svg │ ├── lifecycle-soft-deprecated.svg │ ├── lifecycle-stable.svg │ ├── lifecycle-superseded.svg │ └── logo.png ├── fit.infer.Rd ├── generate.Rd ├── get_confidence_interval.Rd ├── get_p_value.Rd ├── gss.Rd ├── hypothesize.Rd ├── infer.Rd ├── observe.Rd ├── pipe.Rd ├── print.infer.Rd ├── prop_test.Rd ├── reexports.Rd ├── rep_sample_n.Rd ├── shade_confidence_interval.Rd ├── shade_p_value.Rd ├── specify.Rd ├── t_stat.Rd ├── t_test.Rd └── visualize.Rd ├── pkgdown └── favicon │ ├── apple-touch-icon-120x120.png │ ├── apple-touch-icon-152x152.png │ ├── apple-touch-icon-180x180.png │ ├── apple-touch-icon-60x60.png │ ├── apple-touch-icon-76x76.png │ ├── apple-touch-icon.png │ ├── favicon-16x16.png │ ├── favicon-32x32.png │ └── favicon.ico ├── tests ├── testthat.R └── testthat │ ├── _snaps │ ├── aliases.md │ ├── assume.md │ ├── calculate.md │ ├── fit.md │ ├── generate.md │ ├── get_confidence_interval.md │ ├── get_p_value.md │ ├── hypothesize.md │ ├── observe.md │ ├── print.md │ ├── rep_sample_n.md │ ├── shade_confidence_interval.md │ ├── shade_confidence_interval │ │ ├── ci-both-fill.svg │ │ ├── ci-both-nofill.svg │ │ ├── ci-extra-aes-1.svg │ │ ├── ci-extra-aes-2.svg │ │ ├── ci-null-endpoints.svg │ │ ├── ci-sim-fill.svg │ │ ├── ci-sim-nofill.svg │ │ ├── ci-theor-fill.svg │ │ └── ci-theor-nofill.svg │ ├── shade_p_value.md │ ├── shade_p_value │ │ ├── pval-both-both.svg │ │ ├── pval-both-corrupt.svg │ │ ├── pval-both-left.svg │ │ ├── pval-both-null.svg │ │ ├── pval-both-right.svg │ │ ├── pval-direction-both.svg │ │ ├── pval-direction-left.svg │ │ ├── pval-direction-right.svg │ │ ├── pval-extra-aes-1.svg │ │ ├── pval-extra-aes-2.svg │ │ ├── pval-extra-aes-3.svg │ │ ├── pval-null-obs-stat.svg │ │ ├── pval-sim-both.svg │ │ ├── pval-sim-corrupt.svg │ │ ├── pval-sim-left.svg │ │ ├── pval-sim-null.svg │ │ ├── pval-sim-right.svg │ │ ├── pval-stat-match.svg │ │ ├── pval-theor-both.svg │ │ ├── pval-theor-corrupt.svg │ │ ├── pval-theor-left.svg │ │ ├── pval-theor-null.svg │ │ ├── pval-theor-right.svg │ │ └── zero-area-shade.svg │ 
├── specify.md │ ├── utils.md │ ├── visualize.md │ ├── visualize │ │ ├── ci-vis.svg │ │ ├── df-obs-stat-1.svg │ │ ├── df-obs-stat-2.svg │ │ ├── method-both.svg │ │ ├── vis-both-both-1.svg │ │ ├── vis-both-both-2.svg │ │ ├── vis-both-left-1.svg │ │ ├── vis-both-left-2.svg │ │ ├── vis-both-none-1.svg │ │ ├── vis-both-none-2.svg │ │ ├── vis-both-right-1.svg │ │ ├── vis-both-right-2.svg │ │ ├── vis-no-hypothesize-both.svg │ │ ├── vis-no-hypothesize-sim.svg │ │ ├── vis-sim-both-1.svg │ │ ├── vis-sim-both-2.svg │ │ ├── vis-sim-left-1.svg │ │ ├── vis-sim-none-1.svg │ │ ├── vis-sim-right-1.svg │ │ ├── vis-theor-both-1.svg │ │ ├── vis-theor-both-2.svg │ │ ├── vis-theor-left-1.svg │ │ ├── vis-theor-none-1.svg │ │ ├── vis-theor-none-2.svg │ │ ├── vis-theor-none-3.svg │ │ ├── vis-theor-none-4.svg │ │ ├── vis-theor-right-1.svg │ │ ├── visualise.svg │ │ ├── visualize.svg │ │ ├── viz-assume-2t-ci.svg │ │ ├── viz-assume-2t-p-val-both.svg │ │ ├── viz-assume-2t-p-val-left.svg │ │ ├── viz-assume-2t-p-val-right.svg │ │ ├── viz-assume-2t.svg │ │ ├── viz-assume-2z-ci.svg │ │ ├── viz-assume-2z-p-val-both.svg │ │ ├── viz-assume-2z-p-val-left.svg │ │ ├── viz-assume-2z-p-val-right.svg │ │ ├── viz-assume-2z.svg │ │ ├── viz-assume-f-p-val.svg │ │ ├── viz-assume-f.svg │ │ ├── viz-assume-t-both.svg │ │ ├── viz-assume-t-ci.svg │ │ ├── viz-assume-t-p-val-both.svg │ │ ├── viz-assume-t-p-val-left.svg │ │ ├── viz-assume-t-p-val-right.svg │ │ ├── viz-assume-t-sim.svg │ │ ├── viz-assume-t.svg │ │ ├── viz-assume-z-ci.svg │ │ ├── viz-assume-z-p-val-both.svg │ │ ├── viz-assume-z-p-val-left.svg │ │ ├── viz-assume-z-p-val-right.svg │ │ ├── viz-assume-z.svg │ │ ├── viz-fit-bare.svg │ │ ├── viz-fit-conf-int.svg │ │ ├── viz-fit-no-h0.svg │ │ ├── viz-fit-p-val-both.svg │ │ ├── viz-fit-p-val-left.svg │ │ └── viz-fit-p-val-right.svg │ └── wrappers.md │ ├── helper-data.R │ ├── test-aliases.R │ ├── test-assume.R │ ├── test-calculate.R │ ├── test-fit.R │ ├── test-generate.R │ ├── test-get_confidence_interval.R │ ├── test-get_p_value.R │ ├── test-hypothesize.R │ ├── test-observe.R │ ├── test-print.R │ ├── test-rep_sample_n.R │ ├── test-shade_confidence_interval.R │ ├── test-shade_p_value.R │ ├── test-specify.R │ ├── test-utils.R │ ├── test-visualize.R │ └── test-wrappers.R └── vignettes ├── anova.Rmd ├── chi_squared.Rmd ├── infer.Rmd ├── infer_cache └── html │ ├── __packages │ ├── calculate-point_94c073b633c3cf7bef3252dcad544ee2.RData │ ├── calculate-point_94c073b633c3cf7bef3252dcad544ee2.rdb │ ├── calculate-point_94c073b633c3cf7bef3252dcad544ee2.rdx │ ├── generate-permute_21b25928d642a97a30057306d51f1b23.RData │ ├── generate-permute_21b25928d642a97a30057306d51f1b23.rdb │ ├── generate-permute_21b25928d642a97a30057306d51f1b23.rdx │ ├── generate-point_d562524427be20dbb4736ca1ea29b04b.RData │ ├── generate-point_d562524427be20dbb4736ca1ea29b04b.rdb │ ├── generate-point_d562524427be20dbb4736ca1ea29b04b.rdx │ ├── hypothesize-40-hr-week_c8e33c404efa90c2ca0b2eacad95b06c.RData │ ├── hypothesize-40-hr-week_c8e33c404efa90c2ca0b2eacad95b06c.rdb │ ├── hypothesize-40-hr-week_c8e33c404efa90c2ca0b2eacad95b06c.rdx │ ├── hypothesize-independence_fe1c79b9f1dc0df488828fdd34c8145f.RData │ ├── hypothesize-independence_fe1c79b9f1dc0df488828fdd34c8145f.rdb │ ├── hypothesize-independence_fe1c79b9f1dc0df488828fdd34c8145f.rdx │ ├── specify-diff-in-means_e4103c4c3e3daedd5c1429b7a1bc8727.RData │ ├── specify-diff-in-means_e4103c4c3e3daedd5c1429b7a1bc8727.rdb │ ├── specify-diff-in-means_e4103c4c3e3daedd5c1429b7a1bc8727.rdx │ ├── 
specify-example_3ea3cfa390233b127dc25b05b0354bcf.RData │ ├── specify-example_3ea3cfa390233b127dc25b05b0354bcf.rdb │ ├── specify-example_3ea3cfa390233b127dc25b05b0354bcf.rdx │ ├── specify-one_149be66261b0606b7ddb80efd10fa81d.RData │ ├── specify-one_149be66261b0606b7ddb80efd10fa81d.rdb │ ├── specify-one_149be66261b0606b7ddb80efd10fa81d.rdx │ ├── specify-success_e8eb15e9f621ccf60cb6527a6bccdb4b.RData │ ├── specify-success_e8eb15e9f621ccf60cb6527a6bccdb4b.rdb │ ├── specify-success_e8eb15e9f621ccf60cb6527a6bccdb4b.rdx │ ├── specify-two_20085531c110a936ee691162f225333b.RData │ ├── specify-two_20085531c110a936ee691162f225333b.rdb │ └── specify-two_20085531c110a936ee691162f225333b.rdx ├── observed_stat_examples.Rmd ├── paired.Rmd └── t_test.Rmd /.Rbuildignore: -------------------------------------------------------------------------------- 1 | ^CRAN-RELEASE$ 2 | ^.*\.Rproj$ 3 | ^\.Rproj\.user$ 4 | ^README\.Rmd$ 5 | ^figs$ 6 | ^profiles* 7 | ^examples* 8 | ^codecov\.yml$ 9 | ^docs* 10 | ^CONDUCT\.md$ 11 | ^cran-comments\.md$ 12 | ^_build\.sh$ 13 | ^appveyor\.yml$ 14 | ^\.implement_new_methods\.md 15 | ^CONTRIBUTING\.md$ 16 | ^TO-DO\.md$ 17 | ^\.httr-oauth$ 18 | ^_pkgdown.yml 19 | ^_pkgdown\.yml$ 20 | ^docs$ 21 | ^data-raw* 22 | ^doc$ 23 | ^Meta$ 24 | README_files/ 25 | ^pkgdown$ 26 | ^\.github$ 27 | ^LICENSE\.md$ 28 | ^man-roxygen$ 29 | ^[\.]?air\.toml$ 30 | ^\.vscode$ 31 | -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | * text=auto 2 | data/* binary 3 | src/* text=lf 4 | R/* text=lf -------------------------------------------------------------------------------- /.github/.gitignore: -------------------------------------------------------------------------------- 1 | *.html 2 | -------------------------------------------------------------------------------- /.github/CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, caste, color, religion, or sexual 10 | identity and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 
14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the overall 26 | community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or advances of 31 | any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email address, 35 | without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at codeofconduct@posit.co. 63 | All complaints will be reviewed and investigated promptly and fairly. 64 | 65 | All community leaders are obligated to respect the privacy and security of the 66 | reporter of any incident. 67 | 68 | ## Enforcement Guidelines 69 | 70 | Community leaders will follow these Community Impact Guidelines in determining 71 | the consequences for any action they deem in violation of this Code of Conduct: 72 | 73 | ### 1. Correction 74 | 75 | **Community Impact**: Use of inappropriate language or other behavior deemed 76 | unprofessional or unwelcome in the community. 77 | 78 | **Consequence**: A private, written warning from community leaders, providing 79 | clarity around the nature of the violation and an explanation of why the 80 | behavior was inappropriate. A public apology may be requested. 81 | 82 | ### 2. Warning 83 | 84 | **Community Impact**: A violation through a single incident or series of 85 | actions. 86 | 87 | **Consequence**: A warning with consequences for continued behavior. No 88 | interaction with the people involved, including unsolicited interaction with 89 | those enforcing the Code of Conduct, for a specified period of time. This 90 | includes avoiding interactions in community spaces as well as external channels 91 | like social media. 
Violating these terms may lead to a temporary or permanent 92 | ban. 93 | 94 | ### 3. Temporary Ban 95 | 96 | **Community Impact**: A serious violation of community standards, including 97 | sustained inappropriate behavior. 98 | 99 | **Consequence**: A temporary ban from any sort of interaction or public 100 | communication with the community for a specified period of time. No public or 101 | private interaction with the people involved, including unsolicited interaction 102 | with those enforcing the Code of Conduct, is allowed during this period. 103 | Violating these terms may lead to a permanent ban. 104 | 105 | ### 4. Permanent Ban 106 | 107 | **Community Impact**: Demonstrating a pattern of violation of community 108 | standards, including sustained inappropriate behavior, harassment of an 109 | individual, or aggression toward or disparagement of classes of individuals. 110 | 111 | **Consequence**: A permanent ban from any sort of public interaction within the 112 | community. 113 | 114 | ## Attribution 115 | 116 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 117 | version 2.1, available at 118 | . 119 | 120 | Community Impact Guidelines were inspired by 121 | [Mozilla's code of conduct enforcement ladder][https://github.com/mozilla/inclusion]. 122 | 123 | For answers to common questions about this code of conduct, see the FAQ at 124 | . Translations are available at . 125 | 126 | [homepage]: https://www.contributor-covenant.org 127 | -------------------------------------------------------------------------------- /.github/workflows/R-CMD-check.yaml: -------------------------------------------------------------------------------- 1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples 2 | # Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help 3 | # 4 | # NOTE: This workflow is overkill for most R packages and 5 | # check-standard.yaml is likely a better choice. 6 | # usethis::use_github_action("check-standard") will install it. 
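# For a quick local approximation of this check (a sketch, not part of the
# original workflow; assumes the rcmdcheck package is installed), run from
# the package root in R:
#
#   rcmdcheck::rcmdcheck(args = c("--no-manual", "--as-cran"))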
7 | on: 8 | push: 9 | branches: [main, master] 10 | pull_request: 11 | 12 | name: R-CMD-check.yaml 13 | 14 | permissions: read-all 15 | 16 | jobs: 17 | R-CMD-check: 18 | runs-on: ${{ matrix.config.os }} 19 | 20 | name: ${{ matrix.config.os }} (${{ matrix.config.r }}) 21 | 22 | strategy: 23 | fail-fast: false 24 | matrix: 25 | config: 26 | - {os: macos-latest, r: 'release'} 27 | 28 | - {os: windows-latest, r: 'release'} 29 | # use 4.0 or 4.1 to check with rtools40's older compiler 30 | - {os: windows-latest, r: 'oldrel-4'} 31 | 32 | - {os: ubuntu-latest, r: 'devel', http-user-agent: 'release'} 33 | - {os: ubuntu-latest, r: 'release'} 34 | - {os: ubuntu-latest, r: 'oldrel-1'} 35 | - {os: ubuntu-latest, r: 'oldrel-2'} 36 | - {os: ubuntu-latest, r: 'oldrel-3'} 37 | - {os: ubuntu-latest, r: 'oldrel-4'} 38 | 39 | env: 40 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 41 | R_KEEP_PKG_SOURCE: yes 42 | 43 | steps: 44 | - uses: actions/checkout@v4 45 | 46 | - uses: r-lib/actions/setup-pandoc@v2 47 | 48 | - uses: r-lib/actions/setup-r@v2 49 | with: 50 | r-version: ${{ matrix.config.r }} 51 | http-user-agent: ${{ matrix.config.http-user-agent }} 52 | use-public-rspm: true 53 | 54 | - uses: r-lib/actions/setup-r-dependencies@v2 55 | with: 56 | extra-packages: any::rcmdcheck 57 | needs: check 58 | 59 | - uses: r-lib/actions/check-r-package@v2 60 | with: 61 | upload-snapshots: true 62 | build_args: 'c("--no-manual","--compact-vignettes=gs+qpdf")' 63 | -------------------------------------------------------------------------------- /.github/workflows/check-hard.yaml: -------------------------------------------------------------------------------- 1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples 2 | # Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help 3 | # 4 | # NOTE: This workflow only directly installs "hard" dependencies, i.e. Depends, 5 | # Imports, and LinkingTo dependencies. Notably, Suggests dependencies are never 6 | # installed, with the exception of testthat, knitr, and rmarkdown. The cache is 7 | # never used to avoid accidentally restoring a cache containing a suggested 8 | # dependency. 
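# A rough local analogue of this hard-dependencies-only job, as a sketch
# (assumes the remotes and rcmdcheck packages are available; not part of the
# original workflow):
#
#   remotes::install_deps(dependencies = c("Depends", "Imports", "LinkingTo"))
#   rcmdcheck::rcmdcheck()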
9 | on: 10 | push: 11 | branches: [main] 12 | pull_request: 13 | branches: [main] 14 | 15 | name: R-CMD-check-hard 16 | 17 | jobs: 18 | R-CMD-check: 19 | runs-on: ${{ matrix.config.os }} 20 | 21 | name: ${{ matrix.config.os }} (${{ matrix.config.r }}) 22 | 23 | strategy: 24 | fail-fast: false 25 | matrix: 26 | config: 27 | - {os: ubuntu-latest, r: 'release'} 28 | 29 | env: 30 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 31 | R_KEEP_PKG_SOURCE: yes 32 | 33 | steps: 34 | - uses: actions/checkout@v2 35 | 36 | - uses: r-lib/actions/setup-pandoc@v2 37 | 38 | - uses: r-lib/actions/setup-r@v2 39 | with: 40 | r-version: ${{ matrix.config.r }} 41 | http-user-agent: ${{ matrix.config.http-user-agent }} 42 | use-public-rspm: true 43 | 44 | - uses: r-lib/actions/setup-r-dependencies@v2 45 | with: 46 | dependencies: '"hard"' 47 | cache: false 48 | extra-packages: | 49 | any::rcmdcheck 50 | any::testthat 51 | any::knitr 52 | any::rmarkdown 53 | needs: check 54 | 55 | - uses: r-lib/actions/check-r-package@v2 56 | with: 57 | upload-snapshots: true -------------------------------------------------------------------------------- /.github/workflows/lock.yaml: -------------------------------------------------------------------------------- 1 | name: 'Lock Threads' 2 | 3 | on: 4 | schedule: 5 | - cron: '0 0 * * *' 6 | 7 | jobs: 8 | lock: 9 | runs-on: ubuntu-latest 10 | steps: 11 | - uses: dessant/lock-threads@v2 12 | with: 13 | github-token: ${{ github.token }} 14 | issue-lock-inactive-days: '14' 15 | # issue-exclude-labels: '' 16 | # issue-lock-labels: 'outdated' 17 | issue-lock-comment: > 18 | This issue has been automatically locked. If you believe you have 19 | found a related problem, please file a new issue (with a reprex: 20 | ) and link to this issue. 21 | issue-lock-reason: '' 22 | pr-lock-inactive-days: '14' 23 | # pr-exclude-labels: 'wip' 24 | pr-lock-labels: '' 25 | pr-lock-comment: > 26 | This pull request has been automatically locked. If you believe you 27 | have found a related problem, please file a new issue (with a reprex: 28 | ) and link to this issue. 29 | pr-lock-reason: '' 30 | # process-only: 'issues' 31 | -------------------------------------------------------------------------------- /.github/workflows/pkgdown.yaml: -------------------------------------------------------------------------------- 1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples 2 | # Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help 3 | on: 4 | push: 5 | branches: [main, master] 6 | pull_request: 7 | release: 8 | types: [published] 9 | workflow_dispatch: 10 | 11 | name: pkgdown.yaml 12 | 13 | permissions: read-all 14 | 15 | jobs: 16 | pkgdown: 17 | runs-on: ubuntu-latest 18 | # Only restrict concurrency for non-PR jobs 19 | concurrency: 20 | group: pkgdown-${{ github.event_name != 'pull_request' || github.run_id }} 21 | env: 22 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 23 | permissions: 24 | contents: write 25 | steps: 26 | - uses: actions/checkout@v4 27 | 28 | - uses: r-lib/actions/setup-pandoc@v2 29 | 30 | - uses: r-lib/actions/setup-r@v2 31 | with: 32 | use-public-rspm: true 33 | 34 | - uses: r-lib/actions/setup-r-dependencies@v2 35 | with: 36 | extra-packages: any::pkgdown, local::. 
37 | needs: website 38 | 39 | - name: Build site 40 | run: pkgdown::build_site_github_pages(new_process = FALSE, install = FALSE) 41 | shell: Rscript {0} 42 | 43 | - name: Deploy to GitHub pages 🚀 44 | if: github.event_name != 'pull_request' 45 | uses: JamesIves/github-pages-deploy-action@v4.5.0 46 | with: 47 | clean: false 48 | branch: gh-pages 49 | folder: docs 50 | -------------------------------------------------------------------------------- /.github/workflows/pr-commands.yaml: -------------------------------------------------------------------------------- 1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples 2 | # Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help 3 | on: 4 | issue_comment: 5 | types: [created] 6 | 7 | name: pr-commands.yaml 8 | 9 | permissions: read-all 10 | 11 | jobs: 12 | document: 13 | if: ${{ github.event.issue.pull_request && (github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'OWNER') && startsWith(github.event.comment.body, '/document') }} 14 | name: document 15 | runs-on: ubuntu-latest 16 | env: 17 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 18 | permissions: 19 | contents: write 20 | steps: 21 | - uses: actions/checkout@v4 22 | 23 | - uses: r-lib/actions/pr-fetch@v2 24 | with: 25 | repo-token: ${{ secrets.GITHUB_TOKEN }} 26 | 27 | - uses: r-lib/actions/setup-r@v2 28 | with: 29 | use-public-rspm: true 30 | 31 | - uses: r-lib/actions/setup-r-dependencies@v2 32 | with: 33 | extra-packages: any::roxygen2 34 | needs: pr-document 35 | 36 | - name: Document 37 | run: roxygen2::roxygenise() 38 | shell: Rscript {0} 39 | 40 | - name: commit 41 | run: | 42 | git config --local user.name "$GITHUB_ACTOR" 43 | git config --local user.email "$GITHUB_ACTOR@users.noreply.github.com" 44 | git add man/\* NAMESPACE 45 | git commit -m 'Document' 46 | 47 | - uses: r-lib/actions/pr-push@v2 48 | with: 49 | repo-token: ${{ secrets.GITHUB_TOKEN }} 50 | 51 | style: 52 | if: ${{ github.event.issue.pull_request && (github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'OWNER') && startsWith(github.event.comment.body, '/style') }} 53 | name: style 54 | runs-on: ubuntu-latest 55 | env: 56 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 57 | permissions: 58 | contents: write 59 | steps: 60 | - uses: actions/checkout@v4 61 | 62 | - uses: r-lib/actions/pr-fetch@v2 63 | with: 64 | repo-token: ${{ secrets.GITHUB_TOKEN }} 65 | 66 | - uses: r-lib/actions/setup-r@v2 67 | 68 | - name: Install dependencies 69 | run: install.packages("styler") 70 | shell: Rscript {0} 71 | 72 | - name: Style 73 | run: styler::style_pkg() 74 | shell: Rscript {0} 75 | 76 | - name: commit 77 | run: | 78 | git config --local user.name "$GITHUB_ACTOR" 79 | git config --local user.email "$GITHUB_ACTOR@users.noreply.github.com" 80 | git add \*.R 81 | git commit -m 'Style' 82 | 83 | - uses: r-lib/actions/pr-push@v2 84 | with: 85 | repo-token: ${{ secrets.GITHUB_TOKEN }} 86 | -------------------------------------------------------------------------------- /.github/workflows/test-coverage.yaml: -------------------------------------------------------------------------------- 1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples 2 | # Need help debugging build failures? 
Start at https://github.com/r-lib/actions#where-to-find-help 3 | on: 4 | push: 5 | branches: [main, master] 6 | pull_request: 7 | 8 | name: test-coverage.yaml 9 | 10 | permissions: read-all 11 | 12 | jobs: 13 | test-coverage: 14 | runs-on: ubuntu-latest 15 | env: 16 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 17 | 18 | steps: 19 | - uses: actions/checkout@v4 20 | 21 | - uses: r-lib/actions/setup-r@v2 22 | with: 23 | use-public-rspm: true 24 | 25 | - uses: r-lib/actions/setup-r-dependencies@v2 26 | with: 27 | extra-packages: any::covr, any::xml2 28 | needs: coverage 29 | 30 | - name: Test coverage 31 | run: | 32 | cov <- covr::package_coverage( 33 | quiet = FALSE, 34 | clean = FALSE, 35 | install_path = file.path(normalizePath(Sys.getenv("RUNNER_TEMP"), winslash = "/"), "package") 36 | ) 37 | print(cov) 38 | covr::to_cobertura(cov) 39 | shell: Rscript {0} 40 | 41 | - uses: codecov/codecov-action@v5 42 | with: 43 | # Fail on error if not on a PR, or if on a PR and a token is given 44 | fail_ci_if_error: ${{ github.event_name != 'pull_request' || secrets.CODECOV_TOKEN }} 45 | files: ./cobertura.xml 46 | plugins: noop 47 | disable_search: true 48 | token: ${{ secrets.CODECOV_TOKEN }} 49 | 50 | - name: Show testthat output 51 | if: always() 52 | run: | 53 | ## -------------------------------------------------------------------- 54 | find '${{ runner.temp }}/package' -name 'testthat.Rout*' -exec cat '{}' \; || true 55 | shell: bash 56 | 57 | - name: Upload test results 58 | if: failure() 59 | uses: actions/upload-artifact@v4 60 | with: 61 | name: coverage-test-failures 62 | path: ${{ runner.temp }}/package 63 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .Rproj.user 2 | .Rhistory 3 | .RData 4 | .Ruserdata 5 | .DS_Store 6 | .httr-oauth 7 | doc 8 | Meta 9 | docs 10 | -------------------------------------------------------------------------------- /.vscode/extensions.json: -------------------------------------------------------------------------------- 1 | { 2 | "recommendations": [ 3 | "Posit.air-vscode" 4 | ] 5 | } 6 | -------------------------------------------------------------------------------- /.vscode/settings.json: -------------------------------------------------------------------------------- 1 | { 2 | "[r]": { 3 | "editor.formatOnSave": true, 4 | "editor.defaultFormatter": "Posit.air-vscode" 5 | } 6 | } 7 | -------------------------------------------------------------------------------- /CONTRIBUTING.md: -------------------------------------------------------------------------------- 1 | # Contributing 2 | 3 | Contributions to the `infer` package, whether in the form of bug fixes, issue reports, new 4 | code, or documentation improvements, are encouraged and welcome. We welcome novices 5 | who may have never contributed to a package before as well as friendly 6 | veterans looking to help us improve the package for users. We are eager to include 7 | and accept contributions from everyone who meets our [code of conduct](.github/CODE_OF_CONDUCT.md) 8 | guidelines. 9 | 10 | Please use GitHub issues to report bugs or suggest improvements. For any pull request, please link to or open a 11 | corresponding issue in GitHub issues. Please ensure that you have notifications 12 | turned on and respond to questions, comments, or needed changes promptly. 13 | 14 | ## Tests 15 | 16 | `infer` uses `testthat` for testing.
Please try to provide 100% test coverage 17 | for any submitted code and always check that existing tests continue to pass. 18 | If you are a beginner and need help with writing a test, mention this 19 | in the issue and we will try to help. 20 | 21 | It's also helpful to run `goodpractice::gp()` to ensure that lines of code are 22 | not over 80 characters and that all lines of code have tests written. Please do 23 | so prior to submitting any pull request and fix any suggestions from there. 24 | Reach out to us if you need any assistance there too. 25 | 26 | ## Code style 27 | 28 | Please use snake case (such as `rep_sample_n`) for function names. 29 | Besides that, in general follow the 30 | [tidyverse style](http://style.tidyverse.org/) for R. 31 | 32 | ## Code of Conduct 33 | 34 | When contributing to the `infer` package you must follow the code of 35 | conduct defined in [CONDUCT](.github/CODE_OF_CONDUCT.md). 36 | -------------------------------------------------------------------------------- /DESCRIPTION: -------------------------------------------------------------------------------- 1 | Type: Package 2 | Package: infer 3 | Title: Tidy Statistical Inference 4 | Version: 1.0.8.9000 5 | Authors@R: c( 6 | person("Andrew", "Bray", , "abray@reed.edu", role = "aut"), 7 | person("Chester", "Ismay", , "chester.ismay@gmail.com", role = "aut", 8 | comment = c(ORCID = "0000-0003-2820-2547")), 9 | person("Evgeni", "Chasnovski", , "evgeni.chasnovski@gmail.com", role = "aut", 10 | comment = c(ORCID = "0000-0002-1617-4019")), 11 | person("Simon", "Couch", , "simon.couch@posit.co", role = c("aut", "cre"), 12 | comment = c(ORCID = "0000-0001-5676-5107")), 13 | person("Ben", "Baumer", , "ben.baumer@gmail.com", role = "aut", 14 | comment = c(ORCID = "0000-0002-3279-0516")), 15 | person("Mine", "Cetinkaya-Rundel", , "mine@stat.duke.edu", role = "aut", 16 | comment = c(ORCID = "0000-0001-6452-2420")), 17 | person("Ted", "Laderas", , "tedladeras@gmail.com", role = "ctb", 18 | comment = c(ORCID = "0000-0002-6207-7068")), 19 | person("Nick", "Solomon", , "nick.solomon@datacamp.com", role = "ctb"), 20 | person("Johanna", "Hardin", , "Jo.Hardin@pomona.edu", role = "ctb"), 21 | person("Albert Y.", "Kim", , "albert.ys.kim@gmail.com", role = "ctb", 22 | comment = c(ORCID = "0000-0001-7824-306X")), 23 | person("Neal", "Fultz", , "nfultz@gmail.com", role = "ctb"), 24 | person("Doug", "Friedman", , "doug.nhp@gmail.com", role = "ctb"), 25 | person("Richie", "Cotton", , "richie@datacamp.com", role = "ctb", 26 | comment = c(ORCID = "0000-0003-2504-802X")), 27 | person("Brian", "Fannin", , "captain@pirategrunt.com", role = "ctb") 28 | ) 29 | Description: The objective of this package is to perform inference using 30 | an expressive statistical grammar that coheres with the tidy design 31 | framework. 
32 | License: MIT + file LICENSE 33 | URL: https://github.com/tidymodels/infer, https://infer.tidymodels.org/ 34 | BugReports: https://github.com/tidymodels/infer/issues 35 | Depends: 36 | R (>= 4.1) 37 | Imports: 38 | broom, 39 | cli, 40 | dplyr (>= 0.7.0), 41 | generics, 42 | ggplot2 (>= 3.5.2), 43 | glue (>= 1.3.0), 44 | grDevices, 45 | lifecycle, 46 | magrittr, 47 | methods, 48 | patchwork, 49 | purrr, 50 | rlang (>= 0.2.0), 51 | tibble, 52 | tidyr, 53 | vctrs (>= 0.6.5) 54 | Suggests: 55 | covr, 56 | devtools (>= 1.12.0), 57 | fs, 58 | knitr, 59 | nycflights13, 60 | parsnip, 61 | rmarkdown, 62 | stringr, 63 | testthat (>= 3.0.0), 64 | vdiffr (>= 1.0.0) 65 | VignetteBuilder: 66 | knitr 67 | Config/Needs/website: tidyverse/tidytemplate 68 | Config/testthat/edition: 3 69 | Config/usethis/last-upkeep: 2025-04-25 70 | Encoding: UTF-8 71 | LazyData: true 72 | Roxygen: list(markdown = TRUE) 73 | RoxygenNote: 7.3.2 74 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | YEAR: 2025 2 | COPYRIGHT HOLDER: infer authors 3 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | # MIT License 2 | 3 | Copyright (c) 2025 infer authors 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 
22 | -------------------------------------------------------------------------------- /NAMESPACE: -------------------------------------------------------------------------------- 1 | # Generated by roxygen2: do not edit by hand 2 | 3 | S3method(calc_impl,Chisq) 4 | S3method(calc_impl,F) 5 | S3method(calc_impl,correlation) 6 | S3method(calc_impl,count) 7 | S3method(calc_impl,diff_in_means) 8 | S3method(calc_impl,diff_in_medians) 9 | S3method(calc_impl,diff_in_props) 10 | S3method(calc_impl,function_of_props) 11 | S3method(calc_impl,mean) 12 | S3method(calc_impl,median) 13 | S3method(calc_impl,odds_ratio) 14 | S3method(calc_impl,prop) 15 | S3method(calc_impl,ratio_of_means) 16 | S3method(calc_impl,ratio_of_props) 17 | S3method(calc_impl,sd) 18 | S3method(calc_impl,slope) 19 | S3method(calc_impl,sum) 20 | S3method(calc_impl,t) 21 | S3method(calc_impl,z) 22 | S3method(fit,infer) 23 | S3method(get_p_value,default) 24 | S3method(get_p_value,infer_dist) 25 | S3method(ggplot_add,infer_layer) 26 | S3method(print,infer) 27 | S3method(print,infer_dist) 28 | S3method(print,infer_layer) 29 | export("%>%") 30 | export(assume) 31 | export(calculate) 32 | export(chisq_stat) 33 | export(chisq_test) 34 | export(conf_int) 35 | export(fit) 36 | export(fit.infer) 37 | export(generate) 38 | export(get_ci) 39 | export(get_confidence_interval) 40 | export(get_p_value) 41 | export(get_pvalue) 42 | export(ggplot_add) 43 | export(hypothesise) 44 | export(hypothesize) 45 | export(observe) 46 | export(p_value) 47 | export(prop_test) 48 | export(rep_sample_n) 49 | export(rep_slice_sample) 50 | export(shade_ci) 51 | export(shade_confidence_interval) 52 | export(shade_p_value) 53 | export(shade_pvalue) 54 | export(specify) 55 | export(t_stat) 56 | export(t_test) 57 | export(visualise) 58 | export(visualize) 59 | importFrom(cli,cli_abort) 60 | importFrom(cli,cli_inform) 61 | importFrom(cli,cli_warn) 62 | importFrom(cli,no) 63 | importFrom(cli,qty) 64 | importFrom(dplyr,across) 65 | importFrom(dplyr,any_of) 66 | importFrom(dplyr,bind_rows) 67 | importFrom(dplyr,group_by) 68 | importFrom(dplyr,n) 69 | importFrom(dplyr,pull) 70 | importFrom(dplyr,select) 71 | importFrom(dplyr,summarize) 72 | importFrom(generics,fit) 73 | importFrom(ggplot2,aes) 74 | importFrom(ggplot2,geom_bar) 75 | importFrom(ggplot2,geom_histogram) 76 | importFrom(ggplot2,geom_rect) 77 | importFrom(ggplot2,geom_vline) 78 | importFrom(ggplot2,ggplot) 79 | importFrom(ggplot2,ggplot_add) 80 | importFrom(ggplot2,ggtitle) 81 | importFrom(ggplot2,xlab) 82 | importFrom(ggplot2,ylab) 83 | importFrom(glue,glue) 84 | importFrom(glue,glue_collapse) 85 | importFrom(magrittr,"%>%") 86 | importFrom(methods,hasArg) 87 | importFrom(purrr,compact) 88 | importFrom(rlang,"!!") 89 | importFrom(rlang,":=") 90 | importFrom(rlang,caller_env) 91 | importFrom(rlang,enquo) 92 | importFrom(rlang,eval_tidy) 93 | importFrom(rlang,f_lhs) 94 | importFrom(rlang,f_rhs) 95 | importFrom(rlang,get_expr) 96 | importFrom(rlang,new_formula) 97 | importFrom(rlang,quo) 98 | importFrom(rlang,sym) 99 | importFrom(stats,as.formula) 100 | importFrom(stats,dchisq) 101 | importFrom(stats,df) 102 | importFrom(stats,dnorm) 103 | importFrom(stats,dt) 104 | importFrom(stats,qchisq) 105 | importFrom(stats,qf) 106 | importFrom(stats,qnorm) 107 | importFrom(stats,qt) 108 | importFrom(tibble,tibble) 109 | -------------------------------------------------------------------------------- /R/deprecated.R: -------------------------------------------------------------------------------- 1 | #' Deprecated functions 
and objects 2 | #' 3 | #' These functions and objects should no longer be used. They will be removed 4 | #' in a future release of infer. 5 | #' @param x See the non-deprecated function. 6 | #' @param level See the non-deprecated function. 7 | #' @param type See the non-deprecated function. 8 | #' @param point_estimate See the non-deprecated function. 9 | #' @param obs_stat See the non-deprecated function. 10 | #' @param direction See the non-deprecated function. 11 | #' @seealso [get_p_value()], [get_confidence_interval()], [generate()] 12 | #' @name deprecated 13 | NULL 14 | 15 | 16 | #' @rdname deprecated 17 | #' @export 18 | conf_int <- function( 19 | x, 20 | level = 0.95, 21 | type = "percentile", 22 | point_estimate = NULL 23 | ) { 24 | lifecycle::deprecate_stop("0.4.0", "conf_int()", "get_confidence_interval()") 25 | } 26 | 27 | 28 | #' @rdname deprecated 29 | #' @export 30 | p_value <- function(x, obs_stat, direction) { 31 | lifecycle::deprecate_stop("0.4.0", "p_value()", "get_p_value()") 32 | } 33 | -------------------------------------------------------------------------------- /R/gss.R: -------------------------------------------------------------------------------- 1 | #' Subset of data from the General Social Survey (GSS). 2 | #' 3 | #' The General Social Survey is a high-quality survey which gathers data on 4 | #' American society and opinions, conducted since 1972. This data set is a 5 | #' sample of 500 entries from the GSS, spanning years 1973-2018, 6 | #' including demographic markers and some 7 | #' economic variables. Note that this data is included for demonstration only, 8 | #' and should not be assumed to provide accurate estimates relating to the GSS. 9 | #' However, due to the high quality of the GSS, the unweighted data will 10 | #' approximate the weighted data in some analyses. 11 | #' @format A tibble with 500 rows and 11 variables: 12 | #' \describe{ 13 | #'   \item{year}{year respondent was surveyed} 14 | #'   \item{age}{age at time of survey, truncated at 89} 15 | #'   \item{sex}{respondent's sex (self-identified)} 16 | #'   \item{college}{whether or not respondent has a college degree, including 17 | #'     junior/community college} 18 | #'   \item{partyid}{political party affiliation} 19 | #'   \item{hompop}{number of persons in household} 20 | #'   \item{hours}{number of hours worked in week before survey, truncated at 89} 21 | #'   \item{income}{total family income} 22 | #'   \item{class}{subjective socioeconomic class identification} 23 | #'   \item{finrela}{opinion of family income} 24 | #'   \item{weight}{survey weight} 25 | #' } 26 | #' @source \url{https://gss.norc.org} 27 | "gss" 28 | -------------------------------------------------------------------------------- /R/infer.R: -------------------------------------------------------------------------------- 1 | #' infer: a grammar for statistical inference 2 | #' 3 | #' The objective of this package is to perform statistical inference using a 4 | #' grammar that illustrates the underlying concepts and a format that coheres 5 | #' with the tidyverse.
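#'
#' As a minimal sketch of that grammar in use (an illustrative example with
#' the bundled `gss` data, not drawn from the original text of this file):
#'
#' gss |>
#'   specify(response = hours) |>
#'   hypothesize(null = "point", mu = 40) |>
#'   generate(reps = 1000, type = "bootstrap") |>
#'   calculate(stat = "mean")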
6 | #' 7 | #' For an overview of how to use the core functionality, see `vignette("infer")` 8 | #' 9 | #' 10 | #' @docType package 11 | #' @name infer 12 | "_PACKAGE" 13 | 14 | #' @importFrom cli cli_abort cli_warn cli_inform qty no 15 | 16 | ## quiets concerns of R CMD check re: the .'s that appear in pipelines 17 | ## From Jenny Bryan's googlesheets package 18 | if (getRversion() >= "2.15.1") { 19 | utils::globalVariables( 20 | c( 21 | "prop", 22 | "stat", 23 | "value", 24 | "x", 25 | "y", 26 | "..density..", 27 | "statistic", 28 | ".", 29 | "parameter", 30 | "p.value", 31 | "xmin", 32 | "x_min", 33 | "xmax", 34 | "x_max", 35 | "density", 36 | "denom", 37 | "diff_prop", 38 | "group_num", 39 | "n1", 40 | "n2", 41 | "num_suc", 42 | "p_hat", 43 | "total_suc", 44 | "explan", 45 | "probs", 46 | "conf.low", 47 | "conf.high", 48 | "prop_1", 49 | "prop_2", 50 | "data", 51 | "setNames", 52 | "resp", 53 | "capture.output", 54 | "stats", 55 | "estimate", 56 | "any_of", 57 | "model", 58 | "term", 59 | "where", 60 | "hypothesis" 61 | ) 62 | ) 63 | } 64 | -------------------------------------------------------------------------------- /R/observe.R: -------------------------------------------------------------------------------- 1 | #' Calculate observed statistics 2 | #' 3 | #' @description 4 | #' 5 | #' This function is a wrapper that calls [specify()], [hypothesize()], and 6 | #' [calculate()] consecutively that can be used to calculate observed 7 | #' statistics from data. [hypothesize()] will only be called if a point 8 | #' null hypothesis parameter is supplied. 9 | #' 10 | #' Learn more in `vignette("infer")`. 11 | #' 12 | #' @inheritParams specify 13 | #' @inheritParams hypothesize 14 | #' @inheritParams calculate 15 | #' 16 | #' @return A 1-column tibble containing the calculated statistic `stat`. 
17 | #' 18 | #' @examples 19 | #' # calculating the observed mean number of hours worked per week 20 | #' gss |> 21 | #' observe(hours ~ NULL, stat = "mean") 22 | #' 23 | #' # equivalently, calculating the same statistic with the core verbs 24 | #' gss |> 25 | #' specify(response = hours) |> 26 | #' calculate(stat = "mean") 27 | #' 28 | #' # calculating a t statistic for hypothesized mu = 40 hours worked/week 29 | #' gss |> 30 | #' observe(hours ~ NULL, stat = "t", null = "point", mu = 40) 31 | #' 32 | #' # equivalently, calculating the same statistic with the core verbs 33 | #' gss |> 34 | #' specify(response = hours) |> 35 | #' hypothesize(null = "point", mu = 40) |> 36 | #' calculate(stat = "t") 37 | #' 38 | #' # similarly for a difference in means in age based on whether 39 | #' # the respondent has a college degree 40 | #' observe( 41 | #' gss, 42 | #' age ~ college, 43 | #' stat = "diff in means", 44 | #' order = c("degree", "no degree") 45 | #' ) 46 | #' 47 | #' # equivalently, calculating the same statistic with the core verbs 48 | #' gss |> 49 | #' specify(age ~ college) |> 50 | #' calculate("diff in means", order = c("degree", "no degree")) 51 | #' 52 | #' # for a more in-depth explanation of how to use the infer package 53 | #' \dontrun{ 54 | #' vignette("infer") 55 | #' } 56 | #' 57 | #' @family wrapper functions 58 | #' @family functions for calculating observed statistics 59 | #' @export 60 | observe <- function( 61 | x, 62 | # specify arguments 63 | formula, 64 | response = NULL, 65 | explanatory = NULL, 66 | success = NULL, 67 | # hypothesize arguments 68 | null = NULL, 69 | p = NULL, 70 | mu = NULL, 71 | med = NULL, 72 | sigma = NULL, 73 | # calculate arguments 74 | stat = c( 75 | "mean", 76 | "median", 77 | "sum", 78 | "sd", 79 | "prop", 80 | "count", 81 | "diff in means", 82 | "diff in medians", 83 | "diff in props", 84 | "Chisq", 85 | "F", 86 | "slope", 87 | "correlation", 88 | "t", 89 | "z", 90 | "ratio of props", 91 | "odds ratio" 92 | ), 93 | order = NULL, 94 | ... 95 | ) { 96 | # use hypothesize() if appropriate (or needed to pass an informative 97 | # message/warning). otherwise, pipe directly to calculate(). 98 | if (!all(sapply(list(p, mu, med, sigma), is.null))) { 99 | hypothesize_fn <- hypothesize 100 | } else { 101 | hypothesize_fn <- function(x, ...) { 102 | x 103 | } 104 | } 105 | 106 | # pass arguments on to core verbs 107 | res <- 108 | specify( 109 | x = x, 110 | formula = formula, 111 | response = {{ response }}, 112 | explanatory = {{ explanatory }}, 113 | success = success 114 | ) 115 | 116 | hypothesize_fn( 117 | res, 118 | null = if (has_explanatory(res)) { 119 | "independence" 120 | } else { 121 | "point" 122 | }, 123 | p = p, 124 | mu = mu, 125 | med = med, 126 | sigma = sigma 127 | ) |> 128 | calculate( 129 | stat = stat, 130 | order = order, 131 | ... 132 | ) 133 | } 134 | -------------------------------------------------------------------------------- /R/pipe.R: -------------------------------------------------------------------------------- 1 | #' Pipe 2 | #' 3 | #' Like \{dplyr\}, \{infer\} also uses the pipe (\code{|>}) function 4 | #' from \code{magrittr} to turn function composition into a series of 5 | #' iterative statements. 6 | #' 7 | #' @param lhs,rhs Inference functions and the initial data frame. 
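#'
#' A sketch of typical piped usage (illustrative only, not from the original
#' file; the re-exported magrittr pipe and the base `|>` pipe both work with
#' infer verbs):
#'
#' gss %>%
#'   specify(response = hours) %>%
#'   calculate(stat = "mean")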
8 | #' 9 | #' @importFrom magrittr %>% 10 | #' @name %>% 11 | #' @rdname pipe 12 | #' @export 13 | NULL 14 | -------------------------------------------------------------------------------- /R/print_methods.R: -------------------------------------------------------------------------------- 1 | #' Print methods 2 | #' 3 | #' @param x An object of class `infer`, i.e. output from [specify()] or 4 | #' [hypothesize()], or of class `infer_layer`, i.e. output from 5 | #' [shade_p_value()] or [shade_confidence_interval()]. 6 | #' @param ... Arguments passed to methods. 7 | #' @importFrom glue glue_collapse glue 8 | #' 9 | #' @rdname print.infer 10 | #' @export 11 | print.infer <- function(x, ...) { 12 | attrs <- names(attributes(x)) 13 | header <- character(3) 14 | if ("response" %in% attrs) { 15 | header[1] <- glue( 16 | 'Response: {response_name(x)} ({attr(x, "response_type")})', 17 | .null = "NULL" 18 | ) 19 | if ("explanatory" %in% attrs) { 20 | header[2] <- glue( 21 | 'Explanatory: {paste0(paste0(explanatory_name(x), " (", 22 | attr(x, "explanatory_type"), ")"), collapse = ", ")}', 23 | .null = "NULL" 24 | ) 25 | } 26 | } 27 | if ("null" %in% attrs) { 28 | header[3] <- glue('Null Hypothesis: {attr(x, "null")}', .null = "NULL") 29 | } 30 | 31 | cat(glue::glue_collapse( 32 | header[header != ""], 33 | width = cli::console_width(), 34 | sep = "\n" 35 | )) 36 | cat("\n") 37 | 38 | NextMethod() 39 | } 40 | 41 | #' @rdname print.infer 42 | #' @export 43 | print.infer_layer <- function(x, ...) { 44 | cat(x) 45 | } 46 | 47 | #' @rdname print.infer 48 | #' @export 49 | print.infer_dist <- function(x, ...) { 50 | cat(x) 51 | } 52 | -------------------------------------------------------------------------------- /R/set_params.R: -------------------------------------------------------------------------------- 1 | #' To determine which theoretical distribution to fit (if any) 2 | #' 3 | #' @param x A data frame that can be coerced into a [tibble][tibble::tibble]. 
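#'
#' For instance (an illustrative note, not part of the original file): called
#' on a specification with a single numeric response, such as
#' `specify(gss, response = hours)`, this helper sets
#' `attr(x, "theory_type") <- "One sample t"` and a default `type` of
#' "bootstrap", per the branches below.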
4 | #' 5 | #' @noRd 6 | set_params <- function(x) { 7 | attr(x, "theory_type") <- NULL 8 | 9 | if (has_response(x)) { 10 | num_response_levels <- length(unique(response_variable(x))) 11 | 12 | check_factor_levels( 13 | response_variable(x), 14 | "response", 15 | response_name(x) 16 | ) 17 | } 18 | 19 | if (is_mlr(x)) { 20 | return(x) 21 | } 22 | 23 | if (has_explanatory(x)) { 24 | num_explanatory_levels <- length(unique(explanatory_variable(x))) 25 | 26 | check_factor_levels( 27 | explanatory_variable(x), 28 | "explanatory", 29 | explanatory_name(x) 30 | ) 31 | } 32 | 33 | # One variable 34 | if ( 35 | has_response(x) && 36 | !has_explanatory(x) && 37 | has_attr(x, "response_type") && 38 | !has_attr(x, "explanatory_type") 39 | ) { 40 | # One mean 41 | if (attr(x, "response_type") %in% c("integer", "numeric")) { 42 | attr(x, "theory_type") <- "One sample t" 43 | attr(x, "distr_param") <- stats::t.test( 44 | response_variable(x) 45 | )[["parameter"]] 46 | attr(x, "type") <- "bootstrap" 47 | } else if ( 48 | # One prop 49 | (attr(x, "response_type") == "factor") && (num_response_levels == 2) 50 | ) { 51 | # No parameters since standard normal 52 | attr(x, "theory_type") <- "One sample prop z" 53 | # Changed to `"draw"` when `p` provided in `hypothesize()` 54 | attr(x, "type") <- "bootstrap" 55 | } else { 56 | attr(x, "theory_type") <- "Chi-square Goodness of Fit" 57 | attr(x, "distr_param") <- num_response_levels - 1 58 | attr(x, "type") <- "draw" 59 | } 60 | } 61 | 62 | # Two variables 63 | if ( 64 | has_response(x) && 65 | has_explanatory(x) & 66 | has_attr(x, "response_type") && 67 | has_attr(x, "explanatory_type") 68 | ) { 69 | attr(x, "type") <- "bootstrap" 70 | 71 | # Response is numeric, explanatory is categorical 72 | if ( 73 | (attr(x, "response_type") %in% c("integer", "numeric")) & 74 | (attr(x, "explanatory_type") == "factor") 75 | ) { 76 | # Two sample means (t distribution) 77 | if (num_explanatory_levels == 2) { 78 | attr(x, "theory_type") <- "Two sample t" 79 | # Keep track of Satterthwaite degrees of freedom since lost when 80 | # in aggregation w/ calculate()/generate() 81 | attr(x, "distr_param") <- stats::t.test( 82 | response_variable(x) ~ explanatory_variable(x) 83 | )[["parameter"]] 84 | } else { 85 | # >2 sample means (F distribution) 86 | attr(x, "theory_type") <- "ANOVA" 87 | # Get numerator and denominator degrees of freedom 88 | degrees <- stats::anova(stats::aov( 89 | response_variable(x) ~ explanatory_variable(x) 90 | ))$Df 91 | attr(x, "distr_param") <- degrees[1] 92 | attr(x, "distr_param2") <- degrees[2] 93 | } 94 | } 95 | 96 | # Response is categorical, explanatory is categorical 97 | if ( 98 | (attr(x, "response_type") == "factor") & 99 | (attr(x, "explanatory_type") == "factor") 100 | ) { 101 | attr(x, "type") <- "bootstrap" 102 | 103 | # Two sample proportions (z distribution) 104 | # Parameter(s) not needed since standard normal 105 | if ( 106 | (num_response_levels == 2) & 107 | (num_explanatory_levels == 2) 108 | ) { 109 | attr(x, "theory_type") <- "Two sample props z" 110 | } else { 111 | # >2 sample proportions (chi-square test of indep) 112 | attr(x, "theory_type") <- "Chi-square test of indep" 113 | attr(x, "distr_param") <- suppressWarnings( 114 | stats::chisq.test( 115 | table(response_variable(x), explanatory_variable(x)) 116 | )$parameter 117 | ) 118 | } 119 | } 120 | 121 | # Response is numeric, explanatory is numeric 122 | if ( 123 | (attr(x, "response_type") %in% c("integer", "numeric")) & 124 | (attr(x, "explanatory_type") %in% c("integer", 
"numeric")) 125 | ) { 126 | response_string <- response_name(x) 127 | explanatory_string <- explanatory_name(x) 128 | attr(x, "theory_type") <- "Slope/correlation with t" 129 | attr(x, "distr_param") <- nrow(x) - 2 130 | } 131 | } 132 | 133 | x 134 | } 135 | 136 | check_factor_levels <- function(x, type, name) { 137 | if (is.factor(x)) { 138 | unused <- setdiff(levels(x), unique(x)) 139 | 140 | if (length(unused) > 0) { 141 | cli_inform( 142 | "Dropping unused factor levels {list(unused)} from the \\ 143 | supplied {type} variable '{name}'." 144 | ) 145 | } 146 | } 147 | } 148 | -------------------------------------------------------------------------------- /README_files/figure-gfm/viz-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tidymodels/infer/90c91983dd5c78a50f51976141b57db516e29a05/README_files/figure-gfm/viz-1.png -------------------------------------------------------------------------------- /_pkgdown.yml: -------------------------------------------------------------------------------- 1 | url: https://infer.tidymodels.org 2 | 3 | template: 4 | package: tidytemplate 5 | bootstrap: 5 6 | bslib: 7 | danger: "#CA225E" 8 | primary: "#CA225E" 9 | includes: 10 | in_header: | 11 | 12 | 13 | figures: 14 | fig.width: 8 15 | fig.height: 5.75 16 | 17 | reference: 18 | - title: Core Verbs 19 | contents: 20 | - specify 21 | - hypothesize 22 | - generate 23 | - calculate 24 | - fit.infer 25 | - assume 26 | - title: Helpers 27 | contents: 28 | - visualize 29 | - get_p_value 30 | - get_confidence_interval 31 | - shade_p_value 32 | - shade_confidence_interval 33 | - title: Wrappers 34 | contents: 35 | - observe 36 | - ends_with("_test") 37 | - ends_with("_stat") 38 | - title: Miscellaneous 39 | contents: 40 | - infer 41 | - gss 42 | - deprecated 43 | - rep_sample_n 44 | - "`%>%`" 45 | - print.infer 46 | 47 | articles: 48 | - title: Articles 49 | navbar: ~ 50 | contents: 51 | - infer 52 | - t_test 53 | - anova 54 | - chi_squared 55 | - paired 56 | - observed_stat_examples 57 | 58 | development: 59 | mode: auto 60 | -------------------------------------------------------------------------------- /air.toml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tidymodels/infer/90c91983dd5c78a50f51976141b57db516e29a05/air.toml -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | comment: false 2 | 3 | coverage: 4 | status: 5 | project: 6 | default: 7 | target: auto 8 | threshold: 1% 9 | informational: true 10 | patch: 11 | default: 12 | target: auto 13 | threshold: 1% 14 | informational: true 15 | -------------------------------------------------------------------------------- /cran-comments.md: -------------------------------------------------------------------------------- 1 | ## R CMD check results 2 | 3 | 0 errors | 0 warnings | 0 notes 4 | 5 | With current CRAN checks, we do see failures on R-devel with Windows. This check ERROR doesn't seem to cite an actual check failure and is present on the check pages for many other packages, so we assume it is a false positive. 6 | 7 | ## Reverse dependencies 8 | 9 | We checked 5 reverse dependencies, comparing R CMD check results across CRAN and dev versions of this package, and saw no new problems. 
10 | -------------------------------------------------------------------------------- /data-raw/save_gss.R: -------------------------------------------------------------------------------- 1 | library(dplyr) 2 | library(forcats) 3 | library(srvyr) 4 | library(ggplot2) 5 | 6 | # pull gss data 7 | temp <- tempfile() 8 | download.file("https://gss.norc.org/documents/stata/GSS_stata.zip", temp) 9 | 10 | # if this next line errors with "No such file or directory", try 11 | # incrementing the number after "_R" 12 | gss_orig <- haven::read_dta(unz(temp, filename = "GSS7218_R2.DTA")) |> 13 | haven::as_factor() 14 | unlink(temp) 15 | 16 | # select relevant columns 17 | gss_small <- gss_orig |> 18 | filter(!stringr::str_detect(sample, "blk oversamp")) |> # this is for weighting 19 | select( 20 | year, 21 | age, 22 | sex, 23 | college = degree, 24 | partyid, 25 | hompop, 26 | hours = hrs1, 27 | income, 28 | class, 29 | finrela, 30 | weight = wtssall 31 | ) |> 32 | mutate_if( 33 | is.factor, 34 | ~ fct_collapse(., NULL = c("IAP", "NA", "iap", "na")) 35 | ) |> 36 | mutate( 37 | age = age |> 38 | fct_recode("89" = "89 or older", NULL = "DK") |> # truncated at 89 39 | as.character() |> 40 | as.numeric(), 41 | hompop = hompop |> 42 | fct_collapse(NULL = c("DK")) |> 43 | as.character() |> 44 | as.numeric(), 45 | hours = hours |> 46 | fct_recode("89" = "89+ hrs", NULL = "DK") |> # truncated at 89 47 | as.character() |> 48 | as.numeric(), 49 | weight = weight |> 50 | as.character() |> 51 | as.numeric(), 52 | partyid = fct_collapse( 53 | partyid, 54 | dem = c("strong democrat", "not str democrat"), 55 | rep = c("strong republican", "not str republican"), 56 | ind = c("ind,near dem", "independent", "ind,near rep"), 57 | other = "other party" 58 | ), 59 | income = factor(income, ordered = TRUE), 60 | college = fct_collapse( 61 | college, 62 | degree = c("junior college", "bachelor", "graduate"), 63 | "no degree" = c("lt high school", "high school"), 64 | NULL = "dk" # no dks show up in the data, so drop this level 65 | ) 66 | ) 67 | 68 | # sample 3k rows, first dropping NAs 69 | set.seed(20200201) 70 | gss <- gss_small |> 71 | drop_na() |> 72 | sample_n(500) 73 | 74 | # check that the sample is similar unweighted to weighted 75 | gss_wt <- srvyr::as_survey_design(gss, weights = weight) 76 | 77 | unweighted <- gss |> 78 | group_by(year, sex, partyid) |> 79 | summarize(n = n()) |> 80 | ungroup() |> 81 | group_by(year, sex) |> 82 | mutate(prop = n / sum(n)) 83 | 84 | weighted <- gss_wt |> 85 | group_by(year, sex, partyid) |> 86 | summarize(prop = srvyr::survey_mean()) 87 | 88 | # save data into package 89 | usethis::use_data(gss, overwrite = TRUE) 90 | 91 | devtools::document() 92 | -------------------------------------------------------------------------------- /data/gss.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tidymodels/infer/90c91983dd5c78a50f51976141b57db516e29a05/data/gss.rda -------------------------------------------------------------------------------- /figs/ht-diagram.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tidymodels/infer/90c91983dd5c78a50f51976141b57db516e29a05/figs/ht-diagram.png -------------------------------------------------------------------------------- /figs/infer.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 10 | 11 | 12 | 13 | 14 | 16 | 17 | CRAN 18 | 19 | 20 | 
CRAN 21 | 22 | 23 | 0.1.1 24 | 25 | 26 | 0.1.1 27 | 28 | 29 | -------------------------------------------------------------------------------- /figs/infer_gnome.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tidymodels/infer/90c91983dd5c78a50f51976141b57db516e29a05/figs/infer_gnome.png -------------------------------------------------------------------------------- /figs/master.svg: -------------------------------------------------------------------------------- 1 | coveragecoverage96%96% -------------------------------------------------------------------------------- /figs/paper/columns.tex: -------------------------------------------------------------------------------- 1 | \newenvironment{cols}[1][]{}{} 2 | 3 | \newenvironment{col}[1]{\begin{minipage}{#1}\ignorespaces}{% 4 | \end{minipage} 5 | \ifhmode\unskip\fi 6 | \aftergroup\useignorespacesandallpars} 7 | 8 | \def\useignorespacesandallpars#1\ignorespaces\fi{% 9 | #1\fi\ignorespacesandallpars} 10 | 11 | \makeatletter 12 | \def\ignorespacesandallpars{% 13 | \@ifnextchar\par 14 | {\expandafter\ignorespacesandallpars\@gobble}% 15 | {}% 16 | } 17 | \makeatother -------------------------------------------------------------------------------- /figs/paper/paper.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tidymodels/infer/90c91983dd5c78a50f51976141b57db516e29a05/figs/paper/paper.pdf -------------------------------------------------------------------------------- /figs/rethinking-inference.key: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tidymodels/infer/90c91983dd5c78a50f51976141b57db516e29a05/figs/rethinking-inference.key -------------------------------------------------------------------------------- /infer.Rproj: -------------------------------------------------------------------------------- 1 | Version: 1.0 2 | 3 | RestoreWorkspace: Default 4 | SaveWorkspace: Default 5 | AlwaysSaveHistory: Default 6 | 7 | EnableCodeIndexing: Yes 8 | UseSpacesForTab: Yes 9 | NumSpacesForTab: 3 10 | Encoding: UTF-8 11 | 12 | RnwWeave: knitr 13 | LaTeX: pdfLaTeX 14 | 15 | AutoAppendNewline: Yes 16 | StripTrailingWhitespace: Yes 17 | 18 | BuildType: Package 19 | PackageUseDevtools: Yes 20 | PackageInstallArgs: --no-multiarch --with-keep.source 21 | -------------------------------------------------------------------------------- /inst/CITATION: -------------------------------------------------------------------------------- 1 | bibentry( 2 | "Article", 3 | title = "{infer}: An {R} package for tidyverse-friendly statistical inference", 4 | author = "Simon P. Couch, Andrew P. Bray, Chester Ismay, Evgeni Chasnovski, Benjamin S. Baumer, Mine Çetinkaya-Rundel", 5 | journal = "Journal of Open Source Software", 6 | year = 2021, 7 | volume = 6, 8 | number = 65, 9 | pages = 3661, 10 | doi = "10.21105/joss.03661", 11 | textVersion = "Couch et al., (2021). infer: An R package for tidyverse-friendly statistical inference. 
Journal of Open Source Software, 6(65), 3661, https://doi.org/10.21105/joss.03661" 12 | ) 13 | -------------------------------------------------------------------------------- /man-roxygen/seeds.Rmd: -------------------------------------------------------------------------------- 1 | # Reproducibility 2 | 3 | When using the infer package for research, or in other cases when exact reproducibility is a priority, be sure to set the seed for R's random number generator. infer will respect the random seed specified in the `set.seed()` function, returning the same result when `generate()`ing data given an identical seed. For instance, we can calculate the difference in mean `age` by `college` degree status from five versions of the `gss` dataset resampled with permutation using the following code. 4 | 5 | ```{r, include = FALSE} 6 | library(infer) 7 | ``` 8 | 9 | ```{r} 10 | set.seed(1) 11 | 12 | gss |> 13 | specify(age ~ college) |> 14 | hypothesize(null = "independence") |> 15 | generate(reps = 5, type = "permute") |> 16 | calculate("diff in means", order = c("degree", "no degree")) 17 | ``` 18 | 19 | Setting the seed to the same value again and rerunning the same code will produce the same result. 20 | 21 | ```{r} 22 | # set the seed 23 | set.seed(1) 24 | 25 | gss |> 26 | specify(age ~ college) |> 27 | hypothesize(null = "independence") |> 28 | generate(reps = 5, type = "permute") |> 29 | calculate("diff in means", order = c("degree", "no degree")) 30 | ``` 31 | 32 | Please keep this in mind when writing infer code that uses resampling with `generate()`. 33 | -------------------------------------------------------------------------------- /man/chisq_stat.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/wrappers.R 3 | \name{chisq_stat} 4 | \alias{chisq_stat} 5 | \title{Tidy chi-squared test statistic} 6 | \usage{ 7 | chisq_stat(x, formula, response = NULL, explanatory = NULL, ...) 8 | } 9 | \arguments{ 10 | \item{x}{A data frame that can be coerced into a \link[tibble:tibble]{tibble}.} 11 | 12 | \item{formula}{A formula with the response variable on the left and the 13 | explanatory on the right. Alternatively, a \code{response} and \code{explanatory} 14 | argument can be supplied.} 15 | 16 | \item{response}{The variable name in \code{x} that will serve as the response. 17 | This is an alternative to using the \code{formula} argument.} 18 | 19 | \item{explanatory}{The variable name in \code{x} that will serve as the 20 | explanatory variable. This is an alternative to using the formula argument.} 21 | 22 | \item{...}{Additional arguments for \link[stats:chisq.test]{chisq.test()}.} 23 | } 24 | \description{ 25 | Calculate the observed chi-squared test statistic from data. 26 | } 27 | \details{ 28 | A shortcut wrapper function to get the observed test statistic for a chisq 29 | test. Uses \link[stats:chisq.test]{chisq.test()}, which applies a continuity 30 | correction. This function has been deprecated in favor of the more 31 | general \code{\link[=observe]{observe()}}.
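As a sketch of the replacement (assuming a chi-squared statistic is wanted), a call such as \code{chisq_stat(gss, college ~ finrela)} can be written as \code{observe(gss, college ~ finrela, stat = "Chisq")}.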
32 | } 33 | \examples{ 34 | # chi-squared test statistic for a test of independence 35 | # of college completion status depending on one's 36 | # self-identified income class 37 | chisq_stat(gss, college ~ finrela) 38 | 39 | # chi-squared test statistic for a goodness of fit 40 | # test on whether self-identified income class 41 | # follows a uniform distribution 42 | chisq_stat(gss, 43 | response = finrela, 44 | p = c("far below average" = 1/6, 45 | "below average" = 1/6, 46 | "average" = 1/6, 47 | "above average" = 1/6, 48 | "far above average" = 1/6, 49 | "DK" = 1/6)) 50 | 51 | } 52 | \seealso{ 53 | Other wrapper functions: 54 | \code{\link{chisq_test}()}, 55 | \code{\link{observe}()}, 56 | \code{\link{prop_test}()}, 57 | \code{\link{t_stat}()}, 58 | \code{\link{t_test}()} 59 | 60 | Other functions for calculating observed statistics: 61 | \code{\link{observe}()}, 62 | \code{\link{t_stat}()} 63 | } 64 | \concept{functions for calculating observed statistics} 65 | \concept{wrapper functions} 66 | -------------------------------------------------------------------------------- /man/chisq_test.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/wrappers.R 3 | \name{chisq_test} 4 | \alias{chisq_test} 5 | \title{Tidy chi-squared test} 6 | \usage{ 7 | chisq_test(x, formula, response = NULL, explanatory = NULL, ...) 8 | } 9 | \arguments{ 10 | \item{x}{A data frame that can be coerced into a \link[tibble:tibble]{tibble}.} 11 | 12 | \item{formula}{A formula with the response variable on the left and the 13 | explanatory on the right. Alternatively, a \code{response} and \code{explanatory} 14 | argument can be supplied.} 15 | 16 | \item{response}{The variable name in \code{x} that will serve as the response. 17 | This is an alternative to using the \code{formula} argument.} 18 | 19 | \item{explanatory}{The variable name in \code{x} that will serve as the 20 | explanatory variable. This is an alternative to using the formula argument.} 21 | 22 | \item{...}{Additional arguments for \link[stats:chisq.test]{chisq.test()}.} 23 | } 24 | \description{ 25 | A tidier version of \link[stats:chisq.test]{chisq.test()} for goodness of fit 26 | tests and tests of independence.
27 | } 28 | \examples{ 29 | # chi-squared test of independence for college completion 30 | # status depending on one's self-identified income class 31 | chisq_test(gss, college ~ finrela) 32 | 33 | # chi-squared goodness of fit test on whether self-identified 34 | # income class follows a uniform distribution 35 | chisq_test(gss, 36 | response = finrela, 37 | p = c("far below average" = 1/6, 38 | "below average" = 1/6, 39 | "average" = 1/6, 40 | "above average" = 1/6, 41 | "far above average" = 1/6, 42 | "DK" = 1/6)) 43 | 44 | } 45 | \seealso{ 46 | Other wrapper functions: 47 | \code{\link{chisq_stat}()}, 48 | \code{\link{observe}()}, 49 | \code{\link{prop_test}()}, 50 | \code{\link{t_stat}()}, 51 | \code{\link{t_test}()} 52 | } 53 | \concept{wrapper functions} 54 | -------------------------------------------------------------------------------- /man/deprecated.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/deprecated.R 3 | \name{deprecated} 4 | \alias{deprecated} 5 | \alias{conf_int} 6 | \alias{p_value} 7 | \title{Deprecated functions and objects} 8 | \usage{ 9 | conf_int(x, level = 0.95, type = "percentile", point_estimate = NULL) 10 | 11 | p_value(x, obs_stat, direction) 12 | } 13 | \arguments{ 14 | \item{x}{See the non-deprecated function.} 15 | 16 | \item{level}{See the non-deprecated function.} 17 | 18 | \item{type}{See the non-deprecated function.} 19 | 20 | \item{point_estimate}{See the non-deprecated function.} 21 | 22 | \item{obs_stat}{See the non-deprecated function.} 23 | 24 | \item{direction}{See the non-deprecated function.} 25 | } 26 | \description{ 27 | These functions and objects should no longer be used. They will be removed 28 | in a future release of infer. 
29 | } 30 | \seealso{ 31 | \code{\link[=get_p_value]{get_p_value()}}, \code{\link[=get_confidence_interval]{get_confidence_interval()}}, \code{\link[=generate]{generate()}} 32 | } 33 | -------------------------------------------------------------------------------- /man/figures/lifecycle-archived.svg: -------------------------------------------------------------------------------- [stripped SVG badge; visible text: "lifecycle: archived"] -------------------------------------------------------------------------------- /man/figures/lifecycle-defunct.svg: -------------------------------------------------------------------------------- [stripped SVG badge; visible text: "lifecycle: defunct"] -------------------------------------------------------------------------------- /man/figures/lifecycle-deprecated.svg: -------------------------------------------------------------------------------- [stripped SVG badge; visible text: "lifecycle: deprecated"] -------------------------------------------------------------------------------- /man/figures/lifecycle-experimental.svg: -------------------------------------------------------------------------------- [stripped SVG badge; visible text: "lifecycle: experimental"] -------------------------------------------------------------------------------- /man/figures/lifecycle-maturing.svg: -------------------------------------------------------------------------------- [stripped SVG badge; visible text: "lifecycle: maturing"] -------------------------------------------------------------------------------- /man/figures/lifecycle-questioning.svg: -------------------------------------------------------------------------------- [stripped SVG badge; visible text: "lifecycle: questioning"] -------------------------------------------------------------------------------- /man/figures/lifecycle-soft-deprecated.svg: -------------------------------------------------------------------------------- [stripped SVG badge; visible text: "lifecycle: soft-deprecated"] -------------------------------------------------------------------------------- /man/figures/lifecycle-stable.svg: -------------------------------------------------------------------------------- [stripped SVG badge; visible text: "lifecycle: stable"] -------------------------------------------------------------------------------- /man/figures/lifecycle-superseded.svg: -------------------------------------------------------------------------------- [stripped SVG badge; visible text: "lifecycle: superseded"] -------------------------------------------------------------------------------- /man/figures/logo.png: --------------------------------------------------------------------------------
https://raw.githubusercontent.com/tidymodels/infer/90c91983dd5c78a50f51976141b57db516e29a05/man/figures/logo.png -------------------------------------------------------------------------------- /man/get_p_value.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/get_p_value.R 3 | \name{get_p_value} 4 | \alias{get_p_value} 5 | \alias{get_p_value.default} 6 | \alias{get_pvalue} 7 | \alias{get_p_value.infer_dist} 8 | \title{Compute p-value} 9 | \usage{ 10 | get_p_value(x, obs_stat, direction) 11 | 12 | \method{get_p_value}{default}(x, obs_stat, direction) 13 | 14 | get_pvalue(x, obs_stat, direction) 15 | 16 | \method{get_p_value}{infer_dist}(x, obs_stat, direction) 17 | } 18 | \arguments{ 19 | \item{x}{A null distribution. For simulation-based inference, a data frame 20 | containing a distribution of \code{\link[=calculate]{calculate()}}d statistics 21 | or \code{\link[=fit.infer]{fit()}}ted coefficient estimates. This object should 22 | have been passed to \code{\link[=generate]{generate()}} before being supplied or 23 | \code{\link[=calculate]{calculate()}} to \code{\link[=fit.infer]{fit()}}. For theory-based inference, 24 | the output of \code{\link[=assume]{assume()}}.} 25 | 26 | \item{obs_stat}{A data frame containing the observed statistic (in a 27 | \code{\link[=calculate]{calculate()}}-based workflow) or observed fit (in a 28 | \code{\link[=fit.infer]{fit()}}-based workflow). This object is likely the output 29 | of \code{\link[=calculate]{calculate()}} or \code{\link[=fit.infer]{fit()}} and need not 30 | to have been passed to \code{\link[=generate]{generate()}}.} 31 | 32 | \item{direction}{A character string. Options are \code{"less"}, \code{"greater"}, or 33 | \code{"two-sided"}. Can also use \code{"left"}, \code{"right"}, \code{"both"}, 34 | \code{"two_sided"}, or \code{"two sided"}, \code{"two.sided"}.} 35 | } 36 | \value{ 37 | A \link[tibble:tibble]{tibble} containing the following columns: 38 | 39 | \itemize{ 40 | \item \code{term}: The explanatory variable (or intercept) in question. Only 41 | supplied if the input had been previously passed to \code{\link[=fit.infer]{fit()}}. 42 | \item \code{p_value}: A value in [0, 1] giving the probability that a 43 | statistic/coefficient as or more extreme than the observed 44 | statistic/coefficient would occur if the null hypothesis were true. 45 | } 46 | } 47 | \description{ 48 | Compute a p-value from a null distribution and observed statistic. 49 | 50 | Learn more in \code{vignette("infer")}. 51 | } 52 | \section{Aliases}{ 53 | 54 | \code{get_pvalue()} is an alias of \code{get_p_value()}. 55 | \code{p_value} is a deprecated alias of \code{get_p_value()}. 56 | } 57 | 58 | \section{Zero p-value}{ 59 | 60 | Though a true p-value of 0 is impossible, \code{get_p_value()} may return 0 in 61 | some cases. This is due to the simulation-based nature of the \{infer\} 62 | package; the output of this function is an approximation based on 63 | the number of \code{reps} chosen in the \code{generate()} step. When the observed 64 | statistic is very unlikely given the null hypothesis, and only a small 65 | number of \code{reps} have been generated to form a null distribution, 66 | it is possible that the observed statistic will be more extreme than 67 | every test statistic generated to form the null distribution, resulting 68 | in an approximate p-value of 0. 
In this case, the true p-value is a small 69 | value likely less than \code{3/reps} (based on a poisson approximation). 70 | 71 | In the case that a p-value of zero is reported, a warning message will be 72 | raised to caution the user against reporting a p-value exactly equal to 0. 73 | } 74 | 75 | \examples{ 76 | 77 | # using a simulation-based null distribution ------------------------------ 78 | 79 | # find the point estimate---mean number of hours worked per week 80 | point_estimate <- gss |> 81 | specify(response = hours) |> 82 | calculate(stat = "mean") 83 | 84 | # starting with the gss dataset 85 | gss |> 86 | # ...we're interested in the number of hours worked per week 87 | specify(response = hours) |> 88 | # hypothesizing that the mean is 40 89 | hypothesize(null = "point", mu = 40) |> 90 | # generating data points for a null distribution 91 | generate(reps = 1000, type = "bootstrap") |> 92 | # finding the null distribution 93 | calculate(stat = "mean") |> 94 | get_p_value(obs_stat = point_estimate, direction = "two-sided") 95 | 96 | # using a theoretical null distribution ----------------------------------- 97 | 98 | # calculate the observed statistic 99 | obs_stat <- gss |> 100 | specify(response = hours) |> 101 | hypothesize(null = "point", mu = 40) |> 102 | calculate(stat = "t") 103 | 104 | # define a null distribution 105 | null_dist <- gss |> 106 | specify(response = hours) |> 107 | assume("t") 108 | 109 | # calculate a p-value 110 | get_p_value(null_dist, obs_stat, direction = "both") 111 | 112 | # using a model fitting workflow ----------------------------------------- 113 | 114 | # fit a linear model predicting number of hours worked per 115 | # week using respondent age and degree status. 116 | observed_fit <- gss |> 117 | specify(hours ~ age + college) |> 118 | fit() 119 | 120 | observed_fit 121 | 122 | # fit 100 models to resamples of the gss dataset, where the response 123 | # `hours` is permuted in each. note that this code is the same as 124 | # the above except for the addition of the `generate` step. 
125 | null_fits <- gss |> 126 | specify(hours ~ age + college) |> 127 | hypothesize(null = "independence") |> 128 | generate(reps = 100, type = "permute") |> 129 | fit() 130 | 131 | null_fits 132 | 133 | get_p_value(null_fits, obs_stat = observed_fit, direction = "two-sided") 134 | 135 | # more in-depth explanation of how to use the infer package 136 | \dontrun{ 137 | vignette("infer") 138 | } 139 | 140 | } 141 | \seealso{ 142 | Other auxillary functions: 143 | \code{\link{get_confidence_interval}()} 144 | } 145 | \concept{auxillary functions} 146 | -------------------------------------------------------------------------------- /man/gss.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/gss.R 3 | \docType{data} 4 | \name{gss} 5 | \alias{gss} 6 | \title{Subset of data from the General Social Survey (GSS).} 7 | \format{ 8 | A tibble with 500 rows and 11 variables: 9 | \describe{ 10 | \item{year}{year respondent was surveyed} 11 | \item{age}{age at time of survey, truncated at 89} 12 | \item{sex}{respondent's sex (self-identified)} 13 | \item{college}{whether or not respondent has a college degree, including 14 | junior/community college} 15 | \item{partyid}{political party affiliation} 16 | \item{hompop}{number of persons in household} 17 | \item{hours}{number of hours worked in week before survey, truncated at 89} 18 | \item{income}{total family income} 19 | \item{class}{subjective socioeconomic class identification} 20 | \item{finrela}{opinion of family income} 21 | \item{weight}{survey weight} 22 | } 23 | } 24 | \source{ 25 | \url{https://gss.norc.org} 26 | } 27 | \usage{ 28 | gss 29 | } 30 | \description{ 31 | The General Social Survey is a high-quality survey which gathers data on 32 | American society and opinions, conducted since 1972. This data set is a 33 | sample of 500 entries from the GSS, spanning years 1973-2018, 34 | including demographic markers and some 35 | economic variables. Note that this data is included for demonstration only, 36 | and should not be assumed to provide accurate estimates relating to the GSS. 37 | However, due to the high quality of the GSS, the unweighted data will 38 | approximate the weighted data in some analyses. 39 | } 40 | \keyword{datasets} 41 | -------------------------------------------------------------------------------- /man/hypothesize.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hypothesize.R 3 | \name{hypothesize} 4 | \alias{hypothesize} 5 | \alias{hypothesise} 6 | \title{Declare a null hypothesis} 7 | \usage{ 8 | hypothesize(x, null, p = NULL, mu = NULL, med = NULL, sigma = NULL) 9 | 10 | hypothesise(x, null, p = NULL, mu = NULL, med = NULL, sigma = NULL) 11 | } 12 | \arguments{ 13 | \item{x}{A data frame that can be coerced into a \link[tibble:tibble]{tibble}.} 14 | 15 | \item{null}{The null hypothesis. Options include \code{"independence"}, 16 | \code{"point"}, and \code{"paired independence"}. 17 | \itemize{ 18 | \item \code{independence}: Should be used with both a \code{response} and \code{explanatory} 19 | variable. Indicates that the values of the specified \code{response} variable 20 | are independent of the associated values in \code{explanatory}. 21 | \item \code{point}: Should be used with only a \code{response} variable.
Indicates 22 | that a point estimate based on the values in \code{response} is associated 23 | with a parameter. Sometimes requires supplying one of \code{p}, \code{mu}, \code{med}, or 24 | \code{sigma}. 25 | \item \verb{paired independence}: Should be used with only a \code{response} variable 26 | giving the pre-computed difference between paired observations. Indicates 27 | that the order of subtraction between paired values does not affect the 28 | resulting distribution. 29 | }} 30 | 31 | \item{p}{The true proportion of successes (a number between 0 and 1). To be used with point null hypotheses when the specified response 32 | variable is categorical.} 33 | 34 | \item{mu}{The true mean (any numerical value). To be used with point null 35 | hypotheses when the specified response variable is continuous.} 36 | 37 | \item{med}{The true median (any numerical value). To be used with point null 38 | hypotheses when the specified response variable is continuous.} 39 | 40 | \item{sigma}{The true standard deviation (any numerical value). To be used with 41 | point null hypotheses.} 42 | } 43 | \value{ 44 | A tibble containing the response (and explanatory, if specified) 45 | variable data with parameter information stored as well. 46 | } 47 | \description{ 48 | Declare a null hypothesis about variables selected in \code{\link[=specify]{specify()}}. 49 | 50 | Learn more in \code{vignette("infer")}. 51 | } 52 | \examples{ 53 | # hypothesize independence of two variables 54 | gss |> 55 | specify(college ~ partyid, success = "degree") |> 56 | hypothesize(null = "independence") 57 | 58 | # hypothesize a mean number of hours worked per week of 40 59 | gss |> 60 | specify(response = hours) |> 61 | hypothesize(null = "point", mu = 40) 62 | 63 | # more in-depth explanation of how to use the infer package 64 | \dontrun{ 65 | vignette("infer") 66 | } 67 | 68 | } 69 | \seealso{ 70 | Other core functions: 71 | \code{\link{calculate}()}, 72 | \code{\link{generate}()}, 73 | \code{\link{specify}()} 74 | } 75 | \concept{core functions} 76 | -------------------------------------------------------------------------------- /man/infer.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/infer.R 3 | \docType{package} 4 | \name{infer} 5 | \alias{infer-package} 6 | \alias{infer} 7 | \title{infer: a grammar for statistical inference} 8 | \description{ 9 | The objective of this package is to perform statistical inference using a 10 | grammar that illustrates the underlying concepts and a format that coheres 11 | with the tidyverse. 
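As a sketch of that grammar, using the package's \code{gss} dataset: \code{gss |> specify(response = hours) |> hypothesize(null = "point", mu = 40) |> generate(reps = 1000, type = "bootstrap") |> calculate(stat = "mean")}.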
12 | } 13 | \details{ 14 | For an overview of how to use the core functionality, see \code{vignette("infer")} 15 | } 16 | \seealso{ 17 | Useful links: 18 | \itemize{ 19 | \item \url{https://github.com/tidymodels/infer} 20 | \item \url{https://infer.tidymodels.org/} 21 | \item Report bugs at \url{https://github.com/tidymodels/infer/issues} 22 | } 23 | 24 | } 25 | \author{ 26 | \strong{Maintainer}: Simon Couch \email{simon.couch@posit.co} (\href{https://orcid.org/0000-0001-5676-5107}{ORCID}) 27 | 28 | Authors: 29 | \itemize{ 30 | \item Andrew Bray \email{abray@reed.edu} 31 | \item Chester Ismay \email{chester.ismay@gmail.com} (\href{https://orcid.org/0000-0003-2820-2547}{ORCID}) 32 | \item Evgeni Chasnovski \email{evgeni.chasnovski@gmail.com} (\href{https://orcid.org/0000-0002-1617-4019}{ORCID}) 33 | \item Ben Baumer \email{ben.baumer@gmail.com} (\href{https://orcid.org/0000-0002-3279-0516}{ORCID}) 34 | \item Mine Cetinkaya-Rundel \email{mine@stat.duke.edu} (\href{https://orcid.org/0000-0001-6452-2420}{ORCID}) 35 | } 36 | 37 | Other contributors: 38 | \itemize{ 39 | \item Ted Laderas \email{tedladeras@gmail.com} (\href{https://orcid.org/0000-0002-6207-7068}{ORCID}) [contributor] 40 | \item Nick Solomon \email{nick.solomon@datacamp.com} [contributor] 41 | \item Johanna Hardin \email{Jo.Hardin@pomona.edu} [contributor] 42 | \item Albert Y. Kim \email{albert.ys.kim@gmail.com} (\href{https://orcid.org/0000-0001-7824-306X}{ORCID}) [contributor] 43 | \item Neal Fultz \email{nfultz@gmail.com} [contributor] 44 | \item Doug Friedman \email{doug.nhp@gmail.com} [contributor] 45 | \item Richie Cotton \email{richie@datacamp.com} (\href{https://orcid.org/0000-0003-2504-802X}{ORCID}) [contributor] 46 | \item Brian Fannin \email{captain@pirategrunt.com} [contributor] 47 | } 48 | 49 | } 50 | -------------------------------------------------------------------------------- /man/pipe.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/pipe.R 3 | \name{\%>\%} 4 | \alias{\%>\%} 5 | \title{Pipe} 6 | \arguments{ 7 | \item{lhs, rhs}{Inference functions and the initial data frame.} 8 | } 9 | \description{ 10 | Like \{dplyr\}, \{infer\} also uses the pipe (\code{|>}) function 11 | from \code{magrittr} to turn function composition into a series of 12 | iterative statements. 13 | } 14 | -------------------------------------------------------------------------------- /man/print.infer.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/print_methods.R 3 | \name{print.infer} 4 | \alias{print.infer} 5 | \alias{print.infer_layer} 6 | \alias{print.infer_dist} 7 | \title{Print methods} 8 | \usage{ 9 | \method{print}{infer}(x, ...) 10 | 11 | \method{print}{infer_layer}(x, ...) 12 | 13 | \method{print}{infer_dist}(x, ...) 14 | } 15 | \arguments{ 16 | \item{x}{An object of class \code{infer}, i.e. output from \code{\link[=specify]{specify()}} or 17 | \code{\link[=hypothesize]{hypothesize()}}, or of class \code{infer_layer}, i.e. 
output from 18 | \code{\link[=shade_p_value]{shade_p_value()}} or \code{\link[=shade_confidence_interval]{shade_confidence_interval()}}.} 19 | 20 | \item{...}{Arguments passed to methods.} 21 | } 22 | \description{ 23 | Print methods 24 | } 25 | -------------------------------------------------------------------------------- /man/prop_test.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/wrappers.R 3 | \name{prop_test} 4 | \alias{prop_test} 5 | \title{Tidy proportion test} 6 | \usage{ 7 | prop_test( 8 | x, 9 | formula, 10 | response = NULL, 11 | explanatory = NULL, 12 | p = NULL, 13 | order = NULL, 14 | alternative = "two-sided", 15 | conf_int = TRUE, 16 | conf_level = 0.95, 17 | success = NULL, 18 | correct = NULL, 19 | z = FALSE, 20 | ... 21 | ) 22 | } 23 | \arguments{ 24 | \item{x}{A data frame that can be coerced into a \link[tibble:tibble]{tibble}.} 25 | 26 | \item{formula}{A formula with the response variable on the left and the 27 | explanatory on the right. Alternatively, a \code{response} and \code{explanatory} 28 | argument can be supplied.} 29 | 30 | \item{response}{The variable name in \code{x} that will serve as the response. 31 | This is an alternative to using the \code{formula} argument.} 32 | 33 | \item{explanatory}{The variable name in \code{x} that will serve as the 34 | explanatory variable. This is an alternative to using the formula argument.} 35 | 36 | \item{p}{A numeric vector giving the hypothesized null proportion of 37 | success for each group.} 38 | 39 | \item{order}{A string vector specifying the order in which the proportions 40 | should be subtracted, where \code{order = c("first", "second")} means 41 | \code{"first" - "second"}. Ignored for one-sample tests, and optional for two 42 | sample tests.} 43 | 44 | \item{alternative}{Character string giving the direction of the alternative 45 | hypothesis. Options are \code{"two-sided"} (default), \code{"greater"}, or \code{"less"}. 46 | Only used when testing the null that a single proportion equals a given 47 | value, or that two proportions are equal; ignored otherwise.} 48 | 49 | \item{conf_int}{A logical value for whether to include the confidence 50 | interval or not. \code{TRUE} by default.} 51 | 52 | \item{conf_level}{A numeric value between 0 and 1. Default value is 0.95.} 53 | 54 | \item{success}{The level of \code{response} that will be considered a success, as 55 | a string. Only used when testing the null that a single 56 | proportion equals a given value, or that two proportions are equal; 57 | ignored otherwise.} 58 | 59 | \item{correct}{A logical indicating whether Yates' continuity correction 60 | should be applied where possible. If \code{z = TRUE}, the \code{correct} argument will 61 | be overwritten as \code{FALSE}. Otherwise defaults to \code{correct = TRUE}.} 62 | 63 | \item{z}{A logical value for whether to report the statistic as a standard 64 | normal deviate or a Pearson's chi-square statistic. \eqn{z^2} is distributed 65 | chi-square with 1 degree of freedom, though note that the user will likely 66 | need to turn off Yates' continuity correction by setting \code{correct = FALSE} 67 | to see this connection.} 68 | 69 | \item{...}{Additional arguments for \link[stats:prop.test]{prop.test()}.} 70 | } 71 | \description{ 72 | A tidier version of \link[stats:prop.test]{prop.test()} for equal or given 73 | proportions. 
74 | } 75 | \details{ 76 | When testing with an explanatory variable with more than two levels, the 77 | \code{order} argument as used in the package is no longer well-defined. The function 78 | will thus raise a warning and ignore the value if supplied a non-NULL \code{order} 79 | argument. 80 | 81 | The columns present in the output depend on the output of both \code{\link[=prop.test]{prop.test()}} 82 | and \code{\link[broom:tidy.htest]{broom::glance.htest()}}. See the latter's documentation for column 83 | definitions; columns have been renamed with the following mapping: 84 | \itemize{ 85 | \item \code{chisq_df} = \code{parameter} 86 | \item \code{p_value} = \code{p.value} 87 | \item \code{lower_ci} = \code{conf.low} 88 | \item \code{upper_ci} = \code{conf.high} 89 | } 90 | } 91 | \examples{ 92 | # two-sample proportion test for difference in proportions of 93 | # college completion by respondent sex 94 | prop_test(gss, 95 | college ~ sex, 96 | order = c("female", "male")) 97 | 98 | # one-sample proportion test for hypothesized null 99 | # proportion of college completion of .2 100 | prop_test(gss, 101 | college ~ NULL, 102 | p = .2) 103 | 104 | # report as a z-statistic rather than chi-square 105 | # and specify the success level of the response 106 | prop_test(gss, 107 | college ~ NULL, 108 | success = "degree", 109 | p = .2, 110 | z = TRUE) 111 | 112 | } 113 | \seealso{ 114 | Other wrapper functions: 115 | \code{\link{chisq_stat}()}, 116 | \code{\link{chisq_test}()}, 117 | \code{\link{observe}()}, 118 | \code{\link{t_stat}()}, 119 | \code{\link{t_test}()} 120 | } 121 | \concept{wrapper functions} 122 | -------------------------------------------------------------------------------- /man/reexports.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/fit.R, R/visualize.R 3 | \docType{import} 4 | \name{reexports} 5 | \alias{reexports} 6 | \alias{fit} 7 | \alias{ggplot_add} 8 | \title{Objects exported from other packages} 9 | \details{ 10 | Read more about infer's \link[=fit.infer]{fit} function \link[=fit.infer]{here} or 11 | by running \code{?fit.infer} in your console. 12 | } 13 | \keyword{internal} 14 | \description{ 15 | These objects are imported from other packages. Follow the links 16 | below to see their documentation. 17 | 18 | \describe{ 19 | \item{generics}{\code{\link[generics]{fit}}} 20 | 21 | \item{ggplot2}{\code{\link[ggplot2]{ggplot_add}}} 22 | }} 23 | 24 | -------------------------------------------------------------------------------- /man/rep_sample_n.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/rep_sample_n.R 3 | \name{rep_sample_n} 4 | \alias{rep_sample_n} 5 | \alias{rep_slice_sample} 6 | \title{Perform repeated sampling} 7 | \usage{ 8 | rep_sample_n(tbl, size, replace = FALSE, reps = 1, prob = NULL) 9 | 10 | rep_slice_sample( 11 | .data, 12 | n = NULL, 13 | prop = NULL, 14 | replace = FALSE, 15 | weight_by = NULL, 16 | reps = 1 17 | ) 18 | } 19 | \arguments{ 20 | \item{tbl, .data}{Data frame of population from which to sample.} 21 | 22 | \item{size, n, prop}{\code{size} and \code{n} refer to the sample size of each sample. 23 | The \code{size} argument to \code{rep_sample_n()} is required, while in 24 | \code{rep_slice_sample()} sample size defaults to 1 if not specified. 
\code{prop}, an 25 | argument to \code{rep_slice_sample()}, refers to the proportion of rows to sample 26 | in each sample, and is rounded down in the case that \code{prop * nrow(.data)} is 27 | not an integer. When using \code{rep_slice_sample()}, please only supply one of 28 | \code{n} or \code{prop}.} 29 | 30 | \item{replace}{Should samples be taken with replacement?} 31 | 32 | \item{reps}{Number of samples to take.} 33 | 34 | \item{prob, weight_by}{A vector of sampling weights for each of the rows in 35 | \code{.data}—must have length equal to \code{nrow(.data)}. For \code{weight_by}, this 36 | may also be an unquoted column name in \code{.data}.} 37 | } 38 | \value{ 39 | A tibble of size \code{reps * n} rows corresponding to \code{reps} 40 | samples of size \code{n} from \code{.data}, grouped by \code{replicate}. 41 | } 42 | \description{ 43 | These functions extend the functionality of \code{\link[dplyr:sample_n]{dplyr::sample_n()}} and 44 | \code{\link[dplyr:slice]{dplyr::slice_sample()}} by allowing for repeated sampling of data. 45 | This operation is especially helpful while creating sampling 46 | distributions—see the examples below! 47 | } 48 | \details{ 49 | \code{rep_sample_n()} and \code{rep_slice_sample()} are designed to behave similar to 50 | their dplyr counterparts. As such, they have at least the following 51 | differences: 52 | \itemize{ 53 | \item In case \code{replace = FALSE} having \code{size} bigger than number of data rows in 54 | \code{rep_sample_n()} will give an error. In \code{rep_slice_sample()} having such \code{n} 55 | or \code{prop > 1} will give warning and output sample size will be set to number 56 | of rows in data. 57 | } 58 | 59 | Note that the \code{\link[dplyr:sample_n]{dplyr::sample_n()}} function has been superseded by 60 | \code{\link[dplyr:slice]{dplyr::slice_sample()}}. 61 | } 62 | \examples{ 63 | library(dplyr) 64 | library(ggplot2) 65 | library(tibble) 66 | 67 | # take 1000 samples of size n = 50, without replacement 68 | slices <- gss |> 69 | rep_slice_sample(n = 50, reps = 1000) 70 | 71 | slices 72 | 73 | # compute the proportion of respondents with a college 74 | # degree in each replicate 75 | p_hats <- slices |> 76 | group_by(replicate) |> 77 | summarize(prop_college = mean(college == "degree")) 78 | 79 | # plot sampling distribution 80 | ggplot(p_hats, aes(x = prop_college)) + 81 | geom_density() + 82 | labs( 83 | x = "p_hat", y = "Number of samples", 84 | title = "Sampling distribution of p_hat" 85 | ) 86 | 87 | # sampling with probability weights. 
Note probabilities are automatically 88 | # renormalized to sum to 1 89 | df <- tibble( 90 | id = 1:5, 91 | letter = factor(c("a", "b", "c", "d", "e")) 92 | ) 93 | 94 | rep_slice_sample(df, n = 2, reps = 5, weight_by = c(.5, .4, .3, .2, .1)) 95 | 96 | # alternatively, pass an unquoted column name in `.data` as `weight_by` 97 | df <- df |> mutate(wts = c(.5, .4, .3, .2, .1)) 98 | 99 | rep_slice_sample(df, n = 2, reps = 5, weight_by = wts) 100 | } 101 | -------------------------------------------------------------------------------- /man/shade_confidence_interval.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/shade_confidence_interval.R 3 | \name{shade_confidence_interval} 4 | \alias{shade_confidence_interval} 5 | \alias{shade_ci} 6 | \title{Add information about confidence interval} 7 | \usage{ 8 | shade_confidence_interval( 9 | endpoints, 10 | color = "mediumaquamarine", 11 | fill = "turquoise", 12 | ... 13 | ) 14 | 15 | shade_ci(endpoints, color = "mediumaquamarine", fill = "turquoise", ...) 16 | } 17 | \arguments{ 18 | \item{endpoints}{The lower and upper bounds of the interval to be plotted. 19 | Likely, this will be the output of \code{\link[=get_confidence_interval]{get_confidence_interval()}}. 20 | For \code{\link[=calculate]{calculate()}}-based workflows, this will be a 2-element vector 21 | or a \verb{1 x 2} data frame containing the lower and upper values to be plotted. 22 | For \code{\link[=fit.infer]{fit()}}-based workflows, a \verb{(p + 1) x 3} data frame 23 | with columns \code{term}, \code{lower_ci}, and \code{upper_ci}, giving the upper and 24 | lower bounds for each regression term. For use in visualizations of 25 | \code{\link[=assume]{assume()}} output, this must be the output of \code{\link[=get_confidence_interval]{get_confidence_interval()}}.} 26 | 27 | \item{color}{A character or hex string specifying the color of the 28 | end points as a vertical lines on the plot.} 29 | 30 | \item{fill}{A character or hex string specifying the color to shade the 31 | confidence interval. If \code{NULL} then no shading is actually done.} 32 | 33 | \item{...}{Other arguments passed along to ggplot2 functions.} 34 | } 35 | \value{ 36 | If added to an existing infer visualization, a ggplot2 37 | object displaying the supplied intervals on top of its corresponding 38 | distribution. Otherwise, an \code{infer_layer} list. 39 | } 40 | \description{ 41 | \code{shade_confidence_interval()} plots a confidence interval region on top of 42 | \code{\link[=visualize]{visualize()}} output. The output is a ggplot2 layer that can be added with 43 | \code{+}. The function has a shorter alias, \code{shade_ci()}. 44 | 45 | Learn more in \code{vignette("infer")}. 
46 | } 47 | \examples{ 48 | # find the point estimate---mean number of hours worked per week 49 | point_estimate <- gss |> 50 | specify(response = hours) |> 51 | calculate(stat = "mean") 52 | 53 | # ...and a bootstrap distribution 54 | boot_dist <- gss |> 55 | # ...we're interested in the number of hours worked per week 56 | specify(response = hours) |> 57 | # generating data points 58 | generate(reps = 1000, type = "bootstrap") |> 59 | # finding the distribution from the generated data 60 | calculate(stat = "mean") 61 | 62 | # find a confidence interval around the point estimate 63 | ci <- boot_dist |> 64 | get_confidence_interval(point_estimate = point_estimate, 65 | # at the 95\% confidence level 66 | level = .95, 67 | # using the standard error method 68 | type = "se") 69 | 70 | 71 | # and plot it! 72 | boot_dist |> 73 | visualize() + 74 | shade_confidence_interval(ci) 75 | 76 | # or just plot the bounds 77 | boot_dist |> 78 | visualize() + 79 | shade_confidence_interval(ci, fill = NULL) 80 | 81 | # you can shade confidence intervals on top of 82 | # theoretical distributions, too---the theoretical 83 | # distribution will be recentered and rescaled to 84 | # align with the confidence interval 85 | sampling_dist <- gss |> 86 | specify(response = hours) |> 87 | assume(distribution = "t") 88 | 89 | visualize(sampling_dist) + 90 | shade_confidence_interval(ci) 91 | 92 | \donttest{ 93 | # to visualize distributions of coefficients for multiple 94 | # explanatory variables, use a `fit()`-based workflow 95 | 96 | # fit 1000 linear models with the `hours` variable permuted 97 | null_fits <- gss |> 98 | specify(hours ~ age + college) |> 99 | hypothesize(null = "independence") |> 100 | generate(reps = 1000, type = "permute") |> 101 | fit() 102 | 103 | null_fits 104 | 105 | # fit a linear model to the observed data 106 | obs_fit <- gss |> 107 | specify(hours ~ age + college) |> 108 | fit() 109 | 110 | obs_fit 111 | 112 | # get confidence intervals for each term 113 | conf_ints <- 114 | get_confidence_interval( 115 | null_fits, 116 | point_estimate = obs_fit, 117 | level = .95 118 | ) 119 | 120 | # visualize distributions of coefficients 121 | # generated under the null 122 | visualize(null_fits) 123 | 124 | # add a confidence interval shading layer to juxtapose 125 | # the null fits with the observed fit for each term 126 | visualize(null_fits) + 127 | shade_confidence_interval(conf_ints) 128 | } 129 | 130 | # more in-depth explanation of how to use the infer package 131 | \dontrun{ 132 | vignette("infer") 133 | } 134 | 135 | } 136 | \seealso{ 137 | Other visualization functions: 138 | \code{\link{shade_p_value}()} 139 | } 140 | \concept{visualization functions} 141 | -------------------------------------------------------------------------------- /man/shade_p_value.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/shade_p_value.R 3 | \name{shade_p_value} 4 | \alias{shade_p_value} 5 | \alias{shade_pvalue} 6 | \title{Shade histogram area beyond an observed statistic} 7 | \usage{ 8 | shade_p_value(obs_stat, direction, color = "red2", fill = "pink", ...) 9 | 10 | shade_pvalue(obs_stat, direction, color = "red2", fill = "pink", ...) 11 | } 12 | \arguments{ 13 | \item{obs_stat}{The observed statistic or estimate. 
For 14 | \code{\link[=calculate]{calculate()}}-based workflows, this will be a 1-element numeric vector or 15 | a \verb{1 x 1} data frame containing the observed statistic. 16 | For \code{\link[=fit.infer]{fit()}}-based workflows, a \verb{(p + 1) x 2} data frame 17 | with columns \code{term} and \code{estimate} giving the observed estimate for 18 | each term.} 19 | 20 | \item{direction}{A string specifying in which direction the shading should 21 | occur. Options are \code{"less"}, \code{"greater"}, or \code{"two-sided"}. Can 22 | also give \code{"left"}, \code{"right"}, \code{"both"}, \code{"two_sided"}, \code{"two sided"}, 23 | or \code{"two.sided"}. If \code{NULL}, the function will not shade any area.} 24 | 25 | \item{color}{A character or hex string specifying the color of the observed 26 | statistic as a vertical line on the plot.} 27 | 28 | \item{fill}{A character or hex string specifying the color to shade the 29 | p-value region. If \code{NULL}, the function will not shade any area.} 30 | 31 | \item{...}{Other arguments passed along to ggplot2 functions. 32 | For expert use only.} 33 | } 34 | \value{ 35 | If added to an existing infer visualization, a ggplot2 36 | object displaying the supplied statistic on top of its corresponding 37 | distribution. Otherwise, an \code{infer_layer} list. 38 | } 39 | \description{ 40 | \code{shade_p_value()} plots a p-value region on top of 41 | \code{\link[=visualize]{visualize()}} output. The output is a ggplot2 layer that can be added with 42 | \code{+}. The function has a shorter alias, \code{shade_pvalue()}. 43 | 44 | Learn more in \code{vignette("infer")}. 45 | } 46 | \examples{ 47 | # find the point estimate---mean number of hours worked per week 48 | point_estimate <- gss |> 49 | specify(response = hours) |> 50 | hypothesize(null = "point", mu = 40) |> 51 | calculate(stat = "t") 52 | 53 | # ...and a null distribution 54 | null_dist <- gss |> 55 | # ...we're interested in the number of hours worked per week 56 | specify(response = hours) |> 57 | # hypothesizing that the mean is 40 58 | hypothesize(null = "point", mu = 40) |> 59 | # generating data points for a null distribution 60 | generate(reps = 1000, type = "bootstrap") |> 61 | # estimating the null distribution 62 | calculate(stat = "t") 63 | 64 | # shade the p-value of the point estimate 65 | null_dist |> 66 | visualize() + 67 | shade_p_value(obs_stat = point_estimate, direction = "two-sided") 68 | 69 | # you can shade confidence intervals on top of 70 | # theoretical distributions, too! 
71 | null_dist_theory <- gss |> 72 | specify(response = hours) |> 73 | assume(distribution = "t") 74 | 75 | null_dist_theory |> 76 | visualize() + 77 | shade_p_value(obs_stat = point_estimate, direction = "two-sided") 78 | 79 | \donttest{ 80 | # to visualize distributions of coefficients for multiple 81 | # explanatory variables, use a `fit()`-based workflow 82 | 83 | # fit 1000 linear models with the `hours` variable permuted 84 | null_fits <- gss |> 85 | specify(hours ~ age + college) |> 86 | hypothesize(null = "independence") |> 87 | generate(reps = 1000, type = "permute") |> 88 | fit() 89 | 90 | null_fits 91 | 92 | # fit a linear model to the observed data 93 | obs_fit <- gss |> 94 | specify(hours ~ age + college) |> 95 | fit() 96 | 97 | obs_fit 98 | 99 | # visualize distributions of coefficients 100 | # generated under the null 101 | visualize(null_fits) 102 | 103 | # add a p-value shading layer to juxtapose the null 104 | # fits with the observed fit for each term 105 | visualize(null_fits) + 106 | shade_p_value(obs_fit, direction = "both") 107 | 108 | # the direction argument will be applied 109 | # to the plot for each term 110 | visualize(null_fits) + 111 | shade_p_value(obs_fit, direction = "left") 112 | } 113 | 114 | # more in-depth explanation of how to use the infer package 115 | \dontrun{ 116 | vignette("infer") 117 | } 118 | 119 | } 120 | \seealso{ 121 | Other visualization functions: 122 | \code{\link{shade_confidence_interval}()} 123 | } 124 | \concept{visualization functions} 125 | -------------------------------------------------------------------------------- /man/specify.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/specify.R 3 | \name{specify} 4 | \alias{specify} 5 | \title{Specify response and explanatory variables} 6 | \usage{ 7 | specify(x, formula, response = NULL, explanatory = NULL, success = NULL) 8 | } 9 | \arguments{ 10 | \item{x}{A data frame that can be coerced into a \link[tibble:tibble]{tibble}.} 11 | 12 | \item{formula}{A formula with the response variable on the left and the 13 | explanatory on the right. Alternatively, a \code{response} and \code{explanatory} 14 | argument can be supplied.} 15 | 16 | \item{response}{The variable name in \code{x} that will serve as the response. 17 | This is an alternative to using the \code{formula} argument.} 18 | 19 | \item{explanatory}{The variable name in \code{x} that will serve as the 20 | explanatory variable. This is an alternative to using the formula argument.} 21 | 22 | \item{success}{The level of \code{response} that will be considered a success, as 23 | a string. Needed for inference on one proportion, a difference in 24 | proportions, and corresponding z stats.} 25 | } 26 | \value{ 27 | A tibble containing the response (and explanatory, if specified) 28 | variable data. 29 | } 30 | \description{ 31 | \code{specify()} is used to specify which columns in the supplied data frame are 32 | the relevant response (and, if applicable, explanatory) variables. Note that 33 | character variables are converted to \code{factor}s. 34 | 35 | Learn more in \code{vignette("infer")}. 36 | } 37 | \examples{ 38 | # specifying for a point estimate on one variable 39 | gss |> 40 | specify(response = age) 41 | 42 | # specify a relationship between variables as a formula... 43 | gss |> 44 | specify(age ~ partyid) 45 | 46 | # ...or with named arguments! 
47 | gss |> 48 | specify(response = age, explanatory = partyid) 49 | 50 | # more in-depth explanation of how to use the infer package 51 | \dontrun{ 52 | vignette("infer") 53 | } 54 | 55 | } 56 | \seealso{ 57 | Other core functions: 58 | \code{\link{calculate}()}, 59 | \code{\link{generate}()}, 60 | \code{\link{hypothesize}()} 61 | } 62 | \concept{core functions} 63 | -------------------------------------------------------------------------------- /man/t_stat.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/wrappers.R 3 | \name{t_stat} 4 | \alias{t_stat} 5 | \title{Tidy t-test statistic} 6 | \usage{ 7 | t_stat( 8 | x, 9 | formula, 10 | response = NULL, 11 | explanatory = NULL, 12 | order = NULL, 13 | alternative = "two-sided", 14 | mu = 0, 15 | conf_int = FALSE, 16 | conf_level = 0.95, 17 | ... 18 | ) 19 | } 20 | \arguments{ 21 | \item{x}{A data frame that can be coerced into a \link[tibble:tibble]{tibble}.} 22 | 23 | \item{formula}{A formula with the response variable on the left and the 24 | explanatory on the right. Alternatively, a \code{response} and \code{explanatory} 25 | argument can be supplied.} 26 | 27 | \item{response}{The variable name in \code{x} that will serve as the response. 28 | This is an alternative to using the \code{formula} argument.} 29 | 30 | \item{explanatory}{The variable name in \code{x} that will serve as the 31 | explanatory variable. This is an alternative to using the formula argument.} 32 | 33 | \item{order}{A string vector of specifying the order in which the levels of 34 | the explanatory variable should be ordered for subtraction, where \code{order = c("first", "second")} means \code{("first" - "second")}.} 35 | 36 | \item{alternative}{Character string giving the direction of the alternative 37 | hypothesis. Options are \code{"two-sided"} (default), \code{"greater"}, or \code{"less"}.} 38 | 39 | \item{mu}{A numeric value giving the hypothesized null mean value for a one 40 | sample test and the hypothesized difference for a two sample test.} 41 | 42 | \item{conf_int}{A logical value for whether to include the confidence 43 | interval or not. \code{TRUE} by default.} 44 | 45 | \item{conf_level}{A numeric value between 0 and 1. Default value is 0.95.} 46 | 47 | \item{...}{Pass in arguments to infer functions.} 48 | } 49 | \description{ 50 | A shortcut wrapper function to get the observed test statistic for a t test. 51 | This function has been deprecated in favor of the more general \code{\link[=observe]{observe()}}. 
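As a sketch of the replacement (assuming a one-sample t statistic is wanted), a call such as \code{t_stat(gss, response = hours, mu = 40)} can be written as \code{observe(gss, response = hours, null = "point", mu = 40, stat = "t")}.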
52 | } 53 | \examples{ 54 | library(tidyr) 55 | 56 | # t test statistic for true mean number of hours worked 57 | # per week of 40 58 | gss |> 59 | t_stat(response = hours, mu = 40) 60 | 61 | # t test statistic for number of hours worked per week 62 | # by college degree status 63 | gss |> 64 | tidyr::drop_na(college) |> 65 | t_stat(formula = hours ~ college, 66 | order = c("degree", "no degree"), 67 | alternative = "two-sided") 68 | 69 | } 70 | \seealso{ 71 | Other wrapper functions: 72 | \code{\link{chisq_stat}()}, 73 | \code{\link{chisq_test}()}, 74 | \code{\link{observe}()}, 75 | \code{\link{prop_test}()}, 76 | \code{\link{t_test}()} 77 | 78 | Other functions for calculating observed statistics: 79 | \code{\link{chisq_stat}()}, 80 | \code{\link{observe}()} 81 | } 82 | \concept{functions for calculating observed statistics} 83 | \concept{wrapper functions} 84 | -------------------------------------------------------------------------------- /man/t_test.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/wrappers.R 3 | \name{t_test} 4 | \alias{t_test} 5 | \title{Tidy t-test} 6 | \usage{ 7 | t_test( 8 | x, 9 | formula, 10 | response = NULL, 11 | explanatory = NULL, 12 | order = NULL, 13 | alternative = "two-sided", 14 | mu = 0, 15 | conf_int = TRUE, 16 | conf_level = 0.95, 17 | ... 18 | ) 19 | } 20 | \arguments{ 21 | \item{x}{A data frame that can be coerced into a \link[tibble:tibble]{tibble}.} 22 | 23 | \item{formula}{A formula with the response variable on the left and the 24 | explanatory on the right. Alternatively, a \code{response} and \code{explanatory} 25 | argument can be supplied.} 26 | 27 | \item{response}{The variable name in \code{x} that will serve as the response. 28 | This is an alternative to using the \code{formula} argument.} 29 | 30 | \item{explanatory}{The variable name in \code{x} that will serve as the 31 | explanatory variable. This is an alternative to using the formula argument.} 32 | 33 | \item{order}{A string vector of specifying the order in which the levels of 34 | the explanatory variable should be ordered for subtraction, where \code{order = c("first", "second")} means \code{("first" - "second")}.} 35 | 36 | \item{alternative}{Character string giving the direction of the alternative 37 | hypothesis. Options are \code{"two-sided"} (default), \code{"greater"}, or \code{"less"}.} 38 | 39 | \item{mu}{A numeric value giving the hypothesized null mean value for a one 40 | sample test and the hypothesized difference for a two sample test.} 41 | 42 | \item{conf_int}{A logical value for whether to include the confidence 43 | interval or not. \code{TRUE} by default.} 44 | 45 | \item{conf_level}{A numeric value between 0 and 1. Default value is 0.95.} 46 | 47 | \item{...}{For passing in other arguments to \link[stats:t.test]{t.test()}.} 48 | } 49 | \description{ 50 | A tidier version of \link[stats:t.test]{t.test()} for two sample tests. 
51 | } 52 | \examples{ 53 | library(tidyr) 54 | 55 | # t test for number of hours worked per week 56 | # by college degree status 57 | gss |> 58 | tidyr::drop_na(college) |> 59 | t_test(formula = hours ~ college, 60 | order = c("degree", "no degree"), 61 | alternative = "two-sided") 62 | 63 | # see vignette("infer") for more explanation of the 64 | # intuition behind the infer package, and vignette("t_test") 65 | # for more examples of t-tests using infer 66 | 67 | } 68 | \seealso{ 69 | Other wrapper functions: 70 | \code{\link{chisq_stat}()}, 71 | \code{\link{chisq_test}()}, 72 | \code{\link{observe}()}, 73 | \code{\link{prop_test}()}, 74 | \code{\link{t_stat}()} 75 | } 76 | \concept{wrapper functions} 77 | -------------------------------------------------------------------------------- /pkgdown/favicon/apple-touch-icon-120x120.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tidymodels/infer/90c91983dd5c78a50f51976141b57db516e29a05/pkgdown/favicon/apple-touch-icon-120x120.png -------------------------------------------------------------------------------- /pkgdown/favicon/apple-touch-icon-152x152.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tidymodels/infer/90c91983dd5c78a50f51976141b57db516e29a05/pkgdown/favicon/apple-touch-icon-152x152.png -------------------------------------------------------------------------------- /pkgdown/favicon/apple-touch-icon-180x180.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tidymodels/infer/90c91983dd5c78a50f51976141b57db516e29a05/pkgdown/favicon/apple-touch-icon-180x180.png -------------------------------------------------------------------------------- /pkgdown/favicon/apple-touch-icon-60x60.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tidymodels/infer/90c91983dd5c78a50f51976141b57db516e29a05/pkgdown/favicon/apple-touch-icon-60x60.png -------------------------------------------------------------------------------- /pkgdown/favicon/apple-touch-icon-76x76.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tidymodels/infer/90c91983dd5c78a50f51976141b57db516e29a05/pkgdown/favicon/apple-touch-icon-76x76.png -------------------------------------------------------------------------------- /pkgdown/favicon/apple-touch-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tidymodels/infer/90c91983dd5c78a50f51976141b57db516e29a05/pkgdown/favicon/apple-touch-icon.png -------------------------------------------------------------------------------- /pkgdown/favicon/favicon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tidymodels/infer/90c91983dd5c78a50f51976141b57db516e29a05/pkgdown/favicon/favicon-16x16.png -------------------------------------------------------------------------------- /pkgdown/favicon/favicon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tidymodels/infer/90c91983dd5c78a50f51976141b57db516e29a05/pkgdown/favicon/favicon-32x32.png -------------------------------------------------------------------------------- /pkgdown/favicon/favicon.ico: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/tidymodels/infer/90c91983dd5c78a50f51976141b57db516e29a05/pkgdown/favicon/favicon.ico -------------------------------------------------------------------------------- /tests/testthat.R: -------------------------------------------------------------------------------- 1 | # This file is part of the standard setup for testthat. 2 | # It is recommended that you do not modify it. 3 | # 4 | # Where should you do additional test configuration? 5 | # Learn more about the roles of various files in: 6 | # * https://r-pkgs.org/tests.html 7 | # * https://testthat.r-lib.org/reference/test_package.html#special-files 8 | 9 | library(testthat) 10 | library(infer) 11 | 12 | test_check("infer") 13 | -------------------------------------------------------------------------------- /tests/testthat/_snaps/aliases.md: -------------------------------------------------------------------------------- 1 | # old aliases produce informative error 2 | 3 | Code 4 | res <- p_value(gss_calc, obs_stat = -0.2, direction = "right") 5 | Condition 6 | Error: 7 | ! `conf_int()` was deprecated in infer 0.4.0 and is now defunct. 8 | i Please use `get_p_value()` instead. 9 | 10 | --- 11 | 12 | Code 13 | res_ <- conf_int(gss_permute) 14 | Condition 15 | Error: 16 | ! `conf_int()` was deprecated in infer 0.4.0 and is now defunct. 17 | i Please use `get_confidence_interval()` instead. 18 | 19 | -------------------------------------------------------------------------------- /tests/testthat/_snaps/assume.md: -------------------------------------------------------------------------------- 1 | # assume errors with bad arguments 2 | 3 | Code 4 | assume(hypothesize(specify(gss, age ~ college), null = "independence"), "boop", 5 | nrow(gss) - 1) 6 | Condition 7 | Error in `assume()`: 8 | ! The distribution argument must be one of "Chisq", "F", "t", or "z". 9 | 10 | --- 11 | 12 | Code 13 | assume(hypothesize(specify(gss, age ~ college), null = "independence"), "t", c( 14 | nrow(gss) - 1, 2)) 15 | Condition 16 | Error in `assume()`: 17 | ! A T distribution requires 1 degrees of freedom argument, but 2 were supplied. 18 | 19 | --- 20 | 21 | Code 22 | assume(hypothesize(specify(gss, age ~ partyid), null = "independence"), "F", 23 | nrow(gss) - 1) 24 | Message 25 | Dropping unused factor levels DK from the supplied explanatory variable 'partyid'. 26 | Condition 27 | Error in `assume()`: 28 | ! An F distribution requires 2 degrees of freedom arguments, but 1 was supplied. 29 | 30 | --- 31 | 32 | Code 33 | assume(hypothesize(specify(gss, age ~ partyid), null = "independence"), "F", 34 | "boop") 35 | Message 36 | Dropping unused factor levels DK from the supplied explanatory variable 'partyid'. 37 | Condition 38 | Error in `assume()`: 39 | ! `assume()` expects the `df` argument to be a numeric vector, but you supplied a character object. 40 | 41 | --- 42 | 43 | Code 44 | assume(hypothesize(specify(gss, age ~ partyid), null = "independence"), "F", 45 | nrow(gss) - 1, 1) 46 | Message 47 | Dropping unused factor levels DK from the supplied explanatory variable 'partyid'. 48 | Condition 49 | Error in `assume()`: 50 | ! `assume()` ignores the dots `...` argument, though the argument were supplied. 51 | i Did you forget to concatenate the `df` argument with `c()`? 
52 | 53 | --- 54 | 55 | Code 56 | assume(hypothesize(specify(gss, age ~ partyid), null = "independence"), "F", 57 | nrow(gss) - 1, 1, 2) 58 | Message 59 | Dropping unused factor levels DK from the supplied explanatory variable 'partyid'. 60 | Condition 61 | Error in `assume()`: 62 | ! `assume()` ignores the dots `...` argument, though the arguments were supplied. 63 | i Did you forget to concatenate the `df` argument with `c()`? 64 | 65 | --- 66 | 67 | Code 68 | assume(hypothesize(specify(gss, age ~ finrela), null = "independence"), "t", 69 | nrow(gss) - 1) 70 | Condition 71 | Error in `assume()`: 72 | ! The supplied distribution "t" is not well-defined for a numeric response variable (age) and a multinomial categorical explanatory variable (finrela). 73 | 74 | --- 75 | 76 | Code 77 | assume(hypothesize(specify(gss, age ~ finrela), null = "independence"), "z", 78 | nrow(gss) - 1) 79 | Condition 80 | Error in `assume()`: 81 | ! The supplied distribution "z" is not well-defined for a numeric response variable (age) and a multinomial categorical explanatory variable (finrela). 82 | 83 | --- 84 | 85 | Code 86 | assume(hypothesize(specify(gss, age ~ NULL), null = "point", mu = 40), "z", 87 | nrow(gss) - 1) 88 | Condition 89 | Error in `assume()`: 90 | ! The supplied distribution "z" is not well-defined for a numeric response variable (age) and no explanatory variable. 91 | 92 | --- 93 | 94 | Code 95 | assume(gss, "z", nrow(gss) - 1) 96 | Condition 97 | Error in `assume()`: 98 | ! The `x` argument must be the output of a core infer function, likely `specify()` or `hypothesize()`. 99 | 100 | --- 101 | 102 | Code 103 | assume("boop", "z", nrow(gss) - 1) 104 | Condition 105 | Error in `assume()`: 106 | ! The `x` argument must be the output of a core infer function, likely `specify()` or `hypothesize()`. 107 | 108 | # assume() handles automatic df gracefully 109 | 110 | Code 111 | res_ <- assume(hypothesize(specify(gss, response = hours), null = "point", mu = 40), 112 | "t", nrow(gss) - 2) 113 | Message 114 | Message: The supplied `df` argument does not match its expected value. If this is unexpected, ensure that your calculation for `df` is correct (see `assume()` (`?infer::assume()`) for recognized values) or supply `df = NULL` to `assume()`. 115 | 116 | -------------------------------------------------------------------------------- /tests/testthat/_snaps/fit.md: -------------------------------------------------------------------------------- 1 | # fit.infer messages informatively on excessive null 2 | 3 | Code 4 | res_ <- fit(hypothesize(specify(gss, hours ~ age + college), null = "independence")) 5 | Message 6 | Message: The independence null hypothesis does not inform calculation of the observed fit and will be ignored. 7 | 8 | # fit.infer logistic regression works 9 | 10 | Code 11 | fit(specify(gss, finrela ~ age + college)) 12 | Condition 13 | Error in `fit()`: 14 | ! infer does not support fitting models for categorical response variables with more than two levels. 15 | i Please see `multinom_reg()` from the parsnip package. 16 | 17 | -------------------------------------------------------------------------------- /tests/testthat/_snaps/get_p_value.md: -------------------------------------------------------------------------------- 1 | # direction is appropriate 2 | 3 | Code 4 | get_p_value(test_df, obs_stat = 0.5, direction = "righ") 5 | Condition 6 | Error in `get_p_value()`: 7 | ! The provided value for `direction` is not appropriate. 
Possible values are "less", "greater", "two-sided", "left", "right", "both", "two_sided", "two sided", or "two.sided". 8 | 9 | # theoretical p-value not supported error 10 | 11 | Code 12 | get_p_value(calculate(hypothesize(specify(gss_tbl, hours ~ partyid), null = "independence"), 13 | stat = "F"), obs_stat = obs_F, direction = "right") 14 | Condition 15 | Error in `get_p_value()`: 16 | ! Theoretical p-values are not yet supported. 17 | i `x` should be the result of calling `generate()`. 18 | 19 | # get_p_value warns in case of zero p-value 20 | 21 | Code 22 | res_ <- get_p_value(gss_calc, obs_stat = -10, direction = "left") 23 | Condition 24 | Warning: 25 | Please be cautious in reporting a p-value of 0. This result is an approximation based on the number of `reps` chosen in the `generate()` step. 26 | i See `get_p_value()` (`?infer::get_p_value()`) for more information. 27 | 28 | # get_p_value throws error in case of `NaN` stat 29 | 30 | Code 31 | res_ <- get_p_value(gss_calc, 0, "both") 32 | Condition 33 | Error: 34 | ! 1 calculated statistic was `NaN`. Simulation-based p-values are not well-defined for null distributions with non-finite values. 35 | i See `calculate()` (`?infer::calculate()`) for more details. 36 | 37 | --- 38 | 39 | Code 40 | res_ <- get_p_value(gss_calc, 0, "both") 41 | Condition 42 | Error: 43 | ! 2 calculated statistics were `NaN`. Simulation-based p-values are not well-defined for null distributions with non-finite values. 44 | i See `calculate()` (`?infer::calculate()`) for more details. 45 | 46 | --- 47 | 48 | Code 49 | res_ <- get_p_value(gss_calc, 0, "both") 50 | Condition 51 | Error: 52 | ! All calculated statistics were `NaN`. 53 | i See `calculate()` (`?infer::calculate()`) for more details. 54 | 55 | # get_p_value can handle fitted objects 56 | 57 | Code 58 | get_p_value(null_fits, obs_fit_2, "both") 59 | Condition 60 | Error in `get_p_value()`: 61 | ! The explanatory variables used to generate the distribution of null fits are not the same used to fit the observed data. 62 | 63 | --- 64 | 65 | Code 66 | get_p_value(null_fits, obs_fit_3, "both") 67 | Condition 68 | Error in `get_p_value()`: 69 | ! The response variable of the null fits (hours) is not the same as that of the observed fit (year). 70 | 71 | # get_p_value can handle bad args with fitted objects 72 | 73 | Code 74 | get_p_value(null_fits, "boop", "both") 75 | Condition 76 | Error in `get_p_value()`: 77 | ! The `obs_stat` argument should be the output of `fit()`. 78 | i See the documentation with `?get_p_value`. 79 | 80 | --- 81 | 82 | Code 83 | get_p_value(null_fits, obs_fit$estimate, "both") 84 | Condition 85 | Error in `get_p_value()`: 86 | ! The `obs_stat` argument should be the output of `fit()`. 87 | i See the documentation with `?get_p_value`. 88 | 89 | --- 90 | 91 | Code 92 | get_p_value(obs_fit, null_fits, "both") 93 | Condition 94 | Error in `get_p_value()`: 95 | ! The `x` argument needs to be passed to `generate()` before `fit()`. 96 | 97 | # get_p_value errors informatively when args are switched 98 | 99 | Code 100 | get_p_value(obs_stat, null_dist, "both") 101 | Condition 102 | Error in `get_p_value()`: 103 | ! It seems like the `obs_stat` argument has been passed to `get_p_value()` as the first argument when `get_p_value()` expects `x`, a distribution of statistics or coefficient estimates, as the first argument. 104 | i Have you mistakenly switched the order of `obs_stat` and `x`? 
105 | 106 | # get_p_value can handle theoretical distributions 107 | 108 | Code 109 | old_way <- chisq_test(gss, college ~ finrela) 110 | Condition 111 | Warning in `stats::chisq.test()`: 112 | Chi-squared approximation may be incorrect 113 | 114 | # get_p_value warns with bad theoretical distributions 115 | 116 | Code 117 | res_ <- get_p_value(t_dist_30, t_obs, direction = "both") 118 | Condition 119 | Warning: 120 | `x` and `obs_stat` were generated using different null hypotheses. This workflow is untested and results may not mean what you think they mean. 121 | 122 | -------------------------------------------------------------------------------- /tests/testthat/_snaps/observe.md: -------------------------------------------------------------------------------- 1 | # observe() output is the same as the old wrappers 2 | 3 | Code 4 | res_wrap <- chisq_stat(gss_tbl, college ~ partyid) 5 | Condition 6 | Warning: 7 | `chisq_stat()` was deprecated in infer 1.0.0. 8 | i Please use `observe()` instead. 9 | 10 | --- 11 | 12 | Code 13 | res_wrap_2 <- t_stat(gss_tbl, hours ~ sex, order = c("male", "female")) 14 | Condition 15 | Warning: 16 | `t_stat()` was deprecated in infer 1.0.0. 17 | i Please use `observe()` instead. 18 | 19 | -------------------------------------------------------------------------------- /tests/testthat/_snaps/print.md: -------------------------------------------------------------------------------- 1 | # print method fits linewidth with many predictors (#543) 2 | 3 | Code 4 | specify(mtcars, mpg ~ cyl + disp + hp + drat + wt + qsec) 5 | Output 6 | Response: mpg (numeric) 7 | Explanatory: cyl (numeric), disp (numeric), hp (numer... 8 | # A tibble: 32 x 7 9 | mpg cyl disp hp drat wt qsec 10 | 11 | 1 21 6 160 110 3.9 2.62 16.5 12 | 2 21 6 160 110 3.9 2.88 17.0 13 | 3 22.8 4 108 93 3.85 2.32 18.6 14 | 4 21.4 6 258 110 3.08 3.22 19.4 15 | 5 18.7 8 360 175 3.15 3.44 17.0 16 | 6 18.1 6 225 105 2.76 3.46 20.2 17 | 7 14.3 8 360 245 3.21 3.57 15.8 18 | 8 24.4 4 147. 62 3.69 3.19 20 19 | 9 22.8 4 141. 95 3.92 3.15 22.9 20 | 10 19.2 6 168. 123 3.92 3.44 18.3 21 | # i 22 more rows 22 | 23 | -------------------------------------------------------------------------------- /tests/testthat/_snaps/shade_confidence_interval.md: -------------------------------------------------------------------------------- 1 | # shade_confidence_interval throws errors and warnings 2 | 3 | Code 4 | res_ <- gss_viz_sim + shade_confidence_interval(c(1, 2, 3)) 5 | Condition 6 | Warning: 7 | Expecting `endpoints` to be a 1 x 2 data frame or 2 element vector. Using the first two entries as the `endpoints`. 8 | 9 | --- 10 | 11 | Code 12 | res_ <- gss_viz_sim + shade_confidence_interval(data.frame(x = 1)) 13 | Condition 14 | Error in `shade_confidence_interval()`: 15 | ! Expecting `endpoints` to be a 1 x 2 data frame or 2 element vector. 16 | 17 | --- 18 | 19 | Code 20 | res_ <- gss_viz_sim + shade_confidence_interval(c(-1, 1), color = "x") 21 | Condition 22 | Error in `shade_confidence_interval_term()`: 23 | ! `color` must be 'color string', not 'character'. 24 | 25 | --- 26 | 27 | Code 28 | res_ <- gss_viz_sim + shade_confidence_interval(c(-1, 1), fill = "x") 29 | Condition 30 | Error in `shade_confidence_interval_term()`: 31 | ! `fill` must be 'color string', not 'character'. 32 | 33 | --- 34 | 35 | Code 36 | res_ <- shade_confidence_interval(gss_viz_sim, c(-1, 1)) 37 | Condition 38 | Error in `shade_confidence_interval()`: 39 | ! 
It looks like you piped the result of `visualize()` into `shade_confidence_interval()` rather than adding the result of `shade_confidence_interval()` as a layer with `+`. 40 | i Consider changing `|>` (or `%>%`) to `+`. 41 | 42 | --- 43 | 44 | Code 45 | res_ <- shade_confidence_interval(gss_viz_sim, endpoints = c(-1, 1)) 46 | Condition 47 | Error in `shade_confidence_interval()`: 48 | ! It looks like you piped the result of `visualize()` into `shade_confidence_interval()` rather than adding the result of `shade_confidence_interval()` as a layer with `+`. 49 | i Consider changing `|>` (or `%>%`) to `+`. 50 | 51 | --- 52 | 53 | Code 54 | res_ <- shade_ci(gss_viz_sim, c(-1, 1)) 55 | Condition 56 | Error in `shade_ci()`: 57 | ! It looks like you piped the result of `visualize()` into `shade_ci()` rather than adding the result of `shade_ci()` as a layer with `+`. 58 | i Consider changing `|>` (or `%>%`) to `+`. 59 | 60 | --- 61 | 62 | Code 63 | res_ <- shade_ci(gss_viz_sim, endpoints = c(-1, 1)) 64 | Condition 65 | Error in `shade_ci()`: 66 | ! It looks like you piped the result of `visualize()` into `shade_ci()` rather than adding the result of `shade_ci()` as a layer with `+`. 67 | i Consider changing `|>` (or `%>%`) to `+`. 68 | 69 | -------------------------------------------------------------------------------- /tests/testthat/_snaps/shade_confidence_interval/ci-null-endpoints.svg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tidymodels/infer/90c91983dd5c78a50f51976141b57db516e29a05/tests/testthat/_snaps/shade_confidence_interval/ci-null-endpoints.svg -------------------------------------------------------------------------------- /tests/testthat/_snaps/shade_p_value.md: -------------------------------------------------------------------------------- 1 | # shade_p_value throws errors 2 | 3 | Code 4 | gss_viz_sim + shade_p_value("a", "right") 5 | Condition 6 | Error in `shade_p_value()`: 7 | ! `obs_stat` must be 'numeric', not 'character'. 8 | 9 | --- 10 | 11 | Code 12 | gss_viz_sim + shade_p_value(1, 1) 13 | Condition 14 | Error in `shade_p_value()`: 15 | ! `direction` must be 'character', not 'double'. 16 | 17 | --- 18 | 19 | Code 20 | gss_viz_sim + shade_p_value(1, "right", color = "x") 21 | Condition 22 | Error in `shade_p_value()`: 23 | ! `color` must be 'color string', not 'character'. 24 | 25 | --- 26 | 27 | Code 28 | gss_viz_sim + shade_p_value(1, "right", fill = "x") 29 | Condition 30 | Error in `shade_p_value()`: 31 | ! `fill` must be 'color string', not 'character'. 32 | 33 | --- 34 | 35 | Code 36 | shade_p_value(gss_viz_sim, 1, "right") 37 | Condition 38 | Error in `shade_p_value()`: 39 | ! It looks like you piped the result of `visualize()` into `shade_p_value()` rather than adding the result of `shade_p_value()` as a layer with `+`. 40 | i Consider changing `|>` (or `%>%`) to `+`. 41 | 42 | --- 43 | 44 | Code 45 | shade_p_value(gss_viz_sim, obs_stat = 1) 46 | Condition 47 | Error in `shade_p_value()`: 48 | ! It looks like you piped the result of `visualize()` into `shade_p_value()` rather than adding the result of `shade_p_value()` as a layer with `+`. 49 | i Consider changing `|>` (or `%>%`) to `+`. 50 | 51 | --- 52 | 53 | Code 54 | shade_p_value(gss_viz_sim, obs_stat = 1, direction = "right") 55 | Condition 56 | Error in `shade_p_value()`: 57 | ! It looks like you piped the result of `visualize()` into `shade_p_value()` rather than adding the result of `shade_p_value()` as a layer with `+`. 
58 | i Consider changing `|>` (or `%>%`) to `+`. 59 | 60 | --- 61 | 62 | Code 63 | shade_pvalue(gss_viz_sim, 1, "right") 64 | Condition 65 | Error in `shade_pvalue()`: 66 | ! It looks like you piped the result of `visualize()` into `shade_pvalue()` rather than adding the result of `shade_pvalue()` as a layer with `+`. 67 | i Consider changing `|>` (or `%>%`) to `+`. 68 | 69 | --- 70 | 71 | Code 72 | shade_pvalue(gss_viz_sim, obs_stat = 1) 73 | Condition 74 | Error in `shade_pvalue()`: 75 | ! It looks like you piped the result of `visualize()` into `shade_pvalue()` rather than adding the result of `shade_pvalue()` as a layer with `+`. 76 | i Consider changing `|>` (or `%>%`) to `+`. 77 | 78 | --- 79 | 80 | Code 81 | shade_pvalue(gss_viz_sim, obs_stat = 1, direction = "right") 82 | Condition 83 | Error in `shade_pvalue()`: 84 | ! It looks like you piped the result of `visualize()` into `shade_pvalue()` rather than adding the result of `shade_pvalue()` as a layer with `+`. 85 | i Consider changing `|>` (or `%>%`) to `+`. 86 | 87 | -------------------------------------------------------------------------------- /tests/testthat/_snaps/shade_p_value/pval-null-obs-stat.svg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tidymodels/infer/90c91983dd5c78a50f51976141b57db516e29a05/tests/testthat/_snaps/shade_p_value/pval-null-obs-stat.svg -------------------------------------------------------------------------------- /tests/testthat/_snaps/specify.md: -------------------------------------------------------------------------------- 1 | # data argument 2 | 3 | Code 4 | specify(blah ~ cyl) 5 | Condition 6 | Error in `specify()`: 7 | ! `x` must be 'data.frame', not 'language'. 8 | 9 | --- 10 | 11 | Code 12 | specify(1:3) 13 | Condition 14 | Error in `specify()`: 15 | ! `x` must be 'data.frame', not 'integer'. 16 | 17 | --- 18 | 19 | Code 20 | specify(mtcars_df, mtcars_df$mpg) 21 | Condition 22 | Error in `specify()`: 23 | ! The first unnamed argument must be a formula. 24 | i You passed in 'double'. 25 | x Did you forget to name one or more arguments? 26 | 27 | # response and explanatory arguments 28 | 29 | Code 30 | specify(mtcars_df, response = blah) 31 | Condition 32 | Error in `specify()`: 33 | ! The response variable `blah` cannot be found in this dataframe. 34 | 35 | --- 36 | 37 | Code 38 | specify(mtcars_df, response = "blah") 39 | Condition 40 | Error in `specify()`: 41 | ! The response should be a bare variable name (not a string in quotation marks). 42 | 43 | --- 44 | 45 | Code 46 | specify(mtcars_df, formula = mpg ~ blah) 47 | Condition 48 | Error in `specify()`: 49 | ! The explanatory variable `blah` cannot be found in this dataframe. 50 | 51 | --- 52 | 53 | Code 54 | specify(mtcars_df, blah2 ~ cyl) 55 | Condition 56 | Error in `specify()`: 57 | ! The response variable `blah2` cannot be found in this dataframe. 58 | 59 | --- 60 | 61 | Code 62 | specify(mtcars_df) 63 | Condition 64 | Error in `specify()`: 65 | ! Please supply a response variable that is not `NULL`. 66 | 67 | --- 68 | 69 | Code 70 | specify(mtcars_df, formula = mpg ~ mpg) 71 | Condition 72 | Error in `specify()`: 73 | ! The response and explanatory variables must be different from one another. 74 | 75 | --- 76 | 77 | Code 78 | specify(mtcars_df, formula = "mpg" ~ cyl) 79 | Condition 80 | Error in `specify()`: 81 | ! The response should be a bare variable name (not a string in quotation marks). 
82 | 83 | --- 84 | 85 | Code 86 | specify(mtcars_df, formula = mpg ~ "cyl") 87 | Condition 88 | Error in `specify()`: 89 | ! The explanatory should be a bare variable name (not a string in quotation marks). 90 | 91 | --- 92 | 93 | Code 94 | specify(mtcars_df, formula = NULL ~ cyl) 95 | Condition 96 | Error in `specify()`: 97 | ! Please supply a response variable that is not `NULL`. 98 | 99 | # success argument 100 | 101 | Code 102 | specify(mtcars_df, response = vs, success = 1) 103 | Condition 104 | Error in `specify()`: 105 | ! `success` must be a string. 106 | 107 | --- 108 | 109 | Code 110 | specify(mtcars_df, response = vs, success = "bogus") 111 | Condition 112 | Error in `specify()`: 113 | ! bogus is not a valid level of vs. 114 | 115 | --- 116 | 117 | Code 118 | specify(mtcars_df, response = mpg, success = "1") 119 | Condition 120 | Error in `specify()`: 121 | ! `success` should only be specified if the response is a categorical variable. 122 | 123 | --- 124 | 125 | Code 126 | specify(mtcars_df, response = cyl, success = "4") 127 | Condition 128 | Error in `specify()`: 129 | ! `success` can only be used if the response has two levels. `filter()` can reduce a variable to two levels. 130 | 131 | --- 132 | 133 | Code 134 | specify(mtcars_df, response = am) 135 | Condition 136 | Error in `specify()`: 137 | ! A level of the response variable `am` needs to be specified for the `success` argument in `specify()`. 138 | 139 | # formula argument is a formula 140 | 141 | Code 142 | specify(mtcars_df, formula = "vs", success = 1) 143 | Condition 144 | Error in `specify()`: 145 | ! The first unnamed argument must be a formula. 146 | i You passed in 'character'. 147 | x Did you forget to name one or more arguments? 148 | 149 | --- 150 | 151 | Code 152 | specify(mtcars, am, success = "1") 153 | Condition 154 | Error in `specify()`: 155 | ! The argument you passed in for the formula does not exist. 156 | i Were you trying to pass in an unquoted column name? 157 | i Did you forget to name one or more arguments? 158 | 159 | --- 160 | 161 | Code 162 | specify(mtcars, response = am, "1") 163 | Condition 164 | Error in `specify()`: 165 | ! The first unnamed argument must be a formula. 166 | i You passed in 'character'. 167 | x Did you forget to name one or more arguments? 168 | 169 | # is_complete works 170 | 171 | Code 172 | res_ <- specify(some_missing, response = vec) 173 | Condition 174 | Warning: 175 | Removed 1 rows containing missing values. 176 | 177 | # specify messages when dropping unused levels 178 | 179 | Code 180 | res_ <- specify(dplyr::filter(gss, partyid %in% c("rep", "dem")), age ~ partyid) 181 | Message 182 | Dropping unused factor levels c("ind", "other", "DK") from the supplied explanatory variable 'partyid'. 183 | 184 | --- 185 | 186 | Code 187 | res_ <- specify(dplyr::filter(gss, partyid %in% c("rep", "dem")), partyid ~ age) 188 | Message 189 | Dropping unused factor levels c("ind", "other", "DK") from the supplied response variable 'partyid'. 190 | 191 | --- 192 | 193 | Code 194 | res_ <- specify(dplyr::filter(gss, partyid %in% c("rep", "dem")), partyid ~ 195 | NULL) 196 | Message 197 | Dropping unused factor levels c("ind", "other", "DK") from the supplied response variable 'partyid'. 
198 | 199 | -------------------------------------------------------------------------------- /tests/testthat/_snaps/utils.md: -------------------------------------------------------------------------------- 1 | # check_type works 2 | 3 | Code 4 | check_type(x_var, is.character) 5 | Condition 6 | Error: 7 | ! `x_var` must be 'character', not 'integer'. 8 | 9 | --- 10 | 11 | Code 12 | check_type(x_var, is.character, "symbolic") 13 | Condition 14 | Error: 15 | ! `x_var` must be 'symbolic', not 'integer'. 16 | 17 | --- 18 | 19 | Code 20 | check_type(x_df, is.logical) 21 | Condition 22 | Error: 23 | ! `x_df` must be 'logical', not 'data.frame'. 24 | 25 | # check_type allows custom name for `x` 26 | 27 | Code 28 | check_type(input, is.numeric, x_name = "aaa") 29 | Condition 30 | Error: 31 | ! `aaa` must be 'numeric', not 'character'. 32 | 33 | # check_type allows extra arguments for `predicate` 34 | 35 | Code 36 | check_type(1, is_geq, min_val = 2) 37 | Condition 38 | Error: 39 | ! `1` must be 'geq', not 'double'. 40 | 41 | # check_type allows formula `predicate` 42 | 43 | Code 44 | check_type("a", ~ is.numeric(.)) 45 | Condition 46 | Error: 47 | ! `"a"` must be '~is.numeric(.)', not 'character'. 48 | 49 | # hypothesize errors out when x isn't a dataframe 50 | 51 | Code 52 | hypothesize(c(1, 2, 3), null = "point") 53 | Condition 54 | Error in `hypothesize()`: 55 | ! x must be a data.frame or tibble 56 | 57 |
-------------------------------------------------------------------------------- /tests/testthat/_snaps/visualize/vis-theor-none-1.svg: -------------------------------------------------------------------------------- [SVG plot snapshot; recoverable text: title "Theoretical z Null Distribution", x-axis "z stat", y-axis "density"]
-------------------------------------------------------------------------------- /tests/testthat/_snaps/visualize/visualise.svg: -------------------------------------------------------------------------------- [SVG plot snapshot; recoverable text: title "Simulation-Based Null Distribution", x-axis "stat", y-axis "count"]
-------------------------------------------------------------------------------- /tests/testthat/_snaps/visualize/visualize.svg: -------------------------------------------------------------------------------- [SVG plot snapshot; recoverable text: title "Simulation-Based Null Distribution", x-axis "stat", y-axis "count"]
-------------------------------------------------------------------------------- /tests/testthat/_snaps/visualize/viz-assume-2t.svg: -------------------------------------------------------------------------------- [SVG plot snapshot; recoverable text: title "Theoretical t Null Distribution", x-axis "t stat", y-axis "density"]
-------------------------------------------------------------------------------- /tests/testthat/_snaps/visualize/viz-assume-2z.svg: -------------------------------------------------------------------------------- [SVG plot snapshot; recoverable text: title "Theoretical z Null Distribution", x-axis "z stat", y-axis "density"]
-------------------------------------------------------------------------------- /tests/testthat/_snaps/visualize/viz-assume-f.svg: -------------------------------------------------------------------------------- [SVG plot snapshot; recoverable text: title "Theoretical F Null Distribution", x-axis "F stat", y-axis "density"]
-------------------------------------------------------------------------------- /tests/testthat/_snaps/visualize/viz-assume-t.svg: -------------------------------------------------------------------------------- [SVG plot snapshot; recoverable text: title "Theoretical t Null Distribution", x-axis "t stat", y-axis "density"]
-------------------------------------------------------------------------------- /tests/testthat/_snaps/visualize/viz-assume-z.svg: -------------------------------------------------------------------------------- [SVG plot snapshot; recoverable text: title "Theoretical z Null Distribution", x-axis "z stat", y-axis "density"]
-------------------------------------------------------------------------------- /tests/testthat/helper-data.R: -------------------------------------------------------------------------------- 1 | set.seed(4242) 2 | 3 | expect_doppelganger <- function(title, fig, ...) { 4 | testthat::skip_if_not_installed("vdiffr") 5 | vdiffr::expect_doppelganger(title, fig, ...)
6 | } 7 | 8 | eps <- if (capabilities("long.double")) { 9 | sqrt(.Machine$double.eps) 10 | } else { 11 | 0.01 12 | } 13 | 14 | gss_tbl <- tibble::as_tibble(gss) |> 15 | dplyr::filter(!(is.na(sex) | is.na(college))) |> 16 | dplyr::mutate(partyid = as.character(partyid)) |> 17 | dplyr::filter(partyid %in% c("ind", "rep", "dem")) 18 | 19 | gss_calc <- gss_tbl |> 20 | specify(college ~ sex, success = "no degree") |> 21 | hypothesize(null = "independence") |> 22 | generate(reps = 1000, type = "permute") |> 23 | calculate(stat = "diff in props", order = c("female", "male")) 24 | 25 | mtcars_df <- mtcars |> 26 | dplyr::mutate( 27 | cyl = factor(cyl), 28 | vs = factor(vs), 29 | am = factor(am), 30 | gear = factor(gear), 31 | carb = factor(carb) 32 | ) 33 | 34 | obs_diff <- gss_tbl |> 35 | specify(college ~ sex, success = "no degree") |> 36 | calculate(stat = "diff in props", order = c("female", "male")) 37 | 38 | set.seed(2018) 39 | test_df <- tibble::tibble(stat = rnorm(100)) 40 | 41 | # Data for visualization tests 42 | 43 | gss_permute <- gss_tbl |> 44 | specify(college ~ sex, success = "no degree") |> 45 | hypothesize(null = "independence") |> 46 | generate(reps = 100, type = "permute") |> 47 | calculate(stat = "z", order = c("female", "male")) 48 | 49 | gss_viz_sim <- gss_permute |> visualize(method = "simulation") 50 | 51 | # Warnings are about checking conditions for the theoretical method. 52 | gss_viz_theor <- suppressWarnings(suppressMessages( 53 | gss_permute |> visualize(method = "theoretical") 54 | )) 55 | gss_viz_both <- suppressWarnings( 56 | gss_permute |> visualize(method = "both") 57 | ) 58 | -------------------------------------------------------------------------------- /tests/testthat/test-aliases.R: -------------------------------------------------------------------------------- 1 | test_that("aliases work", { 2 | expect_equal( 3 | gss_calc |> 4 | get_pvalue(obs_stat = -0.2, direction = "right") |> 5 | dplyr::pull(), 6 | expected = 1, 7 | tolerance = eps 8 | ) 9 | 10 | expect_silent(gss_permute |> get_ci()) 11 | }) 12 | 13 | test_that("old aliases produce informative error", { 14 | expect_snapshot( 15 | error = TRUE, 16 | res <- gss_calc |> 17 | p_value(obs_stat = -0.2, direction = "right") 18 | ) 19 | 20 | expect_snapshot( 21 | error = TRUE, 22 | res_ <- gss_permute |> conf_int() 23 | ) 24 | }) 25 | -------------------------------------------------------------------------------- /tests/testthat/test-fit.R: -------------------------------------------------------------------------------- 1 | x1 <- gss[1:100, ] |> specify(response = hours) 2 | x2 <- gss[1:100, ] |> specify(hours ~ NULL) 3 | x3 <- gss[1:100, ] |> specify(response = hours, explanatory = c(age, college)) 4 | x4 <- gss[1:100, ] |> specify(hours ~ age + college) 5 | 6 | test_that("get_formula helper works", { 7 | expect_false(has_attr(x1, "formula")) 8 | expect_true(has_attr(x2, "formula")) 9 | expect_false(has_attr(x3, "formula")) 10 | expect_true(has_attr(x4, "formula")) 11 | 12 | expect_equal(get_formula(x1), get_formula(x2), ignore_attr = TRUE) 13 | expect_equal(get_formula(x3), get_formula(x4), ignore_attr = TRUE) 14 | }) 15 | 16 | test_that("fit_linear_model helper works", { 17 | x3_m <- 18 | fit_linear_model( 19 | x3, 20 | get_formula(x3) 21 | ) 22 | 23 | x4_m <- 24 | fit_linear_model( 25 | x3, 26 | get_formula(x3) 27 | ) 28 | 29 | expect_equal(x3_m, x4_m) 30 | expect_equal(nrow(x3_m), 3) 31 | expect_equal(ncol(x3_m), 2) 32 | 33 | expect_equal( 34 | c("term", "estimate"), 35 | colnames(x3_m) 36 | ) 37 | 38 | 
expect_equal( 39 | c("character", "numeric"), 40 | purrr::map_chr(x3_m, class) |> unname() 41 | ) 42 | 43 | expect_equal( 44 | c("intercept", "age", "collegedegree"), 45 | x3_m$term 46 | ) 47 | }) 48 | 49 | test_that("fit.infer can handle generated objects", { 50 | x3_fit <- x3 |> fit() 51 | 52 | x3_gen_fit <- x3 |> 53 | hypothesize(null = 'independence') |> 54 | generate(reps = 2, type = "permute") |> 55 | fit() 56 | 57 | expect_equal(unique(x3_fit$term), unique(x3_gen_fit$term)) 58 | expect_equal(nrow(x3_fit) * 2, nrow(x3_gen_fit)) 59 | expect_equal(ncol(x3_fit) + 1, ncol(x3_gen_fit)) 60 | expect_equal(length(unique(x3_gen_fit$replicate)), 2) 61 | expect_equal( 62 | colnames(x3_fit), 63 | colnames(x3_gen_fit)[colnames(x3_gen_fit) != "replicate"] 64 | ) 65 | }) 66 | 67 | test_that("fit.infer messages informatively on excessive null", { 68 | expect_snapshot( 69 | res_ <- gss |> 70 | specify(hours ~ age + college) |> 71 | hypothesize(null = "independence") |> 72 | fit() 73 | ) 74 | 75 | expect_silent( 76 | gss |> 77 | specify(hours ~ age + college) |> 78 | fit() 79 | ) 80 | }) 81 | 82 | test_that("fit.infer logistic regression works", { 83 | # linear regression default works 84 | expect_equal( 85 | gss |> 86 | specify(hours ~ age + college) |> 87 | fit(), 88 | gss |> 89 | specify(hours ~ age + college) |> 90 | fit(family = stats::gaussian) 91 | ) 92 | 93 | # logistic regression default works 94 | expect_equal( 95 | gss |> 96 | specify(college ~ age + hours) |> 97 | fit(family = stats::binomial), 98 | gss |> 99 | specify(college ~ age + hours) |> 100 | fit() 101 | ) 102 | 103 | # errors informatively with multinomial response variable 104 | expect_snapshot( 105 | error = TRUE, 106 | gss |> 107 | specify(finrela ~ age + college) |> 108 | fit() 109 | ) 110 | 111 | # works as expected for `generate()`d objects 112 | fit_gen <- gss |> 113 | specify(college ~ age + hours) |> 114 | hypothesize(null = "independence") |> 115 | generate(type = "permute", reps = 2) |> 116 | fit() 117 | 118 | fit_obs <- gss |> 119 | specify(college ~ age + hours) |> 120 | fit() 121 | 122 | expect_equal(nrow(fit_gen), nrow(fit_obs) * 2) 123 | expect_equal(ncol(fit_gen), ncol(fit_obs) + 1) 124 | 125 | # responds to success argument 126 | fit_deg <- gss |> 127 | specify(college ~ age + hours, success = "degree") |> 128 | fit() 129 | 130 | fit_no_deg <- gss |> 131 | specify(college ~ age + hours, success = "no degree") |> 132 | fit() 133 | 134 | expect_equal(fit_deg$term, fit_no_deg$term) 135 | expect_equal(fit_deg$estimate, -fit_no_deg$estimate) 136 | }) 137 | -------------------------------------------------------------------------------- /tests/testthat/test-observe.R: -------------------------------------------------------------------------------- 1 | test_that("observe() output is equal to core verbs", { 2 | expect_equal( 3 | gss |> 4 | observe(hours ~ NULL, stat = "mean"), 5 | gss |> 6 | specify(hours ~ NULL) |> 7 | calculate(stat = "mean") 8 | ) 9 | 10 | expect_equal( 11 | gss |> 12 | observe(hours ~ NULL, stat = "t", null = "point", mu = 40), 13 | gss |> 14 | specify(hours ~ NULL) |> 15 | hypothesize(null = "point", mu = 40) |> 16 | calculate(stat = "t") 17 | ) 18 | 19 | expect_equal( 20 | observe( 21 | gss, 22 | age ~ college, 23 | stat = "diff in means", 24 | order = c("degree", "no degree") 25 | ), 26 | gss |> 27 | specify(age ~ college) |> 28 | calculate("diff in means", order = c("degree", "no degree")), 29 | ignore_attr = TRUE 30 | ) 31 | }) 32 | 33 | test_that("observe messages/warns/errors informatively", { 
34 | expect_equal( 35 | expect_message( 36 | gss |> 37 | observe(hours ~ NULL, stat = "mean", mu = 40) 38 | ) |> 39 | conditionMessage(), 40 | expect_message( 41 | gss |> 42 | specify(hours ~ NULL) |> 43 | hypothesize(null = "point", mu = 40) |> 44 | calculate(stat = "mean") 45 | ) |> 46 | conditionMessage() 47 | ) 48 | 49 | expect_equal( 50 | expect_warning( 51 | gss |> 52 | observe(hours ~ NULL, stat = "t") 53 | ) |> 54 | conditionMessage(), 55 | expect_warning( 56 | gss |> 57 | specify(hours ~ NULL) |> 58 | calculate(stat = "t") 59 | ) |> 60 | conditionMessage() 61 | ) 62 | 63 | expect_error( 64 | expect_equal( 65 | capture.output( 66 | gss |> 67 | observe(hours ~ age, stat = "diff in means"), 68 | type = "message" 69 | ), 70 | capture.output( 71 | gss |> 72 | specify(hours ~ age) |> 73 | calculate(stat = "diff in means"), 74 | type = "message" 75 | ), 76 | ) 77 | ) 78 | 79 | expect_error( 80 | expect_equal( 81 | gss |> 82 | observe(explanatory = age, stat = "diff in means"), 83 | gss |> 84 | specify(explanatory = age) |> 85 | calculate(stat = "diff in means") 86 | ) 87 | ) 88 | }) 89 | 90 | test_that("observe() works with either specify() interface", { 91 | # unnamed formula argument 92 | expect_equal( 93 | gss |> 94 | observe(hours ~ NULL, stat = "mean"), 95 | gss |> 96 | observe(response = hours, stat = "mean"), 97 | ignore_attr = TRUE 98 | ) 99 | 100 | expect_equal( 101 | gss |> 102 | observe( 103 | hours ~ college, 104 | stat = "diff in means", 105 | order = c("degree", "no degree") 106 | ), 107 | gss |> 108 | specify(hours ~ college) |> 109 | calculate(stat = "diff in means", order = c("degree", "no degree")) 110 | ) 111 | 112 | # named formula argument 113 | expect_equal( 114 | gss |> 115 | observe(formula = hours ~ NULL, stat = "mean"), 116 | gss |> 117 | observe(response = hours, stat = "mean"), 118 | ignore_attr = TRUE 119 | ) 120 | 121 | expect_equal( 122 | gss |> 123 | observe(formula = hours ~ NULL, stat = "mean"), 124 | gss |> 125 | observe(response = hours, stat = "mean"), 126 | ignore_attr = TRUE 127 | ) 128 | 129 | expect_equal( 130 | gss |> 131 | observe( 132 | formula = hours ~ college, 133 | stat = "diff in means", 134 | order = c("degree", "no degree") 135 | ), 136 | gss |> 137 | specify(formula = hours ~ college) |> 138 | calculate(stat = "diff in means", order = c("degree", "no degree")) 139 | ) 140 | }) 141 | 142 | test_that("observe() output is the same as the old wrappers", { 143 | expect_snapshot( 144 | res_wrap <- gss_tbl |> 145 | chisq_stat(college ~ partyid) 146 | ) 147 | 148 | expect_equal( 149 | gss_tbl |> 150 | observe(college ~ partyid, stat = "Chisq") |> 151 | dplyr::pull(), 152 | res_wrap 153 | ) 154 | 155 | expect_snapshot( 156 | res_wrap_2 <- gss_tbl |> 157 | t_stat(hours ~ sex, order = c("male", "female")) 158 | ) 159 | 160 | expect_equal( 161 | gss_tbl |> 162 | observe(stat = "t", hours ~ sex, order = c("male", "female")) |> 163 | dplyr::pull(), 164 | res_wrap_2 165 | ) 166 | }) 167 | -------------------------------------------------------------------------------- /tests/testthat/test-print.R: -------------------------------------------------------------------------------- 1 | test_that("print works", { 2 | expect_output(print( 3 | gss_tbl |> 4 | specify(age ~ hours) |> 5 | hypothesize(null = "independence") |> 6 | generate(reps = 10, type = "permute") 7 | )) 8 | }) 9 | 10 | test_that("print method fits linewidth with many predictors (#543)", { 11 | expect_snapshot(specify(mtcars, mpg ~ cyl + disp + hp + drat + wt + qsec)) 12 | }) 13 | 
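The test-observe.R file above checks that observe() collapses the core specify(), hypothesize() (when null-hypothesis arguments such as mu are given), and calculate() verbs into a single call. A rough sketch of the simplest equivalence those tests assert, reusing the gss data and verbs that appear throughout the test suite (illustrative only, not part of the package sources):

library(infer)

# observed mean of hours worked, via the core verbs
obs_core <- gss |>
  specify(hours ~ NULL) |>
  calculate(stat = "mean")

# the same observed statistic via the observe() shorthand
obs_shorthand <- gss |>
  observe(hours ~ NULL, stat = "mean")

# both are one-row tibbles with a single `stat` column;
# test-observe.R compares results like these with expect_equal()
all.equal(obs_core$stat, obs_shorthand$stat)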
-------------------------------------------------------------------------------- /tests/testthat/test-shade_confidence_interval.R: -------------------------------------------------------------------------------- 1 | # shade_confidence_interval ----------------------------------------------- 2 | test_that("shade_confidence_interval works", { 3 | skip_if(getRversion() < "4.1.0") 4 | 5 | # Adding `shade_confidence_interval()` to simulation plot 6 | expect_doppelganger( 7 | "ci-sim-fill", 8 | gss_viz_sim + shade_confidence_interval(c(-1, 1)) 9 | ) 10 | expect_doppelganger( 11 | "ci-sim-nofill", 12 | gss_viz_sim + shade_confidence_interval(c(-1, 1), fill = NULL) 13 | ) 14 | 15 | # Adding `shade_confidence_interval()` to theoretical plot 16 | expect_doppelganger( 17 | "ci-theor-fill", 18 | gss_viz_theor + shade_confidence_interval(c(-1, 1)) 19 | ) 20 | expect_doppelganger( 21 | "ci-theor-nofill", 22 | gss_viz_theor + shade_confidence_interval(c(-1, 1), fill = NULL) 23 | ) 24 | 25 | # Adding `shade_confidence_interval()` to "both" plot 26 | expect_doppelganger( 27 | "ci-both-fill", 28 | gss_viz_both + shade_confidence_interval(c(-1, 1)) 29 | ) 30 | expect_doppelganger( 31 | "ci-both-nofill", 32 | gss_viz_both + shade_confidence_interval(c(-1, 1), fill = NULL) 33 | ) 34 | }) 35 | 36 | test_that("shade_confidence_interval accepts `NULL` as `endpoints`", { 37 | skip_if(getRversion() < "4.1.0") 38 | 39 | expect_doppelganger( 40 | "ci-null-endpoints", 41 | gss_viz_sim + shade_confidence_interval(NULL) 42 | ) 43 | }) 44 | 45 | test_that("shade_confidence_interval uses extra aesthetic", { 46 | skip_if(getRversion() < "4.1.0") 47 | 48 | expect_doppelganger( 49 | "ci-extra-aes-1", 50 | gss_viz_sim + shade_confidence_interval(c(-1, 1), alpha = 1) 51 | ) 52 | expect_doppelganger( 53 | "ci-extra-aes-2", 54 | gss_viz_sim + shade_confidence_interval(c(-1, 1), linetype = "dotted") 55 | ) 56 | }) 57 | 58 | test_that("shade_confidence_interval throws errors and warnings", { 59 | skip_if(getRversion() < "4.1.0") 60 | 61 | expect_snapshot(res_ <- gss_viz_sim + shade_confidence_interval(c(1, 2, 3))) 62 | expect_snapshot( 63 | error = TRUE, 64 | res_ <- gss_viz_sim + shade_confidence_interval(data.frame(x = 1)) 65 | ) 66 | expect_snapshot( 67 | error = TRUE, 68 | res_ <- gss_viz_sim + shade_confidence_interval(c(-1, 1), color = "x") 69 | ) 70 | expect_snapshot( 71 | error = TRUE, 72 | res_ <- gss_viz_sim + shade_confidence_interval(c(-1, 1), fill = "x") 73 | ) 74 | expect_snapshot( 75 | error = TRUE, 76 | res_ <- gss_viz_sim |> shade_confidence_interval(c(-1, 1)) 77 | ) 78 | expect_snapshot( 79 | error = TRUE, 80 | res_ <- gss_viz_sim |> shade_confidence_interval(endpoints = c(-1, 1)) 81 | ) 82 | expect_snapshot(error = TRUE, res_ <- gss_viz_sim |> shade_ci(c(-1, 1))) 83 | expect_snapshot( 84 | error = TRUE, 85 | res_ <- gss_viz_sim |> shade_ci(endpoints = c(-1, 1)) 86 | ) 87 | }) 88 | 89 | # shade_ci ---------------------------------------------------------------- 90 | # Tested in `shade_confidence_interval()` 91 | -------------------------------------------------------------------------------- /tests/testthat/test-specify.R: -------------------------------------------------------------------------------- 1 | one_nonshift_mean <- mtcars_df |> specify(response = mpg) 2 | 3 | one_nonshift_prop <- mtcars_df |> specify(response = am, success = "1") 4 | 5 | two_means_boot <- mtcars_df |> specify(mpg ~ am) 6 | 7 | two_props_boot <- mtcars_df |> specify(am ~ vs, success = "1") 8 | 9 | slope_boot <- mtcars_df |> specify(mpg 
~ hp) 10 | 11 | test_that("auto `type` works (specify)", { 12 | expect_equal(attr(one_nonshift_mean, "type"), "bootstrap") 13 | expect_equal(attr(one_nonshift_prop, "type"), "bootstrap") 14 | expect_equal(attr(two_means_boot, "type"), "bootstrap") 15 | expect_equal(attr(two_props_boot, "type"), "bootstrap") 16 | expect_equal(attr(slope_boot, "type"), "bootstrap") 17 | }) 18 | 19 | test_that("data argument", { 20 | expect_snapshot(error = TRUE, specify(blah ~ cyl)) 21 | expect_snapshot(error = TRUE, specify(1:3)) 22 | expect_s3_class(mtcars_df, "data.frame") 23 | expect_snapshot(error = TRUE, specify(mtcars_df, mtcars_df$mpg)) 24 | }) 25 | 26 | test_that("response and explanatory arguments", { 27 | expect_snapshot(error = TRUE, specify(mtcars_df, response = blah)) 28 | expect_snapshot(error = TRUE, specify(mtcars_df, response = "blah")) 29 | expect_snapshot(error = TRUE, specify(mtcars_df, formula = mpg ~ blah)) 30 | expect_snapshot(error = TRUE, specify(mtcars_df, blah2 ~ cyl)) 31 | expect_snapshot(error = TRUE, specify(mtcars_df)) 32 | expect_snapshot(error = TRUE, specify(mtcars_df, formula = mpg ~ mpg)) 33 | expect_snapshot(error = TRUE, specify(mtcars_df, formula = "mpg" ~ cyl)) 34 | expect_snapshot(error = TRUE, specify(mtcars_df, formula = mpg ~ "cyl")) 35 | expect_silent(specify(mtcars_df, formula = mpg ~ cyl)) 36 | 37 | expect_snapshot(error = TRUE, specify(mtcars_df, formula = NULL ~ cyl)) 38 | }) 39 | 40 | test_that("success argument", { 41 | expect_snapshot(error = TRUE, specify(mtcars_df, response = vs, success = 1)) 42 | expect_snapshot( 43 | error = TRUE, 44 | specify(mtcars_df, response = vs, success = "bogus") 45 | ) 46 | expect_snapshot( 47 | error = TRUE, 48 | specify(mtcars_df, response = mpg, success = "1") 49 | ) 50 | expect_snapshot( 51 | error = TRUE, 52 | specify(mtcars_df, response = cyl, success = "4") 53 | ) 54 | # success not given 55 | expect_snapshot(error = TRUE, specify(mtcars_df, response = am)) 56 | }) 57 | 58 | test_that("sensible output", { 59 | expect_equal(ncol(specify(mtcars_df, formula = mpg ~ NULL)), 1) 60 | expect_equal(ncol(specify(mtcars_df, formula = mpg ~ wt)), 2) 61 | expect_equal(class(specify(mtcars_df, formula = mpg ~ wt))[1], "infer") 62 | }) 63 | 64 | test_that("formula argument is a formula", { 65 | expect_snapshot(error = TRUE, specify(mtcars_df, formula = "vs", success = 1)) 66 | 67 | # Issue #110: https://github.com/tidymodels/infer/issues/110 68 | expect_snapshot(error = TRUE, specify(mtcars, am, success = "1")) 69 | expect_snapshot(error = TRUE, specify(mtcars, response = am, "1")) 70 | expect_silent({ 71 | mtcars |> 72 | dplyr::mutate(am = factor(am)) |> 73 | specify(response = am, success = "1") 74 | }) 75 | }) 76 | 77 | test_that("is_complete works", { 78 | some_missing <- data.frame(vec = c(NA, 2, 3)) 79 | expect_snapshot(res_ <- specify(some_missing, response = vec)) 80 | }) 81 | 82 | test_that("specify doesn't have NSE issues (#256)", { 83 | expect_silent(specify(tibble(x = 1:10), x ~ NULL)) 84 | }) 85 | 86 | test_that("specify messages when dropping unused levels", { 87 | expect_snapshot( 88 | res_ <- gss |> 89 | dplyr::filter(partyid %in% c("rep", "dem")) |> 90 | specify(age ~ partyid) 91 | ) 92 | 93 | expect_snapshot( 94 | res_ <- gss |> 95 | dplyr::filter(partyid %in% c("rep", "dem")) |> 96 | specify(partyid ~ age) 97 | ) 98 | 99 | expect_snapshot( 100 | res_ <- gss |> 101 | dplyr::filter(partyid %in% c("rep", "dem")) |> 102 | specify(partyid ~ NULL) 103 | ) 104 | 105 | expect_silent( 106 | gss |> 107 | 
dplyr::filter(partyid %in% c("rep", "dem")) |> 108 | specify(age ~ NULL) 109 | ) 110 | }) 111 | 112 | test_that("user can specify multiple explanatory variables", { 113 | x <- gss |> specify(hours ~ sex + college) 114 | 115 | expect_true(inherits(x, "infer")) 116 | expect_true(inherits(explanatory_variable(x), "tbl_df")) 117 | expect_true(inherits(explanatory_name(x), "character")) 118 | expect_true(inherits(explanatory_expr(x), "call")) 119 | 120 | expect_equal(explanatory_name(x), c("sex", "college")) 121 | expect_equal(response_name(x), "hours") 122 | }) 123 | -------------------------------------------------------------------------------- /tests/testthat/test-utils.R: -------------------------------------------------------------------------------- 1 | test_that("append_infer_class works", { 2 | expect_equal( 3 | class(append_infer_class(structure("a", class = "b"))), 4 | c("infer", "b") 5 | ) 6 | expect_equal( 7 | class(append_infer_class(structure("a", class = c("infer", "b")))), 8 | c("infer", "b") 9 | ) 10 | }) 11 | 12 | null_val <- NULL 13 | 14 | test_that("is_single_number works", { 15 | # Basic usage 16 | expect_true(is_single_number(1)) 17 | expect_true(is_single_number(1L)) 18 | expect_false(is_single_number("a")) 19 | expect_false(is_single_number(1:2)) 20 | 21 | # Infinity and `NA` are not allowed 22 | expect_false(is_single_number(Inf)) 23 | expect_false(is_single_number(-Inf)) 24 | expect_false(is_single_number(NA_real_)) 25 | 26 | # Using boundaries 27 | expect_true(is_single_number(1, min_val = -10)) 28 | expect_false(is_single_number(1, min_val = 10)) 29 | 30 | expect_true(is_single_number(1, max_val = 10)) 31 | expect_false(is_single_number(1, max_val = -10)) 32 | 33 | expect_true(is_single_number(1, min_val = -10, max_val = 10)) 34 | expect_false(is_single_number(1, min_val = -10, max_val = 0)) 35 | expect_false(is_single_number(1, min_val = 10, max_val = 100)) 36 | 37 | # Using boundary inclusivity 38 | ## Inclusive by default 39 | expect_true(is_single_number(1, min_val = 1)) 40 | expect_true(is_single_number(1, max_val = 1)) 41 | 42 | expect_false(is_single_number(1, min_val = 1, include_min_val = FALSE)) 43 | expect_false(is_single_number(1, max_val = 1, include_max_val = FALSE)) 44 | }) 45 | 46 | test_that("is_truefalse works", { 47 | expect_true(is_truefalse(TRUE)) 48 | expect_true(is_truefalse(FALSE)) 49 | expect_false(is_truefalse(c(TRUE, TRUE))) 50 | expect_false(is_truefalse("a")) 51 | expect_false(is_truefalse(1L)) 52 | }) 53 | 54 | test_that("check_type works", { 55 | x_var <- 1L 56 | 57 | expect_silent(check_type(x_var, is.integer)) 58 | 59 | expect_snapshot(error = TRUE, check_type(x_var, is.character)) 60 | expect_snapshot(error = TRUE, check_type(x_var, is.character, "symbolic")) 61 | 62 | x_df <- data.frame(x = TRUE) 63 | expect_silent(check_type(x_df, is.data.frame)) 64 | expect_snapshot(error = TRUE, check_type(x_df, is.logical)) 65 | }) 66 | 67 | test_that("check_type allows `NULL`", { 68 | input <- NULL 69 | expect_silent(check_type(input, is.numeric, allow_null = TRUE)) 70 | }) 71 | 72 | test_that("check_type allows custom name for `x`", { 73 | input <- "a" 74 | expect_snapshot(error = TRUE, check_type(input, is.numeric, x_name = "aaa")) 75 | }) 76 | 77 | test_that("check_type allows extra arguments for `predicate`", { 78 | is_geq <- function(x, min_val) { 79 | x >= min_val 80 | } 81 | expect_silent(check_type(1, is_geq, min_val = 0)) 82 | expect_snapshot(error = TRUE, check_type(1, is_geq, min_val = 2)) 83 | }) 84 | 85 | test_that("check_type 
allows formula `predicate`", { 86 | expect_silent(check_type(1, ~ is.numeric(.) && (. > 0))) 87 | 88 | # By default type should be inferred as the whole formula 89 | expect_snapshot(error = TRUE, check_type("a", ~ is.numeric(.))) 90 | }) 91 | 92 | 93 | test_that("get_type works", { 94 | expect_equal(get_type(data.frame(x = 1)), "data.frame") 95 | expect_equal(get_type(list(x = 1)), "list") 96 | expect_equal(get_type(TRUE), "logical") 97 | }) 98 | 99 | test_that("c_dedupl returns input when unnamed", { 100 | expect_equal(c_dedupl(c(1, 2, 3)), c(1, 2, 3)) 101 | }) 102 | 103 | test_that("hypothesize errors out when x isn't a dataframe", { 104 | expect_snapshot(error = TRUE, hypothesize(c(1, 2, 3), null = "point")) 105 | }) 106 | 107 | test_that("p_null supplies appropriate params", { 108 | expect_equal( 109 | gss |> specify(partyid ~ NULL) |> p_null(), 110 | c(p.dem = 0.2, p.ind = 0.2, p.rep = 0.2, p.other = 0.2, p.DK = 0.2) 111 | ) 112 | }) 113 | 114 | test_that("variables are standardized as expected", { 115 | gss_types <- 116 | gss |> 117 | dplyr::mutate( 118 | age = as.integer(age), 119 | is_dem = dplyr::if_else(partyid == "dem", TRUE, FALSE), 120 | finrela = as.character(finrela) 121 | ) 122 | 123 | gss_std <- standardize_variable_types(gss_types) 124 | 125 | expect_true(inherits(gss_types$age, "integer")) 126 | expect_true(inherits(gss_types$finrela, "character")) 127 | expect_true(inherits(gss_types$income, "ordered")) 128 | expect_true(inherits(gss_types$college, "factor")) 129 | expect_true(inherits(gss_types$is_dem, "logical")) 130 | 131 | expect_null(levels(gss_types$is_dem)) 132 | 133 | expect_true(inherits(gss_std$age, "numeric")) 134 | expect_true(inherits(gss_std$finrela, "factor")) 135 | expect_true(inherits(gss_std$income, "factor")) 136 | expect_true(inherits(gss_std$college, "factor")) 137 | expect_true(inherits(gss_std$is_dem, "factor")) 138 | 139 | expect_equal(levels(gss_std$is_dem), c("TRUE", "FALSE")) 140 | }) 141 | 142 | test_that("group_by_replicate() helper returns correct results", { 143 | reps <- 500 144 | nrow_gss <- nrow(gss) 145 | 146 | gss_gen <- 147 | gss |> 148 | specify(age ~ college) |> 149 | hypothesize(null = "independence") |> 150 | generate(reps = reps, type = "permute") |> 151 | dplyr::ungroup() 152 | 153 | expect_equal( 154 | dplyr::group_by(gss_gen, replicate), 155 | group_by_replicate(gss_gen, reps, nrow_gss) 156 | ) 157 | }) 158 | -------------------------------------------------------------------------------- /vignettes/infer_cache/html/__packages: -------------------------------------------------------------------------------- 1 | base 2 | usethis 3 | devtools 4 | dplyr 5 | testthat 6 | infer 7 | -------------------------------------------------------------------------------- /vignettes/infer_cache/html/calculate-point_94c073b633c3cf7bef3252dcad544ee2.RData: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tidymodels/infer/90c91983dd5c78a50f51976141b57db516e29a05/vignettes/infer_cache/html/calculate-point_94c073b633c3cf7bef3252dcad544ee2.RData -------------------------------------------------------------------------------- /vignettes/infer_cache/html/calculate-point_94c073b633c3cf7bef3252dcad544ee2.rdb: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/tidymodels/infer/90c91983dd5c78a50f51976141b57db516e29a05/vignettes/infer_cache/html/calculate-point_94c073b633c3cf7bef3252dcad544ee2.rdb 
--------------------------------------------------------------------------------
/vignettes/infer_cache/html/ (remaining knitr cache binaries):
--------------------------------------------------------------------------------
The remaining files in vignettes/infer_cache/html/ are binary knitr cache
artifacts (.RData, .rdb, and .rdx triples) for the cached chunks of the infer
vignette; their contents cannot be rendered as text. Each file is available at
https://raw.githubusercontent.com/tidymodels/infer/90c91983dd5c78a50f51976141b57db516e29a05/vignettes/infer_cache/html/<filename>
for the filenames below:

calculate-point_94c073b633c3cf7bef3252dcad544ee2.rdx
generate-permute_21b25928d642a97a30057306d51f1b23.RData / .rdb / .rdx
generate-point_d562524427be20dbb4736ca1ea29b04b.RData / .rdb / .rdx
hypothesize-40-hr-week_c8e33c404efa90c2ca0b2eacad95b06c.RData / .rdb / .rdx
hypothesize-independence_fe1c79b9f1dc0df488828fdd34c8145f.RData / .rdb / .rdx
specify-diff-in-means_e4103c4c3e3daedd5c1429b7a1bc8727.RData / .rdb / .rdx
specify-example_3ea3cfa390233b127dc25b05b0354bcf.RData / .rdb / .rdx
specify-one_149be66261b0606b7ddb80efd10fa81d.RData / .rdb / .rdx
specify-success_e8eb15e9f621ccf60cb6527a6bccdb4b.RData / .rdb / .rdx
specify-two_20085531c110a936ee691162f225333b.RData / .rdb / .rdx
--------------------------------------------------------------------------------