├── .Rbuildignore ├── .github ├── .gitignore ├── CODE_OF_CONDUCT.md └── workflows │ ├── R-CMD-check.yaml │ ├── pkgdown.yaml │ ├── pr-commands.yaml │ └── test-coverage.yaml ├── .gitignore ├── DESCRIPTION ├── LICENSE ├── LICENSE.md ├── MAINTENANCE.md ├── NAMESPACE ├── NEWS.md ├── R ├── autoplot.R ├── bench-package.R ├── bench_process_memory.R ├── bench_time.R ├── bytes.R ├── expression.R ├── hires_time.R ├── import-standalone-s3-register.R ├── load.R ├── mark.R ├── press.R ├── time.R ├── utils.R ├── workout.R └── zzz.R ├── README.Rmd ├── README.md ├── _pkgdown.yml ├── air.toml ├── bench.Rproj ├── codecov.yml ├── cran-comments.md ├── inst └── examples │ └── exprs.R ├── man ├── as_bench_mark.Rd ├── as_bench_time.Rd ├── autoplot.bench_mark.Rd ├── bench-package.Rd ├── bench_bytes.Rd ├── bench_bytes_trans.Rd ├── bench_load_average.Rd ├── bench_memory.Rd ├── bench_process_memory.Rd ├── bench_time.Rd ├── bench_time_trans.Rd ├── figures │ ├── README-autoplot-1.png │ └── README-custom-plot-1.png ├── hires_time.Rd ├── knit_print.bench_mark.Rd ├── mark.Rd ├── press.Rd ├── scale_bench_expr.Rd ├── scale_bench_time.Rd ├── summary.bench_mark.Rd └── workout.Rd ├── revdep ├── .gitignore ├── README.md ├── cran.md ├── failures.md └── problems.md ├── src ├── .gitignore ├── Makevars.win ├── load.c ├── mark.c ├── nanotime.c ├── nanotime.h ├── os.h └── process_memory.c └── tests ├── testthat.R └── testthat ├── _snaps ├── mark.md └── press.md ├── test-autoplot.R ├── test-bench_process_memory.R ├── test-bench_time.R ├── test-bytes.R ├── test-expression.R ├── test-hires_time.R ├── test-mark.R ├── test-press.R ├── test-time.R └── test-workout.R /.Rbuildignore: -------------------------------------------------------------------------------- 1 | ^.*\.Rproj$ 2 | ^\.Rproj\.user$ 3 | ^\.travis\.yml$ 4 | ^README\.Rmd$ 5 | ^appveyor\.yml$ 6 | ^codecov\.yml$ 7 | ^README\.html$ 8 | ^README_cache$ 9 | ^docs$ 10 | ^_pkgdown\.yml$ 11 | ^cran-comments\.md$ 12 | ^CRAN-RELEASE$ 13 | ^.*\.tar\.gz$ 14 | 
^revdep$ 15 | ^vignettes$ 16 | ^\.github$ 17 | ^depends.Rds$ 18 | ^bench$ 19 | ^LICENSE\.md$ 20 | ^MAINTENANCE\.md$ 21 | ^CRAN-SUBMISSION$ 22 | ^air.toml$ 23 | -------------------------------------------------------------------------------- /.github/.gitignore: -------------------------------------------------------------------------------- 1 | *.html 2 | -------------------------------------------------------------------------------- /.github/CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Covenant Code of Conduct 2 | 3 | ## Our Pledge 4 | 5 | We as members, contributors, and leaders pledge to make participation in our 6 | community a harassment-free experience for everyone, regardless of age, body 7 | size, visible or invisible disability, ethnicity, sex characteristics, gender 8 | identity and expression, level of experience, education, socio-economic status, 9 | nationality, personal appearance, race, caste, color, religion, or sexual 10 | identity and orientation. 11 | 12 | We pledge to act and interact in ways that contribute to an open, welcoming, 13 | diverse, inclusive, and healthy community. 
14 | 15 | ## Our Standards 16 | 17 | Examples of behavior that contributes to a positive environment for our 18 | community include: 19 | 20 | * Demonstrating empathy and kindness toward other people 21 | * Being respectful of differing opinions, viewpoints, and experiences 22 | * Giving and gracefully accepting constructive feedback 23 | * Accepting responsibility and apologizing to those affected by our mistakes, 24 | and learning from the experience 25 | * Focusing on what is best not just for us as individuals, but for the overall 26 | community 27 | 28 | Examples of unacceptable behavior include: 29 | 30 | * The use of sexualized language or imagery, and sexual attention or advances of 31 | any kind 32 | * Trolling, insulting or derogatory comments, and personal or political attacks 33 | * Public or private harassment 34 | * Publishing others' private information, such as a physical or email address, 35 | without their explicit permission 36 | * Other conduct which could reasonably be considered inappropriate in a 37 | professional setting 38 | 39 | ## Enforcement Responsibilities 40 | 41 | Community leaders are responsible for clarifying and enforcing our standards of 42 | acceptable behavior and will take appropriate and fair corrective action in 43 | response to any behavior that they deem inappropriate, threatening, offensive, 44 | or harmful. 45 | 46 | Community leaders have the right and responsibility to remove, edit, or reject 47 | comments, commits, code, wiki edits, issues, and other contributions that are 48 | not aligned to this Code of Conduct, and will communicate reasons for moderation 49 | decisions when appropriate. 50 | 51 | ## Scope 52 | 53 | This Code of Conduct applies within all community spaces, and also applies when 54 | an individual is officially representing the community in public spaces. 
55 | Examples of representing our community include using an official e-mail address, 56 | posting via an official social media account, or acting as an appointed 57 | representative at an online or offline event. 58 | 59 | ## Enforcement 60 | 61 | Instances of abusive, harassing, or otherwise unacceptable behavior may be 62 | reported to the community leaders responsible for enforcement at codeofconduct@posit.co. 63 | All complaints will be reviewed and investigated promptly and fairly. 64 | 65 | All community leaders are obligated to respect the privacy and security of the 66 | reporter of any incident. 67 | 68 | ## Enforcement Guidelines 69 | 70 | Community leaders will follow these Community Impact Guidelines in determining 71 | the consequences for any action they deem in violation of this Code of Conduct: 72 | 73 | ### 1. Correction 74 | 75 | **Community Impact**: Use of inappropriate language or other behavior deemed 76 | unprofessional or unwelcome in the community. 77 | 78 | **Consequence**: A private, written warning from community leaders, providing 79 | clarity around the nature of the violation and an explanation of why the 80 | behavior was inappropriate. A public apology may be requested. 81 | 82 | ### 2. Warning 83 | 84 | **Community Impact**: A violation through a single incident or series of 85 | actions. 86 | 87 | **Consequence**: A warning with consequences for continued behavior. No 88 | interaction with the people involved, including unsolicited interaction with 89 | those enforcing the Code of Conduct, for a specified period of time. This 90 | includes avoiding interactions in community spaces as well as external channels 91 | like social media. Violating these terms may lead to a temporary or permanent 92 | ban. 93 | 94 | ### 3. Temporary Ban 95 | 96 | **Community Impact**: A serious violation of community standards, including 97 | sustained inappropriate behavior. 
98 | 99 | **Consequence**: A temporary ban from any sort of interaction or public 100 | communication with the community for a specified period of time. No public or 101 | private interaction with the people involved, including unsolicited interaction 102 | with those enforcing the Code of Conduct, is allowed during this period. 103 | Violating these terms may lead to a permanent ban. 104 | 105 | ### 4. Permanent Ban 106 | 107 | **Community Impact**: Demonstrating a pattern of violation of community 108 | standards, including sustained inappropriate behavior, harassment of an 109 | individual, or aggression toward or disparagement of classes of individuals. 110 | 111 | **Consequence**: A permanent ban from any sort of public interaction within the 112 | community. 113 | 114 | ## Attribution 115 | 116 | This Code of Conduct is adapted from the [Contributor Covenant][homepage], 117 | version 2.1, available at 118 | . 119 | 120 | Community Impact Guidelines were inspired by 121 | [Mozilla's code of conduct enforcement ladder][https://github.com/mozilla/inclusion]. 122 | 123 | For answers to common questions about this code of conduct, see the FAQ at 124 | . Translations are available at . 125 | 126 | [homepage]: https://www.contributor-covenant.org 127 | -------------------------------------------------------------------------------- /.github/workflows/R-CMD-check.yaml: -------------------------------------------------------------------------------- 1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples 2 | # Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help 3 | # 4 | # NOTE: This workflow is overkill for most R packages and 5 | # check-standard.yaml is likely a better choice. 6 | # usethis::use_github_action("check-standard") will install it. 
7 | on: 8 | push: 9 | branches: [main, master] 10 | pull_request: 11 | 12 | name: R-CMD-check.yaml 13 | 14 | permissions: read-all 15 | 16 | jobs: 17 | R-CMD-check: 18 | runs-on: ${{ matrix.config.os }} 19 | 20 | name: ${{ matrix.config.os }} (${{ matrix.config.r }}) 21 | 22 | strategy: 23 | fail-fast: false 24 | matrix: 25 | config: 26 | - {os: macos-latest, r: 'release'} 27 | 28 | - {os: windows-latest, r: 'release'} 29 | # use 4.0 or 4.1 to check with rtools40's older compiler 30 | - {os: windows-latest, r: 'oldrel-4'} 31 | 32 | - {os: ubuntu-latest, r: 'devel', http-user-agent: 'release'} 33 | - {os: ubuntu-latest, r: 'release'} 34 | - {os: ubuntu-latest, r: 'oldrel-1'} 35 | - {os: ubuntu-latest, r: 'oldrel-2'} 36 | - {os: ubuntu-latest, r: 'oldrel-3'} 37 | - {os: ubuntu-latest, r: 'oldrel-4'} 38 | 39 | env: 40 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 41 | R_KEEP_PKG_SOURCE: yes 42 | 43 | steps: 44 | - uses: actions/checkout@v4 45 | 46 | - uses: r-lib/actions/setup-pandoc@v2 47 | 48 | - uses: r-lib/actions/setup-r@v2 49 | with: 50 | r-version: ${{ matrix.config.r }} 51 | http-user-agent: ${{ matrix.config.http-user-agent }} 52 | use-public-rspm: true 53 | 54 | - uses: r-lib/actions/setup-r-dependencies@v2 55 | with: 56 | extra-packages: any::rcmdcheck 57 | needs: check 58 | 59 | - uses: r-lib/actions/check-r-package@v2 60 | with: 61 | upload-snapshots: true 62 | build_args: 'c("--no-manual","--compact-vignettes=gs+qpdf")' 63 | -------------------------------------------------------------------------------- /.github/workflows/pkgdown.yaml: -------------------------------------------------------------------------------- 1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples 2 | # Need help debugging build failures? 
Start at https://github.com/r-lib/actions#where-to-find-help 3 | on: 4 | push: 5 | branches: [main, master] 6 | pull_request: 7 | release: 8 | types: [published] 9 | workflow_dispatch: 10 | 11 | name: pkgdown.yaml 12 | 13 | permissions: read-all 14 | 15 | jobs: 16 | pkgdown: 17 | runs-on: ubuntu-latest 18 | # Only restrict concurrency for non-PR jobs 19 | concurrency: 20 | group: pkgdown-${{ github.event_name != 'pull_request' || github.run_id }} 21 | env: 22 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 23 | permissions: 24 | contents: write 25 | steps: 26 | - uses: actions/checkout@v4 27 | 28 | - uses: r-lib/actions/setup-pandoc@v2 29 | 30 | - uses: r-lib/actions/setup-r@v2 31 | with: 32 | use-public-rspm: true 33 | 34 | - uses: r-lib/actions/setup-r-dependencies@v2 35 | with: 36 | extra-packages: any::pkgdown, local::. 37 | needs: website 38 | 39 | - name: Build site 40 | run: pkgdown::build_site_github_pages(new_process = FALSE, install = FALSE) 41 | shell: Rscript {0} 42 | 43 | - name: Deploy to GitHub pages 🚀 44 | if: github.event_name != 'pull_request' 45 | uses: JamesIves/github-pages-deploy-action@v4.5.0 46 | with: 47 | clean: false 48 | branch: gh-pages 49 | folder: docs 50 | -------------------------------------------------------------------------------- /.github/workflows/pr-commands.yaml: -------------------------------------------------------------------------------- 1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples 2 | # Need help debugging build failures? 
Start at https://github.com/r-lib/actions#where-to-find-help 3 | on: 4 | issue_comment: 5 | types: [created] 6 | 7 | name: pr-commands.yaml 8 | 9 | permissions: read-all 10 | 11 | jobs: 12 | document: 13 | if: ${{ github.event.issue.pull_request && (github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'OWNER') && startsWith(github.event.comment.body, '/document') }} 14 | name: document 15 | runs-on: ubuntu-latest 16 | env: 17 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 18 | permissions: 19 | contents: write 20 | steps: 21 | - uses: actions/checkout@v4 22 | 23 | - uses: r-lib/actions/pr-fetch@v2 24 | with: 25 | repo-token: ${{ secrets.GITHUB_TOKEN }} 26 | 27 | - uses: r-lib/actions/setup-r@v2 28 | with: 29 | use-public-rspm: true 30 | 31 | - uses: r-lib/actions/setup-r-dependencies@v2 32 | with: 33 | extra-packages: any::roxygen2 34 | needs: pr-document 35 | 36 | - name: Document 37 | run: roxygen2::roxygenise() 38 | shell: Rscript {0} 39 | 40 | - name: commit 41 | run: | 42 | git config --local user.name "$GITHUB_ACTOR" 43 | git config --local user.email "$GITHUB_ACTOR@users.noreply.github.com" 44 | git add man/\* NAMESPACE 45 | git commit -m 'Document' 46 | 47 | - uses: r-lib/actions/pr-push@v2 48 | with: 49 | repo-token: ${{ secrets.GITHUB_TOKEN }} 50 | 51 | style: 52 | if: ${{ github.event.issue.pull_request && (github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'OWNER') && startsWith(github.event.comment.body, '/style') }} 53 | name: style 54 | runs-on: ubuntu-latest 55 | env: 56 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 57 | permissions: 58 | contents: write 59 | steps: 60 | - uses: actions/checkout@v4 61 | 62 | - uses: r-lib/actions/pr-fetch@v2 63 | with: 64 | repo-token: ${{ secrets.GITHUB_TOKEN }} 65 | 66 | - uses: r-lib/actions/setup-r@v2 67 | 68 | - name: Install dependencies 69 | run: install.packages("styler") 70 | shell: Rscript {0} 71 | 72 | - name: Style 73 | 
run: styler::style_pkg() 74 | shell: Rscript {0} 75 | 76 | - name: commit 77 | run: | 78 | git config --local user.name "$GITHUB_ACTOR" 79 | git config --local user.email "$GITHUB_ACTOR@users.noreply.github.com" 80 | git add \*.R 81 | git commit -m 'Style' 82 | 83 | - uses: r-lib/actions/pr-push@v2 84 | with: 85 | repo-token: ${{ secrets.GITHUB_TOKEN }} 86 | -------------------------------------------------------------------------------- /.github/workflows/test-coverage.yaml: -------------------------------------------------------------------------------- 1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples 2 | # Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help 3 | on: 4 | push: 5 | branches: [main, master] 6 | pull_request: 7 | 8 | name: test-coverage.yaml 9 | 10 | permissions: read-all 11 | 12 | jobs: 13 | test-coverage: 14 | runs-on: ubuntu-latest 15 | env: 16 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 17 | 18 | steps: 19 | - uses: actions/checkout@v4 20 | 21 | - uses: r-lib/actions/setup-r@v2 22 | with: 23 | use-public-rspm: true 24 | 25 | - uses: r-lib/actions/setup-r-dependencies@v2 26 | with: 27 | extra-packages: any::covr, any::xml2 28 | needs: coverage 29 | 30 | - name: Test coverage 31 | run: | 32 | cov <- covr::package_coverage( 33 | quiet = FALSE, 34 | clean = FALSE, 35 | install_path = file.path(normalizePath(Sys.getenv("RUNNER_TEMP"), winslash = "/"), "package") 36 | ) 37 | covr::to_cobertura(cov) 38 | shell: Rscript {0} 39 | 40 | - uses: codecov/codecov-action@v4 41 | with: 42 | # Fail if error if not on PR, or if on PR and token is given 43 | fail_ci_if_error: ${{ github.event_name != 'pull_request' || secrets.CODECOV_TOKEN }} 44 | file: ./cobertura.xml 45 | plugin: noop 46 | disable_search: true 47 | token: ${{ secrets.CODECOV_TOKEN }} 48 | 49 | - name: Show testthat output 50 | if: always() 51 | run: | 52 | ## 
-------------------------------------------------------------------- 53 | find '${{ runner.temp }}/package' -name 'testthat.Rout*' -exec cat '{}' \; || true 54 | shell: bash 55 | 56 | - name: Upload test results 57 | if: failure() 58 | uses: actions/upload-artifact@v4 59 | with: 60 | name: coverage-test-failures 61 | path: ${{ runner.temp }}/package 62 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .Rproj.user 2 | .Rhistory 3 | .RData 4 | README.html 5 | README_cache/ 6 | docs/ 7 | -------------------------------------------------------------------------------- /DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: bench 2 | Title: High Precision Timing of R Expressions 3 | Version: 1.1.4.9000 4 | Authors@R: c( 5 | person("Jim", "Hester", role = "aut"), 6 | person("Davis", "Vaughan", , "davis@posit.co", role = c("aut", "cre")), 7 | person("Drew", "Schmidt", role = "ctb", 8 | comment = "read_proc_file implementation"), 9 | person("Posit Software, PBC", role = c("cph", "fnd")) 10 | ) 11 | Description: Tools to accurately benchmark and analyze execution times for 12 | R expressions. 
13 | License: MIT + file LICENSE 14 | URL: https://bench.r-lib.org/, https://github.com/r-lib/bench 15 | BugReports: https://github.com/r-lib/bench/issues 16 | Depends: 17 | R (>= 4.0.0) 18 | Imports: 19 | glue (>= 1.8.0), 20 | methods, 21 | pillar (>= 1.10.1), 22 | profmem (>= 0.6.0), 23 | rlang (>= 1.1.4), 24 | stats, 25 | tibble (>= 3.2.1), 26 | utils 27 | Suggests: 28 | covr, 29 | dplyr, 30 | forcats, 31 | ggbeeswarm, 32 | ggplot2 (>= 3.5.1), 33 | ggridges, 34 | parallel, 35 | scales, 36 | testthat (>= 3.2.3), 37 | tidyr (>= 1.3.1), 38 | vctrs (>= 0.6.5), 39 | withr 40 | Config/Needs/website: tidyverse/tidytemplate 41 | Config/testthat/edition: 3 42 | Config/usethis/last-upkeep: 2025-01-16 43 | Encoding: UTF-8 44 | Roxygen: list(markdown = TRUE) 45 | RoxygenNote: 7.3.2 46 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | YEAR: 2025 2 | COPYRIGHT HOLDER: bench authors 3 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | # MIT License 2 | 3 | Copyright (c) 2025 bench authors 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /MAINTENANCE.md: -------------------------------------------------------------------------------- 1 | ## Current state 2 | 3 | The CRAN version of bench is very stable. The dev version has the half finished continuous benchmarking code, which should either be extended or removed, see future directions for discussion. 4 | 5 | ## Future directions 6 | 7 | Continuous benchmarking - feature development of this was largely derailed by COVID-19, I originally proposed it as a topic for a talk at UseR!2020, but when that conference went to virtual only it became less of a priority. 8 | In addition the ursa labs conbench package has provided a good alternative to what is done in the bench package (https://ursalabs.org/blog/announcing-conbench/) and https://github.com/ursacomputing/arrowbench. 9 | 10 | Possible future directions would be talking more with the arrow team on bench features which would be useful for their use cases and extending bench functionality with them. 
11 | -------------------------------------------------------------------------------- /NAMESPACE: -------------------------------------------------------------------------------- 1 | # Generated by roxygen2: do not edit by hand 2 | 3 | S3method("[",bench_bytes) 4 | S3method("[",bench_expr) 5 | S3method("[",bench_mark) 6 | S3method("[",bench_time) 7 | S3method("[[",bench_bytes) 8 | S3method("[[",bench_time) 9 | S3method(Ops,bench_bytes) 10 | S3method(Ops,bench_time) 11 | S3method(Summary,bench_time) 12 | S3method(as.character,bench_bytes) 13 | S3method(as.character,bench_expr) 14 | S3method(as.character,bench_time) 15 | S3method(as_bench_bytes,bench_bytes) 16 | S3method(as_bench_bytes,default) 17 | S3method(as_bench_bytes,numeric) 18 | S3method(as_bench_time,bench_time) 19 | S3method(as_bench_time,default) 20 | S3method(as_bench_time,numeric) 21 | S3method(format,bench_bytes) 22 | S3method(format,bench_expr) 23 | S3method(format,bench_time) 24 | S3method(max,bench_bytes) 25 | S3method(max,bench_time) 26 | S3method(mean,bench_time) 27 | S3method(min,bench_bytes) 28 | S3method(min,bench_time) 29 | S3method(pillar_shaft,bench_bytes) 30 | S3method(pillar_shaft,bench_expr) 31 | S3method(pillar_shaft,bench_time) 32 | S3method(plot,bench_mark) 33 | S3method(print,bench_bytes) 34 | S3method(print,bench_expr) 35 | S3method(print,bench_time) 36 | S3method(rbind,bench_mark) 37 | S3method(sum,bench_bytes) 38 | S3method(sum,bench_time) 39 | S3method(summary,bench_mark) 40 | S3method(type_sum,bench_bytes) 41 | S3method(type_sum,bench_expr) 42 | S3method(type_sum,bench_time) 43 | export(as_bench_bytes) 44 | export(as_bench_mark) 45 | export(as_bench_time) 46 | export(bench_bytes) 47 | export(bench_bytes_trans) 48 | export(bench_load_average) 49 | export(bench_memory) 50 | export(bench_process_memory) 51 | export(bench_time) 52 | export(bench_time_trans) 53 | export(hires_time) 54 | export(mark) 55 | export(press) 56 | export(scale_color_bench_expr) 57 | 
export(scale_colour_bench_expr) 58 | export(scale_x_bench_bytes) 59 | export(scale_x_bench_expr) 60 | export(scale_x_bench_time) 61 | export(scale_y_bench_bytes) 62 | export(scale_y_bench_expr) 63 | export(scale_y_bench_time) 64 | export(system_time) 65 | export(workout) 66 | export(workout_expressions) 67 | importFrom(methods,setOldClass) 68 | importFrom(pillar,pillar_shaft) 69 | importFrom(pillar,type_sum) 70 | importFrom(rlang,.data) 71 | useDynLib(bench, .registration = TRUE) 72 | -------------------------------------------------------------------------------- /NEWS.md: -------------------------------------------------------------------------------- 1 | # bench (development version) 2 | 3 | # bench 1.1.4 4 | 5 | * `press()` gains a new `.quiet` argument to silence progress messages (#145). 6 | 7 | * The `.grid` argument of `press()` now subsets data.frames and tibbles 8 | consistently (#142). 9 | 10 | * `bench_time_trans()` and `bench_bytes_trans()` once again apply pretty 11 | breaks correctly (#140, @plietar, @simonpcouch). 12 | 13 | * R >=4.0.0 is now required, which is aligned with tidyverse standards. 14 | 15 | * Switched to modern ggplot2 conventions internally (#141, @olivroy). 16 | 17 | # bench 1.1.3 18 | 19 | * Long unnamed `bench_expr` expressions are now truncated correctly when used as 20 | columns of a tibble (#94). 21 | 22 | * `bench_mark` tibbles now respect the knitr paged df option (#103). 23 | 24 | * Fixed an issue where macOS specific C code paths were accidentally being used 25 | on GNU Hurd (#118). 26 | 27 | * Fixed `-Wstrict-prototypes` warnings, as requested by CRAN (#124). 28 | 29 | * R >=3.5.0 is now required, which is aligned with tidyverse standards. 30 | 31 | * bench now uses testthat 3e (#129). 32 | 33 | * bench no longer Suggests mockery. 34 | 35 | # bench 1.1.2 36 | 37 | * Davis Vaughan is now the maintainer. 
38 | 39 | * `autoplot.bench_mark()` again supports factor levels for `expression`, as intended (#82) 40 | 41 | * `bench::mark()` and `bench::workout()` no longer support unquote 42 | and splice operators. This fixes inconsistencies in performance 43 | results with functions like `rlang::list2()` (#61). 44 | 45 | * bench has been re-licensed as MIT (#101). 46 | 47 | # bench 1.1.1 48 | 49 | * `mark()` columns memory, result and mem_alloc columns are now always 50 | included, even if they are unused. 51 | 52 | # bench 1.1.0 53 | 54 | ## New features 55 | 56 | * New `bench_process_memory()` function, to return the current and maximum 57 | memory used by the current process. This uses system functions to track 58 | memory, so can measure memory outside of R's GC heap. 59 | 60 | * New `workout_expressions()` function, a low-level function to workout a list 61 | of expressions, like those obtained via `parse()` from a file. 62 | 63 | * `mark()` gains a `memory` argument to control if it records memory 64 | allocations, set `memory = FALSE` to disable recording memory allocations, 65 | which can be helpful when trying to benchmark long pieces of code with many 66 | allocations (#62). 67 | 68 | ## Minor improvements and fixes 69 | 70 | * `mark()` now permits empty arguments, e.g. accidental trailing commas (#61). 71 | 72 | * `mark()` now errors correctly when the expressions deparsed length is 73 | different. 74 | 75 | * `bench_expr` objects now work better with the upcoming versions of tibble and 76 | vctrs (@romainfrancois, #64) 77 | 78 | * `autoplot.bench_mark()` provides a more informative error if the `ggbeeswarm` package is not installed (@coatless, #69). 79 | 80 | * Update documentation of `bench_mark` columns (@jdblischak, #67). 81 | 82 | # bench 1.0.4 83 | 84 | * `bench_memory()` examples no longer fail if they are run with R that does not 85 | have memory profiling capability (#56). 
86 | 87 | * `bench_expr` now has a class of `c("bench_expr", "list")` rather than 88 | `c("bench_expr", "expression")`, as it is really a list of calls rather than 89 | a true expression object. (https://github.com/r-lib/vctrs/issues/559) 90 | 91 | # bench 1.0.3 92 | 93 | * `summary.bench_mark()` gains a `time_unit` argument, so you can report all 94 | times in a consistent scale if desired (#18, #26). 95 | 96 | * `bench_mark()` now checks for user interrupts, to allow you to stop benchmarking 97 | if it takes longer than you were expecting (#49). 98 | 99 | * New `bench_memory()` to capture just the memory allocated by an expression. 100 | 101 | * `bench_time()` is now an alias for `system_time()`. 102 | 103 | * `unnest.bench_mark()` is now compatible with the upcoming tidyr 1.0.0 (#48, #51) 104 | 105 | * New `hires_time()` allows you to explicitly capture high resolution time 106 | points. 107 | 108 | # bench 1.0.2 109 | 110 | * `workout()` a new function which makes timing multiple expressions in turn 111 | simpler. 112 | 113 | * `mark()` now internally uses a tempfile rather than a 114 | textConnection, as the latter has a 100,000 character limit on 115 | some platforms (#27) 116 | 117 | * `mark()` no longer returns the mean or max values and the column order has 118 | been tweaked to try and put the most interesting columns first (#37) 119 | 120 | * Errors during evaluation now halt execution (#28, #43) 121 | 122 | * `scale_bench_time()` and `scale_bench_bytes()` now allow you to use a non-logarithmic scale. 123 | 124 | # bench 1.0.1 125 | 126 | * Add support for macOS versions prior to 10.12 127 | * Disable load sensitive tests on CRAN, to avoid failures 128 | 129 | # bench 1.0.0 130 | 131 | * Added a `NEWS.md` file to track changes to the package. 
132 | -------------------------------------------------------------------------------- /R/autoplot.R: -------------------------------------------------------------------------------- 1 | #' Autoplot method for bench_mark objects 2 | #' 3 | #' @param object A `bench_mark` object. 4 | #' @param type The type of plot. Plotting geoms used for each type are 5 | #' - beeswarm - [ggbeeswarm::geom_quasirandom()] 6 | #' - jitter - [ggplot2::geom_jitter()] 7 | #' - ridge - [ggridges::geom_density_ridges()] 8 | #' - boxplot - [ggplot2::geom_boxplot()] 9 | #' - violin - [ggplot2::geom_violin()] 10 | #' @param ... Additional arguments passed to the plotting geom. 11 | #' @details This function requires some optional dependencies. [ggplot2][ggplot2::ggplot2-package], 12 | #' [tidyr][tidyr::tidyr-package], and depending on the plot type 13 | #' [ggbeeswarm][ggbeeswarm::ggbeeswarm], [ggridges][ggridges::ggridges-package]. 14 | #' 15 | #' For `type` of `beeswarm` and `jitter` the points are colored by the highest 16 | #' level garbage collection performed during each iteration. 17 | #' 18 | #' For plots with 2 parameters `ggplot2::facet_grid()` is used to construct a 19 | #' 2d facet. For other numbers of parameters `ggplot2::facet_wrap()` is used 20 | #' instead. 21 | #' 22 | #' @examples 23 | #' dat <- data.frame(x = runif(10000, 1, 1000), y=runif(10000, 1, 1000)) 24 | #' 25 | #' res <- bench::mark( 26 | #' dat[dat$x > 500, ], 27 | #' dat[which(dat$x > 500), ], 28 | #' subset(dat, x > 500)) 29 | #' 30 | #' if (require(ggplot2) && require(tidyr) && require(ggbeeswarm)) { 31 | #' 32 | #' # Beeswarm plot 33 | #' autoplot(res) 34 | #' 35 | #' # ridge (joyplot) 36 | #' autoplot(res, "ridge") 37 | #' 38 | #' # If you want to have the plots ordered by execution time you can do so by 39 | #' # ordering factor levels in the expressions. 
40 | #' if (require(dplyr) && require(forcats)) { 41 | #' 42 | #' res %>% 43 | #' mutate(expression = forcats::fct_reorder(as.character(expression), min, .desc = TRUE)) %>% 44 | #' as_bench_mark() %>% 45 | #' autoplot("violin") 46 | #' } 47 | #' } 48 | # Lazily registered in `.onLoad()` 49 | autoplot.bench_mark <- function( 50 | object, 51 | type = c("beeswarm", "jitter", "ridge", "boxplot", "violin"), 52 | ... 53 | ) { 54 | rlang::check_installed(c("ggplot2", "tidyr (>= 1.0.0)"), "for `autoplot()`.") 55 | 56 | type <- match.arg(type) 57 | 58 | if (type == "beeswarm") { 59 | rlang::check_installed("ggbeeswarm", "to use `type = \"beeswarm\".") 60 | } 61 | 62 | # Just convert bench_expr to characters 63 | if (inherits(object$expression, "bench_expr")) { 64 | object$expression <- as.character(object$expression) 65 | } 66 | 67 | res <- tidyr::unnest(object, c(time, gc)) 68 | p <- ggplot2::ggplot(res) 69 | 70 | switch( 71 | type, 72 | beeswarm = p <- p + 73 | ggplot2::aes(.data$time, .data$expression, color = .data$gc) + 74 | ggbeeswarm::geom_quasirandom(..., orientation = "y"), 75 | 76 | jitter = p <- p + 77 | ggplot2::aes(.data$time, .data$expression, color = .data$gc) + 78 | ggplot2::geom_jitter(...), 79 | 80 | ridge = p <- p + 81 | ggplot2::aes(.data$time, .data$expression) + 82 | ggridges::geom_density_ridges(...), 83 | 84 | boxplot = p <- p + 85 | ggplot2::aes(.data$time, .data$expression) + 86 | ggplot2::geom_boxplot(...), 87 | 88 | violin = p <- p + 89 | ggplot2::aes(.data$time, .data$expression) + 90 | ggplot2::geom_violin(...) 
91 | ) 92 | 93 | parameters <- setdiff( 94 | colnames(object), 95 | c("expression", summary_cols, data_cols, c("level0", "level1", "level2")) 96 | ) 97 | 98 | if (length(parameters) == 0) { 99 | return(p) 100 | } 101 | 102 | if (length(parameters) == 2) { 103 | return( 104 | p + 105 | ggplot2::facet_grid( 106 | paste0(parameters[[1]], "~", parameters[[2]]), 107 | labeller = ggplot2::label_both 108 | ) 109 | ) 110 | } 111 | 112 | p + ggplot2::facet_wrap(parameters, labeller = ggplot2::label_both) 113 | } 114 | 115 | #' @rdname autoplot.bench_mark 116 | #' @param x A `bench_mark` object. 117 | #' @param y Ignored, required for compatibility with the `plot()` generic. 118 | #' @export 119 | plot.bench_mark <- function( 120 | x, 121 | ..., 122 | type = c("beeswarm", "jitter", "ridge", "boxplot", "violin"), 123 | y 124 | ) { 125 | type <- match.arg(type) 126 | ggplot2::autoplot(x, type = type, ...) 127 | } 128 | -------------------------------------------------------------------------------- /R/bench-package.R: -------------------------------------------------------------------------------- 1 | ## usethis namespace: start 2 | #' @importFrom methods setOldClass 3 | #' @importFrom pillar pillar_shaft 4 | #' @importFrom pillar type_sum 5 | #' @importFrom rlang .data 6 | ## usethis namespace: end 7 | NULL 8 | 9 | #' @keywords internal 10 | #' @inherit summary.bench_mark examples 11 | "_PACKAGE" 12 | -------------------------------------------------------------------------------- /R/bench_process_memory.R: -------------------------------------------------------------------------------- 1 | #' Retrieve the current and maximum memory from the R process 2 | #' 3 | #' The memory reported here will likely differ from that reported by `gc()`, as 4 | #' this includes all memory from the R process, including any child processes 5 | #' and memory allocated outside R's garbage collector heap. 
6 | #' 7 | #' The OS APIs used are as follows 8 | #' 9 | #' ## Windows 10 | #' - [PROCESS_MEMORY_COUNTERS.WorkingSetSize](https://learn.microsoft.com/en-us/windows/win32/api/psapi/ns-psapi-process_memory_counters) 11 | #' - [PROCESS_MEMORY_COUNTERS.PeakWorkingSetSize](https://learn.microsoft.com/en-us/windows/win32/api/psapi/ns-psapi-process_memory_counters) 12 | #' ## macOS 13 | #' - [task_info(TASK_BASIC_INFO)](https://developer.apple.com/documentation/kernel/1537934-task_info?language=occ) 14 | #' - [rusage.ru_maxrss](https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/getrusage.2.html) 15 | #' ## linux 16 | #' - [/proc/pid/status VmSize](https://man7.org/linux/man-pages/man5/proc.5.html) 17 | #' - [/proc/pid/status VmPeak](https://man7.org/linux/man-pages/man5/proc.5.html) 18 | #' @export 19 | bench_process_memory <- function() { 20 | stats::setNames( 21 | as_bench_bytes(.Call(bench_process_memory_)), 22 | c("current", "max") 23 | ) 24 | } 25 | -------------------------------------------------------------------------------- /R/bench_time.R: -------------------------------------------------------------------------------- 1 | #' Measure Process CPU and real time that an expression used. 2 | #' 3 | #' @param expr A expression to be timed. 4 | #' @return A `bench_time` object with two values. 5 | #' - `process` - The process CPU usage of the expression evaluation. 6 | #' - `real` - The wallclock time of the expression evaluation. 7 | #' @details On some systems (such as macOS) the process clock has lower 8 | #' precision than the realtime clock, as a result there may be cases where the 9 | #' process time is larger than the real time for fast expressions. 10 | #' @examples 11 | #' # This will use ~.5 seconds of real time, but very little process time. 12 | #' bench_time(Sys.sleep(.5)) 13 | #' @seealso [bench_memory()] To measure memory allocations for a given expression. 
14 | #' @aliases system_time 15 | #' @export 16 | bench_time <- function(expr) { 17 | stats::setNames( 18 | as_bench_time(.Call(system_time_, substitute(expr), parent.frame())), 19 | c("process", "real") 20 | ) 21 | } 22 | 23 | #' @export 24 | system_time <- bench_time 25 | 26 | #' Measure memory that an expression used. 27 | #' 28 | #' @param expr A expression to be measured. 29 | #' @return A tibble with two columns 30 | #' - The total amount of memory allocated 31 | #' - The raw memory allocations as parsed by [profmem::readRprofmem()] 32 | #' @examples 33 | #' if (capabilities("profmem")) { 34 | #' bench_memory(1 + 1:10000) 35 | #' } 36 | #' @export 37 | bench_memory <- function(expr) { 38 | can_profile_memory <- capabilities("profmem") 39 | if (!can_profile_memory) { 40 | stop("Memory profiling not available in this version of R", call. = FALSE) 41 | } 42 | f <- tempfile() 43 | on.exit(unlink(f)) 44 | utils::Rprofmem(f, threshold = 1) 45 | force(expr) 46 | utils::Rprofmem(NULL) 47 | 48 | memory <- parse_allocations(f) 49 | 50 | tibble::tibble( 51 | mem_alloc = bench_bytes(sum(memory$bytes, na.rm = TRUE)), 52 | memory = list(memory) 53 | ) 54 | } 55 | -------------------------------------------------------------------------------- /R/bytes.R: -------------------------------------------------------------------------------- 1 | # This is mostly a copy of https://github.com/r-lib/fs/blob/0f5b6191935fe4c862d2e5003655e6c1669f4afd/R/fs_bytes.R 2 | # If I end up needing this in a third package it should probably live in a package somewhere, maybe prettyunits? 3 | 4 | byte_units <- c( 5 | 'B' = 1, 6 | 'K' = 1024, 7 | 'M' = 1024^2, 8 | 'G' = 1024^3, 9 | 'T' = 1024^4, 10 | 'P' = 1024^5, 11 | 'E' = 1024^6, 12 | 'Z' = 1024^7, 13 | 'Y' = 1024^8 14 | ) 15 | 16 | #' Human readable memory sizes 17 | #' 18 | #' Construct, manipulate and display vectors of byte sizes. 
#' These are numeric
#' vectors, so you can compare them numerically, but they can also be compared
#' to human readable values such as '10MB'.
#'
#' These memory sizes are always assumed to be base 1024, rather than 1000.
#'
#' @param x A numeric or character vector. Character representations can use
#' shorthand sizes (see examples).
#' @examples
#' bench_bytes("1")
#' bench_bytes("1K")
#' bench_bytes("1Kb")
#' bench_bytes("1KiB")
#' bench_bytes("1MB")
#'
#' bench_bytes("1KB") < "1MB"
#'
#' sum(bench_bytes(c("1MB", "5MB", "500KB")))
#' @name bench_bytes
#' @export
as_bench_bytes <- function(x) {
  UseMethod("as_bench_bytes")
}

#' @export
#' @rdname bench_bytes
bench_bytes <- as_bench_bytes

# Low-level constructor: tags a bare numeric with the bench_bytes class.
new_bench_bytes <- function(x) {
  structure(x, class = c("bench_bytes", "numeric"))
}

setOldClass(c("bench_bytes", "numeric"), numeric())

#' @export
as_bench_bytes.default <- function(x) {
  x <- as.character(x)
  # The capture groups must be *named* (`size` and `unit`): `captures()`
  # returns the matched groups keyed by these names, which `m$size` and
  # `m$unit` below depend on. Anonymous `(?...)` groups are also invalid
  # PCRE syntax, so the names are required for the regex to compile at all.
  m <- captures(
    x,
    regexpr(
      "^(?<size>[[:digit:].]+)\\s*(?<unit>[KMGTPEZY]?)i?[Bb]?$",
      x,
      perl = TRUE
    )
  )
  # A bare number ("1") has no unit suffix; treat it as plain bytes.
  m$unit[m$unit == ""] <- "B"
  new_bench_bytes(unname(as.numeric(m$size) * byte_units[m$unit]))
}

#' @export
as_bench_bytes.bench_bytes <- function(x) {
  return(x)
}

#' @export
as_bench_bytes.numeric <- function(x) {
  new_bench_bytes(x)
}

# Adapted from https://github.com/gaborcsardi/prettyunits
#' @export
format.bench_bytes <- function(
  x,
  scientific = FALSE,
  digits = 3,
  drop0trailing = TRUE,
  ...
) {
  nms <- names(x)

  bytes <- unclass(x)

  unit <- vcapply(x, find_unit, byte_units)
  res <- round(bytes / byte_units[unit], digits = digits)

  ## Zero bytes
  res[bytes == 0] <- 0
  unit[bytes == 0] <- "B"

  ## NA and NaN bytes
  res[is.na(bytes)] <- NA_real_
  res[is.nan(bytes)] <- NaN
  unit[is.na(bytes)] <- "" # Includes NaN as well

  # Append an extra B to each unit
  large_units <- unit %in% names(byte_units)[-1]
  unit[large_units] <- paste0(unit[large_units], "B")

  res <- format(
    res,
    scientific = scientific,
    digits = digits,
    drop0trailing = drop0trailing,
    ...
  )

  stats::setNames(paste0(res, unit), nms)
}

#' @export
as.character.bench_bytes <- format.bench_bytes

#' @export
print.bench_bytes <- function(x, ...) {
  print(format.bench_bytes(x, ...), quote = FALSE)
}

# sum/min/max strip attributes via the internal generic, so each summary
# method re-wraps the result to keep the bench_bytes class.
#' @export
sum.bench_bytes <- function(x, ...) {
  new_bench_bytes(NextMethod())
}

#' @export
min.bench_bytes <- function(x, ...) {
  new_bench_bytes(NextMethod())
}

#' @export
max.bench_bytes <- function(x, ...) {
  new_bench_bytes(NextMethod())
}

#' @export
`[.bench_bytes` <- function(x, i) {
  new_bench_bytes(NextMethod("["))
}

#' @export
`[[.bench_bytes` <- function(x, i) {
  new_bench_bytes(NextMethod("[["))
}

#' @export
# Adapted from Ops.numeric_version
Ops.bench_bytes <- function(e1, e2) {
  if (nargs() == 1L) {
    stop(
      sprintf("unary '%s' not defined for \"bench_bytes\" objects", .Generic),
      call. = FALSE
    )
  }

  boolean <- switch(
    .Generic,
    `+` = TRUE,
    `-` = TRUE,
    `*` = TRUE,
    `/` = TRUE,
    `^` = TRUE,
    `<` = TRUE,
    `>` = TRUE,
    `==` = TRUE,
    `!=` = TRUE,
    `<=` = TRUE,
    `>=` = TRUE,
    FALSE
  )
  if (!boolean) {
    stop(
      sprintf("'%s' not defined for \"bench_bytes\" objects", .Generic),
      call. = FALSE
    )
  }
  e1 <- as_bench_bytes(e1)
  e2 <- as_bench_bytes(e2)
  NextMethod(.Generic)
}

#' @export
pillar_shaft.bench_bytes <- function(x, ...) {
  pillar::new_pillar_shaft_simple(format.bench_bytes(x), align = "right", ...)
}

#' @export
type_sum.bench_bytes <- function(x) {
  "bch:byt"
}

#' Benchmark bytes transformation
#'
#' This both log transforms the bytes and formats the labels as a
#' `bench_bytes` object.
#' @inheritParams bench_time_trans
#' @keywords internal
#' @export
bench_bytes_trans <- function(base = 2) {
  # `base = NULL` requests a plain (untransformed) scale.
  if (is.null(base)) {
    return(
      scales::trans_new(
        name = "bch:byt",
        transform = as.numeric,
        inverse = as_bench_bytes,
        breaks = scales::pretty_breaks(),
        domain = c(1e-100, Inf)
      )
    )
  }
  trans <- function(x) log(as.numeric(x), base)
  inv <- function(x) as_bench_bytes(base^as.numeric(x))

  scales::trans_new(
    name = paste0("bch:byt-", format(base)),
    transform = trans,
    inverse = inv,
    breaks = scales::log_breaks(base = base),
    domain = c(1e-100, Inf)
  )
}

# Lazily registered in `.onLoad()`
scale_type.bench_bytes <- function(x) "bench_bytes"

#' Position scales for bench_time data
#'
#' Default scales for the `bench_time` class, these are added to plots using
#' `bench_time` objects automatically.
234 | #' @name scale_bench_time 235 | #' @keywords internal 236 | #' @export 237 | scale_x_bench_bytes <- function(base = 10, ...) { 238 | ggplot2::scale_x_continuous(..., transform = bench_bytes_trans(base = base)) 239 | } 240 | 241 | #' @rdname scale_bench_time 242 | #' @keywords internal 243 | #' @export 244 | scale_y_bench_bytes <- function(base = 10, ...) { 245 | ggplot2::scale_y_continuous(..., transform = bench_bytes_trans(base = base)) 246 | } 247 | -------------------------------------------------------------------------------- /R/expression.R: -------------------------------------------------------------------------------- 1 | new_bench_expr <- function(x, description = names(x)) { 2 | if (is.null(description)) { 3 | description <- rep("", length(x)) 4 | } 5 | names(x) <- description 6 | structure(x, class = c("bench_expr", "list"), description = description) 7 | } 8 | 9 | #' @export 10 | format.bench_expr <- function(x, ...) { 11 | desc <- attr(x, "description") 12 | is_missing <- desc == "" 13 | desc[is_missing] <- vapply(x[is_missing], deparse_trunc, character(1)) 14 | desc 15 | } 16 | 17 | #' @export 18 | as.character.bench_expr <- format.bench_expr 19 | 20 | #' @export 21 | print.bench_expr <- function(x, ...) { 22 | x <- unclass(x) 23 | NextMethod() 24 | } 25 | 26 | #' @export 27 | type_sum.bench_expr <- function(x) { 28 | "bch:expr" 29 | } 30 | 31 | #' @export 32 | `[.bench_expr` <- function(x, i, ...) { 33 | new_x <- NextMethod("[") 34 | new_bench_expr(new_x) 35 | } 36 | 37 | # Lazily registered in `.onLoad()` 38 | vec_proxy.bench_expr <- function(x, ...) { 39 | desc <- attr(x, "description") 40 | attributes(x) <- NULL 41 | out <- list(x = x, desc = desc) 42 | vctrs::new_data_frame(out, n = length(x)) 43 | } 44 | 45 | # Lazily registered in `.onLoad()` 46 | vec_restore.bench_expr <- function(x, to, ...) { 47 | new_bench_expr(x$x, x$desc) 48 | } 49 | 50 | #' @export 51 | pillar_shaft.bench_expr <- function(x, ...) 
{ 52 | # We format bench expressions exactly like character vectors. This ensures 53 | # they are truncated as needed, which is useful for long unnamed expressions 54 | # (#94). This is the same logic as `pillar:::pillar_shaft.factor()`. 55 | pillar_shaft(as.character(x), ...) 56 | } 57 | 58 | # Lazily registered in `.onLoad()` 59 | scale_type.bench_expr <- function(x) { 60 | "bench_expr" 61 | } 62 | 63 | setOldClass(c("bench_expr", "list"), list()) 64 | 65 | #' Position and color scales for bench_expr data 66 | #' 67 | #' Default scales for the `bench_expr` class, these are added to plots using 68 | #' `bench_expr` objects automatically. 69 | #' @name scale_bench_expr 70 | #' @keywords internal 71 | #' @export 72 | scale_x_bench_expr <- function(...) { 73 | sc <- ggplot2::scale_x_discrete(...) 74 | sc$transform <- as.character 75 | sc 76 | } 77 | 78 | #' @rdname scale_bench_expr 79 | #' @keywords internal 80 | #' @export 81 | scale_y_bench_expr <- function(...) { 82 | sc <- ggplot2::scale_y_discrete(...) 83 | sc$transform <- as.character 84 | sc 85 | } 86 | 87 | #' @rdname scale_bench_expr 88 | #' @keywords internal 89 | #' @export 90 | scale_colour_bench_expr <- function( 91 | palette = scales::hue_pal(...), 92 | ..., 93 | aesthetics = "colour" 94 | ) { 95 | sc <- ggplot2::discrete_scale(aesthetics, "bench_expr", palette, ...) 96 | sc$transform <- as.character 97 | sc 98 | } 99 | 100 | #' @rdname scale_bench_expr 101 | #' @keywords internal 102 | #' @export 103 | scale_color_bench_expr <- scale_colour_bench_expr 104 | -------------------------------------------------------------------------------- /R/hires_time.R: -------------------------------------------------------------------------------- 1 | #' Return the current high-resolution real time. 2 | #' 3 | #' Time is expressed as seconds since some arbitrary time in the past; it 4 | #' is not correlated in any way to the time of day, and thus is not subject to 5 | #' resetting or drifting. 
The hi-res 6 | #' timer is ideally suited to performance measurement tasks, where cheap, 7 | #' accurate interval timing is required. 8 | #' @export 9 | #' @examples 10 | #' hires_time() 11 | #' 12 | #' # R rounds doubles to 7 digits by default, see greater precision by setting 13 | #' # the digits argument when printing 14 | #' print(hires_time(), digits = 20) 15 | #' 16 | #' # Generally used by recording two times and then subtracting them 17 | #' start <- hires_time() 18 | #' end <- hires_time() 19 | #' elapsed <- end - start 20 | #' elapsed 21 | hires_time <- function() { 22 | .Call(hires_time_) 23 | } 24 | -------------------------------------------------------------------------------- /R/import-standalone-s3-register.R: -------------------------------------------------------------------------------- 1 | # Standalone file: do not edit by hand 2 | # Source: https://github.com/r-lib/rlang/blob/HEAD/R/standalone-s3-register.R 3 | # Generated by: usethis::use_standalone("r-lib/rlang", "s3-register") 4 | # ---------------------------------------------------------------------- 5 | # 6 | # --- 7 | # repo: r-lib/rlang 8 | # file: standalone-s3-register.R 9 | # last-updated: 2024-05-14 10 | # license: https://unlicense.org 11 | # --- 12 | # 13 | # ## Changelog 14 | # 15 | # 2024-05-14: 16 | # 17 | # * Mentioned `usethis::use_standalone()`. 18 | # 19 | # nocov start 20 | 21 | #' Register a method for a suggested dependency 22 | #' 23 | #' Generally, the recommended way to register an S3 method is to use the 24 | #' `S3Method()` namespace directive (often generated automatically by the 25 | #' `@export` roxygen2 tag). However, this technique requires that the generic 26 | #' be in an imported package, and sometimes you want to suggest a package, 27 | #' and only provide a method when that package is loaded. `s3_register()` 28 | #' can be called from your package's `.onLoad()` to dynamically register 29 | #' a method only if the generic's package is loaded. 
30 | #' 31 | #' For R 3.5.0 and later, `s3_register()` is also useful when demonstrating 32 | #' class creation in a vignette, since method lookup no longer always involves 33 | #' the lexical scope. For R 3.6.0 and later, you can achieve a similar effect 34 | #' by using "delayed method registration", i.e. placing the following in your 35 | #' `NAMESPACE` file: 36 | #' 37 | #' ``` 38 | #' if (getRversion() >= "3.6.0") { 39 | #' S3method(package::generic, class) 40 | #' } 41 | #' ``` 42 | #' 43 | #' @section Usage in other packages: 44 | #' To avoid taking a dependency on rlang, you copy the source of 45 | #' [`s3_register()`](https://github.com/r-lib/rlang/blob/main/R/standalone-s3-register.R) 46 | #' into your own package or with 47 | #' `usethis::use_standalone("r-lib/rlang", "s3-register")`. It is licensed under 48 | #' the permissive [unlicense](https://choosealicense.com/licenses/unlicense/) to 49 | #' make it crystal clear that we're happy for you to do this. There's no need to 50 | #' include the license or even credit us when using this function. 51 | #' 52 | #' @param generic Name of the generic in the form `"pkg::generic"`. 53 | #' @param class Name of the class 54 | #' @param method Optionally, the implementation of the method. By default, 55 | #' this will be found by looking for a function called `generic.class` 56 | #' in the package environment. 57 | #' @examples 58 | #' # A typical use case is to dynamically register tibble/pillar methods 59 | #' # for your class. That way you avoid creating a hard dependency on packages 60 | #' # that are not essential, while still providing finer control over 61 | #' # printing when they are used. 62 | #' 63 | #' .onLoad <- function(...) 
{ 64 | #' s3_register("pillar::pillar_shaft", "vctrs_vctr") 65 | #' s3_register("tibble::type_sum", "vctrs_vctr") 66 | #' } 67 | #' @keywords internal 68 | #' @noRd 69 | s3_register <- function(generic, class, method = NULL) { 70 | stopifnot(is.character(generic), length(generic) == 1) 71 | stopifnot(is.character(class), length(class) == 1) 72 | 73 | pieces <- strsplit(generic, "::")[[1]] 74 | stopifnot(length(pieces) == 2) 75 | package <- pieces[[1]] 76 | generic <- pieces[[2]] 77 | 78 | caller <- parent.frame() 79 | 80 | get_method_env <- function() { 81 | top <- topenv(caller) 82 | if (isNamespace(top)) { 83 | asNamespace(environmentName(top)) 84 | } else { 85 | caller 86 | } 87 | } 88 | get_method <- function(method) { 89 | if (is.null(method)) { 90 | get(paste0(generic, ".", class), envir = get_method_env()) 91 | } else { 92 | method 93 | } 94 | } 95 | 96 | register <- function(...) { 97 | envir <- asNamespace(package) 98 | 99 | # Refresh the method each time, it might have been updated by 100 | # `devtools::load_all()` 101 | method_fn <- get_method(method) 102 | stopifnot(is.function(method_fn)) 103 | 104 | # Only register if generic can be accessed 105 | if (exists(generic, envir)) { 106 | registerS3method(generic, class, method_fn, envir = envir) 107 | } else if (identical(Sys.getenv("NOT_CRAN"), "true")) { 108 | warn <- .rlang_s3_register_compat("warn") 109 | 110 | warn( 111 | c( 112 | sprintf( 113 | "Can't find generic `%s` in package %s to register S3 method.", 114 | generic, 115 | package 116 | ), 117 | "i" = "This message is only shown to developers using devtools.", 118 | "i" = sprintf( 119 | "Do you need to update %s to the latest version?", 120 | package 121 | ) 122 | ) 123 | ) 124 | } 125 | } 126 | 127 | # Always register hook in case package is later unloaded & reloaded 128 | setHook(packageEvent(package, "onLoad"), function(...) 
{ 129 | register() 130 | }) 131 | 132 | # For compatibility with R < 4.1.0 where base isn't locked 133 | is_sealed <- function(pkg) { 134 | identical(pkg, "base") || environmentIsLocked(asNamespace(pkg)) 135 | } 136 | 137 | # Avoid registration failures during loading (pkgload or regular). 138 | # Check that environment is locked because the registering package 139 | # might be a dependency of the package that exports the generic. In 140 | # that case, the exports (and the generic) might not be populated 141 | # yet (#1225). 142 | if (isNamespaceLoaded(package) && is_sealed(package)) { 143 | register() 144 | } 145 | 146 | invisible() 147 | } 148 | 149 | .rlang_s3_register_compat <- function(fn, try_rlang = TRUE) { 150 | # Compats that behave the same independently of rlang's presence 151 | out <- switch( 152 | fn, 153 | is_installed = return(function(pkg) requireNamespace(pkg, quietly = TRUE)) 154 | ) 155 | 156 | # Only use rlang if it is fully loaded (#1482) 157 | if ( 158 | try_rlang && 159 | requireNamespace("rlang", quietly = TRUE) && 160 | environmentIsLocked(asNamespace("rlang")) 161 | ) { 162 | switch( 163 | fn, 164 | is_interactive = return(rlang::is_interactive) 165 | ) 166 | 167 | # Make sure rlang knows about "x" and "i" bullets 168 | if (utils::packageVersion("rlang") >= "0.4.2") { 169 | switch( 170 | fn, 171 | abort = return(rlang::abort), 172 | warn = return((rlang::warn)), 173 | inform = return(rlang::inform) 174 | ) 175 | } 176 | } 177 | 178 | # Fall back to base compats 179 | 180 | is_interactive_compat <- function() { 181 | opt <- getOption("rlang_interactive") 182 | if (!is.null(opt)) { 183 | opt 184 | } else { 185 | interactive() 186 | } 187 | } 188 | 189 | format_msg <- function(x) paste(x, collapse = "\n") 190 | switch( 191 | fn, 192 | is_interactive = return(is_interactive_compat), 193 | abort = return(function(msg) stop(format_msg(msg), call. = FALSE)), 194 | warn = return(function(msg) warning(format_msg(msg), call. 
= FALSE)), 195 | inform = return(function(msg) message(format_msg(msg))) 196 | ) 197 | 198 | stop(sprintf("Internal error in rlang shims: Unknown function `%s()`.", fn)) 199 | } 200 | 201 | # nocov end 202 | -------------------------------------------------------------------------------- /R/load.R: -------------------------------------------------------------------------------- 1 | #' Get system load averages 2 | #' 3 | #' Uses OS system APIs to return the load average for the past 1, 5 and 15 minutes. 4 | #' @export 5 | bench_load_average <- function() { 6 | stats::setNames( 7 | .Call(bench_load_average_), 8 | c("load_1_min", "load_5_min", "load_15_min") 9 | ) 10 | } 11 | -------------------------------------------------------------------------------- /R/mark.R: -------------------------------------------------------------------------------- 1 | #' @useDynLib bench, .registration = TRUE 2 | NULL 3 | 4 | #' Benchmark a series of functions 5 | #' 6 | #' Benchmark a list of quoted expressions. Each expression will always run at 7 | #' least twice, once to measure the memory allocation and store results and one 8 | #' or more times to measure timing. 9 | #' 10 | #' @param ... Expressions to benchmark, if named the `expression` column will 11 | #' be the name, otherwise it will be the deparsed expression. 12 | #' @param min_time The minimum number of seconds to run each expression, set to 13 | #' `Inf` to always run `max_iterations` times instead. 14 | #' @param iterations If not `NULL`, the default, run each expression for 15 | #' exactly this number of iterations. This overrides both `min_iterations` 16 | #' and `max_iterations`. 17 | #' @param exprs A list of quoted expressions. If supplied overrides expressions 18 | #' defined in `...`. 19 | #' @param min_iterations Each expression will be evaluated a minimum of `min_iterations` times. 20 | #' @param max_iterations Each expression will be evaluated a maximum of `max_iterations` times. 
21 | #' @param check Check if results are consistent. If `TRUE`, checking is done 22 | #' with [all.equal()], if `FALSE` checking is disabled and results are not 23 | #' stored. If `check` is a function that function will be called with each 24 | #' pair of results to determine consistency. 25 | #' @param memory If `TRUE` (the default when R is compiled with memory 26 | #' profiling), track memory allocations using [utils::Rprofmem()]. If `FALSE` 27 | #' disable memory tracking. 28 | #' @param env The environment which to evaluate the expressions 29 | #' @inheritParams summary.bench_mark 30 | #' @inherit summary.bench_mark return 31 | #' @aliases bench_mark 32 | #' @seealso [press()] to run benchmarks across a grid of parameters. 33 | #' @examples 34 | #' dat <- data.frame(x = runif(100, 1, 1000), y=runif(10, 1, 1000)) 35 | #' mark( 36 | #' min_time = .1, 37 | #' 38 | #' dat[dat$x > 500, ], 39 | #' dat[which(dat$x > 500), ], 40 | #' subset(dat, x > 500)) 41 | #' @export 42 | mark <- function( 43 | ..., 44 | min_time = .5, 45 | iterations = NULL, 46 | min_iterations = 1, 47 | max_iterations = 10000, 48 | check = TRUE, 49 | memory = capabilities("profmem"), 50 | filter_gc = TRUE, 51 | relative = FALSE, 52 | time_unit = NULL, 53 | exprs = NULL, 54 | env = parent.frame() 55 | ) { 56 | if (!is.null(iterations)) { 57 | min_iterations <- iterations 58 | max_iterations <- iterations 59 | } 60 | 61 | if (isTRUE(check)) { 62 | check_fun <- all.equal 63 | } else if (is.function(check)) { 64 | check_fun <- check 65 | check <- TRUE 66 | } else { 67 | check <- FALSE 68 | } 69 | 70 | if (is.null(exprs)) { 71 | exprs <- dots(...) 
72 | } 73 | 74 | n_exprs <- length(exprs) 75 | 76 | results <- list( 77 | expression = new_bench_expr(exprs), 78 | time = vector("list", n_exprs), 79 | gc = vector("list", n_exprs), 80 | memory = vector("list", n_exprs), 81 | result = vector("list", n_exprs) 82 | ) 83 | 84 | # Helper for evaluating with memory profiling 85 | eval_one <- function(e, profile_memory) { 86 | f <- tempfile() 87 | on.exit(unlink(f)) 88 | if (profile_memory) { 89 | utils::Rprofmem(f, threshold = 1) 90 | } 91 | 92 | res <- eval(e, env) 93 | if (profile_memory) { 94 | utils::Rprofmem(NULL) 95 | } 96 | list(result = res, memory = parse_allocations(f)) 97 | } 98 | 99 | # We only want to evaluate these first runs if we need to check memory or results. 100 | if (memory || check) { 101 | # Run allocation benchmark and check results 102 | for (i in seq_len(length(exprs))) { 103 | res <- eval_one(exprs[[i]], memory) 104 | if (check) { 105 | if (is.null(res$result)) { 106 | results$result[i] <- list(res$result) 107 | } else { 108 | results$result[[i]] <- res$result 109 | } 110 | } 111 | if (memory) { 112 | results$memory[[i]] <- res$memory 113 | } 114 | 115 | if (check && i > 1) { 116 | comp <- check_fun(results$result[[1]], results$result[[i]]) 117 | if (!isTRUE(comp)) { 118 | expressions <- as.character(results$expression) 119 | 120 | stop( 121 | glue::glue( 122 | " 123 | Each result must equal the first result: 124 | `{first}` does not equal `{current}` 125 | ", 126 | first = expressions[[1]], 127 | current = expressions[[i]] 128 | ), 129 | call. 
= FALSE 130 | ) 131 | } 132 | } 133 | } 134 | } 135 | 136 | for (i in seq_len(length(exprs))) { 137 | error <- NULL 138 | gc_msg <- with_gcinfo({ 139 | tryCatch( 140 | error = function(e) { 141 | e$call <- NULL 142 | error <<- e 143 | }, 144 | res <- .Call( 145 | mark_, 146 | exprs[[i]], 147 | env, 148 | min_time, 149 | as.integer(min_iterations), 150 | as.integer(max_iterations), 151 | TRUE 152 | ) 153 | ) 154 | }) 155 | if (!is.null(error)) { 156 | stop(error) 157 | } 158 | 159 | results$time[[i]] <- as_bench_time(res) 160 | results$gc[[i]] <- parse_gc(gc_msg) 161 | } 162 | 163 | out <- summary( 164 | bench_mark(tibble::as_tibble(results, .name_repair = "minimal")), 165 | filter_gc = filter_gc, 166 | relative = relative, 167 | time_unit = time_unit 168 | ) 169 | 170 | out 171 | } 172 | 173 | bench_mark <- function(x) { 174 | class(x) <- unique(c("bench_mark", class(x))) 175 | x 176 | } 177 | 178 | #' Coerce to a bench mark object Bench mark objects 179 | #' 180 | #' This is typically needed only if you are performing additional manipulations 181 | #' after calling [bench::mark()]. 182 | #' @param x Object to be coerced 183 | #' @export 184 | as_bench_mark <- function(x) { 185 | bench_mark(tibble::as_tibble(x)) 186 | } 187 | 188 | summary_cols <- c("min", "median", "itr/sec", "mem_alloc", "gc/sec") 189 | data_cols <- c("n_itr", "n_gc", "total_time", "result", "memory", "time", "gc") 190 | time_cols <- c("min", "median", "total_time") 191 | 192 | #' Summarize [bench::mark] results. 193 | #' 194 | #' @param object [bench_mark] object to summarize. 195 | #' @param filter_gc If `TRUE` remove iterations that contained at least one 196 | #' garbage collection before summarizing. If `TRUE` but an expression had 197 | #' a garbage collection in every iteration, filtering is disabled, with a warning. 198 | #' @param relative If `TRUE` all summaries are computed relative to the minimum 199 | #' execution time rather than absolute time. 
#' @param time_unit If `NULL` the times are reported in a human readable
#' fashion depending on each value. If one of 'ns', 'us', 'ms', 's', 'm', 'h',
#' 'd', 'w' the time units are instead expressed as nanoseconds, microseconds,
#' milliseconds, seconds, minutes, hours, days or weeks respectively.
#' @param ... Additional arguments ignored.
#' @details
#' If `filter_gc == TRUE` (the default) runs that contain a garbage
#' collection will be removed before summarizing. This is most useful for fast
#' expressions when the majority of runs do not contain a gc. Call
#' `summary(filter_gc = FALSE)` if you would like to compute summaries _with_
#' these times, such as expressions with lots of allocations when all or most
#' runs contain a gc.
#' @return A [tibble][tibble::tibble] with the additional summary columns.
#' The following summary columns are computed
#' - `expression` - `bench_expr` The deparsed expression that was evaluated
#' (or its name if one was provided).
#' - `min` - `bench_time` The minimum execution time.
#' - `median` - `bench_time` The sample median of execution time.
#' - `itr/sec` - `double` The estimated number of executions performed per
#' second.
#' - `mem_alloc` - `bench_bytes` Total amount of memory allocated by R while
#' running the expression. Memory allocated *outside* the R heap, e.g. by
#' `malloc()` or `new` directly is *not* tracked, take care to avoid
#' misinterpreting the results if running code that may do this.
#' - `gc/sec` - `double` The number of garbage collections per second.
#' - `n_itr` - `integer` Total number of iterations after filtering
#' garbage collections (if `filter_gc == TRUE`).
#' - `n_gc` - `double` Total number of garbage collections performed over all
#' iterations.
#' This is a pseudo-measure of the pressure on the garbage collector; if
#' it varies greatly between two alternatives, generally the one with fewer
#' collections will cause fewer allocations in real usage.
#' - `total_time` - `bench_time` The total time to perform the benchmarks.
#' - `result` - `list` A list column of the object(s) returned by the
#'   evaluated expression(s).
#' - `memory` - `list` A list column with results from [Rprofmem()].
#' - `time` - `list` A list column of `bench_time` vectors for each evaluated
#'   expression.
#' - `gc` - `list` A list column with tibbles containing the level of
#'   garbage collection (0-2, columns) for each iteration (rows).
#' @examples
#' dat <- data.frame(x = runif(10000, 1, 1000), y=runif(10000, 1, 1000))
#'
#' # `bench::mark()` implicitly calls summary() automatically
#' results <- bench::mark(
#'   dat[dat$x > 500, ],
#'   dat[which(dat$x > 500), ],
#'   subset(dat, x > 500))
#'
#' # However you can also do so explicitly to filter gc differently.
#' summary(results, filter_gc = FALSE)
#'
#' # Or output relative times
#' summary(results, relative = TRUE)
#' @export
summary.bench_mark <- function(
  object,
  filter_gc = TRUE,
  relative = FALSE,
  time_unit = NULL,
  ...
) {
  nms <- colnames(object)
  parameters <- setdiff(nms, c("expression", summary_cols, data_cols))

  num_gc <- lapply(object$gc, function(x) {
    res <- rowSums(x)
    if (length(res) == 0) {
      res <- rep(0, length(x))
    }
    res
  })
  if (isTRUE(filter_gc)) {
    no_gc <- lapply(num_gc, `==`, 0)
    times <- Map(`[`, object$time, no_gc)
  } else {
    times <- object$time
  }

  if (filter_gc && any(lengths(times) == 0)) {
    times <- object$time
    warning(
      call.
= FALSE, 282 | "Some expressions had a GC in every iteration; so filtering is disabled." 283 | ) 284 | } 285 | 286 | object$min <- new_bench_time(vdapply(times, min)) 287 | object$median <- new_bench_time(vdapply(times, stats::median)) 288 | object$max <- new_bench_time(vdapply(times, max)) 289 | object$total_time <- new_bench_time(vdapply(times, sum)) 290 | 291 | object$n_itr <- viapply(times, length) 292 | object$`itr/sec` <- as.numeric(object$n_itr / object$total_time) 293 | 294 | object$n_gc <- vdapply(num_gc, sum) 295 | object$`gc/sec` <- as.numeric(object$n_gc / object$total_time) 296 | 297 | object$mem_alloc <- 298 | bench_bytes( 299 | vdapply( 300 | object$memory, 301 | function(x) if (is.null(x)) NA else sum(x$bytes, na.rm = TRUE) 302 | ) 303 | ) 304 | 305 | if (isTRUE(relative)) { 306 | object[summary_cols] <- lapply( 307 | object[summary_cols], 308 | function(x) as.numeric(x / min(x)) 309 | ) 310 | } 311 | 312 | if (!is.null(time_unit)) { 313 | time_unit <- match.arg(time_unit, names(time_units())) 314 | object[time_cols] <- lapply( 315 | object[time_cols], 316 | function(x) as.numeric(x / time_units()[time_unit]) 317 | ) 318 | } 319 | 320 | to_keep <- intersect( 321 | c("expression", parameters, summary_cols, data_cols), 322 | names(object) 323 | ) 324 | bench_mark(object[to_keep]) 325 | } 326 | 327 | #' @export 328 | `[.bench_mark` <- function(x, i, j, ...) { 329 | bench_mark(NextMethod("[")) 330 | } 331 | 332 | parse_allocations <- function(filename) { 333 | if (!file.exists(filename)) { 334 | empty_Rprofmem <- structure( 335 | list(what = character(), bytes = integer(), trace = list()), 336 | class = c("Rprofmem", "data.frame") 337 | ) 338 | 339 | return(empty_Rprofmem) 340 | } 341 | 342 | # TODO: remove this dependency / simplify parsing 343 | tryCatch( 344 | profmem::readRprofmem(filename), 345 | error = function(e) { 346 | stop( 347 | "Memory profiling failed.\n If you are benchmarking parallel code you must set `memory = FALSE`.", 348 | call. 
= FALSE 349 | ) 350 | } 351 | ) 352 | } 353 | 354 | #nocov start 355 | 356 | #' Custom printing function for `bench_mark` objects in knitr documents 357 | #' 358 | #' By default, data columns (`result`, `memory`, `time`, `gc`) are omitted when 359 | #' printing in knitr. If you would like to include these columns, set the knitr 360 | #' chunk option `bench.all_columns = TRUE`. 361 | #' 362 | #' @details 363 | #' You can set `bench.all_columns = TRUE` to show all columns of the bench mark 364 | #' object. 365 | #' 366 | #' ```{r, bench.all_columns = TRUE} 367 | #' bench::mark( 368 | #' subset(mtcars, cyl == 3), 369 | #' mtcars[mtcars$cyl == 3, ] 370 | #' ) 371 | #' ``` 372 | #' 373 | #' @inheritParams knitr::knit_print 374 | #' 375 | #' @param options A list of knitr chunk options set in the currently evaluated 376 | #' chunk. 377 | # Lazily registered in `.onLoad()` 378 | knit_print.bench_mark <- function(x, ..., options) { 379 | if (!isTRUE(options$bench.all_columns)) { 380 | x <- x[!colnames(x) %in% data_cols] 381 | } 382 | NextMethod() 383 | } 384 | 385 | #nocov end 386 | 387 | parse_gc <- function(x) { 388 | # \x1E is Record Separator 389 | x <- strsplit(paste0(x, collapse = ""), "\x1E")[[1]] 390 | tibble::as_tibble(.Call(parse_gc_, x)) 391 | } 392 | 393 | utils::globalVariables(c("time", "gc")) 394 | 395 | # Lazily registered in `.onLoad()` 396 | unnest.bench_mark <- function(data, ...) { 397 | if (inherits(data[["expression"]], "bench_expr")) { 398 | data[["expression"]] <- as.character(data[["expression"]]) 399 | } 400 | 401 | # suppressWarnings to avoid 'elements may not preserve their attributes' 402 | # warnings from dplyr::collapse 403 | data <- suppressWarnings(NextMethod(.Generic, data, ...)) 404 | 405 | # Add bench_time class back to the time column 406 | data$time <- as_bench_time(data$time) 407 | 408 | # Add a gc column, a factor with the highest gc performed for each expression. 
409 | data$gc <- 410 | dplyr::case_when( 411 | data$level2 > 0 ~ "level2", 412 | data$level1 > 0 ~ "level1", 413 | data$level0 > 0 ~ "level0", 414 | TRUE ~ "none" 415 | ) 416 | data$gc <- factor(data$gc, c("none", "level0", "level1", "level2")) 417 | 418 | data 419 | } 420 | 421 | #' @export 422 | rbind.bench_mark <- function(..., deparse.level = 1) { 423 | args <- list(...) 424 | desc <- unlist(lapply(args, function(x) as.character(x$expression))) 425 | res <- rbind.data.frame(...) 426 | attr(res$expression, "description") <- desc 427 | res 428 | } 429 | 430 | # Lazily registered in `.onLoad()` 431 | filter.bench_mark <- function(.data, ...) { 432 | dots <- rlang::quos(...) 433 | idx <- Reduce(`&`, lapply(dots, rlang::eval_tidy, data = .data)) 434 | .data[idx, ] 435 | } 436 | 437 | # Lazily registered in `.onLoad()` 438 | group_by.bench_mark <- function(.data, ...) { 439 | bench_mark(NextMethod()) 440 | } 441 | -------------------------------------------------------------------------------- /R/press.R: -------------------------------------------------------------------------------- 1 | #' Run setup code and benchmarks across a grid of parameters 2 | #' 3 | #' @description 4 | #' `press()` is used to run [bench::mark()] across a grid of parameters and 5 | #' then _press_ the results together. 6 | #' 7 | #' The parameters you want to set are given as named arguments and a grid of 8 | #' all possible combinations is automatically created. 9 | #' 10 | #' The code to setup and benchmark is given by one unnamed expression (often 11 | #' delimited by `\{`). 12 | #' 13 | #' If replicates are desired a dummy variable can be used, e.g. `rep = 1:5` for 14 | #' replicates. 15 | #' 16 | #' @param ... If named, parameters to define, if unnamed the expression to run. 17 | #' Only one unnamed expression is permitted. 18 | #' @param .grid A pre-built grid of values to use, typically a [data.frame()] or 19 | #' [tibble::tibble()]. 
This is useful if you only want to benchmark a subset 20 | #' of all possible combinations. 21 | #' @param .quiet If `TRUE`, progress messages will not be emitted. 22 | #' @export 23 | #' @examples 24 | #' # Helper function to create a simple data.frame of the specified dimensions 25 | #' create_df <- function(rows, cols) { 26 | #' as.data.frame(setNames( 27 | #' replicate(cols, runif(rows, 1, 1000), simplify = FALSE), 28 | #' rep_len(c("x", letters), cols))) 29 | #' } 30 | #' 31 | #' # Run 4 data sizes across 3 samples with 2 replicates (24 total benchmarks) 32 | #' press( 33 | #' rows = c(1000, 10000), 34 | #' cols = c(10, 100), 35 | #' rep = 1:2, 36 | #' { 37 | #' dat <- create_df(rows, cols) 38 | #' bench::mark( 39 | #' min_time = .05, 40 | #' bracket = dat[dat$x > 500, ], 41 | #' which = dat[which(dat$x > 500), ], 42 | #' subset = subset(dat, x > 500) 43 | #' ) 44 | #' } 45 | #' ) 46 | press <- function(..., .grid = NULL, .quiet = FALSE) { 47 | args <- rlang::quos(...) 48 | 49 | assert( 50 | "`.quiet` must be `TRUE` or `FALSE`", 51 | isTRUE(.quiet) || isFALSE(.quiet) 52 | ) 53 | 54 | unnamed <- names(args) == "" 55 | 56 | if (sum(unnamed) < 1) { 57 | stop("Must supply one unnamed argument", call. = FALSE) 58 | } 59 | 60 | if (sum(unnamed) > 1) { 61 | stop("Must supply no more than one unnamed argument", call. = FALSE) 62 | } 63 | 64 | if (!is.null(.grid)) { 65 | if (any(!unnamed)) { 66 | stop( 67 | "Must supply either `.grid` or named arguments, not both", 68 | call. 
= FALSE 69 | ) 70 | } 71 | parameters <- .grid 72 | } else { 73 | parameters <- expand.grid( 74 | lapply(args[!unnamed], rlang::eval_tidy), 75 | stringsAsFactors = FALSE 76 | ) 77 | } 78 | 79 | # For consistent `[` methods 80 | parameters <- tibble::as_tibble(parameters) 81 | 82 | if (!.quiet) { 83 | status <- format(parameters, n = Inf) 84 | message(glue::glue("Running with:\n{status[[2]]}")) 85 | } 86 | 87 | eval_one <- function(row) { 88 | env <- rlang::new_data_mask(new.env(parent = emptyenv())) 89 | names <- names(parameters) 90 | 91 | for (i in seq_along(parameters)) { 92 | name <- names[[i]] 93 | column <- parameters[[i]] 94 | value <- column[row] 95 | assign(name, value, envir = env) 96 | } 97 | 98 | if (!.quiet) { 99 | message(status[[row + 3L]]) 100 | } 101 | 102 | rlang::eval_tidy(args[[which(unnamed)]], data = env) 103 | } 104 | 105 | res <- lapply(seq_len(nrow(parameters)), eval_one) 106 | rows <- vapply(res, NROW, integer(1)) 107 | 108 | if (!all(rows == rows[[1]])) { 109 | stop("Results must have equal rows", call. = FALSE) 110 | # TODO: print parameters / results that are unequal? 111 | } 112 | res <- do.call(rbind, res) 113 | parameters <- parameters[rep(seq_len(nrow(parameters)), each = rows[[1]]), ] 114 | bench_mark(tibble::as_tibble(cbind(res[1], parameters, res[-1]))) 115 | } 116 | -------------------------------------------------------------------------------- /R/time.R: -------------------------------------------------------------------------------- 1 | time_units <- function() { 2 | stats::setNames( 3 | c( 4 | 1e-9, 5 | 1e-6, 6 | if (is_utf8_output()) 1e-6, 7 | 1e-3, 8 | 1, 9 | 60, 10 | 60 * 60, 11 | 60 * 60 * 24, 12 | 60 * 60 * 24 * 7 13 | ), 14 | c( 15 | "ns", 16 | "us", 17 | if (is_utf8_output()) "\U00B5s", 18 | "ms", 19 | "s", 20 | "m", 21 | "h", 22 | "d", 23 | "w" 24 | ) 25 | ) 26 | } 27 | 28 | #' Human readable times 29 | #' 30 | #' Construct, manipulate and display vectors of elapsed times in seconds. 
These 31 | #' are numeric vectors, so you can compare them numerically, but they can also 32 | #' be compared to human readable values such as '10ms'. 33 | #' 34 | #' @param x A numeric or character vector. Character representations can use 35 | #' shorthand sizes (see examples). 36 | #' @examples 37 | #' as_bench_time("1ns") 38 | #' as_bench_time("1") 39 | #' as_bench_time("1us") 40 | #' as_bench_time("1ms") 41 | #' as_bench_time("1s") 42 | #' 43 | #' as_bench_time("100ns") < "1ms" 44 | #' 45 | #' sum(as_bench_time(c("1MB", "5MB", "500KB"))) 46 | #' @export 47 | as_bench_time <- function(x) { 48 | UseMethod("as_bench_time") 49 | } 50 | 51 | new_bench_time <- function(x) { 52 | structure(x, class = c("bench_time", "numeric")) 53 | } 54 | setOldClass(c("bench_time", "numeric"), numeric()) 55 | 56 | #' @export 57 | as_bench_time.default <- function(x) { 58 | x <- as.character(x) 59 | re <- glue::glue( 60 | "^(?[[:digit:].]+)\\s*(?{nms}?)$", 61 | nms = paste0(names(time_units()), collapse = "|") 62 | ) 63 | 64 | m <- captures(x, regexpr(re, x, perl = TRUE)) 65 | m$unit[m$unit == ""] <- "s" 66 | new_bench_time(unname(as.numeric(m$size) * time_units()[m$unit])) 67 | } 68 | 69 | #' @export 70 | as_bench_time.bench_time <- function(x) { 71 | return(x) 72 | } 73 | 74 | #' @export 75 | as_bench_time.numeric <- function(x) { 76 | is_small <- x < 1e-9 & !is.infinite(x) & x != 0 77 | x[is_small] <- 1e-9 78 | 79 | new_bench_time(x) 80 | } 81 | tolerance <- sqrt(.Machine$double.eps) 82 | find_unit <- function(x, units) { 83 | if (is.na(x) || is.nan(x) || x <= 0 || is.infinite(x)) { 84 | return(NA_character_) 85 | } 86 | epsilon <- 1 - (x * (1 / units)) 87 | names( 88 | utils::tail(n = 1, which(epsilon < tolerance)) 89 | ) 90 | } 91 | 92 | # Adapted from https://github.com/gaborcsardi/prettyunits 93 | # Aims to be consistent with ls -lh, so uses 1024 KiB units, 3 or less digits etc. 
94 | #' @export 95 | format.bench_time <- function( 96 | x, 97 | scientific = FALSE, 98 | digits = 3, 99 | drop0trailing = TRUE, 100 | ... 101 | ) { 102 | nms <- names(x) 103 | 104 | # convert negative times to 1ns, this can happen if the minimum calculated 105 | # overhead is higher than the time. 106 | x[x < 1e-9 & !is.infinite(x) & x != 0] <- 1e-9 107 | 108 | seconds <- unclass(x) 109 | 110 | unit <- vcapply(x, find_unit, time_units()) 111 | res <- round(seconds / time_units()[unit], digits = digits) 112 | 113 | ## Zero seconds 114 | res[seconds == 0] <- 0 115 | unit[seconds == 0] <- "" 116 | 117 | ## NA, NaN, Inf, -Inf, seconds 118 | res[is.na(seconds)] <- NA_real_ 119 | res[is.nan(seconds)] <- NaN 120 | res[is.infinite(seconds)] <- Inf 121 | res[is.infinite(seconds) & seconds < 0] <- -Inf 122 | unit[is.na(seconds) | is.infinite(seconds)] <- "" 123 | 124 | res <- format( 125 | res, 126 | scientific = scientific, 127 | digits = digits, 128 | drop0trailing = drop0trailing, 129 | ... 130 | ) 131 | 132 | stats::setNames(paste0(res, unit), nms) 133 | } 134 | 135 | #' @export 136 | as.character.bench_time <- format.bench_time 137 | 138 | #' @export 139 | print.bench_time <- function(x, ...) { 140 | print(format.bench_time(x, ...), quote = FALSE) 141 | } 142 | 143 | #' @export 144 | sum.bench_time <- function(x, ...) { 145 | new_bench_time(NextMethod()) 146 | } 147 | 148 | #' @export 149 | min.bench_time <- function(x, ...) { 150 | new_bench_time(NextMethod()) 151 | } 152 | 153 | #' @export 154 | max.bench_time <- function(x, ...) { 155 | new_bench_time(NextMethod()) 156 | } 157 | 158 | #' @export 159 | `[.bench_time` <- function(x, i, ...) { 160 | new_bench_time(NextMethod("[")) 161 | } 162 | 163 | #' @export 164 | `[[.bench_time` <- function(x, i, ...) { 165 | new_bench_time(NextMethod("[[")) 166 | } 167 | 168 | #' @export 169 | # Adapted from Ops.numeric_version 170 | Ops.bench_time <- function(e1, e2, ...) 
{ 171 | if (nargs() == 1L) { 172 | stop( 173 | sprintf("unary '%s' not defined for \"bench_time\" objects", .Generic), 174 | call. = FALSE 175 | ) 176 | } 177 | 178 | boolean <- switch( 179 | .Generic, 180 | `+` = TRUE, 181 | `-` = TRUE, 182 | `*` = TRUE, 183 | `/` = TRUE, 184 | `^` = TRUE, 185 | `<` = TRUE, 186 | `>` = TRUE, 187 | `==` = TRUE, 188 | `!=` = TRUE, 189 | `<=` = TRUE, 190 | `>=` = TRUE, 191 | `%%` = TRUE, 192 | FALSE 193 | ) 194 | if (!boolean) { 195 | stop( 196 | sprintf("'%s' not defined for \"bench_time\" objects", .Generic), 197 | call. = FALSE 198 | ) 199 | } 200 | e1 <- as_bench_time(e1) 201 | e2 <- as_bench_time(e2) 202 | NextMethod(.Generic) 203 | } 204 | 205 | #' @export 206 | Summary.bench_time <- function(..., na.rm = FALSE) { 207 | new_bench_time(NextMethod(.Generic)) 208 | } 209 | 210 | #' @export 211 | mean.bench_time <- function(x, ...) { 212 | new_bench_time(NextMethod(.Generic)) 213 | } 214 | 215 | #' @export 216 | pillar_shaft.bench_time <- function(x, ...) { 217 | pillar::new_pillar_shaft_simple(format.bench_time(x), align = "right", ...) 218 | } 219 | 220 | #' @export 221 | type_sum.bench_time <- function(x) { 222 | "bch:tm" 223 | } 224 | 225 | #' Benchmark time transformation 226 | #' 227 | #' This both log transforms the times and formats the labels as a `bench_time` 228 | #' object. 
229 | #' @inheritParams scales::log_trans 230 | #' @keywords internal 231 | #' @export 232 | bench_time_trans <- function(base = 10) { 233 | if (is.null(base)) { 234 | return( 235 | scales::trans_new( 236 | name = "bch:tm", 237 | transform = as.numeric, 238 | inverse = as_bench_time, 239 | breaks = scales::pretty_breaks(), 240 | domain = c(1e-100, Inf) 241 | ) 242 | ) 243 | } 244 | 245 | trans <- function(x) log(as.numeric(x), base) 246 | inv <- function(x) as_bench_time(base^as.numeric(x)) 247 | 248 | scales::trans_new( 249 | name = paste0("bch:tm-", format(base)), 250 | transform = trans, 251 | inverse = inv, 252 | breaks = scales::log_breaks(base = base), 253 | domain = c(1e-100, Inf) 254 | ) 255 | } 256 | 257 | # Lazily registered in `.onLoad()` 258 | scale_type.bench_time <- function(x) "bench_time" 259 | 260 | #' Position scales for bench_time data 261 | #' 262 | #' Default scales for the `bench_time` class, these are added to plots using 263 | #' `bench_time` objects automatically. 264 | #' @name scale_bench_time 265 | #' @param base The base of the logarithm, if `NULL` instead use a 266 | #' non-logarithmic scale. 267 | #' @keywords internal 268 | #' @export 269 | scale_x_bench_time <- function(base = 10, ...) { 270 | ggplot2::scale_x_continuous(..., transform = bench_time_trans(base = base)) 271 | } 272 | 273 | #' @rdname scale_bench_time 274 | #' @keywords internal 275 | #' @export 276 | scale_y_bench_time <- function(base = 10, ...) { 277 | ggplot2::scale_y_continuous(..., transform = bench_time_trans(base = base)) 278 | } 279 | -------------------------------------------------------------------------------- /R/utils.R: -------------------------------------------------------------------------------- 1 | viapply <- function(x, f, ...) vapply(x, f, integer(1), ...) 2 | vdapply <- function(x, f, ...) vapply(x, f, double(1), ...) 3 | vcapply <- function(x, f, ...) vapply(x, f, character(1), ...) 4 | vlapply <- function(x, f, ...) 
vapply(x, f, logical(1), ...) 5 | 6 | captures <- function(x, m) { 7 | assert("`x` must be a character", is.character(x)) 8 | assert( 9 | "`m` must be a match object from `regexpr()`", 10 | inherits(m, "integer") && 11 | all( 12 | c( 13 | "match.length", 14 | "capture.start", 15 | "capture.length", 16 | "capture.names" 17 | ) %in% 18 | names(attributes(m)) 19 | ) 20 | ) 21 | 22 | starts <- attr(m, "capture.start") 23 | strings <- substring(x, starts, starts + attr(m, "capture.length") - 1L) 24 | res <- data.frame( 25 | matrix(strings, ncol = NCOL(starts)), 26 | stringsAsFactors = FALSE 27 | ) 28 | colnames(res) <- auto_name_vec(attr(m, "capture.names")) 29 | res[is.na(m) | m == -1, ] <- NA_character_ 30 | res 31 | } 32 | 33 | assert <- function(msg, ..., class = "invalid_argument") { 34 | tests <- unlist(list(...)) 35 | 36 | if (!all(tests)) { 37 | stop(bench_error(msg, class = class)) 38 | } 39 | } 40 | 41 | bench_error <- function(msg, class = "invalid_argument") { 42 | structure( 43 | class = c(class, "bench_error", "error", "condition"), 44 | list(message = msg) 45 | ) 46 | } 47 | 48 | auto_name_vec <- function(names) { 49 | missing <- names == "" 50 | if (all(!missing)) { 51 | return(names) 52 | } 53 | names[missing] <- seq_along(names)[missing] 54 | names 55 | } 56 | 57 | with_gcinfo <- function(expr) { 58 | tf <- tempfile() 59 | con <- file(tf, "wb") 60 | sink(con, type = "message") 61 | { 62 | old <- gcinfo(TRUE) 63 | on.exit({ 64 | gcinfo(old) 65 | sink(NULL, type = "message") 66 | close(con) 67 | output <- readLines(tf, warn = FALSE) 68 | unlink(tf) 69 | return(output) 70 | }) 71 | force(expr) 72 | } 73 | } 74 | 75 | deparse_trunc <- function(x, width = getOption("width")) { 76 | text <- deparse(x, width.cutoff = width) 77 | if (length(text) == 1 && nchar(text) < width) return(text) 78 | 79 | # Remove any leading spaces 80 | text <- sub("^[[:space:]]*", "", text) 81 | 82 | # Collapse all together 83 | glue::glue_collapse(text, " ", width = width) 84 | } 
85 | 86 | # inlined from https://github.com/r-lib/cli/blob/master/R/utf8.R 87 | is_utf8_output <- function() { 88 | opt <- getOption("cli.unicode", NULL) 89 | if (!is.null(opt)) { 90 | isTRUE(opt) 91 | } else { 92 | l10n_info()$`UTF-8` && !is_latex_output() 93 | } 94 | } 95 | 96 | is_latex_output <- function() { 97 | if (!("knitr" %in% loadedNamespaces())) return(FALSE) 98 | get("is_latex_output", asNamespace("knitr"))() 99 | } 100 | 101 | lengths <- function(x, use.names = TRUE) { 102 | viapply(x, length, USE.NAMES = use.names) 103 | } 104 | 105 | dots <- function(...) { 106 | dots <- as.list(substitute(...())) 107 | 108 | n <- length(dots) 109 | if (n && rlang::is_missing(dots[[n]])) { 110 | dots <- dots[-n] 111 | } 112 | 113 | dots 114 | } 115 | -------------------------------------------------------------------------------- /R/workout.R: -------------------------------------------------------------------------------- 1 | #' Workout a group of expressions individually 2 | #' 3 | #' Given an block of expressions in `{}` [workout()] individually times each 4 | #' expression in the group. [workout_expressions()] is a lower level function most 5 | #' useful when reading lists of calls from a file. 6 | #' 7 | #' @param expr one or more expressions to workout, use `{}` to pass multiple 8 | #' expressions. 9 | #' @param exprs A list of calls to measure. 10 | #' @param description A name to label each expression, if not supplied the 11 | #' deparsed expression will be used. 12 | #' @param env The environment in which the expressions should be evaluated. 
13 | #' @export 14 | #' @examples 15 | #' workout({ 16 | #' x <- 1:1000 17 | #' evens <- x %% 2 == 0 18 | #' y <- x[evens] 19 | #' length(y) 20 | #' length(which(evens)) 21 | #' sum(evens) 22 | #' }) 23 | #' 24 | #' # The equivalent to the above, reading the code from a file 25 | #' workout_expressions(as.list(parse(system.file("examples/exprs.R", package = "bench")))) 26 | workout <- function(expr, description = NULL) { 27 | expr <- substitute(expr) 28 | env <- parent.frame() 29 | if (rlang::is_call(expr, "{")) { 30 | exprs <- as.list(expr[-1]) 31 | } else { 32 | exprs <- list(expr) 33 | } 34 | workout_expressions(exprs, env, description) 35 | } 36 | 37 | #' @rdname workout 38 | #' @export 39 | workout_expressions <- function( 40 | exprs, 41 | env = parent.frame(), 42 | description = NULL 43 | ) { 44 | if (is.null(description)) { 45 | description <- names(exprs) 46 | } 47 | 48 | out <- list( 49 | exprs = new_bench_expr(exprs, description), 50 | process = numeric(length(exprs)), 51 | real = numeric(length(exprs)) 52 | ) 53 | 54 | for (i in seq_along(exprs)) { 55 | res <- as_bench_time(.Call(system_time_, exprs[[i]], env)) 56 | out[[2]][[i]] <- res[[1]] 57 | out[[3]][[i]] <- res[[2]] 58 | } 59 | 60 | out[[2]] <- new_bench_time(out[[2]]) 61 | out[[3]] <- new_bench_time(out[[3]]) 62 | 63 | tibble::as_tibble(out) 64 | } 65 | -------------------------------------------------------------------------------- /R/zzz.R: -------------------------------------------------------------------------------- 1 | #nocov start 2 | .onLoad <- function(...) 
{ 3 | s3_register("tidyr::unnest", "bench_mark") 4 | s3_register("dplyr::filter", "bench_mark") 5 | s3_register("dplyr::group_by", "bench_mark") 6 | s3_register("ggplot2::autoplot", "bench_mark") 7 | 8 | s3_register("ggplot2::scale_type", "bench_expr") 9 | s3_register("ggplot2::scale_type", "bench_time") 10 | s3_register("ggplot2::scale_type", "bench_bytes") 11 | 12 | s3_register("knitr::knit_print", "bench_mark") 13 | 14 | s3_register("vctrs::vec_proxy", "bench_expr") 15 | s3_register("vctrs::vec_restore", "bench_expr") 16 | } 17 | #nocov end 18 | -------------------------------------------------------------------------------- /README.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | output: github_document 3 | --- 4 | 5 | 6 | 7 | ```{r setup, include = FALSE} 8 | knitr::opts_chunk$set( 9 | collapse = TRUE, 10 | comment = "#>", 11 | fig.path = "man/figures/README-", 12 | out.width = "100%" 13 | ) 14 | options(width = 120) 15 | ``` 16 | 17 | # bench 18 | 19 | 20 | 21 | [![CRAN status](https://www.r-pkg.org/badges/version/bench)](https://cran.r-project.org/package=bench) 22 | [![R-CMD-check](https://github.com/r-lib/bench/actions/workflows/R-CMD-check.yaml/badge.svg)](https://github.com/r-lib/bench/actions/workflows/R-CMD-check.yaml) 23 | [![Codecov test coverage](https://codecov.io/gh/r-lib/bench/graph/badge.svg)](https://app.codecov.io/gh/r-lib/bench) 24 | 25 | 26 | 27 | The goal of bench is to benchmark code, tracking execution time, memory allocations and garbage collections. 
28 | 29 | ## Installation 30 | 31 | You can install the release version from [CRAN](https://cran.r-project.org/) with: 32 | 33 | ```{r, eval = FALSE} 34 | install.packages("bench") 35 | ``` 36 | 37 | Or you can install the development version from [GitHub](https://github.com/) with: 38 | 39 | ``` r 40 | # install.packages("pak") 41 | pak::pak("r-lib/bench") 42 | ``` 43 | 44 | ## Features 45 | 46 | `bench::mark()` is used to benchmark one or a series of expressions, we feel it has a number of advantages over [alternatives](#alternatives). 47 | 48 | - Always uses the highest precision APIs available for each operating system (often nanoseconds). 49 | - Tracks memory allocations for each expression. 50 | - Tracks the number and type of R garbage collections per expression iteration. 51 | - Verifies equality of expression results by default, to avoid accidentally benchmarking inequivalent code. 52 | - Has `bench::press()`, which allows you to easily perform and combine benchmarks across a large grid of values. 53 | - Uses adaptive stopping by default, running each expression for a set amount of time rather than for a specific number of iterations. 54 | - Expressions are run in batches and summary statistics are calculated after filtering out iterations with garbage collections. This allows you to isolate the performance and effects of garbage collection on running time (for more details see [Neal 2014](https://radfordneal.wordpress.com/2014/02/02/inaccurate-results-from-microbenchmark/)). 55 | 56 | The times and memory usage are returned as custom objects which have human readable formatting for display (e.g. `104ns`) and comparisons (e.g. `x$mem_alloc > "10MB"`). 57 | 58 | There is also full support for plotting with [ggplot2](https://ggplot2.tidyverse.org/) including custom scales and formatting. 59 | 60 | ## Usage 61 | 62 | ### `bench::mark()` 63 | 64 | Benchmarks can be run with `bench::mark()`, which takes one or more expressions to benchmark against each other. 
65 | 66 | ```{r example, cache = TRUE} 67 | library(bench) 68 | set.seed(42) 69 | 70 | dat <- data.frame( 71 | x = runif(10000, 1, 1000), 72 | y = runif(10000, 1, 1000) 73 | ) 74 | ``` 75 | 76 | `bench::mark()` will throw an error if the results are not equivalent, so you don't accidentally benchmark inequivalent code. 77 | 78 | ```{r example-1, error = TRUE, cache = TRUE, dependson = "example"} 79 | bench::mark( 80 | dat[dat$x > 500, ], 81 | dat[which(dat$x > 499), ], 82 | subset(dat, x > 500) 83 | ) 84 | ``` 85 | 86 | Results are easy to interpret, with human readable units. 87 | 88 | ```{r example-2, cache = TRUE, dependson = "example"} 89 | bnch <- bench::mark( 90 | dat[dat$x > 500, ], 91 | dat[which(dat$x > 500), ], 92 | subset(dat, x > 500) 93 | ) 94 | bnch 95 | ``` 96 | 97 | By default the summary uses absolute measures, however relative results can be obtained by using `relative = TRUE` in your call to `bench::mark()` or calling `summary(relative = TRUE)` on the results. 98 | 99 | ```{r example-3, cache = TRUE, dependson = "example-2"} 100 | summary(bnch, relative = TRUE) 101 | ``` 102 | 103 | ### `bench::press()` 104 | 105 | `bench::press()` is used to run benchmarks against a grid of parameters. 106 | Provide setup and benchmarking code as a single unnamed argument then define sets of values as named arguments. 107 | The full combination of values will be expanded and the benchmarks are then *pressed* together in the result. 108 | This allows you to benchmark a set of expressions across a wide variety of input sizes, perform replications and other useful tasks. 
109 | 110 | ```{r example2, cache = TRUE} 111 | set.seed(42) 112 | 113 | create_df <- function(rows, cols) { 114 | out <- replicate(cols, runif(rows, 1, 100), simplify = FALSE) 115 | out <- setNames(out, rep_len(c("x", letters), cols)) 116 | as.data.frame(out) 117 | } 118 | 119 | results <- bench::press( 120 | rows = c(1000, 10000), 121 | cols = c(2, 10), 122 | { 123 | dat <- create_df(rows, cols) 124 | bench::mark( 125 | min_iterations = 100, 126 | bracket = dat[dat$x > 500, ], 127 | which = dat[which(dat$x > 500), ], 128 | subset = subset(dat, x > 500) 129 | ) 130 | } 131 | ) 132 | 133 | results 134 | ``` 135 | 136 | ## Plotting 137 | 138 | `ggplot2::autoplot()` can be used to generate an informative default plot. 139 | This plot is colored by gc level (0, 1, or 2) and faceted by parameters (if any). 140 | By default it generates a [beeswarm](https://github.com/eclarke/ggbeeswarm#geom_quasirandom) plot, however you can also specify other plot types (`jitter`, `ridge`, `boxplot`, `violin`). 141 | See `?autoplot.bench_mark` for full details. 142 | 143 | ```{r autoplot, message = FALSE, warning = FALSE, cache = TRUE, dependson = "example2", dpi = 300} 144 | ggplot2::autoplot(results) 145 | ``` 146 | 147 | You can also produce fully custom plots by un-nesting the results and working with the data directly. 148 | 149 | ```{r custom-plot, message = FALSE, cache = TRUE, dependson = "example2", dpi = 300} 150 | library(tidyverse) 151 | 152 | results %>% 153 | unnest(c(time, gc)) %>% 154 | filter(gc == "none") %>% 155 | mutate(expression = as.character(expression)) %>% 156 | ggplot(aes(x = mem_alloc, y = time, color = expression)) + 157 | geom_point() + 158 | scale_color_bench_expr(scales::brewer_pal(type = "qual", palette = 3)) 159 | ``` 160 | 161 | ## `system_time()` 162 | 163 | **bench** also includes `system_time()`, a higher precision alternative to [system.time()](https://www.rdocumentation.org/packages/base/versions/3.5.0/topics/system.time). 
164 | 165 | ```{r system-time, cache = TRUE} 166 | bench::system_time({ 167 | i <- 1 168 | while (i < 1e7) { 169 | i <- i + 1 170 | } 171 | }) 172 | 173 | bench::system_time(Sys.sleep(.5)) 174 | ``` 175 | 176 | ## Alternatives {#alternatives} 177 | 178 | - [rbenchmark](https://cran.r-project.org/package=rbenchmark) 179 | - [microbenchmark](https://cran.r-project.org/package=microbenchmark) 180 | - [tictoc](https://cran.r-project.org/package=tictoc) 181 | - [system.time()](https://www.rdocumentation.org/packages/base/versions/3.5.0/topics/system.time) 182 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | # bench 5 | 6 | 7 | 8 | [![CRAN 9 | status](https://www.r-pkg.org/badges/version/bench)](https://cran.r-project.org/package=bench) 10 | [![R-CMD-check](https://github.com/r-lib/bench/actions/workflows/R-CMD-check.yaml/badge.svg)](https://github.com/r-lib/bench/actions/workflows/R-CMD-check.yaml) 11 | [![Codecov test 12 | coverage](https://codecov.io/gh/r-lib/bench/graph/badge.svg)](https://app.codecov.io/gh/r-lib/bench) 13 | 14 | 15 | 16 | The goal of bench is to benchmark code, tracking execution time, memory 17 | allocations and garbage collections. 18 | 19 | ## Installation 20 | 21 | You can install the release version from 22 | [CRAN](https://cran.r-project.org/) with: 23 | 24 | ``` r 25 | install.packages("bench") 26 | ``` 27 | 28 | Or you can install the development version from 29 | [GitHub](https://github.com/) with: 30 | 31 | ``` r 32 | # install.packages("pak") 33 | pak::pak("r-lib/bench") 34 | ``` 35 | 36 | ## Features 37 | 38 | `bench::mark()` is used to benchmark one or a series of expressions, we 39 | feel it has a number of advantages over [alternatives](#alternatives). 40 | 41 | - Always uses the highest precision APIs available for each operating 42 | system (often nanoseconds). 
43 | - Tracks memory allocations for each expression. 44 | - Tracks the number and type of R garbage collections per expression 45 | iteration. 46 | - Verifies equality of expression results by default, to avoid 47 | accidentally benchmarking inequivalent code. 48 | - Has `bench::press()`, which allows you to easily perform and combine 49 | benchmarks across a large grid of values. 50 | - Uses adaptive stopping by default, running each expression for a set 51 | amount of time rather than for a specific number of iterations. 52 | - Expressions are run in batches and summary statistics are calculated 53 | after filtering out iterations with garbage collections. This allows 54 | you to isolate the performance and effects of garbage collection on 55 | running time (for more details see [Neal 56 | 2014](https://radfordneal.wordpress.com/2014/02/02/inaccurate-results-from-microbenchmark/)). 57 | 58 | The times and memory usage are returned as custom objects which have 59 | human readable formatting for display (e.g. `104ns`) and comparisons 60 | (e.g. `x$mem_alloc > "10MB"`). 61 | 62 | There is also full support for plotting with 63 | [ggplot2](https://ggplot2.tidyverse.org/) including custom scales and 64 | formatting. 65 | 66 | ## Usage 67 | 68 | ### `bench::mark()` 69 | 70 | Benchmarks can be run with `bench::mark()`, which takes one or more 71 | expressions to benchmark against each other. 72 | 73 | ``` r 74 | library(bench) 75 | set.seed(42) 76 | 77 | dat <- data.frame( 78 | x = runif(10000, 1, 1000), 79 | y = runif(10000, 1, 1000) 80 | ) 81 | ``` 82 | 83 | `bench::mark()` will throw an error if the results are not equivalent, 84 | so you don’t accidentally benchmark inequivalent code. 
85 | 86 | ``` r 87 | bench::mark( 88 | dat[dat$x > 500, ], 89 | dat[which(dat$x > 499), ], 90 | subset(dat, x > 500) 91 | ) 92 | #> Error: Each result must equal the first result: 93 | #> `dat[dat$x > 500, ]` does not equal `dat[which(dat$x > 499), ]` 94 | ``` 95 | 96 | Results are easy to interpret, with human readable units. 97 | 98 | ``` r 99 | bnch <- bench::mark( 100 | dat[dat$x > 500, ], 101 | dat[which(dat$x > 500), ], 102 | subset(dat, x > 500) 103 | ) 104 | bnch 105 | #> # A tibble: 3 × 6 106 | #> expression min median `itr/sec` mem_alloc `gc/sec` 107 | #> 108 | #> 1 dat[dat$x > 500, ] 151µs 202µs 4994. 377KB 36.8 109 | #> 2 dat[which(dat$x > 500), ] 124µs 168µs 6169. 260KB 33.2 110 | #> 3 subset(dat, x > 500) 184µs 232µs 3911. 510KB 40.3 111 | ``` 112 | 113 | By default the summary uses absolute measures, however relative results 114 | can be obtained by using `relative = TRUE` in your call to 115 | `bench::mark()` or calling `summary(relative = TRUE)` on the results. 116 | 117 | ``` r 118 | summary(bnch, relative = TRUE) 119 | #> # A tibble: 3 × 6 120 | #> expression min median `itr/sec` mem_alloc `gc/sec` 121 | #> 122 | #> 1 dat[dat$x > 500, ] 1.22 1.20 1.28 1.45 1.11 123 | #> 2 dat[which(dat$x > 500), ] 1 1 1.58 1 1 124 | #> 3 subset(dat, x > 500) 1.48 1.38 1 1.96 1.21 125 | ``` 126 | 127 | ### `bench::press()` 128 | 129 | `bench::press()` is used to run benchmarks against a grid of parameters. 130 | Provide setup and benchmarking code as a single unnamed argument then 131 | define sets of values as named arguments. The full combination of values 132 | will be expanded and the benchmarks are then *pressed* together in the 133 | result. This allows you to benchmark a set of expressions across a wide 134 | variety of input sizes, perform replications and other useful tasks. 
135 | 136 | ``` r 137 | set.seed(42) 138 | 139 | create_df <- function(rows, cols) { 140 | out <- replicate(cols, runif(rows, 1, 100), simplify = FALSE) 141 | out <- setNames(out, rep_len(c("x", letters), cols)) 142 | as.data.frame(out) 143 | } 144 | 145 | results <- bench::press( 146 | rows = c(1000, 10000), 147 | cols = c(2, 10), 148 | { 149 | dat <- create_df(rows, cols) 150 | bench::mark( 151 | min_iterations = 100, 152 | bracket = dat[dat$x > 500, ], 153 | which = dat[which(dat$x > 500), ], 154 | subset = subset(dat, x > 500) 155 | ) 156 | } 157 | ) 158 | #> Running with: 159 | #> rows cols 160 | #> 1 1000 2 161 | #> 2 10000 2 162 | #> 3 1000 10 163 | #> 4 10000 10 164 | 165 | results 166 | #> # A tibble: 12 × 8 167 | #> expression rows cols min median `itr/sec` mem_alloc `gc/sec` 168 | #> 169 | #> 1 bracket 1000 2 27µs 34µs 27964. 15.84KB 19.6 170 | #> 2 which 1000 2 25.7µs 33.4µs 29553. 7.91KB 17.7 171 | #> 3 subset 1000 2 45.9µs 58.2µs 16793. 27.7KB 17.1 172 | #> 4 bracket 10000 2 64.1µs 70.8µs 13447. 156.46KB 40.5 173 | #> 5 which 10000 2 46.7µs 54.7µs 17586. 78.23KB 23.3 174 | #> 6 subset 10000 2 116.2µs 132.1µs 7228. 273.79KB 40.9 175 | #> 7 bracket 1000 10 77.2µs 85.4µs 11335. 47.52KB 19.9 176 | #> 8 which 1000 10 67.8µs 75.2µs 13073. 7.91KB 23.2 177 | #> 9 subset 1000 10 84.7µs 107.5µs 9281. 59.38KB 18.8 178 | #> 10 bracket 10000 10 130.2µs 169.1µs 5799. 469.4KB 52.2 179 | #> 11 which 10000 10 75.1µs 96µs 10187. 78.23KB 17.4 180 | #> 12 subset 10000 10 222.7µs 253µs 3810. 586.73KB 43.3 181 | ``` 182 | 183 | ## Plotting 184 | 185 | `ggplot2::autoplot()` can be used to generate an informative default 186 | plot. This plot is colored by gc level (0, 1, or 2) and faceted by 187 | parameters (if any). By default it generates a 188 | [beeswarm](https://github.com/eclarke/ggbeeswarm#geom_quasirandom) plot, 189 | however you can also specify other plot types (`jitter`, `ridge`, 190 | `boxplot`, `violin`). See `?autoplot.bench_mark` for full details. 
191 | 192 | ``` r 193 | ggplot2::autoplot(results) 194 | ``` 195 | 196 | 197 | 198 | You can also produce fully custom plots by un-nesting the results and 199 | working with the data directly. 200 | 201 | ``` r 202 | library(tidyverse) 203 | 204 | results %>% 205 | unnest(c(time, gc)) %>% 206 | filter(gc == "none") %>% 207 | mutate(expression = as.character(expression)) %>% 208 | ggplot(aes(x = mem_alloc, y = time, color = expression)) + 209 | geom_point() + 210 | scale_color_bench_expr(scales::brewer_pal(type = "qual", palette = 3)) 211 | ``` 212 | 213 | 214 | 215 | ## `system_time()` 216 | 217 | **bench** also includes `system_time()`, a higher precision alternative 218 | to 219 | [system.time()](https://www.rdocumentation.org/packages/base/versions/3.5.0/topics/system.time). 220 | 221 | ``` r 222 | bench::system_time({ 223 | i <- 1 224 | while (i < 1e7) { 225 | i <- i + 1 226 | } 227 | }) 228 | #> process real 229 | #> 1.73s 1.7s 230 | 231 | bench::system_time(Sys.sleep(.5)) 232 | #> process real 233 | #> 58µs 497ms 234 | ``` 235 | 236 | ## Alternatives 237 | 238 | - [rbenchmark](https://cran.r-project.org/package=rbenchmark) 239 | - [microbenchmark](https://cran.r-project.org/package=microbenchmark) 240 | - [tictoc](https://cran.r-project.org/package=tictoc) 241 | - [system.time()](https://www.rdocumentation.org/packages/base/versions/3.5.0/topics/system.time) 242 | -------------------------------------------------------------------------------- /_pkgdown.yml: -------------------------------------------------------------------------------- 1 | url: https://bench.r-lib.org 2 | 3 | authors: 4 | "Jim Hester": 5 | href: https://www.jimhester.com/ 6 | 7 | development: 8 | mode: auto 9 | 10 | template: 11 | bootstrap: 5 12 | package: tidytemplate 13 | params: 14 | ganalytics: UA-115082821-1 15 | 16 | news: 17 | releases: 18 | - text: "bench 1.0.1" 19 | href: https://www.tidyverse.org/blog/2018/06/bench-1.0.1/ 20 | 
-------------------------------------------------------------------------------- /air.toml: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/r-lib/bench/e9104efd77dd502a7ffe39eafb9ca34a9951ec06/air.toml -------------------------------------------------------------------------------- /bench.Rproj: -------------------------------------------------------------------------------- 1 | Version: 1.0 2 | 3 | RestoreWorkspace: No 4 | SaveWorkspace: No 5 | AlwaysSaveHistory: Default 6 | 7 | EnableCodeIndexing: Yes 8 | UseSpacesForTab: Yes 9 | NumSpacesForTab: 2 10 | Encoding: UTF-8 11 | 12 | RnwWeave: Sweave 13 | LaTeX: pdfLaTeX 14 | 15 | AutoAppendNewline: Yes 16 | StripTrailingWhitespace: Yes 17 | 18 | BuildType: Package 19 | PackageUseDevtools: Yes 20 | PackageInstallArgs: --no-multiarch --with-keep.source 21 | PackageRoxygenize: rd,collate,namespace 22 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | comment: false 2 | 3 | coverage: 4 | status: 5 | project: 6 | default: 7 | target: auto 8 | threshold: 1% 9 | informational: true 10 | patch: 11 | default: 12 | target: auto 13 | threshold: 1% 14 | informational: true 15 | -------------------------------------------------------------------------------- /cran-comments.md: -------------------------------------------------------------------------------- 1 | There are no expected revdep failures. 
2 | -------------------------------------------------------------------------------- /inst/examples/exprs.R: -------------------------------------------------------------------------------- 1 | x <- 1:1000 2 | evens <- x %% 2 == 0 3 | y <- x[evens] 4 | length(y) 5 | length(which(evens)) 6 | sum(evens) 7 | -------------------------------------------------------------------------------- /man/as_bench_mark.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mark.R 3 | \name{as_bench_mark} 4 | \alias{as_bench_mark} 5 | \title{Coerce to a bench mark object Bench mark objects} 6 | \usage{ 7 | as_bench_mark(x) 8 | } 9 | \arguments{ 10 | \item{x}{Object to be coerced} 11 | } 12 | \description{ 13 | This is typically needed only if you are performing additional manipulations 14 | after calling \code{\link[=mark]{mark()}}. 15 | } 16 | -------------------------------------------------------------------------------- /man/as_bench_time.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/time.R 3 | \name{as_bench_time} 4 | \alias{as_bench_time} 5 | \title{Human readable times} 6 | \usage{ 7 | as_bench_time(x) 8 | } 9 | \arguments{ 10 | \item{x}{A numeric or character vector. Character representations can use 11 | shorthand sizes (see examples).} 12 | } 13 | \description{ 14 | Construct, manipulate and display vectors of elapsed times in seconds. These 15 | are numeric vectors, so you can compare them numerically, but they can also 16 | be compared to human readable values such as '10ms'. 
17 | }
18 | \examples{
19 | as_bench_time("1ns")
20 | as_bench_time("1")
21 | as_bench_time("1us")
22 | as_bench_time("1ms")
23 | as_bench_time("1s")
24 | 
25 | as_bench_time("100ns") < "1ms"
26 | 
27 | sum(as_bench_time(c("1s", "5s", "500ms")))
28 | }
29 | --------------------------------------------------------------------------------
/man/autoplot.bench_mark.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/autoplot.R
3 | \name{autoplot.bench_mark}
4 | \alias{autoplot.bench_mark}
5 | \alias{plot.bench_mark}
6 | \title{Autoplot method for bench_mark objects}
7 | \usage{
8 | autoplot.bench_mark(
9 | object,
10 | type = c("beeswarm", "jitter", "ridge", "boxplot", "violin"),
11 | ...
12 | )
13 | 
14 | \method{plot}{bench_mark}(x, ..., type = c("beeswarm", "jitter", "ridge", "boxplot", "violin"), y)
15 | }
16 | \arguments{
17 | \item{object}{A \code{bench_mark} object.}
18 | 
19 | \item{type}{The type of plot. Plotting geoms used for each type are
20 | \itemize{
21 | \item beeswarm - \code{\link[ggbeeswarm:geom_quasirandom]{ggbeeswarm::geom_quasirandom()}}
22 | \item jitter - \code{\link[ggplot2:geom_jitter]{ggplot2::geom_jitter()}}
23 | \item ridge - \code{\link[ggridges:geom_density_ridges]{ggridges::geom_density_ridges()}}
24 | \item boxplot - \code{\link[ggplot2:geom_boxplot]{ggplot2::geom_boxplot()}}
25 | \item violin - \code{\link[ggplot2:geom_violin]{ggplot2::geom_violin()}}
26 | }}
27 | 
28 | \item{...}{Additional arguments passed to the plotting geom.}
29 | 
30 | \item{x}{A \code{bench_mark} object.}
31 | 
32 | \item{y}{Ignored, required for compatibility with the \code{plot()} generic.}
33 | }
34 | \description{
35 | Autoplot method for bench_mark objects
36 | }
37 | \details{
38 | This function requires some optional dependencies.
\link[ggplot2:ggplot2-package]{ggplot2}, 39 | \link[tidyr:tidyr-package]{tidyr}, and depending on the plot type 40 | \link[ggbeeswarm:ggbeeswarm]{ggbeeswarm}, \link[ggridges:ggridges-package]{ggridges}. 41 | 42 | For \code{type} of \code{beeswarm} and \code{jitter} the points are colored by the highest 43 | level garbage collection performed during each iteration. 44 | 45 | For plots with 2 parameters \code{ggplot2::facet_grid()} is used to construct a 46 | 2d facet. For other numbers of parameters \code{ggplot2::facet_wrap()} is used 47 | instead. 48 | } 49 | \examples{ 50 | dat <- data.frame(x = runif(10000, 1, 1000), y=runif(10000, 1, 1000)) 51 | 52 | res <- bench::mark( 53 | dat[dat$x > 500, ], 54 | dat[which(dat$x > 500), ], 55 | subset(dat, x > 500)) 56 | 57 | if (require(ggplot2) && require(tidyr) && require(ggbeeswarm)) { 58 | 59 | # Beeswarm plot 60 | autoplot(res) 61 | 62 | # ridge (joyplot) 63 | autoplot(res, "ridge") 64 | 65 | # If you want to have the plots ordered by execution time you can do so by 66 | # ordering factor levels in the expressions. 67 | if (require(dplyr) && require(forcats)) { 68 | 69 | res \%>\% 70 | mutate(expression = forcats::fct_reorder(as.character(expression), min, .desc = TRUE)) \%>\% 71 | as_bench_mark() \%>\% 72 | autoplot("violin") 73 | } 74 | } 75 | } 76 | -------------------------------------------------------------------------------- /man/bench-package.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/bench-package.R 3 | \docType{package} 4 | \name{bench-package} 5 | \alias{bench} 6 | \alias{bench-package} 7 | \title{bench: High Precision Timing of R Expressions} 8 | \description{ 9 | Tools to accurately benchmark and analyze execution times for R expressions. 
10 | } 11 | \examples{ 12 | dat <- data.frame(x = runif(10000, 1, 1000), y=runif(10000, 1, 1000)) 13 | 14 | # `bench::mark()` implicitly calls summary() automatically 15 | results <- bench::mark( 16 | dat[dat$x > 500, ], 17 | dat[which(dat$x > 500), ], 18 | subset(dat, x > 500)) 19 | 20 | # However you can also do so explicitly to filter gc differently. 21 | summary(results, filter_gc = FALSE) 22 | 23 | # Or output relative times 24 | summary(results, relative = TRUE) 25 | } 26 | \seealso{ 27 | Useful links: 28 | \itemize{ 29 | \item \url{https://bench.r-lib.org/} 30 | \item \url{https://github.com/r-lib/bench} 31 | \item Report bugs at \url{https://github.com/r-lib/bench/issues} 32 | } 33 | 34 | } 35 | \author{ 36 | \strong{Maintainer}: Davis Vaughan \email{davis@posit.co} 37 | 38 | Authors: 39 | \itemize{ 40 | \item Jim Hester 41 | } 42 | 43 | Other contributors: 44 | \itemize{ 45 | \item Drew Schmidt (read_proc_file implementation) [contributor] 46 | \item Posit Software, PBC [copyright holder, funder] 47 | } 48 | 49 | } 50 | \keyword{internal} 51 | -------------------------------------------------------------------------------- /man/bench_bytes.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/bytes.R 3 | \name{bench_bytes} 4 | \alias{bench_bytes} 5 | \alias{as_bench_bytes} 6 | \title{Human readable memory sizes} 7 | \usage{ 8 | as_bench_bytes(x) 9 | 10 | bench_bytes(x) 11 | } 12 | \arguments{ 13 | \item{x}{A numeric or character vector. Character representations can use 14 | shorthand sizes (see examples).} 15 | } 16 | \description{ 17 | Construct, manipulate and display vectors of byte sizes. These are numeric 18 | vectors, so you can compare them numerically, but they can also be compared 19 | to human readable values such as '10MB'. 20 | } 21 | \details{ 22 | These memory sizes are always assumed to be base 1024, rather than 1000. 
23 | }
24 | \examples{
25 | bench_bytes("1")
26 | bench_bytes("1K")
27 | bench_bytes("1Kb")
28 | bench_bytes("1KiB")
29 | bench_bytes("1MB")
30 | 
31 | bench_bytes("1KB") < "1MB"
32 | 
33 | sum(bench_bytes(c("1MB", "5MB", "500KB")))
34 | }
35 | --------------------------------------------------------------------------------
/man/bench_bytes_trans.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/bytes.R
3 | \name{bench_bytes_trans}
4 | \alias{bench_bytes_trans}
5 | \title{Benchmark bytes transformation}
6 | \usage{
7 | bench_bytes_trans(base = 2)
8 | }
9 | \arguments{
10 | \item{base}{base of logarithm}
11 | }
12 | \description{
13 | This both log transforms the sizes and formats the labels as a \code{bench_bytes}
14 | object.
15 | }
16 | \keyword{internal}
17 | --------------------------------------------------------------------------------
/man/bench_load_average.Rd:
--------------------------------------------------------------------------------
1 | % Generated by roxygen2: do not edit by hand
2 | % Please edit documentation in R/load.R
3 | \name{bench_load_average}
4 | \alias{bench_load_average}
5 | \title{Get system load averages}
6 | \usage{
7 | bench_load_average()
8 | }
9 | \description{
10 | Uses OS system APIs to return the load average for the past 1, 5 and 15 minutes.
11 | } 12 | -------------------------------------------------------------------------------- /man/bench_memory.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/bench_time.R 3 | \name{bench_memory} 4 | \alias{bench_memory} 5 | \title{Measure memory that an expression used.} 6 | \usage{ 7 | bench_memory(expr) 8 | } 9 | \arguments{ 10 | \item{expr}{A expression to be measured.} 11 | } 12 | \value{ 13 | A tibble with two columns 14 | \itemize{ 15 | \item The total amount of memory allocated 16 | \item The raw memory allocations as parsed by \code{\link[profmem:readRprofmem]{profmem::readRprofmem()}} 17 | } 18 | } 19 | \description{ 20 | Measure memory that an expression used. 21 | } 22 | \examples{ 23 | if (capabilities("profmem")) { 24 | bench_memory(1 + 1:10000) 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /man/bench_process_memory.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/bench_process_memory.R 3 | \name{bench_process_memory} 4 | \alias{bench_process_memory} 5 | \title{Retrieve the current and maximum memory from the R process} 6 | \usage{ 7 | bench_process_memory() 8 | } 9 | \description{ 10 | The memory reported here will likely differ from that reported by \code{gc()}, as 11 | this includes all memory from the R process, including any child processes 12 | and memory allocated outside R's garbage collector heap. 
13 | } 14 | \details{ 15 | The OS APIs used are as follows 16 | \subsection{Windows}{ 17 | \itemize{ 18 | \item \href{https://learn.microsoft.com/en-us/windows/win32/api/psapi/ns-psapi-process_memory_counters}{PROCESS_MEMORY_COUNTERS.WorkingSetSize} 19 | \item \href{https://learn.microsoft.com/en-us/windows/win32/api/psapi/ns-psapi-process_memory_counters}{PROCESS_MEMORY_COUNTERS.PeakWorkingSetSize} 20 | } 21 | } 22 | 23 | \subsection{macOS}{ 24 | \itemize{ 25 | \item \href{https://developer.apple.com/documentation/kernel/1537934-task_info?language=occ}{task_info(TASK_BASIC_INFO)} 26 | \item \href{https://developer.apple.com/library/archive/documentation/System/Conceptual/ManPages_iPhoneOS/man2/getrusage.2.html}{rusage.ru_maxrss} 27 | } 28 | } 29 | 30 | \subsection{linux}{ 31 | \itemize{ 32 | \item \href{https://man7.org/linux/man-pages/man5/proc.5.html}{/proc/pid/status VmSize} 33 | \item \href{https://man7.org/linux/man-pages/man5/proc.5.html}{/proc/pid/status VmPeak} 34 | } 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /man/bench_time.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/bench_time.R 3 | \name{bench_time} 4 | \alias{bench_time} 5 | \alias{system_time} 6 | \title{Measure Process CPU and real time that an expression used.} 7 | \usage{ 8 | bench_time(expr) 9 | } 10 | \arguments{ 11 | \item{expr}{A expression to be timed.} 12 | } 13 | \value{ 14 | A \code{bench_time} object with two values. 15 | \itemize{ 16 | \item \code{process} - The process CPU usage of the expression evaluation. 17 | \item \code{real} - The wallclock time of the expression evaluation. 18 | } 19 | } 20 | \description{ 21 | Measure Process CPU and real time that an expression used. 
22 | } 23 | \details{ 24 | On some systems (such as macOS) the process clock has lower 25 | precision than the realtime clock, as a result there may be cases where the 26 | process time is larger than the real time for fast expressions. 27 | } 28 | \examples{ 29 | # This will use ~.5 seconds of real time, but very little process time. 30 | bench_time(Sys.sleep(.5)) 31 | } 32 | \seealso{ 33 | \code{\link[=bench_memory]{bench_memory()}} To measure memory allocations for a given expression. 34 | } 35 | -------------------------------------------------------------------------------- /man/bench_time_trans.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/time.R 3 | \name{bench_time_trans} 4 | \alias{bench_time_trans} 5 | \title{Benchmark time transformation} 6 | \usage{ 7 | bench_time_trans(base = 10) 8 | } 9 | \arguments{ 10 | \item{base}{base of logarithm} 11 | } 12 | \description{ 13 | This both log transforms the times and formats the labels as a \code{bench_time} 14 | object. 
15 | } 16 | \keyword{internal} 17 | -------------------------------------------------------------------------------- /man/figures/README-autoplot-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/r-lib/bench/e9104efd77dd502a7ffe39eafb9ca34a9951ec06/man/figures/README-autoplot-1.png -------------------------------------------------------------------------------- /man/figures/README-custom-plot-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/r-lib/bench/e9104efd77dd502a7ffe39eafb9ca34a9951ec06/man/figures/README-custom-plot-1.png -------------------------------------------------------------------------------- /man/hires_time.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/hires_time.R 3 | \name{hires_time} 4 | \alias{hires_time} 5 | \title{Return the current high-resolution real time.} 6 | \usage{ 7 | hires_time() 8 | } 9 | \description{ 10 | Time is expressed as seconds since some arbitrary time in the past; it 11 | is not correlated in any way to the time of day, and thus is not subject to 12 | resetting or drifting. The hi-res 13 | timer is ideally suited to performance measurement tasks, where cheap, 14 | accurate interval timing is required. 
15 | } 16 | \examples{ 17 | hires_time() 18 | 19 | # R rounds doubles to 7 digits by default, see greater precision by setting 20 | # the digits argument when printing 21 | print(hires_time(), digits = 20) 22 | 23 | # Generally used by recording two times and then subtracting them 24 | start <- hires_time() 25 | end <- hires_time() 26 | elapsed <- end - start 27 | elapsed 28 | } 29 | -------------------------------------------------------------------------------- /man/knit_print.bench_mark.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mark.R 3 | \name{knit_print.bench_mark} 4 | \alias{knit_print.bench_mark} 5 | \title{Custom printing function for \code{bench_mark} objects in knitr documents} 6 | \usage{ 7 | knit_print.bench_mark(x, ..., options) 8 | } 9 | \arguments{ 10 | \item{x}{An R object to be printed} 11 | 12 | \item{...}{Additional arguments passed to the S3 method. Currently ignored, 13 | except two optional arguments \code{options} and \code{inline}; see 14 | the references below.} 15 | 16 | \item{options}{A list of knitr chunk options set in the currently evaluated 17 | chunk.} 18 | } 19 | \description{ 20 | By default, data columns (\code{result}, \code{memory}, \code{time}, \code{gc}) are omitted when 21 | printing in knitr. If you would like to include these columns, set the knitr 22 | chunk option \code{bench.all_columns = TRUE}. 23 | } 24 | \details{ 25 | You can set \code{bench.all_columns = TRUE} to show all columns of the bench mark 26 | object. 27 | 28 | \if{html}{\out{
}}\preformatted{```\{r, bench.all_columns = TRUE\} 29 | bench::mark( 30 | subset(mtcars, cyl == 3), 31 | mtcars[mtcars$cyl == 3, ] 32 | ) 33 | ``` 34 | }\if{html}{\out{
}} 35 | } 36 | -------------------------------------------------------------------------------- /man/mark.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mark.R 3 | \name{mark} 4 | \alias{mark} 5 | \alias{bench_mark} 6 | \title{Benchmark a series of functions} 7 | \usage{ 8 | mark( 9 | ..., 10 | min_time = 0.5, 11 | iterations = NULL, 12 | min_iterations = 1, 13 | max_iterations = 10000, 14 | check = TRUE, 15 | memory = capabilities("profmem"), 16 | filter_gc = TRUE, 17 | relative = FALSE, 18 | time_unit = NULL, 19 | exprs = NULL, 20 | env = parent.frame() 21 | ) 22 | } 23 | \arguments{ 24 | \item{...}{Expressions to benchmark, if named the \code{expression} column will 25 | be the name, otherwise it will be the deparsed expression.} 26 | 27 | \item{min_time}{The minimum number of seconds to run each expression, set to 28 | \code{Inf} to always run \code{max_iterations} times instead.} 29 | 30 | \item{iterations}{If not \code{NULL}, the default, run each expression for 31 | exactly this number of iterations. This overrides both \code{min_iterations} 32 | and \code{max_iterations}.} 33 | 34 | \item{min_iterations}{Each expression will be evaluated a minimum of \code{min_iterations} times.} 35 | 36 | \item{max_iterations}{Each expression will be evaluated a maximum of \code{max_iterations} times.} 37 | 38 | \item{check}{Check if results are consistent. If \code{TRUE}, checking is done 39 | with \code{\link[=all.equal]{all.equal()}}, if \code{FALSE} checking is disabled and results are not 40 | stored. If \code{check} is a function that function will be called with each 41 | pair of results to determine consistency.} 42 | 43 | \item{memory}{If \code{TRUE} (the default when R is compiled with memory 44 | profiling), track memory allocations using \code{\link[utils:Rprofmem]{utils::Rprofmem()}}. 
If \code{FALSE}
45 | disable memory tracking.}
46 | 
47 | \item{filter_gc}{If \code{TRUE} remove iterations that contained at least one
48 | garbage collection before summarizing. If \code{TRUE} but an expression had
49 | a garbage collection in every iteration, filtering is disabled, with a warning.}
50 | 
51 | \item{relative}{If \code{TRUE} all summaries are computed relative to the minimum
52 | execution time rather than absolute time.}
53 | 
54 | \item{time_unit}{If \code{NULL} the times are reported in a human readable
55 | fashion depending on each value. If one of 'ns', 'us', 'ms', 's', 'm', 'h',
56 | 'd', 'w' the time units are instead expressed as nanoseconds, microseconds,
57 | milliseconds, seconds, minutes, hours, days or weeks respectively.}
58 | 
59 | \item{exprs}{A list of quoted expressions. If supplied overrides expressions
60 | defined in \code{...}.}
61 | 
62 | \item{env}{The environment in which to evaluate the expressions}
63 | }
64 | \value{
65 | A \link[tibble:tibble]{tibble} with the additional summary columns.
66 | The following summary columns are computed
67 | \itemize{
68 | \item \code{expression} - \code{bench_expr} The deparsed expression that was evaluated
69 | (or its name if one was provided).
70 | \item \code{min} - \code{bench_time} The minimum execution time.
71 | \item \code{median} - \code{bench_time} The sample median of execution time.
72 | \item \code{itr/sec} - \code{double} The estimated number of executions performed per
73 | second.
74 | \item \code{mem_alloc} - \code{bench_bytes} Total amount of memory allocated by R while
75 | running the expression. Memory allocated \emph{outside} the R heap, e.g. by
76 | \code{malloc()} or \code{new} directly is \emph{not} tracked, take care to avoid
77 | misinterpreting the results if running code that may do this.
78 | \item \code{gc/sec} - \code{double} The number of garbage collections per second.
79 | \item \code{n_itr} - \code{integer} Total number of iterations after filtering
80 | garbage collections (if \code{filter_gc == TRUE}).
81 | \item \code{n_gc} - \code{double} Total number of garbage collections performed over all
82 | iterations. This is a pseudo-measure of the pressure on the garbage collector, if
83 | it varies greatly between two alternatives, generally the one with fewer
84 | collections will cause fewer allocations in real usage.
85 | \item \code{total_time} - \code{bench_time} The total time to perform the benchmarks.
86 | \item \code{result} - \code{list} A list column of the object(s) returned by the
87 | evaluated expression(s).
88 | \item \code{memory} - \code{list} A list column with results from \code{\link[=Rprofmem]{Rprofmem()}}.
89 | \item \code{time} - \code{list} A list column of \code{bench_time} vectors for each evaluated
90 | expression.
91 | \item \code{gc} - \code{list} A list column with tibbles containing the level of
92 | garbage collection (0-2, columns) for each iteration (rows).
93 | }
94 | }
95 | \description{
96 | Benchmark a list of quoted expressions. Each expression will always run at
97 | least twice, once to measure the memory allocation and store results and one
98 | or more times to measure timing.
99 | }
100 | \examples{
101 | dat <- data.frame(x = runif(100, 1, 1000), y=runif(10, 1, 1000))
102 | mark(
103 | min_time = .1,
104 | 
105 | dat[dat$x > 500, ],
106 | dat[which(dat$x > 500), ],
107 | subset(dat, x > 500))
108 | }
109 | \seealso{
110 | \code{\link[=press]{press()}} to run benchmarks across a grid of parameters.
111 | } 112 | -------------------------------------------------------------------------------- /man/press.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/press.R 3 | \name{press} 4 | \alias{press} 5 | \title{Run setup code and benchmarks across a grid of parameters} 6 | \usage{ 7 | press(..., .grid = NULL, .quiet = FALSE) 8 | } 9 | \arguments{ 10 | \item{...}{If named, parameters to define, if unnamed the expression to run. 11 | Only one unnamed expression is permitted.} 12 | 13 | \item{.grid}{A pre-built grid of values to use, typically a \code{\link[=data.frame]{data.frame()}} or 14 | \code{\link[tibble:tibble]{tibble::tibble()}}. This is useful if you only want to benchmark a subset 15 | of all possible combinations.} 16 | 17 | \item{.quiet}{If \code{TRUE}, progress messages will not be emitted.} 18 | } 19 | \description{ 20 | \code{press()} is used to run \code{\link[=mark]{mark()}} across a grid of parameters and 21 | then \emph{press} the results together. 22 | 23 | The parameters you want to set are given as named arguments and a grid of 24 | all possible combinations is automatically created. 25 | 26 | The code to setup and benchmark is given by one unnamed expression (often 27 | delimited by \verb{\\\{}). 28 | 29 | If replicates are desired a dummy variable can be used, e.g. \code{rep = 1:5} for 30 | replicates. 
31 | } 32 | \examples{ 33 | # Helper function to create a simple data.frame of the specified dimensions 34 | create_df <- function(rows, cols) { 35 | as.data.frame(setNames( 36 | replicate(cols, runif(rows, 1, 1000), simplify = FALSE), 37 | rep_len(c("x", letters), cols))) 38 | } 39 | 40 | # Run 4 data sizes across 3 samples with 2 replicates (24 total benchmarks) 41 | press( 42 | rows = c(1000, 10000), 43 | cols = c(10, 100), 44 | rep = 1:2, 45 | { 46 | dat <- create_df(rows, cols) 47 | bench::mark( 48 | min_time = .05, 49 | bracket = dat[dat$x > 500, ], 50 | which = dat[which(dat$x > 500), ], 51 | subset = subset(dat, x > 500) 52 | ) 53 | } 54 | ) 55 | } 56 | -------------------------------------------------------------------------------- /man/scale_bench_expr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/expression.R 3 | \name{scale_bench_expr} 4 | \alias{scale_bench_expr} 5 | \alias{scale_x_bench_expr} 6 | \alias{scale_y_bench_expr} 7 | \alias{scale_colour_bench_expr} 8 | \alias{scale_color_bench_expr} 9 | \title{Position and color scales for bench_expr data} 10 | \usage{ 11 | scale_x_bench_expr(...) 12 | 13 | scale_y_bench_expr(...) 14 | 15 | scale_colour_bench_expr( 16 | palette = scales::hue_pal(...), 17 | ..., 18 | aesthetics = "colour" 19 | ) 20 | 21 | scale_color_bench_expr( 22 | palette = scales::hue_pal(...), 23 | ..., 24 | aesthetics = "colour" 25 | ) 26 | } 27 | \description{ 28 | Default scales for the \code{bench_expr} class, these are added to plots using 29 | \code{bench_expr} objects automatically. 
30 | } 31 | \keyword{internal} 32 | -------------------------------------------------------------------------------- /man/scale_bench_time.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/bytes.R, R/time.R 3 | \name{scale_bench_time} 4 | \alias{scale_bench_time} 5 | \alias{scale_x_bench_bytes} 6 | \alias{scale_y_bench_bytes} 7 | \alias{scale_x_bench_time} 8 | \alias{scale_y_bench_time} 9 | \title{Position scales for bench_time data} 10 | \usage{ 11 | scale_x_bench_bytes(base = 10, ...) 12 | 13 | scale_y_bench_bytes(base = 10, ...) 14 | 15 | scale_x_bench_time(base = 10, ...) 16 | 17 | scale_y_bench_time(base = 10, ...) 18 | } 19 | \arguments{ 20 | \item{base}{The base of the logarithm, if \code{NULL} instead use a 21 | non-logarithmic scale.} 22 | } 23 | \description{ 24 | Default scales for the \code{bench_time} class, these are added to plots using 25 | \code{bench_time} objects automatically. 26 | 27 | Default scales for the \code{bench_time} class, these are added to plots using 28 | \code{bench_time} objects automatically. 29 | } 30 | \keyword{internal} 31 | -------------------------------------------------------------------------------- /man/summary.bench_mark.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mark.R 3 | \name{summary.bench_mark} 4 | \alias{summary.bench_mark} 5 | \title{Summarize \link{mark} results.} 6 | \usage{ 7 | \method{summary}{bench_mark}(object, filter_gc = TRUE, relative = FALSE, time_unit = NULL, ...) 8 | } 9 | \arguments{ 10 | \item{object}{\link{bench_mark} object to summarize.} 11 | 12 | \item{filter_gc}{If \code{TRUE} remove iterations that contained at least one 13 | garbage collection before summarizing. 
If \code{TRUE} but an expression had 14 | a garbage collection in every iteration, filtering is disabled, with a warning.} 15 | 16 | \item{relative}{If \code{TRUE} all summaries are computed relative to the minimum 17 | execution time rather than absolute time.} 18 | 19 | \item{time_unit}{If \code{NULL} the times are reported in a human readable 20 | fashion depending on each value. If one of 'ns', 'us', 'ms', 's', 'm', 'h', 21 | 'd', 'w' the time units are instead expressed as nanoseconds, microseconds, 22 | milliseconds, seconds, minutes, hours, days or weeks respectively.} 23 | 24 | \item{...}{Additional arguments ignored.} 25 | } 26 | \value{ 27 | A \link[tibble:tibble]{tibble} with the additional summary columns. 28 | The following summary columns are computed 29 | \itemize{ 30 | \item \code{expression} - \code{bench_expr} The deparsed expression that was evaluated 31 | (or its name if one was provided). 32 | \item \code{min} - \code{bench_time} The minimum execution time. 33 | \item \code{median} - \code{bench_time} The sample median of execution time. 34 | \item \code{itr/sec} - \code{double} The estimated number of executions performed per 35 | second. 36 | \item \code{mem_alloc} - \code{bench_bytes} Total amount of memory allocated by R while 37 | running the expression. Memory allocated \emph{outside} the R heap, e.g. by 38 | \code{malloc()} or \code{new} directly is \emph{not} tracked, take care to avoid 39 | misinterpreting the results if running code that may do this. 40 | \item \code{gc/sec} - \code{double} The number of garbage collections per second. 41 | \item \code{n_itr} - \code{integer} Total number of iterations after filtering 42 | garbage collections (if \code{filter_gc == TRUE}). 43 | \item \code{n_gc} - \code{double} Total number of garbage collections performed over all 44 | iterations. 
This is a pseudo-measure of the pressure on the garbage collector, if 45 | it varies greatly between two alternatives generally the one with fewer 46 | collections will cause fewer allocations in real usage. 47 | \item \code{total_time} - \code{bench_time} The total time to perform the benchmarks. 48 | \item \code{result} - \code{list} A list column of the object(s) returned by the 49 | evaluated expression(s). 50 | \item \code{memory} - \code{list} A list column with results from \code{\link[=Rprofmem]{Rprofmem()}}. 51 | \item \code{time} - \code{list} A list column of \code{bench_time} vectors for each evaluated 52 | expression. 53 | \item \code{gc} - \code{list} A list column with tibbles containing the level of 54 | garbage collection (0-2, columns) for each iteration (rows). 55 | } 56 | } 57 | \description{ 58 | Summarize \link{mark} results. 59 | } 60 | \details{ 61 | If \code{filter_gc == TRUE} (the default) runs that contain a garbage 62 | collection will be removed before summarizing. This is most useful for fast 63 | expressions when the majority of runs do not contain a gc. Call 64 | \code{summary(filter_gc = FALSE)} if you would like to compute summaries \emph{with} 65 | these times, such as expressions with lots of allocations when all or most 66 | runs contain a gc. 67 | } 68 | \examples{ 69 | dat <- data.frame(x = runif(10000, 1, 1000), y=runif(10000, 1, 1000)) 70 | 71 | # `bench::mark()` implicitly calls summary() automatically 72 | results <- bench::mark( 73 | dat[dat$x > 500, ], 74 | dat[which(dat$x > 500), ], 75 | subset(dat, x > 500)) 76 | 77 | # However you can also do so explicitly to filter gc differently. 
78 | summary(results, filter_gc = FALSE) 79 | 80 | # Or output relative times 81 | summary(results, relative = TRUE) 82 | } 83 | -------------------------------------------------------------------------------- /man/workout.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/workout.R 3 | \name{workout} 4 | \alias{workout} 5 | \alias{workout_expressions} 6 | \title{Workout a group of expressions individually} 7 | \usage{ 8 | workout(expr, description = NULL) 9 | 10 | workout_expressions(exprs, env = parent.frame(), description = NULL) 11 | } 12 | \arguments{ 13 | \item{expr}{one or more expressions to workout, use \code{{}} to pass multiple 14 | expressions.} 15 | 16 | \item{description}{A name to label each expression, if not supplied the 17 | deparsed expression will be used.} 18 | 19 | \item{exprs}{A list of calls to measure.} 20 | 21 | \item{env}{The environment in which the expressions should be evaluated.} 22 | } 23 | \description{ 24 | Given a block of expressions in \code{{}} \code{\link[=workout]{workout()}} individually times each 25 | expression in the group. \code{\link[=workout_expressions]{workout_expressions()}} is a lower level function most 26 | useful when reading lists of calls from a file. 
27 | } 28 | \examples{ 29 | workout({ 30 | x <- 1:1000 31 | evens <- x \%\% 2 == 0 32 | y <- x[evens] 33 | length(y) 34 | length(which(evens)) 35 | sum(evens) 36 | }) 37 | 38 | # The equivalent to the above, reading the code from a file 39 | workout_expressions(as.list(parse(system.file("examples/exprs.R", package = "bench")))) 40 | } 41 | -------------------------------------------------------------------------------- /revdep/.gitignore: -------------------------------------------------------------------------------- 1 | checks 2 | library 3 | checks.noindex 4 | library.noindex 5 | cloud.noindex 6 | data.sqlite 7 | *.html 8 | -------------------------------------------------------------------------------- /revdep/README.md: -------------------------------------------------------------------------------- 1 | # Revdeps 2 | 3 | ## New problems (1) 4 | 5 | |package |version |error |warning |note | 6 | |:-------|:-------|:---------|:-------|:----| 7 | |[httr2](problems.md#httr2)|0.2.2 |-1 __+1__ | | | 8 | 9 | -------------------------------------------------------------------------------- /revdep/cran.md: -------------------------------------------------------------------------------- 1 | ## revdepcheck results 2 | 3 | We checked 30 reverse dependencies, comparing R CMD check results across CRAN and dev versions of this package. 4 | 5 | * We saw 1 new problems 6 | * We failed to check 0 packages 7 | 8 | Issues with CRAN packages are summarised below. 9 | 10 | ### New problems 11 | (This reports the first line of each new failure) 12 | 13 | * httr2 14 | checking examples ... ERROR 15 | 16 | -------------------------------------------------------------------------------- /revdep/failures.md: -------------------------------------------------------------------------------- 1 | *Wow, no problems at all. 
:)* -------------------------------------------------------------------------------- /revdep/problems.md: -------------------------------------------------------------------------------- 1 | # httr2 2 | 3 |
4 | 5 | * Version: 0.2.2 6 | * GitHub: https://github.com/r-lib/httr2 7 | * Source code: https://github.com/cran/httr2 8 | * Date/Publication: 2022-09-25 17:50:03 UTC 9 | * Number of recursive dependencies: 70 10 | 11 | Run `revdepcheck::cloud_details(, "httr2")` for more info 12 | 13 |
14 | 15 | ## Newly broken 16 | 17 | * checking examples ... ERROR 18 | ``` 19 | Running examples in ‘httr2-Ex.R’ failed 20 | The error most likely occurred in: 21 | 22 | > ### Name: resp_raw 23 | > ### Title: Show the raw response 24 | > ### Aliases: resp_raw 25 | > 26 | > ### ** Examples 27 | > 28 | > resp <- request("https://httpbin.org/json") %>% req_perform() 29 | Error in `resp_abort()`: 30 | ! HTTP 504 Gateway Timeout. 31 | Backtrace: 32 | ▆ 33 | 1. ├─request("https://httpbin.org/json") %>% req_perform() 34 | 2. └─httr2::req_perform(.) 35 | 3. └─httr2:::resp_abort(resp, error_body(req, resp)) 36 | 4. └─rlang::abort(...) 37 | Execution halted 38 | ``` 39 | 40 | ## Newly fixed 41 | 42 | * checking re-building of vignette outputs ... ERROR 43 | ``` 44 | Error(s) in re-building vignettes: 45 | ... 46 | --- re-building ‘httr2.Rmd’ using rmarkdown 47 | Quitting from lines 117-120 (httr2.Rmd) 48 | Error: processing vignette 'httr2.Rmd' failed with diagnostics: 49 | HTTP 504 Gateway Timeout. 50 | --- failed re-building ‘httr2.Rmd’ 51 | 52 | SUMMARY: processing the following file failed: 53 | ‘httr2.Rmd’ 54 | 55 | Error: Vignette re-building failed. 
56 | Execution halted 57 | ``` 58 | 59 | -------------------------------------------------------------------------------- /src/.gitignore: -------------------------------------------------------------------------------- 1 | *.o 2 | *.so 3 | *.dll 4 | -------------------------------------------------------------------------------- /src/Makevars.win: -------------------------------------------------------------------------------- 1 | PKG_LIBS = -lpsapi 2 | -------------------------------------------------------------------------------- /src/load.c: -------------------------------------------------------------------------------- 1 | #include "os.h" 2 | #include 3 | 4 | #if OS_WINDOWS 5 | // Currently does nothing, there is an example of emulating linux style load on 6 | // Windows at https://github.com/giampaolo/psutil/pull/1485 7 | #elif OS_MACOS || OS_LINUX 8 | #include 9 | #endif 10 | 11 | SEXP bench_load_average_(void) { 12 | 13 | SEXP out = PROTECT(Rf_allocVector(REALSXP, 3)); 14 | REAL(out)[0] = NA_REAL; 15 | REAL(out)[1] = NA_REAL; 16 | REAL(out)[2] = NA_REAL; 17 | 18 | #if OS_MACOS 19 | double loadavg[3]; 20 | int num_load = getloadavg(loadavg, 3); 21 | if (num_load <= 0) { 22 | Rf_error("getloadavg() failed"); 23 | } 24 | 25 | for (int i = 0; i < num_load; ++i) { 26 | REAL(out)[i] = loadavg[i]; 27 | } 28 | #endif 29 | 30 | UNPROTECT(1); 31 | return out; 32 | } 33 | -------------------------------------------------------------------------------- /src/mark.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | #include 4 | #include 5 | #include "nanotime.h" 6 | #include 7 | #include 8 | #include 9 | 10 | double get_overhead(SEXP env) { 11 | long double overhead = 100.0; 12 | for (int i = 0; i < 10; ++i) { 13 | long double diff = expr_elapsed_time(R_NilValue, env); 14 | if (diff > 0 && diff < overhead) { 15 | overhead = diff; 16 | } 17 | } 18 | 19 | if (overhead == 100.0) { 20 | overhead = 0.0; 21 | } 22 
| 23 | return overhead; 24 | } 25 | 26 | SEXP mark_(SEXP expr, SEXP env, SEXP min_time, SEXP min_itr, SEXP max_itr, SEXP gcinfo) { 27 | R_xlen_t min_itr_ = INTEGER(min_itr)[0]; 28 | R_xlen_t max_itr_ = INTEGER(max_itr)[0]; 29 | double min_time_ = REAL(min_time)[0]; 30 | Rboolean gcinfo_ = LOGICAL(gcinfo)[0]; 31 | 32 | SEXP out = PROTECT(Rf_allocVector(REALSXP, max_itr_)); 33 | 34 | long double total = 0; 35 | 36 | double overhead = get_overhead(env); 37 | 38 | R_xlen_t i = 0; 39 | for (; i < max_itr_ && ( (total < min_time_) || i < min_itr_); ++i) { 40 | 41 | long double elapsed = expr_elapsed_time(expr, env); 42 | 43 | if (gcinfo_) { 44 | // We don't emit the separator during low level testing of `mark_()` 45 | // 1E is record separator 46 | REprintf("\x1E"); 47 | } 48 | 49 | REAL(out)[i] = elapsed - overhead; 50 | total+=elapsed; 51 | 52 | // We could do this less than every iteration, but even with 500,000 iterations 53 | // the overhead seems to be less than 200 ms, so it seems ok and simpler 54 | // to just do it unconditionally on every iteration. 
55 | R_CheckUserInterrupt(); 56 | } 57 | 58 | out = Rf_xlengthgets(out, i); 59 | 60 | UNPROTECT(1); 61 | 62 | return out; 63 | } 64 | 65 | SEXP system_time_(SEXP expr, SEXP env) { 66 | double real_begin = real_time(); 67 | double process_begin = process_cpu_time(); 68 | Rf_eval(expr, env); 69 | double process_end = process_cpu_time(); 70 | double real_end = real_time(); 71 | 72 | SEXP out = PROTECT(Rf_allocVector(REALSXP, 2)); 73 | REAL(out)[0] = process_end - process_begin; 74 | REAL(out)[1] = real_end - real_begin; 75 | 76 | UNPROTECT(1); 77 | return out; 78 | } 79 | 80 | SEXP hires_time_(void) { 81 | double time = real_time(); 82 | SEXP out = PROTECT(Rf_allocVector(REALSXP, 1)); 83 | REAL(out)[0] = time; 84 | 85 | UNPROTECT(1); 86 | return out; 87 | } 88 | 89 | SEXP parse_gc_(SEXP x) { 90 | 91 | R_xlen_t n = Rf_xlength(x); 92 | const char *out_nms[] = {"level0", "level1", "level2", ""}; 93 | SEXP out = PROTECT(Rf_mkNamed(VECSXP, out_nms)); 94 | SET_VECTOR_ELT(out, 0, Rf_allocVector(INTSXP, n)); 95 | SET_VECTOR_ELT(out, 1, Rf_allocVector(INTSXP, n)); 96 | SET_VECTOR_ELT(out, 2, Rf_allocVector(INTSXP, n)); 97 | 98 | int* level0 = INTEGER(VECTOR_ELT(out, 0)); 99 | int* level1 = INTEGER(VECTOR_ELT(out, 1)); 100 | int* level2 = INTEGER(VECTOR_ELT(out, 2)); 101 | for (int i = 0; i < n; ++i) { 102 | level0[i] = 0; 103 | level1[i] = 0; 104 | level2[i] = 0; 105 | const char* str = CHAR(STRING_ELT(x, i)); 106 | while((str = strstr(str, " (level ")) != NULL) { 107 | if (strncmp(str, " (level 0) ...", 13) == 0) { 108 | level0[i]++; 109 | } else if (strncmp(str, " (level 1) ...", 13) == 0) { 110 | level1[i]++; 111 | } else if (strncmp(str, " (level 2) ...", 13) == 0) { 112 | level2[i]++; 113 | } 114 | str+=8; 115 | } 116 | } 117 | 118 | UNPROTECT(1); 119 | 120 | return out; 121 | } 122 | 123 | extern SEXP bench_process_memory_(void); 124 | extern SEXP bench_load_average_(void); 125 | 126 | static const R_CallMethodDef CallEntries[] = { 127 | {"mark_", (DL_FUNC) &mark_, 6}, 
128 | {"system_time_", (DL_FUNC) &system_time_, 2}, 129 | {"bench_process_memory_", (DL_FUNC) &bench_process_memory_, 0}, 130 | {"bench_load_average_", (DL_FUNC) &bench_load_average_, 0}, 131 | {"hires_time_", (DL_FUNC) &hires_time_, 0}, 132 | {"parse_gc_", (DL_FUNC) &parse_gc_, 1}, 133 | {NULL, NULL, 0} 134 | }; 135 | 136 | void R_init_bench(DllInfo *dll) { 137 | R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); 138 | R_useDynamicSymbols(dll, FALSE); 139 | } 140 | -------------------------------------------------------------------------------- /src/nanotime.c: -------------------------------------------------------------------------------- 1 | #include "nanotime.h" 2 | #include "os.h" 3 | 4 | #if OS_WINDOWS 5 | #include 6 | #elif OS_MACOS 7 | #include 8 | #include 9 | #include 10 | #include 11 | #else 12 | #define __EXTENSIONS__ 13 | #include 14 | #include 15 | #define NSEC_PER_SEC 1000000000 /* nanoseconds per second */ 16 | #endif 17 | 18 | 19 | #if OS_WINDOWS 20 | long double real_time(void) { 21 | // https://msdn.microsoft.com/en-us/library/windows/desktop/ms644904(v=vs.85).aspx 22 | static LARGE_INTEGER frequency; 23 | frequency.QuadPart = 0; 24 | if (frequency.QuadPart == 0) { 25 | if (QueryPerformanceFrequency(&frequency) == FALSE) { 26 | Rf_error("QueryPerformanceFrequency(...) failed"); 27 | } 28 | } 29 | LARGE_INTEGER count; 30 | if (QueryPerformanceCounter(&count) == FALSE) { 31 | Rf_error("QueryPerformanceCounter(...) failed"); 32 | } 33 | return (long double) count.QuadPart / frequency.QuadPart; 34 | } 35 | #elif OS_MACOS 36 | long double real_time(void) { 37 | 38 | // https://developer.apple.com/library/content/qa/qa1398/_index.html 39 | //static mach_timebase_info_data_t info; 40 | static uint64_t ratio = 0; 41 | 42 | if (ratio == 0) { 43 | mach_timebase_info_data_t info; 44 | if (mach_timebase_info(&info) != KERN_SUCCESS) { 45 | Rf_error("mach_timebase_info(...) 
failed"); 46 | } 47 | ratio = info.numer / info.denom; 48 | } 49 | 50 | uint64_t time = mach_absolute_time(); 51 | uint64_t nanos = time * ratio; 52 | return (long double)nanos / NSEC_PER_SEC; 53 | } 54 | #elif OS_SOLARIS 55 | long double real_time(void) { 56 | hrtime_t time = gethrtime(); 57 | // The man page doesn't mention any error return values 58 | 59 | return (long double)time / NSEC_PER_SEC; 60 | } 61 | #else 62 | long double real_time(void) { 63 | struct timespec ts; 64 | if (clock_gettime(CLOCK_REALTIME, &ts) != 0) { 65 | Rf_error("clock_gettime(CLOCK_REALTIME, ...) failed"); 66 | } 67 | 68 | return ts.tv_sec + (long double)ts.tv_nsec / NSEC_PER_SEC; 69 | } 70 | #endif 71 | 72 | long double process_cpu_time(void) { 73 | #if OS_WINDOWS 74 | HANDLE proc = GetCurrentProcess(); 75 | FILETIME creation_time; 76 | FILETIME exit_time; 77 | FILETIME kernel_time; 78 | FILETIME user_time; 79 | if (GetProcessTimes(proc, &creation_time, &exit_time, &kernel_time, 80 | &user_time) == FALSE) { 81 | Rf_error("GetProcessTimes(...) failed"); 82 | } 83 | ULARGE_INTEGER kernel; 84 | ULARGE_INTEGER user; 85 | kernel.HighPart = kernel_time.dwHighDateTime; 86 | kernel.LowPart = kernel_time.dwLowDateTime; 87 | user.HighPart = user_time.dwHighDateTime; 88 | user.LowPart = user_time.dwLowDateTime; 89 | return (((long double)kernel.QuadPart + (long double)user.QuadPart) * 1e-7); 90 | #elif OS_SOLARIS 91 | hrtime_t time = gethrvtime(); 92 | // The man page doesn't mention any error return values 93 | 94 | return (long double)time / NSEC_PER_SEC; 95 | #elif defined(CLOCK_PROCESS_CPUTIME_ID) 96 | // Modern macOS and Linux 97 | struct timespec ts; 98 | if (clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts) != 0) { 99 | Rf_error("clock_gettime(CLOCK_PROCESS_CPUTIME_ID, ...) 
failed"); 100 | } 101 | return ts.tv_sec + (long double)ts.tv_nsec / NSEC_PER_SEC; 102 | #else 103 | // macOS before 10.12 didn't define `CLOCK_PROCESS_CPUTIME_ID` 104 | // https://github.com/r-lib/bench/commit/cfd4e2392f980e29d833f4df42a43ea2ba131aaf 105 | struct rusage ru; 106 | if (getrusage(RUSAGE_SELF, &ru) != 0) { 107 | Rf_error("getrusage(RUSAGE_SELF, ...) failed"); 108 | } 109 | return ru.ru_utime.tv_sec + (long double) ru.ru_utime.tv_usec * 1e-6 + 110 | ru.ru_stime.tv_sec + (long double) ru.ru_stime.tv_usec * 1e-6; 111 | #endif 112 | } 113 | 114 | long double expr_elapsed_time(SEXP expr, SEXP env) { 115 | long double start = real_time(); 116 | 117 | // Actually evaluate the R code 118 | Rf_eval(expr, env); 119 | 120 | long double end = real_time(); 121 | 122 | return end - start; 123 | } 124 | -------------------------------------------------------------------------------- /src/nanotime.h: -------------------------------------------------------------------------------- 1 | #ifndef NANOTIME_H 2 | #define NANOTIME_H 3 | 4 | #include "Rinternals.h" 5 | 6 | long double real_time(void); 7 | long double process_cpu_time(void); 8 | long double expr_elapsed_time(SEXP expr, SEXP env); 9 | 10 | #endif 11 | -------------------------------------------------------------------------------- /src/os.h: -------------------------------------------------------------------------------- 1 | #ifndef __OS__ 2 | #define __OS__ 3 | 4 | #ifdef _WIN32 5 | #define OS_WINDOWS 1 6 | #else 7 | #define OS_WINDOWS 0 8 | #endif 9 | 10 | #ifdef __APPLE__ 11 | #define OS_MACOS 1 12 | #else 13 | #define OS_MACOS 0 14 | #endif 15 | 16 | #ifdef __linux__ 17 | #define OS_LINUX 1 18 | #else 19 | #define OS_LINUX 0 20 | #endif 21 | 22 | #ifdef __sun 23 | #define OS_SOLARIS 1 24 | #else 25 | #define OS_SOLARIS 0 26 | #endif 27 | 28 | #endif 29 | -------------------------------------------------------------------------------- /src/process_memory.c: 
-------------------------------------------------------------------------------- 1 | #include "os.h" 2 | 3 | #include 4 | 5 | #if OS_WINDOWS 6 | #include 7 | #define PSAPI_VERSION 1 8 | #include 9 | #elif OS_MACOS 10 | #include 11 | #include 12 | #include 13 | #include 14 | #elif OS_LINUX 15 | #include 16 | #include 17 | #include 18 | #endif 19 | 20 | #define FAILURE -1 21 | 22 | #if OS_LINUX 23 | /* read_proc_file is derived from https://github.com/cran/memuse/blob/f2be8bc6f6af3771161c6e58ea5b6c1dd0eafcd7/src/meminfo/src/platform.c#L44 24 | * Copyright (c) 2014-2017 Drew Schmidt 25 | All rights reserved. 26 | 27 | Redistribution and use in source and binary forms, with or without 28 | modification, are permitted provided that the following conditions are met: 29 | 30 | 1. Redistributions of source code must retain the above copyright notice, 31 | this list of conditions and the following disclaimer. 32 | 33 | 2. Redistributions in binary form must reproduce the above copyright 34 | notice, this list of conditions and the following disclaimer in the 35 | documentation and/or other materials provided with the distribution. 36 | 37 | THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 38 | "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED 39 | TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 40 | PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR 41 | CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, 42 | EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, 43 | PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR 44 | PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF 45 | LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING 46 | NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS 47 | SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
48 | */ 49 | int read_proc_file(const char *file, uint64_t *val, char *field, int fieldlen) { 50 | size_t len = 0; 51 | char *tmp = NULL; 52 | uint64_t value = FAILURE; 53 | 54 | *val = 0L; 55 | 56 | FILE* fp = fopen(file, "r"); 57 | 58 | if (fp != NULL) { 59 | while (getline(&tmp, &len, fp) >= 0) { 60 | if (strncmp(tmp, field, fieldlen) == 0) { 61 | sscanf(tmp, "%*s%" SCNu64, &value); 62 | break; 63 | } 64 | } 65 | 66 | fclose(fp); 67 | free(tmp); 68 | 69 | if (value != (uint64_t)FAILURE) { 70 | *val = value; 71 | return 0; 72 | } 73 | } 74 | 75 | return FAILURE; 76 | } 77 | #endif 78 | 79 | SEXP bench_process_memory_(void) { 80 | 81 | SEXP out = PROTECT(Rf_allocVector(REALSXP, 2)); 82 | REAL(out)[0] = NA_REAL; 83 | REAL(out)[1] = NA_REAL; 84 | 85 | #if OS_LINUX 86 | uint64_t current_size = 0; 87 | uint64_t peak_size = 0; 88 | 89 | if(read_proc_file("/proc/self/status", ¤t_size, "VmSize:", 7) != 0) { 90 | Rf_error("read_proc_file(...) failed"); 91 | } 92 | 93 | if(read_proc_file("/proc/self/status", &peak_size, "VmPeak:", 7) != 0) { 94 | Rf_error("read_proc_file(...) failed"); 95 | } 96 | 97 | REAL(out)[0] = current_size * 1024; 98 | REAL(out)[1] = peak_size * 1024; 99 | 100 | #elif OS_WINDOWS 101 | PROCESS_MEMORY_COUNTERS pmc; 102 | if (!GetProcessMemoryInfo(GetCurrentProcess(), &pmc, sizeof(pmc))) { 103 | Rf_error("GetProcessMemoryInfo(...) failed"); 104 | } 105 | 106 | REAL(out)[0] = pmc.WorkingSetSize; 107 | REAL(out)[1] = pmc.PeakWorkingSetSize; 108 | 109 | #elif OS_MACOS 110 | 111 | struct task_basic_info info; 112 | mach_msg_type_number_t info_count = TASK_BASIC_INFO_COUNT; 113 | 114 | if (task_info(mach_task_self(), TASK_BASIC_INFO, (task_info_t)&info, &info_count) != 0) { 115 | Rf_error("task_info(TASK_BASIC_INFO, ...) failed"); 116 | } 117 | 118 | struct rusage ru; 119 | if (getrusage(RUSAGE_SELF, &ru) != 0) { 120 | Rf_error("getrusage(RUSAGE_SELF, ...) 
failed"); 121 | } 122 | 123 | REAL(out)[0] = info.resident_size; 124 | REAL(out)[1] = ru.ru_maxrss; 125 | #endif 126 | 127 | UNPROTECT(1); 128 | return out; 129 | } 130 | -------------------------------------------------------------------------------- /tests/testthat.R: -------------------------------------------------------------------------------- 1 | # This file is part of the standard setup for testthat. 2 | # It is recommended that you do not modify it. 3 | # 4 | # Where should you do additional test configuration? 5 | # Learn more about the roles of various files in: 6 | # * https://r-pkgs.org/tests.html 7 | # * https://testthat.r-lib.org/reference/test_package.html#special-files 8 | 9 | library(testthat) 10 | library(bench) 11 | 12 | test_check("bench") 13 | -------------------------------------------------------------------------------- /tests/testthat/_snaps/mark.md: -------------------------------------------------------------------------------- 1 | # mark: Can errors with the deparsed expressions 2 | 3 | Code 4 | mark(1, 1, 3, max_iterations = 10) 5 | Condition 6 | Error: 7 | ! Each result must equal the first result: 8 | `1` does not equal `3` 9 | 10 | # mark: Works when calls are different lengths 11 | 12 | Code 13 | mark(if (TRUE) 2, if (TRUE) 1 else 3) 14 | Condition 15 | Error: 16 | ! 
Each result must equal the first result: 17 | `if (TRUE) 2` does not equal `if (TRUE) 1 else 3` 18 | 19 | # mark: truncates long expressions when printing (#94) 20 | 21 | Code 22 | out 23 | Output 24 | # A tibble: 1 x 2 25 | expression result 26 | 27 | 1 aaaaaaaaaaaaaaaaaaaa~ 28 | 29 | -------------------------------------------------------------------------------- /tests/testthat/_snaps/press.md: -------------------------------------------------------------------------------- 1 | # press: Outputs status message before evaluating each parameter 2 | 3 | Code 4 | res <- press(x = 1, mark(rep(1, x), max_iterations = 10)) 5 | Message 6 | Running with: 7 | x 8 | 1 1 9 | 10 | --- 11 | 12 | Code 13 | messages 14 | Output 15 | [1] "Running with:\n x\n" "1 1\n" 16 | [3] "2 2\n" "3 3\n" 17 | 18 | -------------------------------------------------------------------------------- /tests/testthat/test-autoplot.R: -------------------------------------------------------------------------------- 1 | test_that("autoplot works", { 2 | skip_on_cran() 3 | skip_if_not_installed("ggplot2") 4 | skip_if_not_installed("tidyr") 5 | skip_if_not_installed("ggbeeswarm") 6 | y <- mark(x = 1:1000) 7 | expect_s3_class(ggplot2::autoplot(y), "ggplot") 8 | expect_s3_class(plot(y), "ggplot") 9 | }) 10 | -------------------------------------------------------------------------------- /tests/testthat/test-bench_process_memory.R: -------------------------------------------------------------------------------- 1 | describe("bench_process_memory", { 2 | it("has a current and max memory of bench bytes", { 3 | res <- bench_process_memory() 4 | expect_named(res, c("current", "max")) 5 | expect_s3_class(res[["current"]], "bench_bytes") 6 | expect_s3_class(res[["max"]], "bench_bytes") 7 | }) 8 | 9 | # This test is unreliable due to when gcs happen when run repeatedly, so it 10 | # is commented out. 
11 | #it("current memory increases when you allocate a medium size vector", { 12 | #res1 <- bench_process_memory() 13 | #x <- rep(1, 1e8) 14 | #res2 <- bench_process_memory() 15 | #expect_true(res2[["current"]] > res1[["current"]]) 16 | #}) 17 | }) 18 | -------------------------------------------------------------------------------- /tests/testthat/test-bench_time.R: -------------------------------------------------------------------------------- 1 | describe("bench_time", { 2 | skip_on_cran() 3 | 4 | res <- bench_time(1 + 1:1e7) 5 | it("returns process and real time", { 6 | expect_equal(names(res), c("process", "real")) 7 | }) 8 | it("returns times that are reasonable, system and real time are relatively 9 | close for process bound expressions", { 10 | epsilon <- abs(res[[1]] - res[[2]]) 11 | expect_true((epsilon / res[[1]]) < 5) 12 | }) 13 | it("returns times that are reasonable, system and real time are far apart 14 | for non-process bound expressions", { 15 | res <- bench_time(Sys.sleep(.5)) 16 | epsilon <- abs(res[[1]] - res[[2]]) 17 | expect_true((epsilon / res[[1]]) > 20) 18 | }) 19 | }) 20 | 21 | describe("bench_memory", { 22 | skip_on_cran() 23 | 24 | res <- bench_memory(1 + 1:1e7) 25 | it("returns memory allocation and the raw memory used", { 26 | expect_equal(names(res), c("mem_alloc", "memory")) 27 | }) 28 | it("returns reasonable memory allocation", { 29 | expect_true(res[["mem_alloc"]] > "10MB") 30 | }) 31 | }) 32 | -------------------------------------------------------------------------------- /tests/testthat/test-bytes.R: -------------------------------------------------------------------------------- 1 | describe("as_bench_bytes", { 2 | it("accepts numeric input unchanged", { 3 | expect_equal(unclass(as_bench_bytes(123L)), 123L) 4 | expect_equal(unclass(as_bench_bytes(123)), 123) 5 | }) 6 | it("accepts bench_byte input unchanged", { 7 | x <- as_bench_bytes(123) 8 | expect_equal(as_bench_bytes(x), x) 9 | }) 10 | it("coerces character input", { 11 | 
expect_equal(unclass(as_bench_bytes("1")), 1) 12 | expect_equal(unclass(as_bench_bytes("1K")), 1024) 13 | expect_equal(unclass(as_bench_bytes("1M")), 1024 * 1024) 14 | expect_equal(unclass(as_bench_bytes("10M")), 10 * 1024 * 1024) 15 | expect_equal(unclass(as_bench_bytes("1G")), 1024 * 1024 * 1024) 16 | }) 17 | }) 18 | 19 | describe("format.bench_bytes", { 20 | it("formats bytes under 1024 as whole numbers", { 21 | expect_equal(format(bench_bytes(0)), "0B") 22 | expect_equal(format(bench_bytes(1)), "1B") 23 | expect_equal(format(bench_bytes(1023)), "1023B") 24 | }) 25 | it("formats bytes 1024 and up as abbreviated numbers", { 26 | expect_equal(format(bench_bytes(1024)), "1KB") 27 | expect_equal(format(bench_bytes(1025)), "1KB") 28 | expect_equal(format(bench_bytes(2^16)), "64KB") 29 | expect_equal(format(bench_bytes(2^24)), "16MB") 30 | expect_equal(format(bench_bytes(2^24 + 555555)), "16.5MB") 31 | expect_equal(format(bench_bytes(2^32)), "4GB") 32 | expect_equal(format(bench_bytes(2^48)), "256TB") 33 | expect_equal(format(bench_bytes(2^64)), "16EB") 34 | }) 35 | it("handles NA and NaN", { 36 | expect_equal(format(bench_bytes(NA)), "NA") 37 | expect_equal(format(bench_bytes(NaN)), "NaN") 38 | }) 39 | it("works with vectors", { 40 | v <- c(NA, 1, 2^13, 2^20, NaN, 2^15) 41 | expect_equal( 42 | format(bench_bytes(v), trim = TRUE), 43 | c("NA", "1B", "8KB", "1MB", "NaN", "32KB") 44 | ) 45 | 46 | expect_equal(format(bench_bytes(numeric())), character()) 47 | }) 48 | }) 49 | 50 | describe("sum.bench_bytes", { 51 | it("sums its input and returns a bench_byte", { 52 | expect_equal(sum(bench_bytes(0)), new_bench_bytes(0)) 53 | expect_equal(sum(bench_bytes(c(1, 2))), new_bench_bytes(3)) 54 | expect_equal(sum(bench_bytes(c(1, NA))), new_bench_bytes(NA_real_)) 55 | }) 56 | }) 57 | 58 | describe("min.bench_bytes", { 59 | it("finds minimum input and returns a bench_byte", { 60 | expect_equal(min(bench_bytes(0)), new_bench_bytes(0)) 61 | expect_equal(min(bench_bytes(c(1, 2))), 
new_bench_bytes(1)) 62 | expect_equal(min(bench_bytes(c(1, NA))), new_bench_bytes(NA_real_)) 63 | }) 64 | }) 65 | 66 | describe("max.bench_bytes", { 67 | it("finds maximum input and returns a bench_byte", { 68 | expect_equal(max(bench_bytes(0)), new_bench_bytes(0)) 69 | expect_equal(max(bench_bytes(c(1, 2))), new_bench_bytes(2)) 70 | expect_equal(max(bench_bytes(c(1, NA))), new_bench_bytes(NA_real_)) 71 | }) 72 | }) 73 | 74 | describe("[.bench_bytes", { 75 | it("retains the bench_bytes class", { 76 | x <- bench_bytes(c(100, 200, 300)) 77 | expect_equal(x[], x) 78 | expect_equal(x[1], new_bench_bytes(100)) 79 | expect_equal(x[1:2], new_bench_bytes(c(100, 200))) 80 | }) 81 | }) 82 | 83 | describe("Ops.bench_bytes", { 84 | it("errors for unary operators", { 85 | x <- bench_bytes(c(100, 200, 300)) 86 | expect_error(!x, "unary '!' not defined for \"bench_bytes\" objects") 87 | expect_error(+x, "unary '\\+' not defined for \"bench_bytes\" objects") 88 | expect_error(-x, "unary '-' not defined for \"bench_bytes\" objects") 89 | }) 90 | 91 | it("works with boolean comparison operators", { 92 | x <- bench_bytes(c(100, 200, 300)) 93 | 94 | expect_equal(x == 100, c(TRUE, FALSE, FALSE)) 95 | expect_equal(x != 100, c(FALSE, TRUE, TRUE)) 96 | expect_equal(x > 100, c(FALSE, TRUE, TRUE)) 97 | expect_equal(x >= 100, c(TRUE, TRUE, TRUE)) 98 | expect_equal(x < 200, c(TRUE, FALSE, FALSE)) 99 | expect_equal(x <= 200, c(TRUE, TRUE, FALSE)) 100 | }) 101 | 102 | it("works with arithmetic operators", { 103 | x <- bench_bytes(c(100, 200, 300)) 104 | 105 | expect_equal(x + 100, bench_bytes(c(200, 300, 400))) 106 | expect_equal(x - 100, bench_bytes(c(0, 100, 200))) 107 | expect_equal(x * 100, bench_bytes(c(10000, 20000, 30000))) 108 | expect_equal(x / 2, bench_bytes(c(50, 100, 150))) 109 | expect_equal(x^2, bench_bytes(c(10000, 40000, 90000))) 110 | }) 111 | 112 | it("errors for other binary operators", { 113 | x <- bench_bytes(c(100, 200, 300)) 114 | expect_error(x %% 2, "'%%' not defined 
for \"bench_bytes\" objects") 115 | expect_error(x %/% 2, "'%/%' not defined for \"bench_bytes\" objects") 116 | expect_error(x & TRUE, "'&' not defined for \"bench_bytes\" objects") 117 | expect_error(x | TRUE, "'|' not defined for \"bench_bytes\" objects") 118 | }) 119 | }) 120 | -------------------------------------------------------------------------------- /tests/testthat/test-expression.R: -------------------------------------------------------------------------------- 1 | test_that("`description` is sliced along with expressions", { 2 | x <- as.list(expression(x + y, z + b)) 3 | x <- new_bench_expr(x, c("a", "b")) 4 | 5 | expect_identical(attr(x[2], "description"), "b") 6 | expect_identical(attr(x[c(2, 2, 1)], "description"), c("b", "b", "a")) 7 | }) 8 | 9 | test_that("`vec_slice()` slices `description` attribute", { 10 | skip_if_not_installed("vctrs") 11 | 12 | x <- as.list(expression(x + y, z + b)) 13 | x <- new_bench_expr(x, c("a", "b")) 14 | 15 | expect_identical(attr(vctrs::vec_slice(x, 2), "description"), "b") 16 | expect_identical( 17 | attr(vctrs::vec_slice(x, c(2, 2, 1)), "description"), 18 | c("b", "b", "a") 19 | ) 20 | }) 21 | -------------------------------------------------------------------------------- /tests/testthat/test-hires_time.R: -------------------------------------------------------------------------------- 1 | describe("hires_time", { 2 | skip_on_cran() 3 | 4 | it("returns hi resolution times", { 5 | # it is hard to test this, we will just sleep and verify the second time is 6 | # after the first. 
7 | start <- hires_time() 8 | Sys.sleep(.1) 9 | end <- hires_time() 10 | expect_type(start, "double") 11 | expect_true(end > start) 12 | }) 13 | }) 14 | -------------------------------------------------------------------------------- /tests/testthat/test-mark.R: -------------------------------------------------------------------------------- 1 | describe("mark_", { 2 | it("If min_time is Inf, runs for max_iterations", { 3 | res <- .Call( 4 | mark_, 5 | quote(1), 6 | new.env(), 7 | Inf, 8 | as.integer(0), 9 | as.integer(10), 10 | FALSE 11 | ) 12 | expect_length(res, 10) 13 | 14 | res <- .Call( 15 | mark_, 16 | quote(1), 17 | new.env(), 18 | Inf, 19 | as.integer(0), 20 | as.integer(20), 21 | FALSE 22 | ) 23 | expect_length(res, 20) 24 | }) 25 | 26 | it("If min_time is 0, runs for min_iterations", { 27 | res <- .Call( 28 | mark_, 29 | quote(1), 30 | new.env(), 31 | 0, 32 | as.integer(1), 33 | as.integer(10), 34 | FALSE 35 | ) 36 | expect_length(res, 1) 37 | 38 | res <- .Call( 39 | mark_, 40 | quote(1), 41 | new.env(), 42 | 0, 43 | as.integer(5), 44 | as.integer(10), 45 | FALSE 46 | ) 47 | expect_length(res, 5) 48 | }) 49 | 50 | it("If min_time is 0, runs for min_iterations", { 51 | res <- .Call( 52 | mark_, 53 | quote({ 54 | i <- 1 55 | while (i < 10000) i <- i + 1 56 | }), 57 | new.env(), 58 | .1, 59 | as.integer(1), 60 | as.integer(1000), 61 | FALSE 62 | ) 63 | 64 | expect_gte(length(res), 1) 65 | expect_lte(length(res), 1000) 66 | }) 67 | 68 | it("Evaluates code in the environment", { 69 | e <- new.env(parent = baseenv()) 70 | res <- .Call( 71 | mark_, 72 | quote({ 73 | a <- 42 74 | }), 75 | e, 76 | Inf, 77 | as.integer(1), 78 | as.integer(1), 79 | FALSE 80 | ) 81 | expect_equal(e[["a"]], 42) 82 | }) 83 | }) 84 | 85 | describe("mark", { 86 | it("Uses all.equal to check results by default", { 87 | res <- mark(1 + 1, 1L + 1L, check = TRUE, iterations = 1) 88 | 89 | expect_type(res$result, "list") 90 | expect_true(all.equal(res$result[[1]], res$result[[2]])) 91 | }) 
92 | it("Can use other functions to check results like identical to check results", { 93 | # numerics and integers not identical 94 | expect_error( 95 | regexp = "Each result must equal the first result", 96 | mark(1 + 1, 1L + 1L, check = identical, iterations = 1) 97 | ) 98 | 99 | # Function that always returns false 100 | expect_error( 101 | regexp = "Each result must equal the first result", 102 | mark(1 + 1, 1 + 1, check = function(x, y) FALSE, iterations = 1) 103 | ) 104 | 105 | # Function that always returns true 106 | res <- mark(1 + 1, 1 + 2, check = function(x, y) TRUE, iterations = 1) 107 | 108 | expect_type(res$result, "list") 109 | expect_equal(res$result[[1]], 2) 110 | expect_equal(res$result[[2]], 3) 111 | }) 112 | 113 | it("works with capabilities('profmem')", { 114 | skip_if_not(capabilities("profmem")) 115 | 116 | res <- mark(1, 2, check = FALSE, iterations = 1) 117 | 118 | expect_length(res$memory, 2) 119 | 120 | expect_s3_class(res$memory[[1]], "Rprofmem") 121 | expect_equal(ncol(res$memory[[1]]), 3) 122 | expect_gte(nrow(res$memory[[1]]), 0) 123 | }) 124 | 125 | it("works without capabilities('profmem')", { 126 | res <- mark(1, 2, check = FALSE, iterations = 1, memory = FALSE) 127 | 128 | expect_equal(res$memory, vector("list", 2)) 129 | expect_equal(res$mem_alloc, as_bench_bytes(c(NA, NA))) 130 | }) 131 | it("Can handle `NULL` results", { 132 | res <- mark(if (FALSE) 1, max_iterations = 10) 133 | expect_equal(res$result, list(NULL)) 134 | }) 135 | it("Can errors with the deparsed expressions", { 136 | expect_snapshot(error = TRUE, { 137 | mark(1, 1, 3, max_iterations = 10) 138 | }) 139 | }) 140 | 141 | it("Works when calls are different lengths", { 142 | expect_snapshot(error = TRUE, { 143 | # Here the first call deparses to length 2, the second to length 4 144 | mark(if (TRUE) 2, if (TRUE) 1 else 3) 145 | }) 146 | }) 147 | it("works with memory = FALSE", { 148 | res <- mark(1, memory = FALSE) 149 | expect_s3_class(res, "bench_mark") 150 | 
expect_equal(res$memory, vector("list", 1)) 151 | expect_equal(res$mem_alloc, as_bench_bytes(NA)) 152 | }) 153 | it("works with check = FALSE", { 154 | res <- mark(1, check = FALSE) 155 | expect_s3_class(res, "bench_mark") 156 | expect_equal(res$result, list(NULL)) 157 | }) 158 | it("works with memory = FALSE and check = FALSE", { 159 | res <- mark(1, memory = FALSE, check = FALSE) 160 | expect_s3_class(res, "bench_mark") 161 | expect_equal(res$memory, list(NULL)) 162 | expect_equal(res$mem_alloc, as_bench_bytes(NA)) 163 | }) 164 | it("fails for memory profiling failures", { 165 | skip_on_os("windows") 166 | skip_on_cran() 167 | 168 | keep_busy <- function(n = 1e3) { 169 | r <- rnorm(n) 170 | p <- pnorm(r) 171 | q <- qnorm(p) 172 | o <- order(q) 173 | } 174 | expect_error( 175 | res <- mark(parallel::mclapply(seq_len(1e3), keep_busy, mc.cores = 2)), 176 | "Memory profiling failed" 177 | ) 178 | }) 179 | it("ignores trailing arguments", { 180 | bench::mark( 181 | 1 + 3, 182 | 2 + 2, 183 | ) 184 | }) 185 | it("truncates long expressions when printing (#94)", { 186 | local_reproducible_output(width = 30) 187 | 188 | name <- paste0(rep("a", 100), collapse = "") 189 | exprs <- list(as.name(name)) 190 | 191 | assign(name, 1, envir = environment()) 192 | 193 | out <- mark(exprs = exprs) 194 | 195 | # Only snapshot static columns 196 | out <- out[c("expression", "result")] 197 | 198 | expect_snapshot(out) 199 | }) 200 | }) 201 | 202 | describe("summary.bench_mark", { 203 | res <- bench_mark( 204 | tibble::tibble( 205 | expression = "1 + 1:1e+06", 206 | result = list(1:10), 207 | memory = list(NULL), 208 | time = list( 209 | c( 210 | 0.088492998, 211 | 0.109396977, 212 | 0.141906863, 213 | 0.005378346, 214 | 0.007563524, 215 | 0.002439451, 216 | 0.079715252, 217 | 0.003022223, 218 | 0.005948069, 219 | 0.002276121 220 | ) 221 | ), 222 | gc = list( 223 | tibble::tibble( 224 | level0 = c(1, 0, 0, 0, 1, 0, 0, 0, 1, 0), 225 | level1 = c(0, 1, 0, 0, 0, 0, 0, 0, 0, 0), 226 | 
level2 = c(0, 0, 1, 0, 0, 0, 1, 0, 0, 0) 227 | ) 228 | ) 229 | ) 230 | ) 231 | it("computes relative summaries if called with relative = TRUE", { 232 | # remove memory column, as there likely are no allocations or gc in these 233 | # benchmarks 234 | res1 <- summary(res) 235 | for (col in setdiff(summary_cols, "mem_alloc")) { 236 | # Absolute values should always be positive 237 | expect_true(all(res1[[!!col]] >= 0)) 238 | } 239 | 240 | # Relative values should always be greater than or equal to 1 241 | res2 <- summary(res, relative = TRUE) 242 | for (col in setdiff(summary_cols, c("mem_alloc", "n_gc"))) { 243 | expect_true(all(res2[[!!col]] >= 1)) 244 | } 245 | }) 246 | it("does not filter gc is `filter_gc` is FALSE", { 247 | res1 <- summary(res, filter_gc = TRUE) 248 | res2 <- summary(res, filter_gc = FALSE) 249 | 250 | expect_equal(res1$n_gc, 6) 251 | expect_equal(res1$n_gc, res2$n_gc) 252 | }) 253 | 254 | it("does not issue warnings if there are no garbage collections", { 255 | # This is artificial, but it avoids differences in gc on different 256 | # platforms / memory loads, so we can ensure the first has no gcs, and the 257 | # second has all gcs 258 | x <- bench_mark( 259 | tibble::tibble( 260 | expression = c(1, 2), 261 | result = list(1, 2), 262 | time = list( 263 | as_bench_time(c(0.166, 0.161, 0.162)), 264 | as_bench_time(c(0.276, 0.4)) 265 | ), 266 | memory = list(NULL, NULL), 267 | gc = list( 268 | tibble::tibble( 269 | level0 = integer(0), 270 | level1 = integer(0), 271 | level2 = integer(0) 272 | ), 273 | tibble::tibble( 274 | level0 = c(1L, 1L), 275 | level1 = c(0L, 0L), 276 | level2 = c(0L, 0L) 277 | ) 278 | ) 279 | ) 280 | ) 281 | 282 | expect_warning( 283 | regexp = "Some expressions had a GC in every iteration", 284 | res <- summary(x, filter_gc = TRUE) 285 | ) 286 | 287 | expect_equal(res$min, as_bench_time(c(.161, .276))) 288 | expect_equal(res$median, as_bench_time(c(.162, .338))) 289 | expect_equal(res$`itr/sec`, c(6.134969, 2.958580), 
tolerance = 1e-5) 290 | expect_equal(res$mem_alloc, as_bench_bytes(c(NA, NA))) 291 | expect_equal(res$`gc/sec`, c(0, 2.958580), tolerance = 1e-5) 292 | expect_equal(res$n_gc, c(0, 2)) 293 | expect_equal(res$n_itr, c(3, 2)) 294 | expect_equal(res$total_time, as_bench_time(c(.489, .676))) 295 | 296 | expect_no_warning(res2 <- summary(x, filter_gc = FALSE)) 297 | 298 | expect_identical(res, res2) 299 | }) 300 | }) 301 | 302 | describe("unnest.bench_mark", { 303 | it("does not contain result or memory columns", { 304 | skip_if_not_installed("tidyr", "1.0.0") 305 | bnch <- mark(1 + 1, 2 + 0) 306 | res <- tidyr::unnest(bnch, c(time, gc)) 307 | 308 | gc_cols <- colnames(bnch$gc[[1]]) 309 | 310 | expect_equal( 311 | colnames(res), 312 | c(head(colnames(bnch), n = -1), c(gc_cols, "gc")) 313 | ) 314 | 315 | expect_equal(nrow(res), length(bnch$time[[1]]) + length(bnch$time[[2]])) 316 | }) 317 | }) 318 | -------------------------------------------------------------------------------- /tests/testthat/test-press.R: -------------------------------------------------------------------------------- 1 | describe("press", { 2 | it("Adds parameters to output", { 3 | res <- press( 4 | x = 1, 5 | mark(1, max_iterations = 10), 6 | .quiet = TRUE 7 | ) 8 | expect_equal(colnames(res), c("expression", "x", summary_cols, data_cols)) 9 | expect_equal(nrow(res), 1) 10 | 11 | res2 <- press( 12 | x = 1:3, 13 | mark(1, max_iterations = 10), 14 | .quiet = TRUE 15 | ) 16 | expect_equal(colnames(res2), c("expression", "x", summary_cols, data_cols)) 17 | expect_equal(nrow(res2), 3) 18 | }) 19 | 20 | it("Outputs status message before evaluating each parameter", { 21 | expect_snapshot({ 22 | res <- press(x = 1, mark(rep(1, x), max_iterations = 10)) 23 | }) 24 | expect_equal(colnames(res), c("expression", "x", summary_cols, data_cols)) 25 | expect_equal(nrow(res), 1) 26 | 27 | messages <- character() 28 | withCallingHandlers( 29 | res <- press( 30 | x = 1:3, 31 | mark(rep(1, x), max_iterations = 10) 32 | 
), 33 | message = function(cnd) { 34 | messages <<- append(messages, conditionMessage(cnd)) 35 | if (!is.null(findRestart("muffleMessage"))) { 36 | invokeRestart("muffleMessage") 37 | } 38 | } 39 | ) 40 | expect_snapshot(messages) 41 | expect_equal(colnames(res), c("expression", "x", summary_cols, data_cols)) 42 | expect_equal(nrow(res), 3) 43 | }) 44 | 45 | it("expands the grid if has named parameters", { 46 | res <- press( 47 | x = c(1, 2), 48 | y = c(1, 3), 49 | mark(list(x, y), max_iterations = 10), 50 | .quiet = TRUE 51 | ) 52 | 53 | expect_equal(res$x, c(1, 2, 1, 2)) 54 | expect_equal(res$y, c(1, 1, 3, 3)) 55 | expect_equal(res$result[[1]], list(1, 1)) 56 | expect_equal(res$result[[2]], list(2, 1)) 57 | expect_equal(res$result[[3]], list(1, 3)) 58 | expect_equal(res$result[[4]], list(2, 3)) 59 | }) 60 | 61 | it("takes values as-is if given in .grid", { 62 | res <- press( 63 | .grid = data.frame(x = c(1, 2), y = c(1, 3)), 64 | mark(list(x, y), max_iterations = 10), 65 | .quiet = TRUE 66 | ) 67 | 68 | expect_equal(res$x, c(1, 2)) 69 | expect_equal(res$y, c(1, 3)) 70 | expect_equal(res$result[[1]], list(1, 1)) 71 | expect_equal(res$result[[2]], list(2, 3)) 72 | }) 73 | 74 | it("runs `setup` with the parameters evaluated", { 75 | x <- 1 76 | res <- press( 77 | y = 2, 78 | { 79 | x <- y 80 | mark(x) 81 | }, 82 | .quiet = TRUE 83 | ) 84 | 85 | expect_equal(res$result[[1]], 2) 86 | }) 87 | 88 | it("`.grid` subsets tibbles and data.frames the same way (#142)", { 89 | x <- data.frame(a = 1, b = 2) 90 | out <- press(mark(c(a, b)), .grid = x, .quiet = TRUE) 91 | expect_identical(out$result[[1L]], c(1, 2)) 92 | 93 | x <- tibble::tibble(a = 1, b = 2) 94 | out <- press(mark(c(a, b)), .grid = x, .quiet = TRUE) 95 | expect_identical(out$result[[1L]], c(1, 2)) 96 | }) 97 | }) 98 | -------------------------------------------------------------------------------- /tests/testthat/test-time.R: -------------------------------------------------------------------------------- 1 | 
# NOTE(review): several describe()/it() labels below were copy-pasted from
# test-bytes.R and named the wrong class/method. The S3 class under test is
# "bench_time" (as the error-message assertions in Ops show), so labels such
# as "format.as_bench_time" and "returns a bench_byte" have been corrected.
# The assertions themselves are unchanged.
describe("as_bench_time", {
  it("accepts numeric input unchanged", {
    expect_equal(unclass(as_bench_time(123L)), 123L)
    expect_equal(unclass(as_bench_time(123)), 123)
  })
  it("accepts bench_time input unchanged", {
    x <- as_bench_time(123)
    expect_equal(as_bench_time(x), x)
  })
  it("coerces character input", {
    expect_equal(unclass(as_bench_time("1")), 1)
    expect_equal(unclass(as_bench_time("1ns")), 1e-9)
    expect_equal(unclass(as_bench_time("1us")), 1e-6)
    expect_equal(unclass(as_bench_time("1ms")), 1e-3)
    expect_equal(unclass(as_bench_time("1s")), 1)
    expect_equal(unclass(as_bench_time("1m")), 60)
    expect_equal(unclass(as_bench_time("1h")), 60 * 60)
    expect_equal(unclass(as_bench_time("1d")), 60 * 60 * 24)
    expect_equal(unclass(as_bench_time("1w")), 60 * 60 * 24 * 7)
  })
})

describe("format.bench_time", {
  it("formats times under 60 as whole numbers", {
    expect_equal(format(as_bench_time(59)), "59s")
    expect_equal(format(as_bench_time(1)), "1s")
  })
  it("formats times with abbreviated units from nanoseconds to weeks", {
    withr::with_options(list("cli.unicode" = FALSE), {
      expect_equal(format(as_bench_time(.000000005)), "5ns")
      expect_equal(format(as_bench_time(.0000005)), "500ns")
      expect_equal(format(as_bench_time(.000005)), "5us")
      expect_equal(format(as_bench_time(.0005)), "500us")
      expect_equal(format(as_bench_time(.005)), "5ms")
      expect_equal(format(as_bench_time(.5)), "500ms")
      expect_equal(format(as_bench_time(30)), "30s")
      expect_equal(format(as_bench_time(60)), "1m")
      expect_equal(format(as_bench_time(90)), "1.5m")
      expect_equal(format(as_bench_time(90 * 60)), "1.5h")
      expect_equal(format(as_bench_time(60 * 60 * 60)), "2.5d")
      expect_equal(format(as_bench_time(10.5 * 24 * 60 * 60)), "1.5w")
    })
  })
  it("handles NA and NaN and Inf", {
    expect_equal(format(as_bench_time(NA)), "NA")
    expect_equal(format(as_bench_time(NaN)), "NaN")
    expect_equal(format(as_bench_time(Inf)), "Inf")
    expect_equal(format(as_bench_time(-Inf)), "-Inf")
  })
  it("works with vectors", {
    v <- c(NA, .001, 60, 600, NaN, 6000)
    expect_equal(
      format(as_bench_time(v), trim = TRUE),
      c("NA", "1ms", "1m", "10m", "NaN", "1.67h")
    )

    expect_equal(format(as_bench_time(numeric())), character())
  })
})

describe("sum.bench_time", {
  it("sums its input and returns a bench_time", {
    expect_equal(sum(as_bench_time(0)), new_bench_time(0))
    expect_equal(sum(as_bench_time(c(1, 2))), new_bench_time(3))
    expect_equal(sum(as_bench_time(c(1, NA))), new_bench_time(NA_real_))
  })
})

describe("min.bench_time", {
  it("finds minimum input and returns a bench_time", {
    expect_equal(min(as_bench_time(0)), new_bench_time(0))
    expect_equal(min(as_bench_time(c(1, 2))), new_bench_time(1))
    expect_equal(min(as_bench_time(c(1, NA))), new_bench_time(NA_real_))
  })
})

describe("max.bench_time", {
  it("finds maximum input and returns a bench_time", {
    expect_equal(max(as_bench_time(0)), new_bench_time(0))
    expect_equal(max(as_bench_time(c(1, 2))), new_bench_time(2))
    expect_equal(max(as_bench_time(c(1, NA))), new_bench_time(NA_real_))
  })
})

describe("[.bench_time", {
  it("retains the bench_time class", {
    x <- as_bench_time(c(100, 200, 300))
    expect_equal(x[], x)
    expect_equal(x[1], new_bench_time(100))
    expect_equal(x[1:2], new_bench_time(c(100, 200)))
  })
})

describe("Ops.bench_time", {
  it("errors for unary operators", {
    x <- as_bench_time(c(100, 200, 300))
    expect_error(!x, "unary '!' not defined for \"bench_time\" objects")
    expect_error(+x, "unary '\\+' not defined for \"bench_time\" objects")
    expect_error(-x, "unary '-' not defined for \"bench_time\" objects")
  })

  it("works with boolean comparison operators", {
    x <- as_bench_time(c(100, 200, 300))

    expect_equal(x == 100, c(TRUE, FALSE, FALSE))
    expect_equal(x != 100, c(FALSE, TRUE, TRUE))
    expect_equal(x > 100, c(FALSE, TRUE, TRUE))
    expect_equal(x >= 100, c(TRUE, TRUE, TRUE))
    expect_equal(x < 200, c(TRUE, FALSE, FALSE))
    expect_equal(x <= 200, c(TRUE, TRUE, FALSE))
  })

  it("works with arithmetic operators", {
    x <- as_bench_time(c(100, 200, 300))

    expect_equal(x + 100, as_bench_time(c(200, 300, 400)))
    expect_equal(x - 100, as_bench_time(c(0, 100, 200)))
    expect_equal(x * 100, as_bench_time(c(10000, 20000, 30000)))
    expect_equal(x / 2, as_bench_time(c(50, 100, 150)))
    expect_equal(x^2, as_bench_time(c(10000, 40000, 90000)))
  })

  it("errors for other binary operators", {
    x <- as_bench_time(c(100, 200, 300))
    expect_error(x %/% 2, "'%/%' not defined for \"bench_time\" objects")
    expect_error(x & TRUE, "'&' not defined for \"bench_time\" objects")
    expect_error(x | TRUE, "'|' not defined for \"bench_time\" objects")
  })
})

# ---- tests/testthat/test-workout.R ----

describe("workout", {
  it("times each expression and names them", {
    res <- workout(
      x <- 1:1000
    )
    expect_named(res, c("exprs", "process", "real"))
    expect_true(nrow(res) == 1)

    res2 <- workout({
      x <- 1:1000
      evens <- x %% 2 == 0
      y <- x[evens]
      length(y)
      length(which(evens))
      sum(evens)
    })
    expect_named(res2, c("exprs", "process", "real"))
    expect_true(nrow(res2) == 6)
  })
})

describe("workout_expressions", {
  it("times given expressions", {
    res <- workout_expressions(
      as.list(parse(file = system.file("examples/exprs.R", package = "bench")))
    )
    expect_named(res, c("exprs", "process", "real"))
    expect_true(nrow(res) == 6)
  })
})