├── .github ├── .gitignore ├── FUNDING.yml └── workflows │ ├── pkgdown.yaml │ ├── R-CMD-check.yaml │ └── pr-commands.yaml ├── vignettes ├── .gitignore ├── vo2_kinetics.Rmd └── incremental.Rmd ├── LICENSE ├── tests ├── testthat.R └── testthat │ ├── test-mrt.R │ ├── test-max.R │ ├── test-read_data.R │ ├── test-interpolate.R │ ├── test-detect_outliers.R │ ├── test-perform_average.R │ ├── test-process_data.R │ ├── test-vo2_kinetics.R │ ├── test-perform_kinetics.R │ └── test-incremental.R ├── man ├── figures │ ├── logo.png │ └── header.png ├── pipe.Rd ├── print.whippr.Rd ├── plot_outliers.Rd ├── model_diagnostics.Rd ├── plot_incremental.Rd ├── new_whippr_tibble.Rd ├── theme_whippr.Rd ├── normalize_time.Rd ├── work_rate_ramp.Rd ├── normalize_first_breath.Rd ├── get_residuals.Rd ├── interpolate.Rd ├── normalize_transitions.Rd ├── whippr-package.Rd ├── remove_empty.Rd ├── outliers_linear.Rd ├── outliers_anomaly.Rd ├── process_data.Rd ├── run_manual_cleaner.Rd ├── work_rate_step.Rd ├── read_data.Rd ├── predict_bands.Rd ├── predict_bands_transition.Rd ├── predict_bands_baseline.Rd ├── undoHistory.Rd ├── perform_average.Rd ├── perform_kinetics.Rd ├── perform_max.Rd ├── incremental_normalize.Rd ├── detect_outliers.Rd ├── vo2_max.Rd └── vo2_kinetics.Rd ├── inst ├── ramp_cosmed.xlsx ├── step_cortex.xlsx ├── example_cosmed.xlsx └── shinyThings │ └── undoHistory.js ├── pkgdown ├── favicon │ ├── favicon.ico │ ├── favicon-16x16.png │ ├── favicon-32x32.png │ ├── apple-touch-icon.png │ ├── apple-touch-icon-60x60.png │ ├── apple-touch-icon-76x76.png │ ├── apple-touch-icon-120x120.png │ ├── apple-touch-icon-152x152.png │ └── apple-touch-icon-180x180.png └── extra.css ├── .Rbuildignore ├── R ├── utils-pipe.R ├── whippr-package.R ├── plot-theme.R ├── globals.R ├── interpolate.R ├── helpers-read.R ├── utils.R ├── nlstools.R ├── normalize.R ├── helpers-incremental.R ├── averages.R ├── helpers-outliers.R ├── tbl.R ├── addin.R ├── shinyThings.R ├── predict.R ├── max.R └── incremental.R ├── codecov.yml ├── cran-comments.md ├── whippr.Rproj ├── LICENSE.md ├── DESCRIPTION ├── .gitignore ├── NAMESPACE ├── _pkgdown.yml ├── NEWS.md ├── README.Rmd └── README.md /.github/.gitignore: -------------------------------------------------------------------------------- 1 | *.html 2 | -------------------------------------------------------------------------------- /vignettes/.gitignore: -------------------------------------------------------------------------------- 1 | *.html 2 | *.R 3 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | YEAR: 2020 2 | COPYRIGHT HOLDER: Felipe Mattioni Maturana 3 | -------------------------------------------------------------------------------- /tests/testthat.R: -------------------------------------------------------------------------------- 1 | library(testthat) 2 | library(whippr) 3 | 4 | test_check("whippr") 5 | -------------------------------------------------------------------------------- /man/figures/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fmmattioni/whippr/HEAD/man/figures/logo.png -------------------------------------------------------------------------------- /inst/ramp_cosmed.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fmmattioni/whippr/HEAD/inst/ramp_cosmed.xlsx 
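A minimal usage sketch of how the bundled example spreadsheets listed above are typically read and processed with whippr, mirroring the calls in tests/testthat/test-perform_average.R; the 30-second bin size is illustrative, not a prescribed default.

library(whippr)

## locate the bundled COSMED example file installed with the package
path_example <- system.file("example_cosmed.xlsx", package = "whippr")

## read the raw breath-by-breath export
df <- read_data(path = path_example, metabolic_cart = "cosmed")

## interpolate to second-by-second data and apply a 30-s bin average
df_averaged <- df %>%
  interpolate() %>%
  perform_average(type = "bin", bins = 30)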
-------------------------------------------------------------------------------- /inst/step_cortex.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fmmattioni/whippr/HEAD/inst/step_cortex.xlsx -------------------------------------------------------------------------------- /man/figures/header.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fmmattioni/whippr/HEAD/man/figures/header.png -------------------------------------------------------------------------------- /tests/testthat/test-mrt.R: -------------------------------------------------------------------------------- 1 | test_that("MRT linear works", { 2 | expect_equal(2 * 2, 4) 3 | }) 4 | -------------------------------------------------------------------------------- /inst/example_cosmed.xlsx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fmmattioni/whippr/HEAD/inst/example_cosmed.xlsx -------------------------------------------------------------------------------- /tests/testthat/test-max.R: -------------------------------------------------------------------------------- 1 | test_that("multiplication works", { 2 | expect_equal(2 * 2, 4) 3 | }) 4 | -------------------------------------------------------------------------------- /pkgdown/favicon/favicon.ico: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fmmattioni/whippr/HEAD/pkgdown/favicon/favicon.ico -------------------------------------------------------------------------------- /pkgdown/favicon/favicon-16x16.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fmmattioni/whippr/HEAD/pkgdown/favicon/favicon-16x16.png -------------------------------------------------------------------------------- /pkgdown/favicon/favicon-32x32.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fmmattioni/whippr/HEAD/pkgdown/favicon/favicon-32x32.png -------------------------------------------------------------------------------- /pkgdown/favicon/apple-touch-icon.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fmmattioni/whippr/HEAD/pkgdown/favicon/apple-touch-icon.png -------------------------------------------------------------------------------- /pkgdown/favicon/apple-touch-icon-60x60.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fmmattioni/whippr/HEAD/pkgdown/favicon/apple-touch-icon-60x60.png -------------------------------------------------------------------------------- /pkgdown/favicon/apple-touch-icon-76x76.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fmmattioni/whippr/HEAD/pkgdown/favicon/apple-touch-icon-76x76.png -------------------------------------------------------------------------------- /pkgdown/favicon/apple-touch-icon-120x120.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fmmattioni/whippr/HEAD/pkgdown/favicon/apple-touch-icon-120x120.png -------------------------------------------------------------------------------- /pkgdown/favicon/apple-touch-icon-152x152.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/fmmattioni/whippr/HEAD/pkgdown/favicon/apple-touch-icon-152x152.png -------------------------------------------------------------------------------- /pkgdown/favicon/apple-touch-icon-180x180.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/fmmattioni/whippr/HEAD/pkgdown/favicon/apple-touch-icon-180x180.png -------------------------------------------------------------------------------- /.Rbuildignore: -------------------------------------------------------------------------------- 1 | ^whippr\.Rproj$ 2 | ^\.Rproj\.user$ 3 | ^LICENSE\.md$ 4 | ^README\.Rmd$ 5 | ^_pkgdown\.yml$ 6 | ^docs$ 7 | ^pkgdown$ 8 | ^\.github$ 9 | ^vignettes$ 10 | ^codecov\.yml$ 11 | ^CRAN-SUBMISSION$ 12 | ^cran-comments\.md$ 13 | -------------------------------------------------------------------------------- /R/utils-pipe.R: -------------------------------------------------------------------------------- 1 | #' Pipe operator 2 | #' 3 | #' See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details. 4 | #' 5 | #' @name %>% 6 | #' @rdname pipe 7 | #' @keywords internal 8 | #' @export 9 | #' @importFrom magrittr %>% 10 | #' @usage lhs \%>\% rhs 11 | NULL 12 | -------------------------------------------------------------------------------- /R/whippr-package.R: -------------------------------------------------------------------------------- 1 | #' @keywords internal 2 | "_PACKAGE" 3 | 4 | # The following block is used by usethis to automatically manage 5 | # roxygen namespace tags. Modify with care! 6 | ## usethis namespace: start 7 | #' @importFrom tibble tibble 8 | ## usethis namespace: end 9 | NULL 10 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | comment: false 2 | 3 | coverage: 4 | status: 5 | project: 6 | default: 7 | target: auto 8 | threshold: 1% 9 | informational: true 10 | patch: 11 | default: 12 | target: auto 13 | threshold: 1% 14 | informational: true 15 | -------------------------------------------------------------------------------- /tests/testthat/test-read_data.R: -------------------------------------------------------------------------------- 1 | test_that("read data works", { 2 | path_example <- system.file("example_cosmed.xlsx", package = "whippr") 3 | 4 | df <- read_data(path = path_example, metabolic_cart = "cosmed") 5 | 6 | expect_s3_class( 7 | object = df, 8 | class = "whippr" 9 | ) 10 | }) 11 | -------------------------------------------------------------------------------- /man/pipe.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils-pipe.R 3 | \name{\%>\%} 4 | \alias{\%>\%} 5 | \title{Pipe operator} 6 | \usage{ 7 | lhs \%>\% rhs 8 | } 9 | \description{ 10 | See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details. 
11 | } 12 | \keyword{internal} 13 | -------------------------------------------------------------------------------- /tests/testthat/test-interpolate.R: -------------------------------------------------------------------------------- 1 | test_that("interpolation works", { 2 | path_example <- system.file("example_cosmed.xlsx", package = "whippr") 3 | 4 | df <- read_data(path = path_example, metabolic_cart = "cosmed") 5 | 6 | df_interpolated <- df %>% 7 | interpolate() 8 | 9 | expect_s3_class( 10 | object = df_interpolated, 11 | class = "whippr" 12 | ) 13 | }) 14 | -------------------------------------------------------------------------------- /man/print.whippr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/tbl.R 3 | \name{print.whippr} 4 | \alias{print.whippr} 5 | \title{Whippr print method} 6 | \usage{ 7 | \method{print}{whippr}(x, ...) 8 | } 9 | \arguments{ 10 | \item{x}{A tibble with class 'whippr'} 11 | 12 | \item{...}{Extra arguments, not used.} 13 | } 14 | \description{ 15 | Whippr print method 16 | } 17 | -------------------------------------------------------------------------------- /man/plot_outliers.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/outliers.R 3 | \name{plot_outliers} 4 | \alias{plot_outliers} 5 | \title{Plot outliers} 6 | \usage{ 7 | plot_outliers(.data) 8 | } 9 | \arguments{ 10 | \item{.data}{The data retrieved from \code{detect_outliers()}.} 11 | } 12 | \value{ 13 | a patchwork object 14 | } 15 | \description{ 16 | Plot outliers 17 | } 18 | -------------------------------------------------------------------------------- /man/model_diagnostics.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/nlstools.R 3 | \name{model_diagnostics} 4 | \alias{model_diagnostics} 5 | \title{Model diagnostics} 6 | \usage{ 7 | model_diagnostics(.residuals_tbl) 8 | } 9 | \arguments{ 10 | \item{.residuals_tbl}{The data retrieved from \code{get_residuals()}.} 11 | } 12 | \value{ 13 | a patchwork object 14 | } 15 | \description{ 16 | Plots different model diagnostics for checking the model performance.
17 | } 18 | -------------------------------------------------------------------------------- /man/plot_incremental.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/incremental.R 3 | \name{plot_incremental} 4 | \alias{plot_incremental} 5 | \title{Plot incremental test work rate} 6 | \usage{ 7 | plot_incremental(.data) 8 | } 9 | \arguments{ 10 | \item{.data}{data retrieved from \code{incremental_normalize()}.} 11 | } 12 | \value{ 13 | a ggplot object 14 | } 15 | \description{ 16 | Visualize what was done during the process of deriving the work rate from the incremental test protocol 17 | } 18 | -------------------------------------------------------------------------------- /man/new_whippr_tibble.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/tbl.R 3 | \name{new_whippr_tibble} 4 | \alias{new_whippr_tibble} 5 | \title{Construct a new tibble with metadata} 6 | \usage{ 7 | new_whippr_tibble(.data, metadata) 8 | } 9 | \arguments{ 10 | \item{.data}{A data frame} 11 | 12 | \item{metadata}{Metadata to be passed along with the data} 13 | } 14 | \value{ 15 | a \link[tibble:tibble-package]{tibble} 16 | } 17 | \description{ 18 | Construct a new tibble with metadata 19 | } 20 | \keyword{internal} 21 | -------------------------------------------------------------------------------- /cran-comments.md: -------------------------------------------------------------------------------- 1 | ## R CMD check results 2 | 3 | 0 errors | 0 warnings | 1 note 4 | 5 | * This is a new release. 6 | 7 | ## Comments from last submission 8 | 9 | Found the following (possibly) invalid URLs: 10 | URL: https://korr.com/go/cardiocoach (moved to 11 | https://korr.com/go/cardiocoach/) 12 | From: README.md 13 | Status: 301 14 | Message: Moved Permanently 15 | 16 | Please change http --> https, add trailing slashes, or follow moved 17 | content as appropriate. 18 | 19 | > All of the above has been corrected accordingly. 20 | -------------------------------------------------------------------------------- /man/theme_whippr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/plot-theme.R 3 | \name{theme_whippr} 4 | \alias{theme_whippr} 5 | \title{Whippr ggplot2 theme} 6 | \usage{ 7 | theme_whippr(base_size = 14, base_family = "sans") 8 | } 9 | \arguments{ 10 | \item{base_size}{base font size, given in pts. Default is \code{14}.} 11 | 12 | \item{base_family}{base font family. 
Default is \code{sans}.} 13 | } 14 | \value{ 15 | a ggplot2 object 16 | } 17 | \description{ 18 | This theme was inspired by the plots from the Acta Physiologica Journal 19 | } 20 | -------------------------------------------------------------------------------- /inst/shinyThings/undoHistory.js: -------------------------------------------------------------------------------- 1 | function toggleHistoryButtonState (state) { 2 | if (state.enable) { 3 | for (var i = 0; i < state.enable.length; i++) { 4 | var btn_e = $('#' + state.enable[i]); 5 | btn_e.prop('disabled', false).removeClass('disabled'); 6 | } 7 | } 8 | if (state.disable) { 9 | for (var j = 0; j < state.disable.length; j++) { 10 | var btn_d = $('#' + state.disable[j]); 11 | btn_d.prop('disabled', true).addClass('disabled'); 12 | } 13 | } 14 | } 15 | 16 | Shiny.addCustomMessageHandler('undoHistoryButtons', toggleHistoryButtonState); 17 | -------------------------------------------------------------------------------- /man/normalize_time.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/normalize.R 3 | \name{normalize_time} 4 | \alias{normalize_time} 5 | \title{Normalize time column} 6 | \usage{ 7 | normalize_time(.data, protocol_baseline_length) 8 | } 9 | \arguments{ 10 | \item{.data}{Breath-by-breath data.} 11 | 12 | \item{protocol_baseline_length}{The length of the baseline (in seconds).} 13 | } 14 | \value{ 15 | a \link[tibble:tibble-package]{tibble} 16 | } 17 | \description{ 18 | Normalizes the time column such that the baseline phase has negative time values. Point zero will then represent the start of the transition phase. 19 | } 20 | -------------------------------------------------------------------------------- /pkgdown/extra.css: -------------------------------------------------------------------------------- 1 | /* 2 | Make only first letter upper case in TOC 3 | https://github.com/r-lib/pkgdown/issues/1302 4 | */ 5 | nav[data-toggle='toc'] .nav > li > a { 6 | text-transform: none !important; 7 | } 8 | 9 | .rmdinfo { 10 | padding: 2em 1em 1em 7em; 11 | margin-top: 20px; 12 | margin-bottom: 20px; 13 | border-radius: 25px; 14 | background-color: #e6f0ff; 15 | position:relative; 16 | } 17 | 18 | .rmdinfo:before { 19 | font-family: "Font Awesome 5 Free"; 20 | font-weight: 900; 21 | content: "\f129"; 22 | top: 50%; 23 | transform: translateY(-50%); 24 | left: 30px; 25 | position:absolute; 26 | font-size: 45px; 27 | } 28 | -------------------------------------------------------------------------------- /man/work_rate_ramp.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/helpers-incremental.R 3 | \name{work_rate_ramp} 4 | \alias{work_rate_ramp} 5 | \title{Work rate for a ramp-incremental test} 6 | \usage{ 7 | work_rate_ramp(.data, baseline_intensity, ramp_increase) 8 | } 9 | \arguments{ 10 | \item{.data}{The data with recognized protocol phases} 11 | 12 | \item{baseline_intensity}{The baseline intensity} 13 | 14 | \item{ramp_increase}{The ramp increase, in watts per minute} 15 | } 16 | \value{ 17 | a \link[tibble:tibble-package]{tibble} 18 | } 19 | \description{ 20 | This function produces the work rate throughout a ramp-incremental test given the protocol. 21 | } 22 | \keyword{internal} 23 | -------------------------------------------------------------------------------- /whippr.Rproj:
-------------------------------------------------------------------------------- 1 | Version: 1.0 2 | 3 | RestoreWorkspace: No 4 | SaveWorkspace: No 5 | AlwaysSaveHistory: Default 6 | 7 | EnableCodeIndexing: Yes 8 | UseSpacesForTab: Yes 9 | NumSpacesForTab: 2 10 | Encoding: UTF-8 11 | 12 | RnwWeave: knitr 13 | LaTeX: XeLaTeX 14 | 15 | AutoAppendNewline: Yes 16 | StripTrailingWhitespace: Yes 17 | 18 | BuildType: Package 19 | PackageUseDevtools: Yes 20 | PackageInstallArgs: --no-multiarch --with-keep.source --no-build-vignettes 21 | PackageBuildArgs: --no-build-vignettes --ignore-vignettes 22 | PackageBuildBinaryArgs: --no-build-vignettes --ignore-vignettes 23 | PackageCheckArgs: --no-build-vignettes --ignore-vignettes 24 | PackageRoxygenize: rd,collate,namespace 25 | 26 | UseNativePipeOperator: No 27 | -------------------------------------------------------------------------------- /tests/testthat/test-detect_outliers.R: -------------------------------------------------------------------------------- 1 | test_that("detection of outliers works", { 2 | path_example <- system.file("example_cosmed.xlsx", package = "whippr") 3 | 4 | df <- read_data(path = path_example, metabolic_cart = "cosmed") 5 | 6 | ## detect outliers 7 | data_outliers <- detect_outliers( 8 | .data = df, 9 | test_type = "kinetics", 10 | vo2_column = "VO2", 11 | cleaning_level = 0.95, 12 | cleaning_baseline_fit = c("linear", "exponential", "exponential"), 13 | protocol_n_transitions = 3, 14 | protocol_baseline_length = 360, 15 | protocol_transition_length = 360, 16 | verbose = FALSE 17 | ) 18 | 19 | expect_s3_class( 20 | object = data_outliers, 21 | class = "whippr" 22 | ) 23 | }) 24 | -------------------------------------------------------------------------------- /man/normalize_first_breath.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/normalize.R 3 | \name{normalize_first_breath} 4 | \alias{normalize_first_breath} 5 | \title{Normalize first breath} 6 | \usage{ 7 | normalize_first_breath(.data) 8 | } 9 | \arguments{ 10 | \item{.data}{Breath-by-breath data.} 11 | } 12 | \value{ 13 | a \link[tibble:tibble-package]{tibble} 14 | } 15 | \description{ 16 | This is needed especially when the data gets filtered. For example, if the data file does not only contain 17 | the baseline and transitions performed, we will have to normalize the time column. 18 | This function will make sure that in case the first breath does not start at zero, it will create a zero data point, 19 | duplicating the first breath. This will make sure the data does not get shifted (misalignment).
20 | } 21 | -------------------------------------------------------------------------------- /tests/testthat/test-perform_average.R: -------------------------------------------------------------------------------- 1 | test_that("bin-averaging works", { 2 | path_example <- system.file("example_cosmed.xlsx", package = "whippr") 3 | 4 | df <- read_data(path = path_example, metabolic_cart = "cosmed") 5 | 6 | df_averaged <- df %>% 7 | interpolate() %>% 8 | perform_average(type = "bin", bins = 30) 9 | 10 | expect_s3_class( 11 | object = df_averaged, 12 | class = "whippr" 13 | ) 14 | }) 15 | 16 | test_that("rolling-averaging works", { 17 | path_example <- system.file("example_cosmed.xlsx", package = "whippr") 18 | 19 | df <- read_data(path = path_example, metabolic_cart = "cosmed") 20 | 21 | df_averaged <- df %>% 22 | interpolate() %>% 23 | perform_average(type = "rolling", rolling_window = 30) 24 | 25 | expect_s3_class( 26 | object = df_averaged, 27 | class = "whippr" 28 | ) 29 | }) 30 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: fmmattioni # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] 4 | patreon: # Replace with a single Patreon username 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: # Replace with a single Ko-fi username 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single IssueHunt username 11 | otechie: # Replace with a single Otechie username 12 | custom: ['https://www.buymeacoffee.com/XQauwUWGm', 'https://paypal.me/fmmattioni?locale.x=en_US'] # Replace with up to 4 custom sponsorship URLs e.g., ['link1', 'link2'] 13 | -------------------------------------------------------------------------------- /man/get_residuals.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/nlstools.R 3 | \name{get_residuals} 4 | \alias{get_residuals} 5 | \title{Get residuals} 6 | \usage{ 7 | get_residuals(.model) 8 | } 9 | \arguments{ 10 | \item{.model}{A model of class \code{nls}.} 11 | } 12 | \value{ 13 | a \link[tibble:tibble-package]{tibble} containing the data passed to augment, and additional columns: 14 | \item{.fitted}{The predicted response for that observation.} 15 | \item{.resid}{The residual for a particular point.} 16 | \item{standardized_residuals}{Standardized residuals.} 17 | \item{sqrt_abs_standardized_residuals}{The sqrt of absolute value of standardized residuals.} 18 | \item{lag_residuals}{The lag of the \code{.resid} column for plotting auto-correlation.} 19 | } 20 | \description{ 21 | Computes residuals from the VO2 kinetics model. 
22 | } 23 | -------------------------------------------------------------------------------- /man/interpolate.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/interpolate.R 3 | \name{interpolate} 4 | \alias{interpolate} 5 | \title{Interpolate data from breath-by-breath into second-by-second} 6 | \usage{ 7 | interpolate(.data) 8 | } 9 | \arguments{ 10 | \item{.data}{Data retrieved from \code{read_data()}.} 11 | } 12 | \value{ 13 | a \link[tibble:tibble-package]{tibble} 14 | } 15 | \description{ 16 | This function interpolates the data based on the time column. It takes the breath-by-breath data 17 | and transforms it into second-by-second. 18 | } 19 | \examples{ 20 | \dontrun{ 21 | ## get file path from example data 22 | path_example <- system.file("example_cosmed.xlsx", package = "whippr") 23 | 24 | ## read data 25 | df <- read_data(path = path_example, metabolic_cart = "cosmed") 26 | 27 | df \%>\% 28 | interpolate() 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /man/normalize_transitions.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/normalize.R 3 | \name{normalize_transitions} 4 | \alias{normalize_transitions} 5 | \title{Normalize transitions} 6 | \usage{ 7 | normalize_transitions( 8 | .data, 9 | protocol_n_transitions, 10 | protocol_baseline_length, 11 | protocol_transition_length 12 | ) 13 | } 14 | \arguments{ 15 | \item{.data}{Breath-by-breath data.} 16 | 17 | \item{protocol_n_transitions}{Number of transitions performed.} 18 | 19 | \item{protocol_baseline_length}{The length of the baseline (in seconds).} 20 | 21 | \item{protocol_transition_length}{The length of the transition (in seconds).} 22 | } 23 | \value{ 24 | a \link[tibble:tibble-package]{tibble} 25 | } 26 | \description{ 27 | Recognizes and normalizes the time column of each transition. It will also label the transitions into: 'baseline' or 'transition'. 
28 | } 29 | -------------------------------------------------------------------------------- /tests/testthat/test-process_data.R: -------------------------------------------------------------------------------- 1 | test_that("process data works", { 2 | path_example <- system.file("example_cosmed.xlsx", package = "whippr") 3 | 4 | df <- read_data(path = path_example, metabolic_cart = "cosmed") 5 | 6 | ## detect outliers 7 | data_outliers <- detect_outliers( 8 | .data = df, 9 | test_type = "kinetics", 10 | vo2_column = "VO2", 11 | cleaning_level = 0.95, 12 | cleaning_baseline_fit = c("linear", "exponential", "exponential"), 13 | protocol_n_transitions = 3, 14 | protocol_baseline_length = 360, 15 | protocol_transition_length = 360, 16 | verbose = FALSE 17 | ) 18 | 19 | ## process data 20 | data_processed <- process_data( 21 | .data_outliers = data_outliers, 22 | protocol_baseline_length = 360, 23 | fit_bin_average = 5 24 | ) 25 | 26 | expect_s3_class( 27 | object = data_processed, 28 | class = "whippr" 29 | ) 30 | }) 31 | -------------------------------------------------------------------------------- /man/whippr-package.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/whippr-package.R 3 | \docType{package} 4 | \name{whippr-package} 5 | \alias{whippr} 6 | \alias{whippr-package} 7 | \title{whippr: Tools for Manipulating Gas Exchange Data} 8 | \description{ 9 | \if{html}{\figure{logo.png}{options: style='float: right' alt='logo' width='120'}} 10 | 11 | Set of tools for manipulating gas exchange data from cardiopulmonary exercise testing. 12 | } 13 | \seealso{ 14 | Useful links: 15 | \itemize{ 16 | \item \url{https://fmmattioni.github.io/whippr/} 17 | \item \url{https://github.com/fmmattioni/whippr} 18 | \item Report bugs at \url{https://github.com/fmmattioni/whippr/issues} 19 | } 20 | 21 | } 22 | \author{ 23 | \strong{Maintainer}: Felipe Mattioni Maturana \email{felipe.mattioni@med.uni-tuebingen.de} (\href{https://orcid.org/0000-0002-4221-6104}{ORCID}) 24 | 25 | } 26 | \keyword{internal} 27 | -------------------------------------------------------------------------------- /man/remove_empty.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{remove_empty} 4 | \alias{remove_empty} 5 | \title{Remove empty rows and/or columns from a data.frame or matrix.} 6 | \usage{ 7 | remove_empty(dat, which = c("rows", "cols"), cutoff = 1) 8 | } 9 | \arguments{ 10 | \item{dat}{the input data.frame or matrix.} 11 | 12 | \item{which}{one of "rows", "cols", or \code{c("rows", "cols")}. Where no 13 | value of which is provided, defaults to removing both empty rows and empty 14 | columns, declaring the behavior with a printed message.} 15 | 16 | \item{cutoff}{What fraction (>0 to <=1) of rows or columns must be empty to 17 | be removed?} 18 | } 19 | \value{ 20 | Returns the object without its missing rows or columns. 21 | } 22 | \description{ 23 | Removes all rows and/or columns from a data.frame or matrix that 24 | are composed entirely of \code{NA} values. 
25 | } 26 | \keyword{internal} 27 | -------------------------------------------------------------------------------- /tests/testthat/test-vo2_kinetics.R: -------------------------------------------------------------------------------- 1 | test_that("general vo2 kinetics function works", { 2 | path_example <- system.file("example_cosmed.xlsx", package = "whippr") 3 | 4 | ## read data 5 | df <- read_data(path = path_example, metabolic_cart = "cosmed", time_column = "t") 6 | 7 | ## VO2 kinetics analysis 8 | results_kinetics <- vo2_kinetics( 9 | .data = df, 10 | intensity_domain = "moderate", 11 | vo2_column = "VO2", 12 | protocol_n_transitions = 3, 13 | protocol_baseline_length = 360, 14 | protocol_transition_length = 360, 15 | cleaning_level = 0.95, 16 | cleaning_baseline_fit = c("linear", "exponential", "exponential"), 17 | fit_level = 0.95, 18 | fit_bin_average = 5, 19 | fit_phase_1_length = 20, 20 | fit_baseline_length = 120, 21 | fit_transition_length = 240, 22 | verbose = TRUE 23 | ) 24 | 25 | expect_s3_class( 26 | object = results_kinetics, 27 | class = "tbl" 28 | ) 29 | }) 30 | -------------------------------------------------------------------------------- /R/plot-theme.R: -------------------------------------------------------------------------------- 1 | #' Whippr ggplot2 theme 2 | #' 3 | #' This theme was inspired by the plots from the Acta Physiologica Journal 4 | #' 5 | #' @param base_size base font size, given in pts. Default is `14`. 6 | #' @param base_family base font family. Default is `sans`. 7 | #' 8 | #' @return a ggplot2 object 9 | #' @export 10 | #' @importFrom ggplot2 theme_light theme element_rect element_line element_text 11 | theme_whippr <- function(base_size = 14, base_family = "sans") { 12 | theme_light(base_size = base_size, base_family = base_family) + 13 | theme( 14 | panel.background = element_rect(fill = "#fefeda"), 15 | axis.line = element_line(color = "black"), 16 | axis.ticks = element_line(color = "black", linewidth = 1), 17 | axis.text = element_text(color = "black", face = "bold"), 18 | axis.title = element_text(face = "bold"), 19 | plot.title = element_text(face = "bold"), 20 | panel.border = element_rect(colour = NA) 21 | ) 22 | } 23 | -------------------------------------------------------------------------------- /man/outliers_linear.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/helpers-outliers.R 3 | \name{outliers_linear} 4 | \alias{outliers_linear} 5 | \title{Linear method for detecting outliers from an incremental test} 6 | \usage{ 7 | outliers_linear(.data, time_column, vo2_column, cleaning_level) 8 | } 9 | \arguments{ 10 | \item{.data}{The data retrieved from \code{incremental_normalize()}.} 11 | 12 | \item{time_column}{The name (quoted) of the column containing the time. Depending on the language of your system, this column might not be "t". Therefore, you may specify it here.} 13 | 14 | \item{vo2_column}{The name (quoted) of the column containing the absolute oxygen uptake (VO2) data.} 15 | 16 | \item{cleaning_level}{A numeric scalar between 0 and 1 giving the confidence level for the intervals to be calculated.} 17 | } 18 | \value{ 19 | a \link[tibble:tibble-package]{tibble} 20 | } 21 | \description{ 22 | Function for internal use only. It will not be exported. 
23 | } 24 | \keyword{internal} 25 | -------------------------------------------------------------------------------- /man/outliers_anomaly.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/helpers-outliers.R 3 | \name{outliers_anomaly} 4 | \alias{outliers_anomaly} 5 | \title{Anomaly method for detecting outliers from an incremental test} 6 | \usage{ 7 | outliers_anomaly(.data, time_column, vo2_column, cleaning_level) 8 | } 9 | \arguments{ 10 | \item{.data}{The data retrieved from \code{incremental_normalize()}.} 11 | 12 | \item{time_column}{The name (quoted) of the column containing the time. Depending on the language of your system, this column might not be "t". Therefore, you may specify it here.} 13 | 14 | \item{vo2_column}{The name (quoted) of the column containing the absolute oxygen uptake (VO2) data.} 15 | 16 | \item{cleaning_level}{A numeric scalar between 0 and 1 giving the confidence level for the intervals to be calculated.} 17 | } 18 | \value{ 19 | a \link[tibble:tibble-package]{tibble} 20 | } 21 | \description{ 22 | Function for internal use only. It will not be exported. 23 | } 24 | \keyword{internal} 25 | -------------------------------------------------------------------------------- /man/process_data.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/kinetics.R 3 | \name{process_data} 4 | \alias{process_data} 5 | \title{Process data for VO2 kinetics fitting} 6 | \usage{ 7 | process_data(.data_outliers, protocol_baseline_length, fit_bin_average) 8 | } 9 | \arguments{ 10 | \item{.data_outliers}{The data retrieved from \code{detect_outliers()}.} 11 | 12 | \item{protocol_baseline_length}{The length of the baseline (in seconds).} 13 | 14 | \item{fit_bin_average}{The bin average to be performed for the final fit.} 15 | } 16 | \value{ 17 | a \link[tibble:tibble-package]{tibble} with the time-aligned, ensemble-averaged, and bin-averaged data. 18 | } 19 | \description{ 20 | It removes the outliers detected through \code{detect_outliers()}, interpolates each transition, 21 | ensemble-averages all the transitions into one, performs a bin-average, and normalizes the time column 22 | (time zero will indicate the end of baseline and the start of the transition phase). 23 | } 24 | \details{ 25 | TODO 26 | } 27 | -------------------------------------------------------------------------------- /man/run_manual_cleaner.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/addin.R 3 | \name{run_manual_cleaner} 4 | \alias{run_manual_cleaner} 5 | \title{Manual data cleaner} 6 | \usage{ 7 | run_manual_cleaner(.data, width = 1200, height = 900) 8 | } 9 | \arguments{ 10 | \item{.data}{The data to be manually cleaned. The first column will always be treated as the x-axis.} 11 | 12 | \item{width}{The width, in pixels, of the window.} 13 | 14 | \item{height}{The height, in pixels, of the window.} 15 | } 16 | \value{ 17 | The code to reproduce the manual data cleaning. 18 | } 19 | \description{ 20 | Usually manual data cleaning should be avoided. However, sometimes in gas exchange data 21 | there is the need to delete a few clear "bad breaths" (noise). In these situations you may use this function.
22 | Although it is encouraged that you use the \code{detect_outliers()} function, you may use this function at your own risk. 23 | This function can also be used to clean other kinds of data, like heart rate data. 24 | } 25 | -------------------------------------------------------------------------------- /man/work_rate_step.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/helpers-incremental.R 3 | \name{work_rate_step} 4 | \alias{work_rate_step} 5 | \title{Work rate for a step-incremental test} 6 | \usage{ 7 | work_rate_step( 8 | .data, 9 | baseline_intensity, 10 | step_start, 11 | step_increase, 12 | step_length 13 | ) 14 | } 15 | \arguments{ 16 | \item{.data}{The data with recognized protocol phases} 17 | 18 | \item{baseline_intensity}{The baseline intensity} 19 | 20 | \item{step_start}{In case the step test started at a different work rate than baseline} 21 | 22 | \item{step_increase}{The step increase, in watts per step} 23 | 24 | \item{step_length}{The length, in seconds, of each step} 25 | } 26 | \value{ 27 | a \link[tibble:tibble-package]{tibble} 28 | } 29 | \description{ 30 | This function produces the work rate throughout a step-incremental test given the protocol. 31 | This will retrieve both the 'original' work rates, and will also perform a 'linearization' of the steps. 32 | } 33 | \keyword{internal} 34 | -------------------------------------------------------------------------------- /R/globals.R: -------------------------------------------------------------------------------- 1 | utils::globalVariables( 2 | c(".", 3 | "V1", 4 | "V2", 5 | ".fitted", 6 | ".resid", 7 | ".se.fit", 8 | "bands_data", 9 | "baseline_fit", 10 | "data", 11 | "lag_residuals", 12 | "lwr_pred", 13 | "n_transition", 14 | "outlier", 15 | "phase", 16 | "sqrt_abs_standardized_residuals", 17 | "standardized_residuals", 18 | "transition", 19 | "upr_pred", 20 | "x", 21 | "y", 22 | "n", 23 | "fit", 24 | "lwr", 25 | "upr", 26 | "cleaning_baseline_fit", 27 | "name", 28 | "value", 29 | "info", 30 | "time", 31 | "VO2", 32 | "anomaly", 33 | "data_ramp_outliers", 34 | "formula_model", 35 | "label", 36 | "model", 37 | "model_augmented", 38 | "mrt_fit", 39 | "pred", 40 | "protocol_phase", 41 | "recomposed_l2", 42 | "remainder", 43 | "step", 44 | "step_work_rate", 45 | "t_step", 46 | "unit", 47 | "work_rate", 48 | "desc", 49 | "X1", 50 | "X2"), 51 | package = "whippr", 52 | add = FALSE 53 | ) 54 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | # MIT License 2 | 3 | Copyright (c) 2020 Felipe Mattioni Maturana 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software.
14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /tests/testthat/test-perform_kinetics.R: -------------------------------------------------------------------------------- 1 | test_that("core vo2 kinetics function works", { 2 | path_example <- system.file("example_cosmed.xlsx", package = "whippr") 3 | 4 | df <- read_data(path = path_example, metabolic_cart = "cosmed") 5 | 6 | ## detect outliers 7 | data_outliers <- detect_outliers( 8 | .data = df, 9 | test_type = "kinetics", 10 | vo2_column = "VO2", 11 | cleaning_level = 0.95, 12 | cleaning_baseline_fit = c("linear", "exponential", "exponential"), 13 | protocol_n_transitions = 3, 14 | protocol_baseline_length = 360, 15 | protocol_transition_length = 360, 16 | verbose = FALSE 17 | ) 18 | 19 | ## process data 20 | data_processed <- process_data( 21 | .data_outliers = data_outliers, 22 | protocol_baseline_length = 360, 23 | fit_bin_average = 5 24 | ) 25 | 26 | data_kinetics <- perform_kinetics( 27 | .data_processed = data_processed, 28 | intensity_domain = "moderate", 29 | fit_level = 0.95, 30 | fit_phase_1_length = 20, 31 | fit_baseline_length = 120, 32 | fit_transition_length = 240, 33 | verbose = FALSE 34 | ) 35 | 36 | expect_s3_class( 37 | object = data_kinetics, 38 | class = "tbl" 39 | ) 40 | }) 41 | -------------------------------------------------------------------------------- /DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: whippr 2 | Title: Tools for Manipulating Gas Exchange Data 3 | Version: 0.1.4 4 | Authors@R: 5 | person(given = "Felipe", 6 | family = "Mattioni Maturana", 7 | role = c("aut", "cre"), 8 | email = "felipe.mattioni@med.uni-tuebingen.de", 9 | comment = c(ORCID = "0000-0002-4221-6104")) 10 | Description: Set of tools for manipulating gas exchange data from cardiopulmonary exercise testing. 11 | License: MIT + file LICENSE 12 | URL: https://fmmattioni.github.io/whippr/, https://github.com/fmmattioni/whippr 13 | BugReports: https://github.com/fmmattioni/whippr/issues 14 | Encoding: UTF-8 15 | Roxygen: list(markdown = TRUE) 16 | Imports: 17 | readxl (>= 1.3.1), 18 | dplyr (>= 1.0.1), 19 | stringr (>= 1.4.0), 20 | lubridate (>= 1.7.9), 21 | magrittr, 22 | tibble, 23 | zoo, 24 | purrr, 25 | tidyr (>= 1.1.1), 26 | broom (>= 0.7.0), 27 | cli, 28 | ggplot2 (>= 3.4.0), 29 | glue, 30 | minpack.lm, 31 | patchwork (>= 1.0.1), 32 | rlang, 33 | nlstools, 34 | pillar 35 | RoxygenNote: 7.3.2 36 | Suggests: 37 | knitr, 38 | rmarkdown, 39 | fansi, 40 | collapsibleTree, 41 | testthat, 42 | shiny, 43 | miniUI, 44 | datapasta, 45 | rstudioapi, 46 | htmltools, 47 | readr, 48 | anomalize, 49 | ggforce, 50 | ggtext, 51 | forcats 52 | -------------------------------------------------------------------------------- /R/interpolate.R: -------------------------------------------------------------------------------- 1 | #' Interpolate data from breath-by-breath into second-by-second 2 | #' 3 | #' This function interpolates the data based on the time column. 
It takes the breath-by-breath data 4 | #' and transforms it into second-by-second. 5 | #' 6 | #' @param .data Data retrieved from \code{read_data()}. 7 | #' 8 | #' @return a [tibble][tibble::tibble-package] 9 | #' @export 10 | #' 11 | #' @importFrom stats approx 12 | #' 13 | #' @examples 14 | #' \dontrun{ 15 | #' ## get file path from example data 16 | #' path_example <- system.file("example_cosmed.xlsx", package = "whippr") 17 | #' 18 | #' ## read data 19 | #' df <- read_data(path = path_example, metabolic_cart = "cosmed") 20 | #' 21 | #' df %>% 22 | #' interpolate() 23 | #' } 24 | interpolate <- function(.data) { 25 | ## first make sure data only contains numeric columns 26 | data_num <- .data %>% 27 | dplyr::select_if(is.numeric) %>% 28 | remove_empty(dat = ., which = c("rows", "cols")) 29 | 30 | suppressWarnings({ 31 | out <- lapply(data_num, function (i) approx( 32 | x = data_num[[1]], 33 | y = i, 34 | xout = seq(min(data_num[[1]]), max(data_num[[1]], na.rm = TRUE), 1) 35 | )$y 36 | ) %>% 37 | dplyr::as_tibble() 38 | }) 39 | 40 | 41 | metadata <- attributes(.data) 42 | metadata$data_status <- "interpolated data" 43 | 44 | out <- new_whippr_tibble(out, metadata) 45 | 46 | out 47 | } 48 | -------------------------------------------------------------------------------- /.github/workflows/pkgdown.yaml: -------------------------------------------------------------------------------- 1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples 2 | # Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help 3 | on: 4 | push: 5 | branches: [main, master] 6 | pull_request: 7 | release: 8 | types: [published] 9 | workflow_dispatch: 10 | 11 | name: pkgdown.yaml 12 | 13 | permissions: read-all 14 | 15 | jobs: 16 | pkgdown: 17 | runs-on: ubuntu-latest 18 | # Only restrict concurrency for non-PR jobs 19 | concurrency: 20 | group: pkgdown-${{ github.event_name != 'pull_request' || github.run_id }} 21 | env: 22 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 23 | permissions: 24 | contents: write 25 | steps: 26 | - uses: actions/checkout@v4 27 | 28 | - uses: r-lib/actions/setup-pandoc@v2 29 | 30 | - uses: r-lib/actions/setup-r@v2 31 | with: 32 | use-public-rspm: true 33 | 34 | - uses: r-lib/actions/setup-r-dependencies@v2 35 | with: 36 | extra-packages: any::pkgdown, local::. 
37 | needs: website 38 | 39 | - name: Build site 40 | run: pkgdown::build_site_github_pages(new_process = FALSE, install = FALSE) 41 | shell: Rscript {0} 42 | 43 | - name: Deploy to GitHub pages 🚀 44 | if: github.event_name != 'pull_request' 45 | uses: JamesIves/github-pages-deploy-action@v4.5.0 46 | with: 47 | clean: false 48 | branch: gh-pages 49 | folder: docs 50 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .Rproj.user 2 | .Rhistory 3 | .DS_Store 4 | .RData 5 | docs/ 6 | demo.Rmd 7 | dev.R 8 | dev/ 9 | CRAN-SUBMISSION 10 | 11 | # Created by https://www.gitignore.io/api/macos,r 12 | # Edit at https://www.gitignore.io/?templates=macos,r 13 | ### macOS ### 14 | # General 15 | .AppleDouble 16 | .LSOverride 17 | # Icon must end with two \r 18 | Icon 19 | # Thumbnails 20 | ._* 21 | # Files that might appear in the root of a volume 22 | .DocumentRevisions-V100 23 | .fseventsd 24 | .Spotlight-V100 25 | .TemporaryItems 26 | .Trashes 27 | .VolumeIcon.icns 28 | .com.apple.timemachine.donotpresent 29 | # Directories potentially created on remote AFP share 30 | .AppleDB 31 | .AppleDesktop 32 | Network Trash Folder 33 | Temporary Items 34 | .apdisk 35 | ### R ### 36 | # History files 37 | .Rapp.history 38 | # Session Data files 39 | .RDataTmp 40 | # User-specific files 41 | .Ruserdata 42 | # Example code in package build process 43 | *-Ex.R 44 | # Output files from R CMD build 45 | /*.tar.gz 46 | # Output files from R CMD check 47 | /*.Rcheck/ 48 | # RStudio files 49 | .Rproj.user/ 50 | # produced vignettes 51 | vignettes/*.html 52 | vignettes/*.pdf 53 | # OAuth2 token, see https://github.com/hadley/httr/releases/tag/v0.3 54 | .httr-oauth 55 | # knitr and R markdown default cache directories 56 | *_cache/ 57 | /cache/ 58 | # Temporary files created by R markdown 59 | *.utf8.md 60 | *.knit.md 61 | ### R.Bookdown Stack ### 62 | # R package: bookdown caching files 63 | /*_files/ 64 | # End of https://www.gitignore.io/api/macos,r 65 | inst/doc 66 | -------------------------------------------------------------------------------- /man/read_data.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/read-data.R 3 | \name{read_data} 4 | \alias{read_data} 5 | \title{Read data from metabolic cart} 6 | \usage{ 7 | read_data( 8 | path, 9 | metabolic_cart = c("cosmed", "cortex", "nspire", "parvo", "geratherm", "cardiocoach", 10 | "custom"), 11 | time_column = "t", 12 | work_rate_column = NULL 13 | ) 14 | } 15 | \arguments{ 16 | \item{path}{Path to read the file from.} 17 | 18 | \item{metabolic_cart}{Metabolic cart that was used for data collection. Currently, 'cosmed', 'cortex', 'nspire', 'parvo', 'geratherm', and 'cardiocoach' are supported. Additionaly, there is an option called 'custom' that supports files that do not have a metabolic cart-specific format.} 19 | 20 | \item{time_column}{The name (quoted) of the column containing the time. Depending on the language of your system, this column might not be "t". Therefore, you may specify it here. Default to "t".} 21 | 22 | \item{work_rate_column}{Default is \code{NULL}. In case your work rate column is coerced as a character column 23 | you can define here the name of this column in your data file. 
This happens because at the very beginning of the test 24 | the system may input a character like "-" to indicate no work rate. Therefore this is not going to get recognized as a numeric column. 25 | If your work rate column is called \code{WR}, for example, just pass \code{"WR"} to this argument.} 26 | } 27 | \value{ 28 | a \link[tibble:tibble-package]{tibble} 29 | } 30 | \description{ 31 | It reads the raw data exported from the metabolic cart. 32 | } 33 | -------------------------------------------------------------------------------- /.github/workflows/R-CMD-check.yaml: -------------------------------------------------------------------------------- 1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples 2 | # Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help 3 | on: 4 | push: 5 | branches: [main, master] 6 | pull_request: 7 | 8 | name: R-CMD-check.yaml 9 | 10 | permissions: read-all 11 | 12 | jobs: 13 | R-CMD-check: 14 | runs-on: ${{ matrix.config.os }} 15 | 16 | name: ${{ matrix.config.os }} (${{ matrix.config.r }}) 17 | 18 | strategy: 19 | fail-fast: false 20 | matrix: 21 | config: 22 | - {os: macos-latest, r: 'release'} 23 | - {os: windows-latest, r: 'release'} 24 | - {os: ubuntu-latest, r: 'devel', http-user-agent: 'release'} 25 | - {os: ubuntu-latest, r: 'release'} 26 | - {os: ubuntu-latest, r: 'oldrel-1'} 27 | 28 | env: 29 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 30 | R_KEEP_PKG_SOURCE: yes 31 | 32 | steps: 33 | - uses: actions/checkout@v4 34 | 35 | - uses: r-lib/actions/setup-pandoc@v2 36 | 37 | - uses: r-lib/actions/setup-r@v2 38 | with: 39 | r-version: ${{ matrix.config.r }} 40 | http-user-agent: ${{ matrix.config.http-user-agent }} 41 | use-public-rspm: true 42 | 43 | - uses: r-lib/actions/setup-r-dependencies@v2 44 | with: 45 | extra-packages: any::rcmdcheck 46 | needs: check 47 | 48 | - uses: r-lib/actions/check-r-package@v2 49 | with: 50 | upload-snapshots: true 51 | build_args: 'c("--no-manual","--compact-vignettes=gs+qpdf")' 52 | -------------------------------------------------------------------------------- /R/helpers-read.R: -------------------------------------------------------------------------------- 1 | # Get target data 2 | ## these functions could be a single function actually, 3 | ## but perhaps it is better to keep it that way in case we add a system in the future that requires additional steps 4 | target_cortex <- function(.data, time_column){ 5 | target_cell <- which(.data == time_column, arr.ind = TRUE) 6 | 7 | ## usually cortex will not export the data at the top of the spreadsheet 8 | ## meaning that the data we are looking for won't have column names recognized here 9 | ## the following is just to make sure that, in case the user's export settings does export the data at the top of the spreadsheet, it will get recognized 10 | if(purrr::is_empty(target_cell)) 11 | target_cell <- which(colnames(.data) == time_column, arr.ind = TRUE) 12 | 13 | target_cell 14 | } 15 | 16 | target_cosmed <- function(.data, time_column){ 17 | target_cell <- which(colnames(.data) == time_column, arr.ind = TRUE) 18 | 19 | target_cell 20 | } 21 | 22 | target_nspire <- function(.data, time_column){ 23 | target_cell <- which(colnames(.data) == time_column, arr.ind = TRUE) 24 | 25 | target_cell 26 | } 27 | 28 | target_parvo <- function(.data, time_column) { 29 | target_cell <- which(.data == time_column, arr.ind = TRUE) 30 | 31 | ## usually cortex will not export the data at the top of the spreadsheet 32 | ## 
meaning that the data we are looking for won't have column names recognized here 33 | ## the following is just to make sure that, in case the user's export settings do export the data at the top of the spreadsheet, it will get recognized 34 | if(purrr::is_empty(target_cell)) 35 | target_cell <- which(colnames(.data) == time_column, arr.ind = TRUE) 36 | 37 | target_cell 38 | } 39 | -------------------------------------------------------------------------------- /man/predict_bands.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/predict.R 3 | \name{predict_bands} 4 | \alias{predict_bands} 5 | \title{Extract confidence and prediction bands} 6 | \usage{ 7 | predict_bands( 8 | .data, 9 | time_column = "t", 10 | vo2_column = "VO2", 11 | cleaning_level = 0.95, 12 | cleaning_baseline_fit = c("linear", "exponential") 13 | ) 14 | } 15 | \arguments{ 16 | \item{.data}{The normalized data retrieved from \code{normalize_transitions()}.} 17 | 18 | \item{time_column}{The name (quoted) of the column containing the time. Depending on the language of your system, this column might not be "t". Therefore, you may specify it here. Default to "t".} 19 | 20 | \item{vo2_column}{The name (quoted) of the column containing the absolute oxygen uptake (VO2) data. Default to 'VO2'.} 21 | 22 | \item{cleaning_level}{A numeric scalar between 0 and 1 giving the confidence level for the intervals to be calculated.} 23 | 24 | \item{cleaning_baseline_fit}{A character indicating what kind of fit to perform for each baseline. Either 'linear' or 'exponential'.} 25 | } 26 | \value{ 27 | a \link[tibble:tibble-package]{tibble} containing the following columns: 28 | \item{x}{The provided time data.} 29 | \item{y}{The provided VO2 data.} 30 | \item{.fitted}{The predicted response for that observation.} 31 | \item{.resid}{The residual for a particular point.} 32 | \item{lwr_conf}{Lower limit of the confidence band.} 33 | \item{upr_conf}{Upper limit of the confidence band.} 34 | \item{lwr_pred}{Lower limit of the prediction band.} 35 | \item{upr_pred}{Upper limit of the prediction band.} 36 | } 37 | \description{ 38 | It extracts confidence and prediction bands from the \code{nls} model. It is used only for data cleaning. 39 | } 40 | -------------------------------------------------------------------------------- /man/predict_bands_transition.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/predict.R 3 | \name{predict_bands_transition} 4 | \alias{predict_bands_transition} 5 | \title{Extract confidence and prediction bands for the transition phase} 6 | \usage{ 7 | predict_bands_transition( 8 | .data, 9 | time_column, 10 | vo2_column, 11 | cleaning_level, 12 | cleaning_model 13 | ) 14 | } 15 | \arguments{ 16 | \item{.data}{The normalized data retrieved from \code{normalize_transitions()}. The data should be filtered to only the 'transition' phase before passing to the function.} 17 | 18 | \item{time_column}{The name (quoted) of the column containing the time. Depending on the language of your system, this column might not be "t". Therefore, you may specify it here. Default to "t".} 19 | 20 | \item{vo2_column}{The name (quoted) of the column containing the absolute oxygen uptake (VO2) data.
Default to 'VO2'.} 21 | 22 | \item{cleaning_level}{A numeric scalar between 0 and 1 giving the confidence level for the intervals to be calculated.} 23 | 24 | \item{cleaning_model}{The \code{nls} model to retrieve the bands from.} 25 | } 26 | \value{ 27 | a \link[tibble:tibble-package]{tibble} containing the following columns: 28 | \item{x}{The provided time data.} 29 | \item{y}{The provided VO2 data.} 30 | \item{.fitted}{The predicted response for that observation.} 31 | \item{.resid}{The residual for a particular point.} 32 | \item{lwr_conf}{Lower limit of the confidence band.} 33 | \item{upr_conf}{Upper limit of the confidence band.} 34 | \item{lwr_pred}{Lower limit of the prediction band.} 35 | \item{upr_pred}{Upper limit of the prediction band.} 36 | } 37 | \description{ 38 | Extract confidence and prediction bands for the transition phase 39 | } 40 | -------------------------------------------------------------------------------- /man/predict_bands_baseline.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/predict.R 3 | \name{predict_bands_baseline} 4 | \alias{predict_bands_baseline} 5 | \title{Extract confidence and prediction bands for the baseline phase} 6 | \usage{ 7 | predict_bands_baseline( 8 | .data, 9 | time_column, 10 | vo2_column, 11 | cleaning_level, 12 | cleaning_baseline_fit 13 | ) 14 | } 15 | \arguments{ 16 | \item{.data}{The normalized data retrieved from \code{normalize_transitions()}. The data should be filtered to only the 'baseline' phase before passing to the function.} 17 | 18 | \item{time_column}{The name (quoted) of the column containing the time. Depending on the language of your system, this column might not be "t". Therefore, you may specify it here. Default to "t".} 19 | 20 | \item{vo2_column}{The name (quoted) of the column containing the absolute oxygen uptake (VO2) data. Default to 'VO2'.} 21 | 22 | \item{cleaning_level}{A numeric scalar between 0 and 1 giving the confidence level for the intervals to be calculated.} 23 | 24 | \item{cleaning_baseline_fit}{A character indicating what kind of fit to perform for each baseline. Either 'linear' or 'exponential'.} 25 | } 26 | \value{ 27 | a \link[tibble:tibble-package]{tibble} containing the following columns: 28 | \item{x}{The provided time data.} 29 | \item{y}{The provided VO2 data.} 30 | \item{.fitted}{The predicted response for that observation.} 31 | \item{.resid}{The residual for a particular point.} 32 | \item{lwr_conf}{Lower limit of the confidence band.} 33 | \item{upr_conf}{Upper limit of the confidence band.} 34 | \item{lwr_pred}{Lower limit of the prediction band.} 35 | \item{upr_pred}{Upper limit of the prediction band.} 36 | } 37 | \description{ 38 | Extract confidence and prediction bands for the baseline phase 39 | } 40 | -------------------------------------------------------------------------------- /man/undoHistory.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/shinyThings.R 3 | \name{undoHistory} 4 | \alias{undoHistory} 5 | \title{Undo/Redo History Buttons} 6 | \usage{ 7 | undoHistory(id, value, value_debounce_rate = 500) 8 | } 9 | \arguments{ 10 | \item{id}{The module id} 11 | 12 | \item{value}{The reactive expression with the values that should be saved for the 13 | user's history.
This expression can contain arbitrary data and be of any 14 | structure as long as it returns a single value (or list). Each change in 15 | this value is stored, so the module may not work well for storing large 16 | data sets.} 17 | 18 | \item{value_debounce_rate}{Debounce rate in milliseconds for the \code{value} 19 | reactive expression. To avoid saving spurious changes in \code{value}, the 20 | expression is debounced. See \code{\link[shiny:debounce]{shiny::debounce()}} for more information.} 21 | } 22 | \value{ 23 | The \code{undoHistory()} module returns the currently selected history 24 | item as the user moves through the stack, or \code{NULL} if the last update 25 | was the result of user input. The returned value has the same structure as 26 | the reactive \code{value} passed to \code{undoHistory()}. 27 | } 28 | \description{ 29 | This is a simple Shiny module for undo/redo history. The Shiny module accepts 30 | an arbitrary reactive data value. Changes in the state of this reactive value 31 | are tracked and added to the user's history. The user can then repeatedly 32 | undo and redo to walk through this stack. The module returns the current 33 | selected value of the reactive from this historical stack, or \code{NULL} when 34 | the app state was changed by the user. Because this reactive can hold 35 | arbitrary data about the state of the Shiny app, it is up to the app 36 | developer to use the returned current value to update the Shiny apps' inputs 37 | and UI elements. 38 | } 39 | \keyword{internal} 40 | -------------------------------------------------------------------------------- /NAMESPACE: -------------------------------------------------------------------------------- 1 | # Generated by roxygen2: do not edit by hand 2 | 3 | S3method(detect_outliers,incremental) 4 | S3method(detect_outliers,kinetics) 5 | S3method(incremental_normalize,ramp) 6 | S3method(incremental_normalize,step) 7 | S3method(perform_average,bin) 8 | S3method(perform_average,ensemble) 9 | S3method(perform_average,rolling) 10 | S3method(perform_kinetics,heavy) 11 | S3method(perform_kinetics,moderate) 12 | S3method(plot_incremental,ramp) 13 | S3method(plot_incremental,step) 14 | S3method(plot_outliers,incremental) 15 | S3method(plot_outliers,kinetics) 16 | S3method(print,whippr) 17 | S3method(read_data,cardiocoach) 18 | S3method(read_data,cortex) 19 | S3method(read_data,cosmed) 20 | S3method(read_data,custom) 21 | S3method(read_data,geratherm) 22 | S3method(read_data,nspire) 23 | S3method(read_data,parvo) 24 | export("%>%") 25 | export(detect_outliers) 26 | export(get_residuals) 27 | export(incremental_normalize) 28 | export(interpolate) 29 | export(model_diagnostics) 30 | export(normalize_first_breath) 31 | export(normalize_time) 32 | export(normalize_transitions) 33 | export(perform_average) 34 | export(perform_kinetics) 35 | export(perform_max) 36 | export(plot_incremental) 37 | export(plot_outliers) 38 | export(predict_bands) 39 | export(predict_bands_baseline) 40 | export(predict_bands_transition) 41 | export(process_data) 42 | export(read_data) 43 | export(run_manual_cleaner) 44 | export(theme_whippr) 45 | export(vo2_kinetics) 46 | export(vo2_max) 47 | importFrom(ggplot2,element_line) 48 | importFrom(ggplot2,element_rect) 49 | importFrom(ggplot2,element_text) 50 | importFrom(ggplot2,theme) 51 | importFrom(ggplot2,theme_light) 52 | importFrom(magrittr,"%>%") 53 | importFrom(rlang,":=") 54 | importFrom(stats,approx) 55 | importFrom(stats,coef) 56 | importFrom(stats,deriv) 57 | importFrom(stats,lm) 58 
| importFrom(stats,predict.lm) 59 | importFrom(stats,qt) 60 | importFrom(stats,vcov) 61 | importFrom(tibble,tibble) 62 | importFrom(utils,head) 63 | importFrom(utils,tail) 64 | -------------------------------------------------------------------------------- /R/utils.R: -------------------------------------------------------------------------------- 1 | # manual import of janitor::remove_empty() to avoid extra dependency 2 | 3 | #' @title Remove empty rows and/or columns from a data.frame or matrix. 4 | #' 5 | #' @description Removes all rows and/or columns from a data.frame or matrix that 6 | #' are composed entirely of \code{NA} values. 7 | #' 8 | #' @param dat the input data.frame or matrix. 9 | #' @param which one of "rows", "cols", or \code{c("rows", "cols")}. Where no 10 | #' value of which is provided, defaults to removing both empty rows and empty 11 | #' columns, declaring the behavior with a printed message. 12 | #' @param cutoff What fraction (>0 to <=1) of rows or columns must be empty to 13 | #' be removed? 14 | #' @return Returns the object without its missing rows or columns. 15 | #' @keywords internal 16 | remove_empty <- function(dat, which = c("rows", "cols"), cutoff=1) { 17 | if (missing(which) && !missing(dat)) { 18 | message("value for \"which\" not specified, defaulting to c(\"rows\", \"cols\")") 19 | which <- c("rows", "cols") 20 | } 21 | if ((sum(which %in% c("rows", "cols")) != length(which)) && !missing(dat)) { 22 | stop("\"which\" must be one of \"rows\", \"cols\", or c(\"rows\", \"cols\")") 23 | } 24 | if (length(cutoff) != 1) { 25 | stop("cutoff must be a single value") 26 | } else if (!is.numeric(cutoff)) { 27 | stop("cutoff must be numeric") 28 | } else if (cutoff <= 0 | cutoff > 1) { 29 | stop("cutoff must be >0 and <= 1") 30 | } else if (length(which) > 1 & cutoff != 1) { 31 | stop("cutoff must be used with only one of which = 'rows' or 'cols', not both") 32 | } 33 | if ("rows" %in% which) { 34 | # Using different code with cutoff = 1 vs cutoff != 1 to avoid possible 35 | # floating point errors. 36 | mask_keep <- 37 | if (cutoff == 1) { 38 | rowSums(is.na(dat)) != ncol(dat) 39 | } else { 40 | (rowSums(!is.na(dat))/ncol(dat)) > cutoff 41 | } 42 | dat <- dat[mask_keep, , drop = FALSE] 43 | } 44 | if ("cols" %in% which) { 45 | # Using different code with cutoff = 1 vs cutoff != 1 to avoid possible 46 | # floating point errors. 47 | mask_keep <- 48 | if (cutoff == 1) { 49 | colSums(is.na(dat)) != nrow(dat) 50 | } else { 51 | (colSums(!is.na(dat))/nrow(dat)) > cutoff 52 | } 53 | dat <- dat[, mask_keep, drop = FALSE] 54 | } 55 | dat 56 | } 57 | -------------------------------------------------------------------------------- /man/perform_average.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/averages.R 3 | \name{perform_average} 4 | \alias{perform_average} 5 | \title{Perform average on second-by-second data} 6 | \usage{ 7 | perform_average( 8 | .data, 9 | type = c("bin", "rolling", "ensemble"), 10 | bins = 30, 11 | bin_method = c("ceiling", "round", "floor"), 12 | rolling_window = 30 13 | ) 14 | } 15 | \arguments{ 16 | \item{.data}{The second-by-second data retrieved from \code{interpolate()}.} 17 | 18 | \item{type}{The type of the average to perform. Either \code{bin}, \code{rolling}, or \code{ensemble}.} 19 | 20 | \item{bins}{If bin-average is chosen, here you can specify the size of the bin-average, in seconds. 
Default to 30-s bin-average.} 21 | 22 | \item{bin_method}{Method for determining bin boundaries when \code{type = "bin"}. 23 | One of \code{"ceiling"} (default), \code{"round"}, or \code{"floor"}. 24 | \code{"ceiling"} is recommended as it ensures no data points are excluded 25 | from the analysis by always rounding up to the next bin boundary.} 26 | 27 | \item{rolling_window}{If rolling-average is chosen, here you can specify the rolling-average window, in seconds. Default to 30-s rolling-average.} 28 | } 29 | \value{ 30 | a \link[tibble:tibble-package]{tibble} 31 | } 32 | \description{ 33 | This function performs either a bin- or a rolling-average on the interpolated data. 34 | You must specify the \code{type} of the average before continuing. 35 | } 36 | \details{ 37 | Ensemble average is used in VO2 kinetics analysis, where a series of transitions from baseline to 38 | the moderate/heavy/severe intensity-domain is ensembled averaged into a single 'bout' for further data processing. 39 | 40 | When using bin averaging, the \code{bin_method} parameter controls how time points are assigned to bins: 41 | \itemize{ 42 | \item \code{"ceiling"}: Rounds up to the next bin boundary (recommended) 43 | \item \code{"round"}: Rounds to the nearest bin boundary 44 | \item \code{"floor"}: Rounds down to the previous bin boundary 45 | } 46 | } 47 | \examples{ 48 | \dontrun{ 49 | ## get file path from example data 50 | path_example <- system.file("example_cosmed.xlsx", package = "whippr") 51 | 52 | ## read data 53 | df <- read_data(path = path_example, metabolic_cart = "cosmed") 54 | 55 | ## interpolate and perform 30-s bin-average 56 | df \%>\% 57 | interpolate() \%>\% 58 | perform_average(type = "bin", bins = 30) 59 | 60 | ## interpolate and perform 30-s rolling-average 61 | df \%>\% 62 | interpolate() \%>\% 63 | perform_average(type = "rolling", rolling_window = 30) 64 | } 65 | } 66 | -------------------------------------------------------------------------------- /man/perform_kinetics.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/kinetics.R 3 | \name{perform_kinetics} 4 | \alias{perform_kinetics} 5 | \title{Perform VO2 kinetics fitting} 6 | \usage{ 7 | perform_kinetics( 8 | .data_processed, 9 | intensity_domain = c("moderate", "heavy", "severe"), 10 | fit_level = 0.95, 11 | fit_phase_1_length, 12 | fit_baseline_length, 13 | fit_transition_length, 14 | verbose = TRUE, 15 | ... 16 | ) 17 | } 18 | \arguments{ 19 | \item{.data_processed}{The data retrieved from \code{process_data()}.} 20 | 21 | \item{intensity_domain}{The exercise-intensity domain in which the test was performed. Either \emph{moderate}, \emph{heavy}, or \emph{severe}.} 22 | 23 | \item{fit_level}{A numeric scalar between 0 and 1 giving the confidence level for the parameter estimates in the final VO2 kinetics fit. Default to \code{0.95}.} 24 | 25 | \item{fit_phase_1_length}{The length of the phase I that you wish to exclude from the final exponential fit, in seconds. See \verb{VO2 kinetics} section in \code{?vo2_kinetics} for more details.} 26 | 27 | \item{fit_baseline_length}{The length of the baseline to perform the final linear fit, in seconds. See \verb{VO2 kinetics} section in \code{?vo2_kinetics} for more details.} 28 | 29 | \item{fit_transition_length}{The length of the transition to perform the final exponential fit, in seconds.
See \verb{VO2 kinetics} section \code{?vo2_kinetics} for more details.} 30 | 31 | \item{verbose}{A boolean indicating whether messages should be printed in the console. Default to \code{TRUE}.} 32 | 33 | \item{...}{Additional arguments when fitting VO2 kinetics in the heavy- or severe-intensity domains. Arguments may be the following: 34 | \describe{ 35 | \item{\code{TODO}}{} 36 | }} 37 | } 38 | \value{ 39 | a \code{\link[tibble:tibble-package]{tibble}} containing one row and the nested columns: 40 | \item{data_fitted}{The data containing the time and VO2 columns, as well as the fitted data and its residuals for each data point.} 41 | \item{model}{A \code{nls} object. The model used in the VO2 kinetics fitting.} 42 | \item{model_summary}{The tidied summary of the \code{model}.} 43 | \item{model_residuals}{The residuals of the \code{model}.} 44 | \item{plot_model}{The final plot of the fitted \code{model}.} 45 | \item{plot_residuals}{The residuals plot for the \code{model} diagnostics.} 46 | } 47 | \description{ 48 | Performs the fitting process for the VO2 kinetics analysis. At this point, the data should already have been cleaned (outliers removed) and processed 49 | (interpolated, time-aligned, ensembled-averaged, and bin-averaged). 50 | } 51 | \details{ 52 | See \code{?vo2_kinetics} for details. 53 | } 54 | -------------------------------------------------------------------------------- /_pkgdown.yml: -------------------------------------------------------------------------------- 1 | destination: docs 2 | 3 | template: 4 | bootstrap: 5 5 | bootswatch: sandstone 6 | bslib: 7 | primary: "#0054AD" 8 | border-radius: 0.5rem 9 | btn-border-radius: 0.25rem 10 | opengraph: 11 | image: 12 | src: man/figures/header.png 13 | alt: "Tools for Manipulating Gas Exchange Data" 14 | twitter: 15 | creator: "@felipe_mattioni" 16 | card: summary_large_image 17 | params: 18 | ganalytics: UA-126092763-1 19 | 20 | authors: 21 | Felipe Mattioni Maturana: 22 | href: https://fmattioni.me 23 | 24 | toc: 25 | depth: 3 26 | 27 | development: 28 | mode: unreleased 29 | 30 | navbar: 31 | title: ~ 32 | type: default 33 | left: 34 | - icon: fa-home fa-lg 35 | href: index.html 36 | - icon: fa-book fa-lg 37 | text: "Articles" 38 | menu: 39 | - text: "VO2 kinetics analysis" 40 | href: articles/vo2_kinetics.html 41 | - text: "Incremental test analyses" 42 | href: articles/incremental.html 43 | - text: Reference 44 | icon: fa-balance-scale 45 | href: reference/index.html 46 | - text: News 47 | icon: fa-newspaper 48 | menu: 49 | - text: "Change log" 50 | href: news/index.html 51 | right: 52 | - icon: fa-github fa-lg 53 | href: https://github.com/fmmattioni/whippr 54 | - icon: fa-twitter fa-lg 55 | href: https://twitter.com/felipe_mattioni 56 | 57 | reference: 58 | - title: Read data 59 | desc: > 60 | Read raw data from metabolic cart. 
61 | contents: 62 | - read_data 63 | - title: Interpolate data 64 | desc: > 65 | Interpolate data from breath-by-breath into second-by-second 66 | contents: 67 | - interpolate 68 | - title: Perform averages 69 | desc: > 70 | Perform average on second-by-second data 71 | contents: 72 | - perform_average 73 | - title: VO2 kinetics analysis 74 | desc: > 75 | Tools for performing VO2 kinetics analysis 76 | contents: 77 | - vo2_kinetics 78 | - starts_with("normalize") 79 | - detect_outliers 80 | - plot_outliers 81 | - starts_with("predict") 82 | - process_data 83 | - perform_kinetics 84 | - get_residuals 85 | - model_diagnostics 86 | - title: VO2 max analysis 87 | desc: > 88 | Tools for performing incremental test analysis 89 | contents: 90 | - incremental_normalize 91 | - plot_incremental 92 | - detect_outliers 93 | - perform_max 94 | - vo2_max 95 | - title: Additional tools 96 | desc: > 97 | Collection of a few additional helpful tools 98 | contents: 99 | - run_manual_cleaner 100 | - theme_whippr 101 | - print.whippr 102 | -------------------------------------------------------------------------------- /.github/workflows/pr-commands.yaml: -------------------------------------------------------------------------------- 1 | # Workflow derived from https://github.com/r-lib/actions/tree/v2/examples 2 | # Need help debugging build failures? Start at https://github.com/r-lib/actions#where-to-find-help 3 | on: 4 | issue_comment: 5 | types: [created] 6 | 7 | name: pr-commands.yaml 8 | 9 | permissions: read-all 10 | 11 | jobs: 12 | document: 13 | if: ${{ github.event.issue.pull_request && (github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'OWNER') && startsWith(github.event.comment.body, '/document') }} 14 | name: document 15 | runs-on: ubuntu-latest 16 | env: 17 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 18 | permissions: 19 | contents: write 20 | steps: 21 | - uses: actions/checkout@v4 22 | 23 | - uses: r-lib/actions/pr-fetch@v2 24 | with: 25 | repo-token: ${{ secrets.GITHUB_TOKEN }} 26 | 27 | - uses: r-lib/actions/setup-r@v2 28 | with: 29 | use-public-rspm: true 30 | 31 | - uses: r-lib/actions/setup-r-dependencies@v2 32 | with: 33 | extra-packages: any::roxygen2 34 | needs: pr-document 35 | 36 | - name: Document 37 | run: roxygen2::roxygenise() 38 | shell: Rscript {0} 39 | 40 | - name: commit 41 | run: | 42 | git config --local user.name "$GITHUB_ACTOR" 43 | git config --local user.email "$GITHUB_ACTOR@users.noreply.github.com" 44 | git add man/\* NAMESPACE 45 | git commit -m 'Document' 46 | 47 | - uses: r-lib/actions/pr-push@v2 48 | with: 49 | repo-token: ${{ secrets.GITHUB_TOKEN }} 50 | 51 | style: 52 | if: ${{ github.event.issue.pull_request && (github.event.comment.author_association == 'MEMBER' || github.event.comment.author_association == 'OWNER') && startsWith(github.event.comment.body, '/style') }} 53 | name: style 54 | runs-on: ubuntu-latest 55 | env: 56 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 57 | permissions: 58 | contents: write 59 | steps: 60 | - uses: actions/checkout@v4 61 | 62 | - uses: r-lib/actions/pr-fetch@v2 63 | with: 64 | repo-token: ${{ secrets.GITHUB_TOKEN }} 65 | 66 | - uses: r-lib/actions/setup-r@v2 67 | 68 | - name: Install dependencies 69 | run: install.packages("styler") 70 | shell: Rscript {0} 71 | 72 | - name: Style 73 | run: styler::style_pkg() 74 | shell: Rscript {0} 75 | 76 | - name: commit 77 | run: | 78 | git config --local user.name "$GITHUB_ACTOR" 79 | git config --local user.email 
"$GITHUB_ACTOR@users.noreply.github.com" 80 | git add \*.R 81 | git commit -m 'Style' 82 | 83 | - uses: r-lib/actions/pr-push@v2 84 | with: 85 | repo-token: ${{ secrets.GITHUB_TOKEN }} 86 | -------------------------------------------------------------------------------- /tests/testthat/test-incremental.R: -------------------------------------------------------------------------------- 1 | test_that("incremental_normalize ramp works", { 2 | ## get file path from example data 3 | path_example <- system.file("ramp_cosmed.xlsx", package = "whippr") 4 | ## read data from ramp test 5 | df <- read_data(path = path_example, metabolic_cart = "cosmed") 6 | ## normalize incremental test data 7 | ramp_normalized <- df %>% 8 | incremental_normalize( 9 | .data = ., 10 | incremental_type = "ramp", 11 | has_baseline = TRUE, 12 | baseline_length = 240, 13 | work_rate_magic = TRUE, 14 | baseline_intensity = 20, 15 | ramp_increase = 25 16 | ) 17 | 18 | expect_s3_class( 19 | object = ramp_normalized, 20 | class = "tbl" 21 | ) 22 | }) 23 | 24 | test_that("incremental_normalize step works", { 25 | ## get file path from example data 26 | path_example <- system.file("step_cortex.xlsx", package = "whippr") 27 | ## read data from ramp test 28 | df <- read_data(path = path_example, metabolic_cart = "cortex") 29 | ## normalize incremental test data 30 | ramp_normalized <- df %>% 31 | incremental_normalize( 32 | .data = ., 33 | incremental_type = "step", 34 | has_baseline = TRUE, 35 | baseline_length = 120, 36 | work_rate_magic = TRUE, 37 | baseline_intensity = 0, 38 | step_start = 50, 39 | step_increase = 25, 40 | step_length = 180 41 | ) 42 | 43 | expect_s3_class( 44 | object = ramp_normalized, 45 | class = "tbl" 46 | ) 47 | }) 48 | 49 | test_that("plot_incremental ramp works", { 50 | ## get file path from example data 51 | path_example <- system.file("ramp_cosmed.xlsx", package = "whippr") 52 | ## read data from ramp test 53 | df <- read_data(path = path_example, metabolic_cart = "cosmed") 54 | ## normalize incremental test data 55 | ramp_normalized <- df %>% 56 | incremental_normalize( 57 | .data = ., 58 | incremental_type = "ramp", 59 | has_baseline = TRUE, 60 | baseline_length = 240, 61 | work_rate_magic = TRUE, 62 | baseline_intensity = 20, 63 | ramp_increase = 25 64 | ) 65 | 66 | ## plot 67 | expect_s3_class( 68 | plot_incremental(ramp_normalized), 69 | "ggplot" 70 | ) 71 | }) 72 | 73 | test_that("plot_incremental step works", { 74 | ## get file path from example data 75 | path_example <- system.file("step_cortex.xlsx", package = "whippr") 76 | ## read data from ramp test 77 | df <- read_data(path = path_example, metabolic_cart = "cortex") 78 | ## normalize incremental test data 79 | ramp_normalized <- df %>% 80 | incremental_normalize( 81 | .data = ., 82 | incremental_type = "step", 83 | has_baseline = TRUE, 84 | baseline_length = 120, 85 | work_rate_magic = TRUE, 86 | baseline_intensity = 0, 87 | step_start = 50, 88 | step_increase = 25, 89 | step_length = 180 90 | ) 91 | 92 | ## plot 93 | expect_s3_class( 94 | plot_incremental(ramp_normalized), 95 | "ggplot" 96 | ) 97 | }) 98 | -------------------------------------------------------------------------------- /man/perform_max.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/max.R 3 | \name{perform_max} 4 | \alias{perform_max} 5 | \title{Perform VO2max calculation} 6 | \usage{ 7 | perform_max( 8 | .data, 9 | vo2_column = "VO2", 10 | 
vo2_relative_column = NULL, 11 | heart_rate_column = NULL, 12 | rer_column = NULL, 13 | average_method = c("bin", "rolling"), 14 | average_length = 30, 15 | plot = TRUE, 16 | verbose = TRUE 17 | ) 18 | } 19 | \arguments{ 20 | \item{.data}{The data retrieved either from \code{incremental_normalize()} or \code{detect_outliers()}.} 21 | 22 | \item{vo2_column}{The name (quoted) of the column containing the absolute oxygen uptake (VO2) data. Default to \code{"VO2"}.} 23 | 24 | \item{vo2_relative_column}{The name (quoted) of the column containing the relative to body weight oxygen uptake (VO2) data. Default to \code{NULL}.} 25 | 26 | \item{heart_rate_column}{The name (quoted) of the column containing the heart rate (HR) data. Default to \code{NULL}. If \code{NULL}, this parameter will not be calculated.} 27 | 28 | \item{rer_column}{The name (quoted) of the column containing the respiratory exchange ratio (RER) data. Default to \code{NULL}. If \code{NULL}, this parameter will not be calculated.} 29 | 30 | \item{average_method}{The average method to be used for VO2max calculation. One of \code{bin} or \code{rolling}.} 31 | 32 | \item{average_length}{The length, in seconds, of the average to be used. For example, if \code{average_method = bin}, and \code{average_length = 30}, it will perform a 30-s bin-average.} 33 | 34 | \item{plot}{A boolean indicating whether to produce a plot with the summary results. Default to \code{TRUE}.} 35 | 36 | \item{verbose}{A boolean indicating whether messages should be printed in the console. Default to \code{TRUE}.} 37 | } 38 | \value{ 39 | a tibble 40 | } 41 | \description{ 42 | It performs the calculation of VO2max, HRmax, and maximal RER. Additionally, it detects whether a plateau can be identified from your data. 43 | } 44 | \examples{ 45 | \dontrun{ 46 | ## get file path from example data 47 | path_example <- system.file("ramp_cosmed.xlsx", package = "whippr") 48 | 49 | ## read data from ramp test 50 | df <- read_data(path = path_example, metabolic_cart = "cosmed") 51 | 52 | ## normalize incremental test data 53 | ramp_normalized <- df \%>\% 54 | incremental_normalize( 55 | .data = ., 56 | incremental_type = "ramp", 57 | has_baseline = TRUE, 58 | baseline_length = 240, 59 | work_rate_magic = TRUE, 60 | baseline_intensity = 20, 61 | ramp_increase = 25 62 | ) 63 | 64 | ## detect outliers 65 | data_ramp_outliers <- detect_outliers( 66 | .data = ramp_normalized, 67 | test_type = "incremental", 68 | vo2_column = "VO2", 69 | cleaning_level = 0.95, 70 | method_incremental = "linear", 71 | verbose = TRUE 72 | ) 73 | 74 | ## analyze VO2max 75 | perform_max( 76 | .data = data_ramp_outliers, 77 | vo2_column = "VO2", 78 | vo2_relative_column = "VO2/Kg", 79 | heart_rate_column = "HR", 80 | rer_column = "R", 81 | average_method = "bin", 82 | average_length = 30, 83 | plot = TRUE, 84 | verbose = FALSE 85 | ) 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /R/nlstools.R: -------------------------------------------------------------------------------- 1 | #' Get residuals 2 | #' 3 | #' Computes residuals from the VO2 kinetics model. 4 | #' 5 | #' @param .model A model of class \code{nls}. 
6 | #' 7 | #' @return a [tibble][tibble::tibble-package] containing the data passed to augment, and additional columns: 8 | #' \item{.fitted}{The predicted response for that observation.} 9 | #' \item{.resid}{The residual for a particular point.} 10 | #' \item{standardized_residuals}{Standardized residuals.} 11 | #' \item{sqrt_abs_standardized_residuals}{The sqrt of absolute value of standardized residuals.} 12 | #' \item{lag_residuals}{The lag of the \code{.resid} column for plotting auto-correlation.} 13 | #' 14 | #' @export 15 | get_residuals <- function(.model) { 16 | ## adapted from nlstools::nlsResiduals() 17 | 18 | sigma_model <- summary(.model)$sigma 19 | 20 | model_augmented <- broom::augment(.model) 21 | 22 | mean_residuals <- mean(model_augmented$.resid) 23 | 24 | out <- model_augmented %>% 25 | dplyr::mutate(standardized_residuals = (.resid - mean_residuals) / sigma_model, 26 | sqrt_abs_standardized_residuals = abs(.resid / sigma_model), 27 | sqrt_abs_standardized_residuals = sqrt(sqrt_abs_standardized_residuals), 28 | lag_residuals = dplyr::lag(.resid)) 29 | 30 | out 31 | } 32 | 33 | #' Model diagnostics 34 | #' 35 | #' Plots different model diagnostics for checking the model performance. 36 | #' 37 | #' @param .residuals_tbl The data retrived from \code{get_residuals()}. 38 | #' 39 | #' @return a patchwork object 40 | #' @export 41 | model_diagnostics <- function(.residuals_tbl) { 42 | ## Plots inspired both by the 'nlstools' and 'see' packages 43 | 44 | ## P1 45 | p1 <- .residuals_tbl %>% 46 | ggplot2::ggplot(ggplot2::aes(.resid)) + 47 | ggplot2::geom_density(fill = "skyblue", alpha = 0.5) + 48 | ggplot2::labs( 49 | title = "Non-Normality of Residuals", 50 | subtitle = "Distribution should look like a normal curve", 51 | x = "Residuals", 52 | y = "Density" 53 | ) + 54 | theme_whippr() 55 | 56 | ## P2 57 | p2 <- .residuals_tbl %>% 58 | ggplot2::ggplot(ggplot2::aes(sample = standardized_residuals)) + 59 | ggplot2::stat_qq(shape = 21, size = 4, fill = "skyblue") + 60 | ggplot2::stat_qq_line(color = "darkred", linewidth = 1) + 61 | ggplot2::labs( 62 | title = "Non-normality of Residuals and Outliers", 63 | subtitle = "Dots should be plotted along the line", 64 | y = "Standardized Residuals", 65 | x = "Theoretical Quantiles" 66 | ) + 67 | theme_whippr() 68 | 69 | ## P3 70 | p3 <- .residuals_tbl %>% 71 | ggplot2::ggplot(ggplot2::aes(.fitted, .resid)) + 72 | ggplot2::geom_point(shape = 21, size = 4, fill = "skyblue") + 73 | ggplot2::geom_hline(yintercept = 0, lty = "dashed", color = "darkred", linewidth = 1) + 74 | ggplot2::labs( 75 | title = "Homoscedasticity", 76 | subtitle = "Dots should be similar above and below the dashed line", 77 | x = "Fitted values", 78 | y = "Residuals" 79 | ) + 80 | theme_whippr() 81 | 82 | p4 <- .residuals_tbl %>% 83 | tidyr::drop_na(lag_residuals) %>% 84 | ggplot2::ggplot(ggplot2::aes(.resid, lag_residuals)) + 85 | ggplot2::geom_point(shape = 21, size = 4, fill = "skyblue") + 86 | ggplot2::geom_hline(yintercept = 0, lty = "dashed", color = "darkred", linewidth = 1) + 87 | ggplot2::labs( 88 | title = "Autocorrelation", 89 | subtitle = "Dots should be plotted randomly", 90 | x = "Residuals", 91 | y = "Residuals - i" 92 | ) + 93 | theme_whippr() 94 | 95 | out <- patchwork::wrap_plots(p1, p2, p3, p4, ncol = 2) 96 | 97 | out 98 | } 99 | -------------------------------------------------------------------------------- /man/incremental_normalize.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: 
do not edit by hand 2 | % Please edit documentation in R/incremental.R 3 | \name{incremental_normalize} 4 | \alias{incremental_normalize} 5 | \title{Normalize incremental test data} 6 | \usage{ 7 | incremental_normalize( 8 | .data, 9 | incremental_type = c("ramp", "step"), 10 | has_baseline = TRUE, 11 | baseline_length = NULL, 12 | work_rate_magic = FALSE, 13 | baseline_intensity = NULL, 14 | ramp_increase = NULL, 15 | step_start = NULL, 16 | step_increase = NULL, 17 | step_length = NULL, 18 | ... 19 | ) 20 | } 21 | \arguments{ 22 | \item{.data}{Data retrieved from \code{read_data()}.} 23 | 24 | \item{incremental_type}{The type of the incremental test performed. Either "ramp" or "step".} 25 | 26 | \item{has_baseline}{A boolean to indicate whether the data contains a baseline phase. This is used for an incremental test only. Default to \code{TRUE}.} 27 | 28 | \item{baseline_length}{The length (in seconds) of the baseline performed.} 29 | 30 | \item{work_rate_magic}{A boolean indicating whether to perform the work rate calculations. When set to \code{TRUE}, 31 | it will calculate the work rate throughout a ramp or step test. In the case of a step test, it will also 32 | perform a linear transformation of the work rate. 33 | If set to \code{TRUE}, the arguments below should be given. Default to \code{FALSE}.} 34 | 35 | \item{baseline_intensity}{A numeric atomic vector indicating the work rate of the baseline. If the baseline was performed at rest, indicate \code{0}.} 36 | 37 | \item{ramp_increase}{A numeric atomic vector indicating the ramp increase in watts per minute (W/min). For example, if the ramp 38 | was \verb{30 W/min}, then pass the number \code{30} to this argument.} 39 | 40 | \item{step_start}{In case your baseline was performed at rest, you can set in this parameter the intensity at which 41 | the step test started.} 42 | 43 | \item{step_increase}{A numeric atomic vector indicating the step increase, in watts. For example, if the step increase was 44 | \verb{25 W} at each step, then pass the number \code{25} to this argument.} 45 | 46 | \item{step_length}{A numeric atomic vector indicating the length (in seconds) of each step in the step incremental test.} 47 | 48 | \item{...}{Additional arguments. Currently ignored.} 49 | } 50 | \value{ 51 | a \link[tibble:tibble-package]{tibble} 52 | } 53 | \description{ 54 | Detect protocol phases (baseline, ramp, steps), normalize work rate, and 55 | time-align baseline phase (baseline time becomes negative).
56 | } 57 | \examples{ 58 | \dontrun{ 59 | ## get file path from example data 60 | path_example <- system.file("ramp_cosmed.xlsx", package = "whippr") 61 | 62 | ## read data from ramp test 63 | df <- read_data(path = path_example, metabolic_cart = "cosmed") 64 | 65 | ## normalize incremental test data 66 | ramp_normalized <- df \%>\% 67 | incremental_normalize( 68 | .data = ., 69 | incremental_type = "ramp", 70 | has_baseline = TRUE, 71 | baseline_length = 240, 72 | work_rate_magic = TRUE, 73 | baseline_intensity = 20, 74 | ramp_increase = 25 75 | ) 76 | 77 | ## get file path from example data 78 | path_example_step <- system.file("step_cortex.xlsx", package = "whippr") 79 | 80 | ## read data from step test 81 | df_step <- read_data(path = path_example_step, metabolic_cart = "cortex") 82 | 83 | ## normalize incremental test data 84 | step_normalized <- df_step \%>\% 85 | incremental_normalize( 86 | .data = ., 87 | incremental_type = "step", 88 | has_baseline = TRUE, 89 | baseline_length = 120, 90 | work_rate_magic = TRUE, 91 | baseline_intensity = 0, 92 | step_start = 50, 93 | step_increase = 25, 94 | step_length = 180 95 | ) 96 | } 97 | } 98 | -------------------------------------------------------------------------------- /NEWS.md: -------------------------------------------------------------------------------- 1 | # whippr 0.1.4 2 | 3 | * Fixed an issue with the `perform_max()` function where the `average_length` argument was not being correctly considered (#15). 4 | * Fixed issue with reading data from Parvo metabolic cart (#14). 5 | * Modified default bin average method from `round` to `ceiling`. 6 | 7 | # whippr 0.1.3 8 | 9 | * Removed `usethis` dependency. 10 | * Fixed typos. 11 | * Fixed `ggplot2`, `tidyselect`, and `tibble` warnings. 12 | * Adjusted documentation as requested by CRAN. 13 | 14 | # whippr 0.1.2 15 | 16 | * Fixed a bug in `read_data.cosmed()` that made the time column return `NA` values when the test was longer than one hour. 17 | * Added a `custom` option to `read_data()`. 18 | 19 | # whippr 0.1.1 20 | 21 | * Updated docs with `roxygen 7.2.1`. 22 | 23 | # whippr 0.1.0 24 | 25 | * General cleanup has been performed to reduce dependencies. 26 | 27 | # whippr 0.0.0.9000 28 | 29 | ## Breaking changes 30 | 31 | * The following function calls were simplified: 32 | 33 | * `vo2_kinetics()` and `detect_outliers()` = `time_column` argument not needed anymore (this is automatically taken from `read_data()` now). 34 | * `plot_outliers()` = `test_type` argument not needed anymore (this is automatically taken from `detect_outliers()` now). 35 | * `perform_kinetics()` = `time_column` and `vo2_column` arguments not needed anymore (this is automatically taken from `read_data()` and `detect_outliers()`). 36 | 37 | ## New functions/methods 38 | 39 | * Added function `read_data()` to read data from metabolic cart (COSMED and CORTEX). 40 | * Now you can specify the name of the time column, in case your system is not in English. Default is set to "t". 41 | * Added option to read data from NSpire system (thanks to [@peter__leo](https://twitter.com/peter__leo)). 42 | * Added option to read data from Parvo Medics system (thanks to [@EatsleepfitJeff](https://twitter.com/EatsleepfitJeff)). 43 | * Added option to read data from Geratherm Respiratory system (thanks to [@marcorsini61](https://twitter.com/marcorsini61)). 44 | 45 | * Added function `interpolate()` to interpolate breath-by-breath data into second-by-second.
46 | 47 | * Added function `perform_average()` to perform bin- and rolling-averages. 48 | 49 | * Added `run_manual_cleaner()`. 50 | 51 | * Added testing of functions (internal modification only, not visible to the user). 52 | 53 | * Added new print method. 54 | 55 | * Added new functionality for analyzing data from incremental exercise. 56 | 57 | * Added `perform_max()` and `vo2_max()`. 58 | 59 | * Added support for the `CardioCoach` metabolic cart. 60 | 61 | ## Bug fixes 62 | 63 | * Fixed issue with `read_data()` when using the COSMED metabolic cart: previously, character columns were being coerced into `NA` (thanks @Scott-S-Hannah #4). 64 | 65 | * Added extra argument to `read_data()` to automatically fix the issue when the work rate column is coerced as a character column (thanks to [@ThibauxV](https://twitter.com/ThibauxV)). 66 | 67 | * Improved error messages in case `read_data()` cannot find the name of the time column provided. 68 | 69 | * Removed the `time_column` argument from the `interpolate()` and `perform_average()` functions. This is only necessary in `read_data()` now. 70 | 71 | * Made sure that data does not contain rows and cols with only `NA` in `interpolate()` (thanks @Scott-S-Hannah). 72 | 73 | * Fixed issue renaming work rate column in `read_data()` (thanks @Scott-S-Hannah #6). 74 | 75 | ## VO2 kinetics analyses 76 | 77 | * Added a set of tools for VO2 kinetics analyses. 78 | 79 | ## Incremental test analyses 80 | 81 | * Added a set of tools for incremental test analyses: data standardization and normalization, detection of 'bad breaths', mean response time, maximal values (e.g., VO2max, HRmax, maximal RER, etc.), and ventilatory thresholds. 82 | -------------------------------------------------------------------------------- /man/detect_outliers.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/outliers.R 3 | \name{detect_outliers} 4 | \alias{detect_outliers} 5 | \title{Detect outliers} 6 | \usage{ 7 | detect_outliers( 8 | .data, 9 | test_type = c("incremental", "kinetics"), 10 | vo2_column = "VO2", 11 | cleaning_level = 0.95, 12 | cleaning_baseline_fit, 13 | protocol_n_transitions, 14 | protocol_baseline_length, 15 | protocol_transition_length, 16 | method_incremental = c("linear", "anomaly"), 17 | verbose = TRUE, 18 | ... 19 | ) 20 | } 21 | \arguments{ 22 | \item{.data}{Data retrieved from \code{read_data()} for a \strong{kinetics} test, or 23 | the data retrieved from \code{incremental_normalize()} for an \strong{incremental} test.} 24 | 25 | \item{test_type}{The test to be analyzed. Either 'incremental' or 'kinetics'.} 26 | 27 | \item{vo2_column}{The name (quoted) of the column containing the absolute oxygen uptake (VO2) data. Default to \code{VO2}.} 28 | 29 | \item{cleaning_level}{A numeric scalar between 0 and 1 giving the confidence level for the intervals to be calculated. Default to \code{0.95}.} 30 | 31 | \item{cleaning_baseline_fit}{For \strong{kinetics} test only. A vector of the same length as the number given in \code{protocol_n_transitions}, indicating what kind of fit to perform for each baseline. The vector accepts the characters 'linear' or 'exponential'.} 32 | 33 | \item{protocol_n_transitions}{For \strong{kinetics} test only. Number of transitions performed.} 34 | 35 | \item{protocol_baseline_length}{For \strong{kinetics} test only.
The length of the baseline (in seconds).} 36 | 37 | \item{protocol_transition_length}{For \strong{kinetics} test only. The length of the transition (in seconds).} 38 | 39 | \item{method_incremental}{The method to be used in detecting outliers from the 40 | incremental test. Either 'linear' or 'anomaly'. See \code{Details}.} 41 | 42 | \item{verbose}{A boolean indicating whether messages should be printed in the console. Default to \code{TRUE}.} 43 | 44 | \item{...}{Additional arguments. Currently ignored.} 45 | } 46 | \value{ 47 | a \link[tibble:tibble-package]{tibble} 48 | } 49 | \description{ 50 | It detects outliers based on prediction bands for the given level of confidence provided. 51 | } 52 | \details{ 53 | TODO 54 | } 55 | \examples{ 56 | \dontrun{ 57 | ## get file path from example data 58 | path_example <- system.file("example_cosmed.xlsx", package = "whippr") 59 | 60 | ## read data 61 | df <- read_data(path = path_example, metabolic_cart = "cosmed") 62 | 63 | ## detect outliers 64 | data_outliers <- detect_outliers( 65 | .data = df, 66 | test_type = "kinetics", 67 | vo2_column = "VO2", 68 | cleaning_level = 0.95, 69 | cleaning_baseline_fit = c("linear", "exponential", "exponential"), 70 | protocol_n_transitions = 3, 71 | protocol_baseline_length = 360, 72 | protocol_transition_length = 360, 73 | verbose = TRUE 74 | ) 75 | 76 | ## get file path from example data 77 | path_example_ramp <- system.file("ramp_cosmed.xlsx", package = "whippr") 78 | 79 | ## read data from ramp test 80 | df_ramp <- read_data(path = path_example_ramp, metabolic_cart = "cosmed") 81 | 82 | ## normalize incremental test data 83 | ramp_normalized <- df_ramp \%>\% 84 | incremental_normalize( 85 | .data = ., 86 | incremental_type = "ramp", 87 | has_baseline = TRUE, 88 | baseline_length = 240, 89 | work_rate_magic = TRUE, 90 | baseline_intensity = 20, 91 | ramp_increase = 25 92 | ) 93 | 94 | ## detect ramp outliers 95 | data_ramp_outliers <- detect_outliers( 96 | .data = ramp_normalized, 97 | test_type = "incremental", 98 | vo2_column = "VO2", 99 | cleaning_level = 0.95, 100 | method_incremental = "linear", 101 | verbose = TRUE 102 | ) 103 | } 104 | } 105 | -------------------------------------------------------------------------------- /R/normalize.R: -------------------------------------------------------------------------------- 1 | #' Normalize first breath 2 | #' 3 | #' This is needed specially when the data gets filtered. For example, if the data file does not only contain 4 | #' the baseline and transitions performed, we will have to normalize the time column. 5 | #' This function will make sure that in case the first breath does not start at zero, it will create a zero data point, 6 | #' duplicating the first breath. This will make sure the data does not get shifted (misalignment). 7 | #' 8 | #' @param .data Breath-by-breath data. 9 | #' 10 | #' @return a [tibble][tibble::tibble-package] 11 | #' @export 12 | normalize_first_breath <- function(.data) { 13 | first_breath <- .data %>% 14 | dplyr::select(1) %>% 15 | dplyr::slice(1) %>% 16 | dplyr::pull() %>% 17 | as.integer() 18 | 19 | if(first_breath %% 10 != 0) { 20 | row_to_add <- .data[1, ] %>% 21 | dplyr::mutate_at(1, function(x) x = first_breath - first_breath %% 10) 22 | 23 | out <- .data %>% 24 | dplyr::bind_rows(row_to_add, .) 25 | } else { 26 | out <- .data 27 | } 28 | 29 | out 30 | } 31 | 32 | #' Normalize transitions 33 | #' 34 | #' Recognizes and normalizes the time column of each transition. 
It will also label each phase as either 'baseline' or 'transition'. 35 | #' 36 | #' @param .data Breath-by-breath data. 37 | #' @param protocol_n_transitions Number of transitions performed. 38 | #' @param protocol_baseline_length The length of the baseline (in seconds). 39 | #' @param protocol_transition_length The length of the transition (in seconds). 40 | #' 41 | #' @return a [tibble][tibble::tibble-package] 42 | #' @export 43 | normalize_transitions <- function( 44 | .data, 45 | protocol_n_transitions, 46 | protocol_baseline_length, 47 | protocol_transition_length 48 | ) { 49 | ## first step is to get the time that each transition ends (in seconds), 50 | ## and then apply the label to each transition 51 | info_transition <- dplyr::tibble( 52 | n_transition = seq(1, protocol_n_transitions, 1) 53 | ) %>% 54 | dplyr::mutate( 55 | end_transition = (protocol_baseline_length + protocol_transition_length) * n_transition, 56 | label = paste("transition", n_transition, sep = " ") 57 | ) 58 | 59 | ## get time column name 60 | time_column <- colnames(.data)[1] 61 | 62 | ## now we normalize the first breath for the whole data 63 | ## for more information see ?normalize_first_breath() 64 | .data %>% 65 | normalize_first_breath() %>% 66 | ## normalize the whole time column in case the test didn't start at zero 67 | dplyr::mutate_at(1, function(x) x = x - min(x)) %>% 68 | ## label transitions 69 | dplyr::mutate(transition = cut(!!rlang::sym(time_column), c(0, info_transition$end_transition), labels = info_transition$label, include.lowest = TRUE)) %>% 70 | ## now we normalize the first breath for each transition 71 | ## for more information see ?normalize_first_breath() 72 | tidyr::nest_legacy(-transition) %>% 73 | dplyr::mutate(data = purrr::map(.x = data, .f = normalize_first_breath)) %>% 74 | tidyr::unnest_legacy() %>% 75 | dplyr::select(2:ncol(.), transition) %>% 76 | dplyr::group_by(transition) %>% 77 | dplyr::mutate_at(1, function(x) x = x - min(x)) %>% 78 | dplyr::ungroup() %>% 79 | dplyr::mutate(phase = ifelse(.[[1]] <= protocol_baseline_length, "baseline", "transition")) 80 | } 81 | 82 | #' Normalize time column 83 | #' 84 | #' Normalizes the time column such that the baseline phase has negative time values. Point zero will then represent the start of the transition phase. 85 | #' 86 | #' @param .data Breath-by-breath data. 87 | #' @param protocol_baseline_length The length of the baseline (in seconds).
88 | #' 89 | #' @return a [tibble][tibble::tibble-package] 90 | #' @export 91 | normalize_time <- function(.data, protocol_baseline_length) { 92 | out <- .data %>% 93 | dplyr::mutate_at(1, function(x) x = x - protocol_baseline_length) 94 | 95 | out 96 | } 97 | -------------------------------------------------------------------------------- /R/helpers-incremental.R: -------------------------------------------------------------------------------- 1 | #' Work rate for a ramp-incremental test 2 | #' 3 | #' This function produces the work rate throughout a ramp-incremental test given the protocol. 4 | #' 5 | #' @param .data The data with recognized protocol phases 6 | #' @param baseline_intensity The baseline intensity 7 | #' @param ramp_increase The ramp increase, in watts per minute 8 | #' 9 | #' @return a [tibble][tibble::tibble-package] 10 | #' @keywords internal 11 | work_rate_ramp <- function( 12 | .data, 13 | baseline_intensity, 14 | ramp_increase 15 | ) { 16 | model <- lm(work_rate ~ time, data = data.frame(work_rate = c(baseline_intensity, baseline_intensity + ramp_increase), time = c(0, 60))) 17 | 18 | slope <- model$coefficients[2] 19 | 20 | ## get time column name 21 | time_column <- colnames(.data)[1] 22 | 23 | out <- .data %>% 24 | dplyr::mutate(work_rate = dplyr::case_when( 25 | protocol_phase == "baseline" ~ baseline_intensity, 26 | protocol_phase == "ramp" ~ baseline_intensity + slope * !!rlang::sym(time_column) 27 | )) 28 | 29 | out 30 | } 31 | 32 | #' Work rate for a step-incremental test 33 | #' 34 | #' This function produces the work rate throughout a step-incremental test given the protocol. 35 | #' It will retrieve both the 'original' work rates and also perform a 'linearization' of the steps. 36 | #' 37 | #' @param .data The data with recognized protocol phases 38 | #' @param baseline_intensity The baseline intensity 39 | #' @param step_start In case the step test started at a different work rate than the baseline 40 | #' @param step_increase The step increase, in watts per step 41 | #' @param step_length The length, in seconds, of each step 42 | #' 43 | #' @return a [tibble][tibble::tibble-package] 44 | #' @keywords internal 45 | work_rate_step <- function( 46 | .data, 47 | baseline_intensity, 48 | step_start, 49 | step_increase, 50 | step_length 51 | ) { 52 | ## check if forcats is installed and prompt to install it if not.
53 | rlang::check_installed("forcats") 54 | 55 | if(!is.null(step_start)) { 56 | begin_intensity <- step_start 57 | } else { 58 | begin_intensity <- baseline_intensity 59 | } 60 | 61 | ## this will make sure that the final intensity of each step 62 | ## corresponds to the step intensity 63 | if(begin_intensity == 0) { 64 | # linearization 65 | model <- lm(work_rate ~ time, data = data.frame(work_rate = c(begin_intensity, begin_intensity + step_increase), time = c(0, step_length))) 66 | } else { 67 | # linearization 68 | model <- lm(work_rate ~ time, data = data.frame(work_rate = c(begin_intensity - step_increase, begin_intensity), time = c(0, step_length))) 69 | } 70 | 71 | slope <- model$coefficients[2] 72 | 73 | ## get time column name 74 | time_column <- colnames(.data)[1] 75 | 76 | ## this will make sure that the final intensity of each step 77 | ## corresponds to the step intensity 78 | if(begin_intensity == 0) { 79 | data_linearized <- .data %>% 80 | dplyr::mutate(work_rate = dplyr::case_when( 81 | protocol_phase == "baseline" ~ baseline_intensity, 82 | protocol_phase == "step" ~ begin_intensity + slope * !!rlang::sym(time_column) 83 | )) 84 | } else { 85 | data_linearized <- .data %>% 86 | dplyr::mutate(work_rate = dplyr::case_when( 87 | protocol_phase == "baseline" ~ baseline_intensity, 88 | protocol_phase == "step" ~ begin_intensity - step_increase + slope * !!rlang::sym(time_column) 89 | )) 90 | } 91 | 92 | # steps 93 | data_steps <- data_linearized %>% 94 | dplyr::mutate(step = paste0("step_", (!!rlang::sym(time_column) %/% step_length + 1)), 95 | step = forcats::as_factor(step)) 96 | 97 | n_steps <- length(unique(data_steps$step)) 98 | 99 | out <- data_steps %>% 100 | tidyr::nest(data = -step) %>% 101 | dplyr::mutate(step_work_rate = c(baseline_intensity, seq(begin_intensity, n_steps * step_increase + baseline_intensity, step_increase))) %>% 102 | tidyr::unnest(cols = data) %>% 103 | dplyr::relocate(step, .after = dplyr::last_col()) 104 | 105 | out 106 | } 107 | -------------------------------------------------------------------------------- /man/vo2_max.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/max.R 3 | \name{vo2_max} 4 | \alias{vo2_max} 5 | \title{VO2max} 6 | \usage{ 7 | vo2_max( 8 | .data, 9 | vo2_column = "VO2", 10 | vo2_relative_column = NULL, 11 | heart_rate_column = NULL, 12 | rer_column = NULL, 13 | detect_outliers = TRUE, 14 | average_method = c("bin", "rolling"), 15 | average_length = 30, 16 | mrt, 17 | plot = TRUE, 18 | verbose = TRUE, 19 | ... 20 | ) 21 | } 22 | \arguments{ 23 | \item{.data}{Data retrieved from \code{read_data()}.} 24 | 25 | \item{vo2_column}{The name (quoted) of the column containing the absolute oxygen uptake (VO2) data. Default to \code{"VO2"}.} 26 | 27 | \item{vo2_relative_column}{The name (quoted) of the column containing the relative to body weight oxygen uptake (VO2) data. Default to \code{NULL}.} 28 | 29 | \item{heart_rate_column}{The name (quoted) of the column containing the heart rate (HR) data. Default to \code{NULL}. If \code{NULL}, this parameter will not be calculated.} 30 | 31 | \item{rer_column}{The name (quoted) of the column containing the respiratory exchange ratio (RER) data. Default to \code{NULL}. If \code{NULL}, this parameter will not be calculated.} 32 | 33 | \item{detect_outliers}{A boolean indicating whether to detect outliers. 
Default to \code{TRUE}.} 34 | 35 | \item{average_method}{The average method to be used for VO2max calculation. One of \code{bin} or \code{rolling}.} 36 | 37 | \item{average_length}{The length, in seconds, of the average to be used. For example, if \code{average_method = bin}, and \code{average_length = 30}, it will perform a 30-s bin-average.} 38 | 39 | \item{mrt}{A boolean indicating whether to calculate the mean response time. To be implemented soon <- currently ignored.} 40 | 41 | \item{plot}{A boolean indicating whether to produce a plot with the summary results. Default to \code{TRUE}.} 42 | 43 | \item{verbose}{A boolean indicating whether messages should be printed in the console. Default to \code{TRUE}.} 44 | 45 | \item{...}{Additional arguments passed onto \code{incremental_normalize()}, \code{detect_outliers()} if \code{detect_outliers = TRUE}, and \code{incremental_mrt()} if \code{mrt = TRUE}.} 46 | } 47 | \value{ 48 | a \link[tibble:tibble-package]{tibble} containing one row and the following columns: 49 | \item{VO2max_absolute}{The absolute VO2max.} 50 | \item{VO2max_relative}{The relative VO2max.} 51 | \item{POpeak}{The peak power output.} 52 | \item{HRmax}{The maximal heart rate.} 53 | \item{RERmax}{The maximal RER.} 54 | \item{plot}{The plot, if \code{plot = TRUE}.} 55 | } 56 | \description{ 57 | It performs the whole process of the VO2max data analysis, which includes: 58 | data standardization and normalization according to incremental protocol (\code{incremental_normalize()}), 59 | 'bad breaths' detection (\code{detect_outliers()}), 60 | mean response time calculation (\code{incremental_mrt()}) (currently ignored), 61 | and maximal values calculation (VO2, PO, HR, RER) (\code{perform_max()}). 62 | } 63 | \details{ 64 | TODO 65 | } 66 | \examples{ 67 | \dontrun{ 68 | ## get file path from example data 69 | path_example <- system.file("ramp_cosmed.xlsx", package = "whippr") 70 | 71 | ## read data from ramp test 72 | df <- read_data(path = path_example, metabolic_cart = "cosmed") 73 | 74 | ## normalize incremental test data 75 | ramp_normalized <- df \%>\% 76 | incremental_normalize( 77 | .data = ., 78 | incremental_type = "ramp", 79 | has_baseline = TRUE, 80 | baseline_length = 240, 81 | work_rate_magic = TRUE, 82 | baseline_intensity = 20, 83 | ramp_increase = 25 84 | ) 85 | 86 | ## detect outliers 87 | data_ramp_outliers <- detect_outliers( 88 | .data = ramp_normalized, 89 | test_type = "incremental", 90 | vo2_column = "VO2", 91 | cleaning_level = 0.95, 92 | method_incremental = "linear", 93 | verbose = TRUE 94 | ) 95 | 96 | ## analyze VO2max 97 | perform_max( 98 | .data = data_ramp_outliers, 99 | vo2_column = "VO2", 100 | vo2_relative_column = "VO2/Kg", 101 | heart_rate_column = "HR", 102 | rer_column = "R", 103 | average_method = "bin", 104 | average_length = 30, 105 | plot = TRUE, 106 | verbose = FALSE 107 | ) 108 | } 109 | 110 | } 111 | -------------------------------------------------------------------------------- /R/averages.R: -------------------------------------------------------------------------------- 1 | #' Perform average on second-by-second data 2 | #' 3 | #' This function performs either a bin- or a rolling-average on the interpolated data. 4 | #' You must specify the \code{type} of the average before continuing. 5 | #' 6 | #' @param .data The second-by-second data retrieved from \code{interpolate()}. 7 | #' @param type The type of the average to perform. Either \code{bin}, \code{rolling}, or \code{ensemble}. 
8 | #' @param bins If bin-average is chosen, here you can specify the size of the bin-average, in seconds. Default to 30-s bin-average. 9 | #' @param bin_method Method for determining bin boundaries when \code{type = "bin"}. 10 | #' One of \code{"ceiling"} (default), \code{"round"}, or \code{"floor"}. 11 | #' \code{"ceiling"} is recommended as it ensures no data points are excluded 12 | #' from the analysis by always rounding up to the next bin boundary. 13 | #' @param rolling_window If rolling-average is chosen, here you can specify the rolling-average window, in seconds. Default to 30-s rolling-average. 14 | #' 15 | #' @return a [tibble][tibble::tibble-package] 16 | #' @export 17 | #' 18 | #' @details 19 | #' Ensemble average is used in VO2 kinetics analysis, where a series of transitions from baseline to 20 | #' the moderate/heavy/severe intensity-domain is ensembled averaged into a single 'bout' for further data processing. 21 | #' 22 | #' When using bin averaging, the \code{bin_method} parameter controls how time points are assigned to bins: 23 | #' \itemize{ 24 | #' \item \code{"ceiling"}: Rounds up to the next bin boundary (recommended) 25 | #' \item \code{"round"}: Rounds to the nearest bin boundary 26 | #' \item \code{"floor"}: Rounds down to the previous bin boundary 27 | #' } 28 | #' 29 | #' @examples 30 | #' \dontrun{ 31 | #' ## get file path from example data 32 | #' path_example <- system.file("example_cosmed.xlsx", package = "whippr") 33 | #' 34 | #' ## read data 35 | #' df <- read_data(path = path_example, metabolic_cart = "cosmed") 36 | #' 37 | #' ## interpolate and perform 30-s bin-average 38 | #' df %>% 39 | #' interpolate() %>% 40 | #' perform_average(type = "bin", bins = 30) 41 | #' 42 | #' ## interpolate and perform 30-s rolling-average 43 | #' df %>% 44 | #' interpolate() %>% 45 | #' perform_average(type = "rolling", rolling_window = 30) 46 | #' } 47 | perform_average <- function(.data, type = c("bin", "rolling", "ensemble"), bins = 30, bin_method = c("ceiling", "round", "floor"), rolling_window = 30) { 48 | if(missing(type)) 49 | stop("You must specify the type of average you would like to perform.", call. 
= FALSE) 50 | 51 | type <- match.arg(type) 52 | bin_method <- match.arg(bin_method) 53 | 54 | class(.data) <- type 55 | 56 | UseMethod("perform_average", .data) 57 | } 58 | 59 | #' @export 60 | perform_average.bin <- function(.data, type = c("bin", "rolling", "ensemble"), bins = 30, bin_method = c("ceiling", "round", "floor"), rolling_window = 30) { 61 | bin_method <- match.arg(bin_method) 62 | # select the appropriate rounding function 63 | rounding_func <- switch(bin_method, 64 | "ceiling" = ceiling, 65 | "round" = round, 66 | "floor" = floor) 67 | ## first make sure data only contains numeric columns 68 | data_num <- .data %>% 69 | dplyr::select_if(is.numeric) 70 | 71 | out <- data_num %>% 72 | dplyr::group_by_at(1, function(x) rounding_func(x / bins) * bins) %>% 73 | dplyr::summarise_all(mean, na.rm = TRUE) 74 | 75 | metadata <- attributes(.data) 76 | metadata$data_status <- glue::glue("averaged data - {bins}-s bins") 77 | 78 | out <- new_whippr_tibble(out, metadata) 79 | 80 | out 81 | } 82 | 83 | #' @export 84 | perform_average.rolling <- function(.data, type = c("bin", "rolling", "ensemble"), bins = 30, bin_method = c("ceiling", "round", "floor"), rolling_window = 30) { 85 | ## first make sure data only contains numeric columns 86 | data_num <- .data %>% 87 | dplyr::select_if(is.numeric) 88 | 89 | out <- data_num %>% 90 | zoo::rollmean(x = ., k = rolling_window) %>% 91 | dplyr::as_tibble() 92 | 93 | metadata <- attributes(.data) 94 | metadata$data_status <- glue::glue("averaged data - {rolling_window}-s rolling average") 95 | 96 | out <- new_whippr_tibble(out, metadata) 97 | 98 | out 99 | } 100 | 101 | #' @export 102 | perform_average.ensemble <- function(.data, type = c("bin", "rolling", "ensemble"), bins = 30, bin_method = c("ceiling", "round", "floor"), rolling_window = 30) { 103 | ## first make sure data only contains numeric columns 104 | data_num <- .data %>% 105 | dplyr::select_if(is.numeric) 106 | 107 | out <- data_num %>% 108 | dplyr::group_by_at(1) %>% 109 | dplyr::summarise_all(mean, na.rm = TRUE) 110 | 111 | metadata <- attributes(.data) 112 | metadata$data_status <- "ensemble-averaged data" 113 | 114 | out <- new_whippr_tibble(out, metadata) 115 | 116 | out 117 | } 118 | -------------------------------------------------------------------------------- /R/helpers-outliers.R: -------------------------------------------------------------------------------- 1 | #' Linear method for detecting outliers from an incremental test 2 | #' 3 | #' Function for internal use only. It will not be exported. 4 | #' 5 | #' @param .data The data retrieved from `incremental_normalize()`. 6 | #' @param time_column The name (quoted) of the column containing the time. Depending on the language of your system, this column might not be "t". Therefore, you may specify it here. 7 | #' @param vo2_column The name (quoted) of the column containing the absolute oxygen uptake (VO2) data. 8 | #' @param cleaning_level A numeric scalar between 0 and 1 giving the confidence level for the intervals to be calculated. 
9 | #' 10 | #' @return a [tibble][tibble::tibble-package] 11 | #' @keywords internal 12 | outliers_linear <- function( 13 | .data, 14 | time_column, 15 | vo2_column, 16 | cleaning_level 17 | ) { 18 | ## set linear formula for each phase 19 | bsln_formula <- glue::glue("{vo2_column} ~ 1") 20 | incremental_formula <- glue::glue("{vo2_column} ~ {time_column}") 21 | 22 | out <- .data %>% 23 | ## separate each phase 24 | tidyr::nest(data = -protocol_phase) %>% 25 | dplyr::mutate( 26 | ## set the formula 27 | formula_model = 28 | dplyr::case_when( 29 | protocol_phase == "baseline" ~ bsln_formula, 30 | TRUE ~ incremental_formula 31 | ), 32 | ## set the model 33 | model = purrr::map2( 34 | .x = formula_model, 35 | .y = data, 36 | .f = ~ lm(formula = .x, data = .y) 37 | ), 38 | model_augmented = purrr::map( 39 | .x = model, 40 | .f = ~ broom::augment(.x) 41 | ) 42 | ) %>% 43 | ## column not needed anymore 44 | dplyr::select(-formula_model) %>% 45 | ## delete additional columns to needed in augmented data 46 | dplyr::mutate( 47 | model_augmented = purrr::map( 48 | .x = model_augmented, 49 | .f = ~ dplyr::select(.x, -dplyr::any_of(c(time_column, vo2_column))) 50 | ), 51 | model_augmented = purrr::map2( 52 | .x = data, 53 | .y = model_augmented, 54 | .f = ~ dplyr::mutate(.y, x = .x[[time_column]], y = .x[[vo2_column]]) %>% dplyr::select(x, y, dplyr::everything()) 55 | ), 56 | ## get confidence bands 57 | conf = purrr::map( 58 | .x = model, 59 | .f = ~ predict.lm(object = .x, interval = "confidence", level = cleaning_level) %>% 60 | dplyr::as_tibble() %>% 61 | dplyr::select(-1) %>% 62 | dplyr::rename_all(~ c("lwr_conf", "upr_conf")) 63 | ), 64 | ## get prediction bands 65 | pred = purrr::map( 66 | .x = model, 67 | .f = ~ suppressWarnings(predict.lm(object = .x, interval = "prediction", level = cleaning_level)) %>% 68 | dplyr::as_tibble() %>% 69 | dplyr::select(-1) %>% 70 | dplyr::rename_all(~ c("lwr_pred", "upr_pred")) 71 | ) 72 | ) %>% 73 | dplyr::select(-model) %>% 74 | tidyr::unnest(cols = c(data:pred)) %>% 75 | dplyr::select(2:ncol(.), protocol_phase) %>% 76 | ## if VO2 is above or below the prediction bands, classify the breath as an outlier 77 | dplyr::mutate( 78 | outlier = ifelse(y >= lwr_pred & y <= upr_pred, "no", "yes") 79 | ) 80 | 81 | out 82 | } 83 | 84 | #' Anomaly method for detecting outliers from an incremental test 85 | #' 86 | #' Function for internal use only. It will not be exported. 87 | #' 88 | #' @param .data The data retrieved from `incremental_normalize()`. 89 | #' @param time_column The name (quoted) of the column containing the time. Depending on the language of your system, this column might not be "t". Therefore, you may specify it here. 90 | #' @param vo2_column The name (quoted) of the column containing the absolute oxygen uptake (VO2) data. 91 | #' @param cleaning_level A numeric scalar between 0 and 1 giving the confidence level for the intervals to be calculated. 
92 | #' 93 | #' @return a [tibble][tibble::tibble-package] 94 | #' @keywords internal 95 | outliers_anomaly <- function( 96 | .data, 97 | time_column, 98 | vo2_column, 99 | cleaning_level 100 | ) { 101 | ## check if anomalize is installed 102 | rlang::check_installed("anomalize") 103 | 104 | ## set alpha 105 | alpha <- 1 - cleaning_level 106 | 107 | outliers_df <- .data %>% 108 | dplyr::mutate_at(1, function(x) x = lubridate::as_date(x)) %>% 109 | anomalize::time_decompose(target = !!rlang::sym(vo2_column), method = "stl", message = FALSE) %>% 110 | anomalize::anomalize(target = remainder, method = "iqr", alpha = alpha) %>% 111 | anomalize::time_recompose() %>% 112 | dplyr::select(anomaly:recomposed_l2) %>% 113 | dplyr::rename_all(~ c("outlier", "lwr_pred", "upr_pred")) %>% 114 | dplyr::mutate(outlier = stringr::str_to_lower(outlier)) 115 | 116 | ## set time and vo2 columns as x and y 117 | ## this is to make simpler to plot it later on 118 | x_and_y <- .data %>% 119 | dplyr::select(!!rlang::sym(time_column), !!rlang::sym(vo2_column)) %>% 120 | dplyr::rename_all(~ c("x", "y")) 121 | 122 | out <- dplyr::bind_cols(.data, x_and_y, outliers_df) 123 | 124 | out 125 | } 126 | -------------------------------------------------------------------------------- /README.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | output: github_document 3 | --- 4 | 5 | 6 | 7 | ```{r, include = FALSE} 8 | knitr::opts_chunk$set( 9 | collapse = TRUE, 10 | comment = "#>", 11 | fig.path = "man/figures/README-", 12 | out.width = "100%" 13 | ) 14 | ``` 15 | 16 | # whippr 17 | 18 | 19 | [![Lifecycle: stable](https://img.shields.io/badge/lifecycle-stable-brightgreen.svg)](https://lifecycle.r-lib.org/articles/stages.html#stable) 20 | [![CRAN status](https://www.r-pkg.org/badges/version/whippr)](https://CRAN.R-project.org/package=whippr) 21 | [![Codecov test coverage](https://codecov.io/gh/fmmattioni/whippr/branch/master/graph/badge.svg)](https://app.codecov.io/gh/fmmattioni/whippr?branch=master) 22 | [![R-CMD-check](https://github.com/fmmattioni/whippr/actions/workflows/R-CMD-check.yaml/badge.svg)](https://github.com/fmmattioni/whippr/actions/workflows/R-CMD-check.yaml) 23 | 24 | 25 | The goal of `whippr` is to provide a set of tools for manipulating gas exchange data from cardiopulmonary exercise testing. 26 | 27 | ## Why `whippr`? 28 | 29 | The name of the package is in honor of [Prof. Brian J Whipp](https://erj.ersjournals.com/content/39/1/1) and his invaluable contribution to the field of exercise physiology. 
30 | 31 | ## Installation 32 | 33 | You can install the development version of `whippr` from [Github](https://github.com/fmmattioni/whippr) with: 34 | 35 | ``` r 36 | # install.packages("remotes") 37 | remotes::install_github("fmmattioni/whippr") 38 | ``` 39 | 40 | ## Use 41 | 42 | ### Read data 43 | 44 | ```{r} 45 | library(whippr) 46 | 47 | ## example file that comes with the package for demonstration purposes 48 | path_example <- system.file("example_cosmed.xlsx", package = "whippr") 49 | 50 | df <- read_data(path = path_example, metabolic_cart = "cosmed") 51 | 52 | df 53 | ``` 54 | 55 | ### Interpolate 56 | 57 | ```{r} 58 | df %>% 59 | interpolate() 60 | ``` 61 | 62 | ### Perform averages 63 | 64 | #### Bin-average 65 | 66 | ```{r} 67 | ## example of performing 30-s bin-averages 68 | df %>% 69 | interpolate() %>% 70 | perform_average(type = "bin", bins = 30) 71 | ``` 72 | 73 | #### Rolling-average 74 | 75 | ```{r} 76 | ## example of performing 30-s rolling-averages 77 | df %>% 78 | interpolate() %>% 79 | perform_average(type = "rolling", rolling_window = 30) 80 | ``` 81 | 82 | 83 | ### Perform VO2 kinetics analysis 84 | 85 | ```{r} 86 | results_kinetics <- vo2_kinetics( 87 | .data = df, 88 | intensity_domain = "moderate", 89 | vo2_column = "VO2", 90 | protocol_n_transitions = 3, 91 | protocol_baseline_length = 360, 92 | protocol_transition_length = 360, 93 | cleaning_level = 0.95, 94 | cleaning_baseline_fit = c("linear", "exponential", "exponential"), 95 | fit_level = 0.95, 96 | fit_bin_average = 5, 97 | fit_phase_1_length = 20, 98 | fit_baseline_length = 120, 99 | fit_transition_length = 240, 100 | verbose = TRUE 101 | ) 102 | ``` 103 | 104 | ### Perform VO2max analysis 105 | 106 | ```{r} 107 | df_incremental <- read_data(path = system.file("ramp_cosmed.xlsx", package = "whippr"), metabolic_cart = "cosmed") 108 | 109 | vo2_max( 110 | .data = df_incremental, ## data from `read_data()` 111 | vo2_column = "VO2", 112 | vo2_relative_column = "VO2/Kg", 113 | heart_rate_column = "HR", 114 | rer_column = "R", 115 | detect_outliers = TRUE, 116 | average_method = "bin", 117 | average_length = 30, 118 | plot = TRUE, 119 | verbose = TRUE, 120 | ## arguments for `incremental_normalize()` 121 | incremental_type = "ramp", 122 | has_baseline = TRUE, 123 | baseline_length = 240, ## 4-min baseline 124 | work_rate_magic = TRUE, ## produce a work rate column 125 | baseline_intensity = 20, ## baseline was performed at 20 W 126 | ramp_increase = 25, ## 25 W/min ramp 127 | ## arguments for `detect_outliers()` 128 | test_type = "incremental", 129 | cleaning_level = 0.95, 130 | method_incremental = "linear" 131 | ) 132 | ``` 133 | 134 | ## Metabolic carts currently supported 135 | 136 | * [COSMED](https://www.cosmed.com/en/) 137 | * [CORTEX](https://cortex-medical.com/EN) 138 | * [NSpire](https://www.pressebox.de/pressemitteilung/nspire-health-gmbh/ZAN-100-Diagnostische-Spirometrie/boxid/745555) 139 | * Parvo Medics 140 | * [Geratherm Respiratory](https://www.geratherm-respiratory.com/product-groups/cpet/) 141 | * [CardioCoach](https://korr.com/go/cardiocoach/) 142 | 143 | ## Online app 144 | 145 | Would you like to perform VO2 kinetics analyses but don't know R? No problem! You can use our online app: [VO2 Kinetics App](https://exphyslab.com/kinetics/) 146 | 147 | ## Code of Conduct 148 | 149 | Please note that this project is released with a [Contributor Code of Conduct](https://www.contributor-covenant.org/version/1/0/0/code-of-conduct.html). 
150 | By participating in this project you agree to abide by its terms. 151 | 152 |
Icons made by monkik from www.flaticon.com
153 | -------------------------------------------------------------------------------- /R/tbl.R: -------------------------------------------------------------------------------- 1 | #' Construct a new tibble with metadata 2 | #' 3 | #' @param .data A data frame 4 | #' @param metadata Metadata to be passed along with the data 5 | #' 6 | #' @return a [tibble][tibble::tibble-package] 7 | #' @keywords internal 8 | new_whippr_tibble <- function(.data, metadata) { 9 | if(!is.data.frame(.data)) 10 | stop("You can only pass a data frame to this function.", call. = FALSE) 11 | 12 | whippr_tibble <- tibble::new_tibble( 13 | x = .data, 14 | nrow = nrow(.data), 15 | class = "whippr", 16 | read_data = metadata$read_data, 17 | metabolic_cart = metadata$metabolic_cart, 18 | data_status = metadata$data_status, 19 | time_column = metadata$time_column, 20 | vo2_column = metadata$vo2_column, 21 | test_type = metadata$test_type, 22 | processed_data = metadata$processed_data, 23 | incremental = metadata$incremental, 24 | normalized = metadata$normalized, 25 | incremental_type = metadata$incremental_type, 26 | has_baseline = metadata$has_baseline, 27 | baseline_length = metadata$baseline_length, 28 | baseline_intensity = metadata$baseline_intensity, 29 | ramp_increase = metadata$ramp_increase, 30 | step_start = metadata$step_start, 31 | step_increase = metadata$step_increase, 32 | step_length = metadata$step_length, 33 | outliers_detected = metadata$outliers_detected 34 | ) 35 | 36 | tibble::validate_tibble(whippr_tibble) 37 | whippr_tibble 38 | } 39 | 40 | #' Whippr print method 41 | #' 42 | #' @param x A tibble with class 'whippr' 43 | #' @param ... Extra arguments, not used. 44 | #' 45 | #' @export 46 | print.whippr <- function(x, ...) { 47 | 48 | header <- paste("# Metabolic cart:", attr(x, "metabolic_cart"), "\n") 49 | 50 | if(!is.null(attr(x, "data_status"))) 51 | header <- paste0(header, "# Data status: ", attr(x, "data_status"), "\n") 52 | 53 | if(!is.null(attr(x, "time_column"))) 54 | header <- paste0(header, "# Time column: ", attr(x, "time_column"), "\n") 55 | 56 | if(!is.null(attr(x, "vo2_column"))) 57 | header <- paste0(header, "# VO2 column: ", attr(x, "vo2_column"), "\n") 58 | 59 | if(!is.null(attr(x, "test_type"))) 60 | header <- paste0(header, "# Test type: ", attr(x, "test_type"), "\n") 61 | 62 | # INCREMENTAL ------------------------------------------------------------- 63 | 64 | if(!is.null(attr(x, "incremental"))) { 65 | if(attr(x, "incremental_type") == "ramp") { 66 | if(!is.null(attr(x, "has_baseline"))) { 67 | header <- paste0(header, "# Protocol: ", 68 | attr(x, "baseline_length"), 69 | "-s baseline at ", 70 | attr(x, "baseline_intensity"), 71 | " W and a ", 72 | attr(x, "ramp_increase"), 73 | "-W/min ramp", 74 | "\n") 75 | } else { 76 | header <- paste0(header, "# Protocol: ", 77 | "no baseline", 78 | " and a ", 79 | attr(x, "ramp_increase"), 80 | "-W/min ramp", 81 | "\n") 82 | } 83 | } else if(attr(x, "incremental_type") == "step") { 84 | if(!is.null(attr(x, "has_baseline"))) { 85 | if(!is.null(attr(x, "step_start"))) { 86 | header <- paste0(header, "# Protocol: ", 87 | attr(x, "baseline_length"), 88 | "-s baseline at ", 89 | attr(x, "baseline_intensity"), 90 | " W and ", 91 | attr(x, "step_length"), 92 | "-s steps of ", 93 | attr(x, "step_increase"), 94 | " W starting at ", 95 | attr(x, "step_start"), 96 | " W", 97 | "\n") 98 | } else { 99 | header <- paste0(header, "# Protocol: ", 100 | attr(x, "baseline_length"), 101 | "-s baseline at ", 102 | attr(x,
"baseline_intensity"), 103 | " W and ", 104 | attr(x, "step_length"), 105 | "-s steps of ", 106 | attr(x, "step_increase"), 107 | " W", 108 | "\n") 109 | } 110 | } else { 111 | if(!is.null(attr(x, "step_start"))) { 112 | header <- paste0(header, "# Protocol: ", 113 | attr(x, "no baseline"), 114 | " and ", 115 | attr(x, "step_length"), 116 | "-s steps of ", 117 | attr(x, "step_increase"), 118 | " W starting at ", 119 | attr(x, "step_start"), 120 | " W", 121 | "\n") 122 | } else { 123 | header <- paste0(header, "# Protocol: ", 124 | attr(x, "no baseline"), 125 | " and ", 126 | attr(x, "step_length"), 127 | "-s steps of ", 128 | attr(x, "step_increase"), 129 | " W", 130 | "\n") 131 | } 132 | } 133 | } 134 | } 135 | 136 | cat(pillar::style_subtle(header)) 137 | 138 | NextMethod() 139 | } 140 | -------------------------------------------------------------------------------- /R/addin.R: -------------------------------------------------------------------------------- 1 | #' Manual data cleaner 2 | #' 3 | #' Usually manual data cleaning should be avoided. However, sometimes in gas exchange data 4 | #' there is the need to delete a few clear "bad breaths" (noise). In these situations you may use this function. 5 | #' Although it is encouraged that you use the `detect_outliers()` function, you may use this function at your own risk. 6 | #' This function can also be used to clean other kind of data, like heart rate data. 7 | #' 8 | #' @param .data The data to be manually cleaned. The first column will be always treated as the x-axis. 9 | #' @param width The width, in pixels, of the window. 10 | #' @param height the height, in pixels, of the window. 11 | #' 12 | #' @return The code to reproduce the manual data cleaning. 13 | #' 14 | #' @export 15 | run_manual_cleaner <- function(.data, width = 1200, height = 900) { 16 | 17 | # check additional needed packages 18 | rlang::check_installed(c("miniUI", "shiny", "rstudioapi", "datapasta", "htmltools")) 19 | 20 | ## check data 21 | if(missing(.data)) 22 | stop("It looks like you forgot to pass a data frame to the function.", call. = FALSE) 23 | 24 | if(!is.data.frame(.data)) 25 | stop("I am sorry, this function only supports data frames.", call. 
= FALSE) 26 | 27 | data_input <- deparse(substitute(.data)) 28 | 29 | ui <- miniUI::miniPage( 30 | miniUI::gadgetTitleBar("Manual data cleaning"), 31 | miniUI::miniContentPanel( 32 | 33 | shiny::fluidRow( 34 | shiny::column(width = 4, 35 | shiny::selectInput( 36 | inputId = "select_y_axis", 37 | label = "Select y axis", 38 | choices = NULL) 39 | ), 40 | shiny::column(width = 4, 41 | shiny::textInput( 42 | inputId = "output_df", 43 | label = "Enter the name of the new data frame:" 44 | ) 45 | ) 46 | ), 47 | 48 | shiny::plotOutput( 49 | outputId = "plot", 50 | height = 400, 51 | brush = shiny::brushOpts( 52 | id = "plot_brush" 53 | ) 54 | ), 55 | 56 | shiny::actionButton(inputId = "exclude_toggle", label = "Exclude points"), 57 | shiny::actionButton(inputId = "exclude_reset", label = "Reset"), 58 | undoHistoryUI(id = "history", back_text = "Step backward", fwd_text = "Step forward"), 59 | # Show which data points are being excluded 60 | shiny::tags$h4("Data points being excluded (x-axis value):"), 61 | shiny::verbatimTextOutput("v") 62 | ) 63 | ) 64 | 65 | server <- function(input, output, session) { 66 | 67 | r <- shiny::reactiveValues( 68 | data = .data, 69 | data_keep = NULL, 70 | exclude_rows = NULL 71 | ) 72 | 73 | ## keep history of points to exclude 74 | undo_app_state <- undoHistory( 75 | id = "history", 76 | value = shiny::reactive({ 77 | r$exclude_rows 78 | }) 79 | ) 80 | 81 | ## receive updates from undoHistory() and update the app 82 | shiny::observe({ 83 | shiny::req(!is.null(undo_app_state())) 84 | 85 | r$exclude_rows <- undo_app_state() 86 | }) 87 | 88 | # Just for debugging 89 | output$v <- shiny::renderPrint(r$exclude_rows) 90 | 91 | shiny::observe({ 92 | shiny::updateSelectInput( 93 | session, 94 | inputId = "select_y_axis", 95 | choices = colnames(r$data), 96 | selected = 0 97 | ) 98 | }) 99 | 100 | shiny::observe({ 101 | shiny::req(is.null(r$exclude_rows)) 102 | 103 | r$data_keep <- r$data 104 | }) 105 | 106 | shiny::observe({ 107 | shiny::req(r$exclude_rows) 108 | 109 | r$data_keep <- r$data %>% 110 | dplyr::filter_at(1, function(x) !x %in% r$exclude_rows) 111 | }) 112 | 113 | # Toggle points that are brushed, when button is clicked 114 | shiny::observeEvent(input$exclude_toggle, { 115 | time_column <- colnames(r$data_keep)[1] 116 | 117 | res <- shiny::brushedPoints(r$data_keep, input$plot_brush, xvar = time_column, yvar = input$select_y_axis) 118 | 119 | r$exclude_rows <- c(r$exclude_rows, res[[1]]) 120 | }) 121 | 122 | # Reset all points 123 | shiny::observeEvent(input$exclude_reset, { 124 | r$exclude_rows <- NULL 125 | }) 126 | 127 | output$plot <- shiny::renderPlot({ 128 | shiny::req(input$select_y_axis) 129 | 130 | time_column <- colnames(r$data_keep)[1] 131 | 132 | ggplot2::ggplot(r$data_keep, ggplot2::aes_string(time_column, input$select_y_axis)) + 133 | ggplot2::geom_point() + 134 | theme_whippr() 135 | 136 | }, res = 96) 137 | 138 | ## cancel button 139 | shiny::observeEvent(input$cancel, { 140 | shiny::stopApp() 141 | }) 142 | 143 | ## done button 144 | shiny::observeEvent(input$done, { 145 | ## prepare return code 146 | points_to_filter <- sort(unique(r$exclude_rows)) %>% 147 | datapasta::vector_construct() %>% 148 | stringr::str_remove("\n") 149 | time_column <- colnames(r$data_keep)[1] 150 | 151 | ## get current row in the R code 152 | context <- rstudioapi::getActiveDocumentContext() 153 | context_row <- context$selection[[1]]$range$end["row"] 154 | 155 | return_code <- glue::glue("\n\n\n## code from manual cleaning\n{input$output_df} <- {data_input} %>% 
\n\tdplyr::filter(!{time_column} %in% {points_to_filter})\n\n") 156 | 157 | rstudioapi::insertText(text = return_code, location = c(context_row + 1, 1)) 158 | shiny::stopApp() 159 | }) 160 | } 161 | 162 | shiny::runGadget(app = ui, server = server, viewer = shiny::dialogViewer(dialogName = "Manual cleaner", width = width, height = height), stopOnCancel = FALSE) 163 | } 164 | -------------------------------------------------------------------------------- /R/shinyThings.R: -------------------------------------------------------------------------------- 1 | # functions copied from gadenbuie/shinyThings 2 | # this is needed to prevent the import from github for CRAN submission 3 | 4 | #' Undo/Redo History Buttons 5 | #' 6 | #' This is a simple Shiny module for undo/redo history. The Shiny module accepts 7 | #' an arbitrary reactive data value. Changes in the state of this reactive value 8 | #' are tracked and added to the user's history. The user can then repeatedly 9 | #' undo and redo to walk through this stack. The module returns the current 10 | #' selected value of the reactive from this historical stack, or `NULL` when 11 | #' the app state was changed by the user. Because this reactive can hold 12 | #' arbitrary data about the state of the Shiny app, it is up to the app 13 | #' developer to use the returned current value to update the Shiny apps' inputs 14 | #' and UI elements. 15 | #' 16 | #' @param id The module id 17 | #' @param value The reactive expression with the values should be saved for the 18 | #' user's history. This expression can contain arbitrary data and be of any 19 | #' structure as long as it returns a single value (or list). Each change in 20 | #' this value is stored, so the module may not work well for storing large 21 | #' data sets. 22 | #' @param value_debounce_rate Debounce rate in milliseconds for the `value` 23 | #' reactive expression. To avoid saving spurious changes in `value`, the 24 | #' expression is debounced. See [shiny::debounce()] for more information. 25 | #' 26 | #' @return The `undoHistory()` module returns the currently selected history 27 | #' item as the user moves through the stack, or `NULL` if the last update 28 | #' was the result of user input. The returned value has the same structure as 29 | #' the reactive `value` passed to `undoHistory()`. 30 | #' @keywords internal 31 | undoHistory <- function(id, value, value_debounce_rate = 500) { 32 | shiny::callModule( 33 | undoHistoryModule, 34 | id = id, 35 | value = value, 36 | value_debounce_rate = value_debounce_rate 37 | ) 38 | } 39 | 40 | #' @keywords internal 41 | undoHistoryUI <- function( 42 | id, 43 | class = NULL, 44 | btn_class = "btn btn-default", 45 | back_text = NULL, 46 | back_title = "Undo", 47 | back_icon = "undo", 48 | fwd_text = NULL, 49 | fwd_title = "Redo", 50 | fwd_icon = "redo" 51 | ) { 52 | ns <- shiny::NS(id) 53 | stopifnot(is.null(class) || is.character(class)) 54 | stopifnot(is.character(btn_class)) 55 | if (length(btn_class) == 1) { 56 | btn_class <- rep(btn_class, 2) 57 | } else if (length(btn_class) != 2) { 58 | stop(paste( 59 | "`btn_class` must be length 1 (applied to both buttons) or 2 (applied to", 60 | "the undo/redo buttons respectively)." 61 | )) 62 | } 63 | spaces <- function(...) 
{ 64 | x <- lapply(list(...), function(x) paste(x, collapse = " ")) 65 | paste(x, collapse = " ") 66 | } 67 | htmltools::tagList( 68 | htmltools::htmlDependency( 69 | name = "shinythings", 70 | version = utils::packageVersion("whippr"), 71 | package = "whippr", 72 | src = "shinyThings", 73 | script = "undoHistory.js" 74 | ), 75 | shiny::tags$div( 76 | class = spaces("btn-group", class), 77 | role = "group", 78 | `aria-label` = "Undo/Redo History", 79 | shiny::tags$button( 80 | id = ns("history_back"), 81 | class = spaces(btn_class[1], "action-button disabled"), 82 | `data-val` = 0L, 83 | disabled = TRUE, 84 | title = back_title, 85 | if (!is.null(back_icon)) shiny::icon(back_icon), 86 | back_text 87 | ), 88 | shiny::tags$button( 89 | id = ns("history_forward"), 90 | type = "button", 91 | class = spaces(btn_class[2], "action-button disabled"), 92 | `data-val` = 0L, 93 | disabled = TRUE, 94 | title = fwd_title, 95 | if (!is.null(fwd_icon)) shiny::icon(fwd_icon), 96 | fwd_text 97 | ) 98 | ) 99 | ) 100 | } 101 | 102 | undoHistoryModule <- function( 103 | input, 104 | output, 105 | session, 106 | value = shiny::reactive(NULL), 107 | value_debounce_rate = 500 108 | ) { 109 | ns <- session$ns 110 | 111 | 112 | # changes in record get pushed to top of `stack$history` 113 | # if the user backs into historical values, 114 | # then they are moved to top of stack_future 115 | stack <- shiny::reactiveValues(history = list(), future = list(), current = NULL) 116 | 117 | output$v_stack <- shiny::renderPrint({ 118 | utils::str(shiny::reactiveValuesToList(stack)[c("history", "current", "future")]) 119 | }) 120 | 121 | value_debounced <- shiny::debounce(value, value_debounce_rate) 122 | 123 | # Add updates to value_debounced() into the stack$history 124 | ref_id <- 0L 125 | shiny::observe({ 126 | shiny::req(!is.null(value_debounced())) 127 | current_value <- shiny::isolate(stack$current) 128 | if (!is.null(current_value) && identical(current_value, value_debounced())) { 129 | # Don't store latest change in history because it came from the module 130 | # or is the same as the most recent state 131 | return() 132 | } 133 | this <- list() 134 | ref_id <<- ref_id + 1L 135 | this[[paste(ref_id)]] <- value_debounced() 136 | stack$history <- c(this, shiny::isolate(stack$history)) 137 | stack$future <- list() 138 | stack$current <- NULL 139 | }) 140 | 141 | # Enable forward/backward buttons if there are values in stack 142 | has_history <- shiny::reactive({ 143 | !(is.null(stack$history) || length(stack$history) <= 1) 144 | }) 145 | 146 | has_future <- shiny::reactive({ 147 | !(is.null(stack$future) || length(stack$future) == 0) 148 | }) 149 | 150 | btn_state_lag <- c(FALSE, FALSE) 151 | 152 | shiny::observe({ 153 | btn_state <- c(has_history(), has_future()) 154 | if (identical(btn_state, btn_state_lag)) return() 155 | 156 | btn_ids <- ns(c("history_back", "history_forward")) 157 | 158 | btn_state_send <- list() 159 | if (any(btn_state)) btn_state_send$enable <- as.list(btn_ids[btn_state]) 160 | if (any(!btn_state)) btn_state_send$disable <- as.list(btn_ids[!btn_state]) 161 | btn_state_lag <<- btn_state 162 | session$sendCustomMessage("undoHistoryButtons", btn_state_send) 163 | }) 164 | 165 | restore_stack_item <- function(item) { 166 | ref_id <- names(item)[1] 167 | stack$current <- item[[1]] 168 | } 169 | 170 | # Move back in time 171 | shiny::observeEvent(input$history_back, { 172 | shiny::req(length(stack$history) > 1) 173 | 174 | # copy stack to save all changes at once at the end 175 | .stack <- 
shiny::reactiveValuesToList(stack) 176 | .stack$current <- NULL 177 | 178 | # current value goes to the future stack 179 | .stack$future <- c(.stack$history[1], .stack$future) 180 | 181 | # pop the current value off of the history stack 182 | .stack$history <- .stack$history[-1] 183 | 184 | # restore the previous value 185 | stack$future <- .stack$future 186 | stack$history <- .stack$history 187 | restore_stack_item(.stack$history[1]) 188 | }, priority = 1000) 189 | 190 | # Move forward in time 191 | shiny::observeEvent(input$history_forward, { 192 | shiny::req(length(stack$history) > 0, length(stack$future) > 0) 193 | 194 | .stack <- shiny::reactiveValuesToList(stack) 195 | .stack$current <- NULL 196 | 197 | # top of future stack goes to top of history stack 198 | .stack$history <- c(.stack$future[1], .stack$history) 199 | 200 | # pop the top of future stack 201 | .stack$future <- .stack$future[-1] 202 | 203 | # restore the (pseudo-)future value 204 | stack$future <- .stack$future 205 | stack$history <- .stack$history 206 | restore_stack_item(.stack$history[1]) 207 | }, priority = 1000) 208 | 209 | return(shiny::reactive(stack$current)) 210 | } 211 | -------------------------------------------------------------------------------- /vignettes/vo2_kinetics.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "VO2 Kinetics Analysis" 3 | output: rmarkdown::html_vignette 4 | vignette: > 5 | %\VignetteIndexEntry{VO2 Kinetics Analysis} 6 | %\VignetteEngine{knitr::rmarkdown} 7 | %\VignetteEncoding{UTF-8} 8 | --- 9 | 10 | ```{r, include = FALSE} 11 | knitr::opts_chunk$set( 12 | collapse = TRUE, 13 | comment = "#>", 14 | fig.width = 7, 15 | fig.height = 5, 16 | fig.align = "center", 17 | dpi = 300 18 | ) 19 | ``` 20 | 21 | ```{r color, echo = FALSE, results='asis'} 22 | # from https://github.com/r-lib/crayon/issues/24#issuecomment-581068792 23 | # crayon needs to be explicitly activated in Rmd 24 | options(crayon.enabled = TRUE) 25 | # Hooks needs to be set to deal with outputs 26 | # thanks to fansi logic 27 | old_hooks <- fansi::set_knit_hooks(knitr::knit_hooks, 28 | which = c("output", "message", "error")) 29 | ``` 30 | 31 | ```{block, type = 'rmdinfo'} 32 | Here you may find a walk-through on how to perform VO2 kinetics analysis in the **moderate-intensity domain**. 33 | 34 | Functions for analysis in the heavy- and severe-intensity domains will be added in the near future. 35 | ``` 36 | 37 | For making everyone's life easier, the general function `vo2_kinetics()` was created. This function calls smaller separate functions that **fully** automate the VO2 kinetics data analysis. 
The following interactive tree diagram shows how each function is called: 38 | 39 | ```{r echo=FALSE, screenshot.force=FALSE} 40 | library(dplyr, warn.conflicts = FALSE) 41 | library(collapsibleTree) 42 | 43 | normalize_transitions <- tibble( 44 | a = "vo2_kinetics()", 45 | b = "detect_outliers()", 46 | c = "normalize_transitions()", 47 | d = "normalize_first_breath()" 48 | ) 49 | 50 | predict_bands <- tibble( 51 | a = "vo2_kinetics()", 52 | b = "detect_outliers()", 53 | c = "predict_bands()", 54 | d = c("predict_bands_baseline()", "predict_bands_transition()") 55 | ) 56 | 57 | plot_outliers <- tibble( 58 | a = "vo2_kinetics()", 59 | b = "plot_outliers()" 60 | ) 61 | 62 | process_data <- tibble( 63 | a = "vo2_kinetics()", 64 | b = "process_data()", 65 | c = c("interpolate()", "perform_average()", "normalize_time()") 66 | ) 67 | 68 | perform_kinetics <- tibble( 69 | a = "vo2_kinetics()", 70 | b = "perform_kinetics()", 71 | c = c("get_residuals()", "model_diagnostics()") 72 | ) 73 | 74 | bind_rows(normalize_transitions, predict_bands, plot_outliers, process_data, perform_kinetics) %>% 75 | collapsibleTree( 76 | hierarchy = c("a", "b", "c", "d"), 77 | root = "VO2 kinetics analysis", 78 | width = 800, 79 | height = 300, 80 | zoomable = FALSE 81 | ) 82 | ``` 83 | 84 | ## Read the data 85 | 86 | The first step is to read the raw data with the `read_data()` function. Here we are going to use the example file that comes with `{whippr}`, which is a file exported from the COSMED metabolic cart. 87 | 88 | ```{r setup} 89 | library(whippr) 90 | 91 | raw_data <- read_data(path = system.file("example_cosmed.xlsx", package = "whippr"), metabolic_cart = "cosmed", time_column = "t") 92 | 93 | raw_data 94 | ``` 95 | 96 | As you can see in the following graph, this is a protocol where **3 transitions** were performed from a baseline exercise intensity to an exercise intensity below the gas exchange threshold. Therefore, this is a VO2 kinetics test in the **moderate-intensity domain**. In this specific case, the following was done: 97 | 98 | * 3 x 6-min baseline periods at 20 W. 99 | * 3 x 6-min transition periods at the power output associated with 90% of the gas exchange threshold. 100 | 101 | ```{r} 102 | library(ggplot2) 103 | 104 | raw_data %>% 105 | ggplot(aes(t, VO2)) + 106 | geom_point(shape = 21, size = 3, fill = "white") + 107 | theme_whippr() 108 | ``` 109 | 110 | ## Perform the analysis 111 | 112 | After reading the raw data, we can move directly to performing the VO2 kinetics analysis with `vo2_kinetics()`. This function will: 113 | 114 | - Recognize each baseline and transition phase 115 | - Normalize the first breath in each transition in a safe way to prevent time misalignment 116 | - Recognize outliers 117 | - Remove outliers 118 | - Interpolate each transition 119 | - Time-align the data 120 | - Ensemble-average the transitions 121 | - Perform the chosen bin-average 122 | - Fit the final mono-exponential VO2 kinetics model with the options chosen 123 | - Calculate residuals 124 | 125 | ```{block, type = 'rmdinfo'} 126 | For modeling VO2 kinetics in the **moderate-intensity domain**, a mono-exponential model is used: 127 | 128 | $$VO_2\left(t\right)=baseline+amplitude\cdot\left(1-e^{-\frac{\left(t-TD\right)}{\tau}}\right)$$ 129 | 130 | where: 131 | 132 | * `VO2(t)` = the oxygen uptake at any given time. 133 | * `baseline` = the oxygen uptake associated with the baseline phase. 134 | * `amplitude` = the steady-state increase in oxygen uptake above `baseline`. 135 | * `TD` = the time delay. 136 | * `τ` = the time constant defined as the duration of time for the oxygen uptake to increase to 63% of the steady-state increase. 137 | ```
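To see how this model maps onto actual fitting code, here is a minimal sketch of fitting the mono-exponential by hand with `minpack.lm::nlsLM()` (the same fitting engine `whippr` uses for its exponential fits during data cleaning). The data object `data_processed`, its column names (`t`, `VO2`), and the starting values are illustrative assumptions only; in practice, `vo2_kinetics()` prepares the data and performs this fit for you.

```{r, eval = FALSE}
## Illustrative sketch only (not run): `vo2_kinetics()` performs this fit for you.
## `data_processed`, the `t` and `VO2` columns, and the starting values are hypothetical.
fit_manual <- minpack.lm::nlsLM(
  VO2 ~ baseline + amplitude * (1 - exp(-(t - TD) / tau)),
  data = data_processed,
  start = list(baseline = 1000, amplitude = 1500, TD = 15, tau = 30)
)

summary(fit_manual)
```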
138 | 139 | ### Important options 140 | 141 | In `vo2_kinetics()` you must set important options before continuing. 142 | 143 | **Protocol-related options:** 144 | 145 | * `protocol_n_transitions` = Number of transitions performed. 146 | * `protocol_baseline_length` = The length of the baseline (in seconds). 147 | * `protocol_transition_length` = The length of the transition (in seconds). 148 | 149 | **Data cleaning-related options:** 150 | 151 | * `cleaning_level` = A numeric scalar between 0 and 1 giving the confidence level for the intervals to be calculated during the data cleaning process. Breaths lying outside the prediction bands will be excluded. 152 | * `cleaning_baseline_fit` = A vector of length equal to `protocol_n_transitions`, indicating what kind of fit to perform for each baseline. Either *linear* or *exponential*. 153 | 154 | **Fitting-related options:** 155 | 156 | * `fit_level` = A numeric scalar between 0 and 1 giving the confidence level for the parameter estimates in the final VO2 kinetics fit. 157 | * `fit_bin_average` = The bin average to be performed for the final fit. 158 | * `fit_phase_1_length` = The length of phase I that you wish to exclude from the final exponential fit, in seconds. 159 | * `fit_baseline_length` = The length of the baseline to perform the final linear fit, in seconds. 160 | * `fit_transition_length` = The length of the transition to perform the final exponential fit, in seconds. 161 | 162 | The analysis is performed as follows: 163 | 164 | ```{r} 165 | results <- vo2_kinetics( 166 | .data = raw_data, 167 | intensity_domain = "moderate", 168 | vo2_column = "VO2", 169 | protocol_n_transitions = 3, 170 | protocol_baseline_length = 360, 171 | protocol_transition_length = 360, 172 | cleaning_level = 0.95, 173 | cleaning_baseline_fit = c("linear", "exponential", "exponential"), 174 | fit_level = 0.95, 175 | fit_bin_average = 5, 176 | fit_phase_1_length = 20, 177 | fit_baseline_length = 120, 178 | fit_transition_length = 240, 179 | verbose = TRUE 180 | ) 181 | 182 | results 183 | ``` 184 | 185 | ## Fit parameters 186 | 187 | Fit parameters and confidence intervals may be accessed through the **model_summary** column. 188 | 189 | ```{r} 190 | results$model_summary[[1]] 191 | ``` 192 | 193 | ## Fit plot 194 | 195 | The fit plot may be accessed through the **plot_model** column. 196 | 197 | ```{r} 198 | results$plot_model[[1]] 199 | ``` 200 | 201 | ## Checking what was done during data cleaning 202 | 203 | The data cleaning process may be accessed through the **plot_outliers** column. 204 | 205 | ```{r fig.width=12, fig.height=10} 206 | results$plot_outliers[[1]] 207 | ``` 208 | 209 | ## Model diagnostics 210 | 211 | The model residuals plot may be accessed through the **plot_residuals** column. 212 | 213 | ```{r fig.width=12, fig.height=10} 214 | results$plot_residuals[[1]] 215 | ``` 216 | 217 | ## Additional columns 218 | 219 | ### Raw data with detected outliers 220 | 221 | The raw data with additional columns from the data cleaning process may be accessed through the **data_outliers** column. 222 | 223 | ```{r} 224 | results$data_outliers[[1]] 225 | ``` 226 | 227 | ### Processed data 228 | 229 | The processed data (cleaned, interpolated, time-aligned, ensemble-averaged, and bin-averaged) may be accessed through the **data_processed** column.
230 | 231 | ```{r} 232 | results$data_processed[[1]] 233 | ``` 234 | 235 | ### Fitted data 236 | 237 | The data from the baseline and transition fits may be accessed through the **data_fitted** column. 238 | 239 | ```{r} 240 | results$data_fitted[[1]] 241 | ``` 242 | 243 | ### Model 244 | 245 | The model used for fitting the mono-exponential model may be accessed through the **model** column. 246 | 247 | ```{r} 248 | results$model[[1]] 249 | 250 | summary(results$model[[1]]) 251 | ``` 252 | 253 | ### Residuals data 254 | 255 | The model residuals data may be accessed through the **model_residuals** column. 256 | 257 | ```{r} 258 | results$model_residuals[[1]] 259 | ``` 260 | 261 | -------------------------------------------------------------------------------- /vignettes/incremental.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Incremental test analyses" 3 | output: rmarkdown::html_vignette 4 | vignette: > 5 | %\VignetteIndexEntry{Incremental test analyses} 6 | %\VignetteEngine{knitr::rmarkdown} 7 | %\VignetteEncoding{UTF-8} 8 | --- 9 | 10 | ```{r, include = FALSE} 11 | knitr::opts_chunk$set( 12 | collapse = TRUE, 13 | comment = "#>", 14 | fig.align = "center", 15 | fig.width = 8, 16 | fig.height = 6 17 | ) 18 | ``` 19 | 20 | If you have an incremental test, `whippr` can also help you out! In this vignette you will learn how to: 21 | 22 | * Normalize your data (i.e., recognize baseline period, adjust work rates, and adjust time column); 23 | * Recognize "bad breaths" and remove them; 24 | * Calculate mean response time through various methods (*work in progress*); 25 | * Calculate maximal oxygen uptake (and analyze whether a plateau existed in your data); 26 | * Estimate ventilatory thresholds (*work in progress*). 27 | 28 | All of the above work both in a **ramp** and in a **step** incremental test. 29 | 30 | ## Normalize your data 31 | 32 | To get started, let's read in the example data from a ramp incremental test that comes with the package. This is a test that had the following protocol: 33 | 34 | - 4-min baseline period at 20 W; 35 | - ramp increase of 25 W/min. 36 | 37 | ```{r setup} 38 | library(whippr) 39 | library(ggplot2) 40 | library(dplyr) 41 | 42 | path_example <- system.file("ramp_cosmed.xlsx", package = "whippr") 43 | 44 | df <- read_data(path = path_example, metabolic_cart = "cosmed") 45 | 46 | df 47 | ``` 48 | 49 | ```{r} 50 | df %>% 51 | ggplot(aes(t, VO2)) + 52 | geom_point(shape = 21, size = 4, fill = "white") + 53 | theme_whippr() 54 | ``` 55 | 56 | A few things to note: 57 | 58 | - Our time column is not normalized. That is, we do not know explicitly what is baseline, and what is ramp; 59 | - We do not have a work rate column. 60 | 61 | All of the above will be fixed with the `incremental_normalize()` function. Since we do not have a work rate column, we will set the argument `work_rate_magic = TRUE`. This argument will allow us to calculate the work rates throughout the test. 
62 | 63 | ```{r} 64 | ramp_normalized <- df %>% 65 | incremental_normalize( 66 | .data = ., 67 | incremental_type = "ramp", 68 | has_baseline = TRUE, 69 | baseline_length = 240, ## 4-min baseline 70 | work_rate_magic = TRUE, 71 | baseline_intensity = 20, ## baseline was performed at 20 W 72 | ramp_increase = 25 ## 25 W/min ramp 73 | ) 74 | 75 | ramp_normalized 76 | ``` 77 | 78 | We can see that our data is now aware of the two different phases in the test: the baseline and the ramp period: 79 | 80 | ```{r} 81 | ramp_normalized %>% 82 | distinct(protocol_phase) 83 | ``` 84 | 85 | Additionally, a new column was created: `work_rate`: 86 | 87 | ```{r} 88 | ramp_normalized %>% 89 | select(t, work_rate) 90 | ``` 91 | 92 | And we can also plot it to check what was done. Note that there is a constant load during baseline (20 W), and then a constant increase in power output (25 W/min) during the ramp phase: 93 | 94 | ```{r} 95 | ramp_normalized %>% 96 | plot_incremental() 97 | ``` 98 | 99 | We can therefore quickly analyze our peak power output as: 100 | 101 | ```{r} 102 | ramp_normalized %>% 103 | slice_max(work_rate) %>% 104 | select(work_rate) 105 | ``` 106 | 107 | ### Step test 108 | 109 | But what if we had a step-incremental test? How does the work rate transformation work? In a step test, it is important to have both the actual power output from each step, and also a linearization of the power output. To illustrate what I mean, let's take a look at an example: 110 | 111 | ```{r} 112 | ## get file path from example data 113 | path_example_step <- system.file("step_cortex.xlsx", package = "whippr") 114 | 115 | ## read data from step test 116 | df_step <- read_data(path = path_example_step, metabolic_cart = "cortex") 117 | 118 | df_step 119 | ``` 120 | 121 | Note that our data contains weird column names. So, for simplicity, let's rename the VO2 column. 122 | 123 | ```{r} 124 | df_step_renamed <- df_step %>% 125 | rename(VO2 = `V'O2 (STPD)`) 126 | ``` 127 | 128 | This is a test that had the following protocol: 129 | 130 | - resting period (i.e., 0 W) for 2 minutes; 131 | - step protocol starting at 50 W and increasing 25 W every 3 minutes. 132 | 133 | ```{r} 134 | df_step_renamed %>% 135 | ggplot(aes(t, VO2)) + 136 | geom_point(shape = 21, size = 4, fill = "white") + 137 | theme_whippr() 138 | ``` 139 | 140 | So, let's first normalize our data: 141 | 142 | ```{r} 143 | step_normalized <- df_step %>% 144 | incremental_normalize( 145 | .data = ., 146 | incremental_type = "step", 147 | has_baseline = TRUE, 148 | baseline_length = 120, ## 2 min baseline 149 | work_rate_magic = TRUE, 150 | baseline_intensity = 0, ## baseline was resting on the bike, so intensity is 0 W 151 | step_start = 50, ## step protocol started at 50 W 152 | step_increase = 25, ## step increase was 25 W 153 | step_length = 180 ## the intensity increased every 3 minutes 154 | ) 155 | 156 | step_normalized 157 | ``` 158 | 159 | And then we can visualize what was done with the work rate with the `plot_incremental()` function: 160 | 161 | ```{r fig.width=10, fig.height=10} 162 | step_normalized %>% 163 | plot_incremental() 164 | ``` 165 | 166 | As you can see, two work rate columns were created: one with the actual power output, and another one with the linearization of the power output.
167 | 168 | ```{r} 169 | step_normalized %>% 170 | select(t, protocol_phase:step) 171 | ``` 172 | 173 | This is useful, for example, to calculate the peak power output: 174 | 175 | ```{r} 176 | step_normalized %>% 177 | slice_max(work_rate) %>% 178 | select(work_rate) 179 | ``` 180 | 181 | ## Recognize bad breaths 182 | 183 | As you might have noticed, these two incremental test examples had a few bad breaths (outliers) that should be deleted prior to any data analysis. This can be easily achieved with the `detect_outliers()` function. 184 | 185 | Two methods for detecting outliers are available: **linear** and **anomaly** detection. The **linear** method is going to fit two linear models: one for the baseline period, and another one for the ramp (or step) period. The **anomaly** detection, however, uses the [anomalize](https://business-science.github.io/anomalize/) package, which decomposes the time series and then performs the anomaly detection. Let's see it in action: 186 | 187 | ### Linear 188 | 189 | ```{r} 190 | ## detect ramp outliers 191 | data_ramp_outliers <- detect_outliers( 192 | .data = ramp_normalized, 193 | test_type = "incremental", 194 | vo2_column = "VO2", 195 | cleaning_level = 0.95, 196 | method_incremental = "linear", 197 | verbose = TRUE 198 | ) 199 | 200 | data_ramp_outliers 201 | ``` 202 | 203 | ```{r} 204 | data_ramp_outliers %>% 205 | plot_outliers() 206 | ``` 207 | 208 | Thereafter, you can easily remove the detected outliers like this: 209 | 210 | ```{r} 211 | data_ramp_outliers %>% 212 | filter(outlier == "no") 213 | ``` 214 | 215 | Note that we set a **95%** confidence level when detecting the outliers. You can easily change that, for example, to **99%**: 216 | 217 | ```{r} 218 | detect_outliers( 219 | .data = ramp_normalized, 220 | test_type = "incremental", 221 | vo2_column = "VO2", 222 | cleaning_level = 0.99, ## changed to 99% 223 | method_incremental = "linear", 224 | verbose = TRUE 225 | ) %>% 226 | plot_outliers() 227 | ``` 228 | 229 | ### Anomaly 230 | 231 | Now let's see how the anomaly detection performs: 232 | 233 | ```{r} 234 | detect_outliers( 235 | .data = ramp_normalized, 236 | test_type = "incremental", 237 | vo2_column = "VO2", 238 | cleaning_level = 0.95, 239 | method_incremental = "anomaly", ## changed to anomaly detection 240 | verbose = TRUE 241 | ) %>% 242 | plot_outliers() 243 | ``` 244 | 245 | ## Mean response time 246 | 247 | > Work in progress. 248 | 249 | ## VO2max (maximal oxygen uptake) 250 | 251 | There are two functions that you can use to analyze **VO2max**: 252 | 253 | - `vo2_max()`: it performs all the necessary steps, which include: 254 | - `incremental_normalize()`: normalize incremental test data 255 | - `detect_outliers()`: detect outliers 256 | - `interpolate()`: interpolate data from breath-by-breath into second-by-second 257 | - `perform_average()`: perform average on second-by-second data 258 | 259 | - `perform_max()`: it only performs the final steps (`interpolate()` and `perform_average()`).
260 | 261 | ### `perform_max()` 262 | 263 | ```{r} 264 | results_vo2max <- data_ramp_outliers %>% ## data was already normalized and outliers were detected 265 | perform_max( 266 | .data = ., 267 | vo2_column = "VO2", 268 | vo2_relative_column = "VO2/Kg", 269 | heart_rate_column = "HR", 270 | rer_column = "R", 271 | average_method = "bin", 272 | average_length = 30, 273 | plot = TRUE, 274 | verbose = FALSE 275 | ) 276 | 277 | results_vo2max 278 | ``` 279 | 280 | ```{r} 281 | results_vo2max$plot[[1]] 282 | ``` 283 | 284 | ### `vo2_max()` 285 | 286 | ```{r} 287 | vo2_max( 288 | .data = df, ## data from `read_data()` 289 | vo2_column = "VO2", 290 | vo2_relative_column = "VO2/Kg", 291 | heart_rate_column = "HR", 292 | rer_column = "R", 293 | detect_outliers = TRUE, 294 | average_method = "bin", 295 | average_length = 30, 296 | plot = TRUE, 297 | verbose = TRUE, 298 | ## arguments for `incremental_normalize()` 299 | incremental_type = "ramp", 300 | has_baseline = TRUE, 301 | baseline_length = 240, ## 4-min baseline 302 | work_rate_magic = TRUE, ## produce a work rate column 303 | baseline_intensity = 20, ## baseline was performed at 20 W 304 | ramp_increase = 25, ## 25 W/min ramp 305 | ## arguments for `detect_outliers()` 306 | test_type = "incremental", 307 | cleaning_level = 0.95, 308 | method_incremental = "linear" 309 | ) 310 | ``` 311 | 312 | 313 | ## Ventilatory thresholds 314 | 315 | > Work in progress. 316 | -------------------------------------------------------------------------------- /R/predict.R: -------------------------------------------------------------------------------- 1 | #' Extract confidence and prediction bands 2 | #' 3 | #' It extracts confidence and prediction bands from the \code{nls} model. It is used only for data cleaning. 4 | #' 5 | #' @param .data The normalized data retrieved from \code{normalize_transitions()}. 6 | #' @param time_column The name (quoted) of the column containing the time. Depending on the language of your system, this column might not be "t". Therefore, you may specify it here. Default to "t". 7 | #' @param vo2_column The name (quoted) of the column containing the absolute oxygen uptake (VO2) data. Default to 'VO2'. 8 | #' @param cleaning_level A numeric scalar between 0 and 1 giving the confidence level for the intervals to be calculated. 9 | #' @param cleaning_baseline_fit A character indicating what kind of fit to perform for each baseline. Either 'linear' or 'exponential'.
10 | #' 11 | #' @return a [tibble][tibble::tibble-package] containing the following columns: 12 | #' \item{x}{The provided time data.} 13 | #' \item{y}{The provided VO2 data.} 14 | #' \item{.fitted}{The predicted response for that observation.} 15 | #' \item{.resid}{The residual for a particular point.} 16 | #' \item{lwr_conf}{Lower limit of the confidence band.} 17 | #' \item{upr_conf}{Upper limit of the confidence band.} 18 | #' \item{lwr_pred}{Lower limit of the prediction band.} 19 | #' \item{upr_pred}{Upper limit of the prediction band.} 20 | #' 21 | #' @export 22 | #' 23 | #' @importFrom utils tail 24 | predict_bands <- function( 25 | .data, 26 | time_column = "t", 27 | vo2_column = "VO2", 28 | cleaning_level = 0.95, 29 | cleaning_baseline_fit = c("linear", "exponential") 30 | ) { 31 | 32 | 33 | # check arguments --------------------------------------------------------- 34 | cleaning_baseline_fit <- match.arg(cleaning_baseline_fit) 35 | 36 | # prepare data ------------------------------------------------------------ 37 | data_bsln <- .data %>% 38 | dplyr::filter(phase == "baseline") 39 | 40 | data_transition <- .data %>% 41 | dplyr::filter(phase == "transition") 42 | 43 | 44 | # baseline fit ------------------------------------------------------------ 45 | bands_bsln <- predict_bands_baseline( 46 | .data = data_bsln, 47 | time_column = time_column, 48 | vo2_column = vo2_column, 49 | cleaning_level = cleaning_level, 50 | cleaning_baseline_fit = cleaning_baseline_fit 51 | ) 52 | 53 | # define bsln value ------------------------------------------------------- 54 | baseline_value <- tail(bands_bsln$.fitted, 10) %>% 55 | mean() 56 | 57 | # define model depending on the intensity domain -------------------------- 58 | ## set starting values 59 | start_Amp <- max(data_transition[[vo2_column]]) - baseline_value 60 | start_TD <- 360 61 | start_tau <- 30 62 | 63 | formula_model <- glue::glue("{vo2_column} ~ {baseline_value} + Amp * (1 - exp(-({time_column} - TD)/tau))") 64 | 65 | cleaning_model <- minpack.lm::nlsLM( 66 | formula = formula_model, 67 | data = data_transition, 68 | start = list(Amp = start_Amp, TD = start_TD, tau = start_tau) 69 | ) 70 | 71 | bands_transition <- predict_bands_transition( 72 | .data = data_transition, 73 | time_column = time_column, 74 | vo2_column = vo2_column, 75 | cleaning_level = cleaning_level, 76 | cleaning_model = cleaning_model 77 | ) 78 | 79 | out <- dplyr::bind_rows(bands_bsln, bands_transition) 80 | 81 | out 82 | 83 | } 84 | 85 | #' Extract confidence and prediction bands for the baseline phase 86 | #' 87 | #' @param .data The normalized data retrieved from \code{normalize_transitions()}. The data should be filtered to only the 'baseline' phase before passing to the function. 88 | #' @param time_column The name (quoted) of the column containing the time. Depending on the language of your system, this column might not be "t". Therefore, you may specify it here. Default to "t". 89 | #' @param vo2_column The name (quoted) of the column containing the absolute oxygen uptake (VO2) data. Default to 'VO2'. 90 | #' @param cleaning_level A numeric scalar between 0 and 1 giving the confidence level for the intervals to be calculated. 91 | #' @param cleaning_baseline_fit A character indicating what kind of fit to perform for each baseline. Either 'linear' or 'exponential'.
92 | #' 93 | #' @return a [tibble][tibble::tibble-package] containing the following columns: 94 | #' \item{x}{The provided time data.} 95 | #' \item{y}{The provided VO2 data.} 96 | #' \item{.fitted}{The predicted response for that observation.} 97 | #' \item{.resid}{The residual for a particular point.} 98 | #' \item{lwr_conf}{Lower limit of the confidence band.} 99 | #' \item{upr_conf}{Upper limit of the confidence band.} 100 | #' \item{lwr_pred}{Lower limit of the prediction band.} 101 | #' \item{upr_pred}{Upper limit of the prediction band.} 102 | #' 103 | #' @export 104 | #' 105 | #' @importFrom stats lm predict.lm 106 | #' @importFrom utils head 107 | predict_bands_baseline <- function( 108 | .data, 109 | time_column, 110 | vo2_column, 111 | cleaning_level, 112 | cleaning_baseline_fit 113 | ) { 114 | 115 | if(cleaning_baseline_fit == "linear") { 116 | 117 | # linear fit with slope = 0 118 | linear_formula <- glue::glue("{vo2_column} ~ 1") 119 | model_bsln <- lm(linear_formula, data = .data) 120 | 121 | # prediction with prediction bands 122 | out <- .data %>% 123 | dplyr::select(1) %>% 124 | dplyr::bind_cols(broom::augment(model_bsln)) %>% 125 | dplyr::rename_at(1:2, ~ c("x", "y")) %>% 126 | dplyr::select(x:.resid) %>% 127 | dplyr::bind_cols(dplyr::as_tibble(predict.lm(model_bsln, interval = "confidence", level = cleaning_level))) %>% 128 | dplyr::rename( 129 | lwr_conf = lwr, 130 | upr_conf = upr 131 | ) %>% 132 | dplyr::select(-fit) %>% 133 | dplyr::bind_cols(dplyr::as_tibble(suppressWarnings(predict.lm(model_bsln, interval = "prediction", level = cleaning_level)))) %>% 134 | dplyr::rename( 135 | lwr_pred = lwr, 136 | upr_pred = upr 137 | ) %>% 138 | dplyr::select(-fit) %>% 139 | dplyr::mutate( 140 | outlier = ifelse(y >= lwr_pred & y <= upr_pred, "no", "yes") 141 | ) 142 | } else { 143 | 144 | ## set starting values 145 | baseline_value <- head(.data[[vo2_column]], 3) %>% mean() 146 | start_Amp <- min(.data[[vo2_column]]) - max(.data[[vo2_column]]) 147 | start_TD <- 10 148 | start_tau <- 30 149 | 150 | formula_model <- glue::glue("{vo2_column} ~ {baseline_value} + Amp * (1 - exp(-({time_column} - TD)/tau))") 151 | 152 | model_bsln_exp <- minpack.lm::nlsLM(formula_model, 153 | data = .data, 154 | start=list(Amp = start_Amp, TD = start_TD, tau = start_tau)) 155 | 156 | out <- predict_bands_transition( 157 | .data = .data, 158 | time_column = time_column, 159 | vo2_column = vo2_column, 160 | cleaning_level = cleaning_level, 161 | cleaning_model = model_bsln_exp 162 | ) 163 | 164 | } 165 | 166 | out 167 | 168 | } 169 | 170 | #' Extract confidence and prediction bands for the transition phase 171 | #' 172 | #' @param .data The normalized data retrieved from \code{normalize_transitions()}. The data should be filtered to only the 'transition' phase before passing to the function. 173 | #' @param time_column The name (quoted) of the column containing the time. Depending on the language of your system, this column might not be "t". Therefore, you may specify it here. Default to "t". 174 | #' @param vo2_column The name (quoted) of the column containing the absolute oxygen uptake (VO2) data. Default to 'VO2'. 175 | #' @param cleaning_level A numeric scalar between 0 and 1 giving the confidence level for the intervals to be calculated. 176 | #' @param cleaning_model The \code{nls} model to retrieve the bands from.
177 | #' 178 | #' @return a [tibble][tibble::tibble-package] containing the following columns: 179 | #' \item{x}{The provided time data.} 180 | #' \item{y}{The provided VO2 data.} 181 | #' \item{.fitted}{The predicted response for that observation.} 182 | #' \item{.resid}{The residual for a particular point.} 183 | #' \item{lwr_conf}{Lower limit of the confidence band.} 184 | #' \item{upr_conf}{Upper limit of the confidence band.} 185 | #' \item{lwr_pred}{Lower limit of the prediction band.} 186 | #' \item{upr_pred}{Upper limit of the prediction band.} 187 | #' 188 | #' @export 189 | #' 190 | #' @importFrom stats coef deriv qt vcov 191 | predict_bands_transition <- function( 192 | .data, 193 | time_column, 194 | vo2_column, 195 | cleaning_level, 196 | cleaning_model 197 | ) { 198 | ## this code is adapted from: http://sia.webpopix.org/nonlinearRegression.html#confidence-intervals-for-the-model-parameters 199 | 200 | summary_model <- summary(cleaning_model) 201 | augmented_model <- broom::augment(cleaning_model) 202 | 203 | residuals_model <- augmented_model %>% 204 | dplyr::select(.resid) %>% 205 | dplyr::pull() 206 | 207 | cleaning_level <- 1 - cleaning_level 208 | 209 | ## first step is to compute the variance 210 | fgh2 <- deriv(summary_model$formula, c("Amp", "TD", "tau"), function(Amp, TD, tau, t){} ) 211 | 212 | x_new <- .data[[time_column]] 213 | y <- .data[[vo2_column]] 214 | 215 | beta_est <- coef(cleaning_model) 216 | 217 | f_new <- fgh2(beta_est[1], beta_est[2], beta_est[3], x_new) 218 | 219 | g_new <- attr(f_new, "gradient") 220 | V_beta <- vcov(cleaning_model) 221 | GS <- rowSums((g_new %*% V_beta) * g_new) 222 | 223 | ## confidence bands 224 | degrees_freedom <- summary_model$df[2] 225 | delta_f <- sqrt(GS) * qt(1 - cleaning_level / 2, degrees_freedom) 226 | 227 | ## prediction bands 228 | sigma_est <- summary_model$sigma 229 | delta_y <- sqrt(GS + sigma_est ^ 2) * qt(1 - cleaning_level / 2, degrees_freedom) 230 | 231 | out <- dplyr::tibble( 232 | x = x_new, 233 | y = y, 234 | .fitted = f_new, 235 | .resid = residuals_model, 236 | lwr_conf = f_new - delta_f, 237 | upr_conf = f_new + delta_f, 238 | lwr_pred = f_new - delta_y, 239 | upr_pred = f_new + delta_y 240 | ) %>% 241 | dplyr::mutate( 242 | outlier = ifelse(y >= lwr_pred & y <= upr_pred, "no", "yes") 243 | ) 244 | 245 | out 246 | } 247 | -------------------------------------------------------------------------------- /R/max.R: -------------------------------------------------------------------------------- 1 | #' VO2max 2 | #' 3 | #' It performs the whole process of the VO2max data analysis, which includes: 4 | #' data standardization and normalization according to incremental protocol (`incremental_normalize()`), 5 | #' 'bad breaths' detection (`detect_outliers()`), 6 | #' mean response time calculation (`incremental_mrt()`) (currently ignored), 7 | #' and maximal values calculation (VO2, PO, HR, RER) (`perform_max()`). 8 | #' 9 | #' @param .data Data retrieved from `read_data()`. 10 | #' @param vo2_column The name (quoted) of the column containing the absolute oxygen uptake (VO2) data. Default to `"VO2"`. 11 | #' @param vo2_relative_column The name (quoted) of the column containing the relative to body weight oxygen uptake (VO2) data. Default to `NULL`. 12 | #' @param heart_rate_column The name (quoted) of the column containing the heart rate (HR) data. Default to `NULL`. If `NULL`, this parameter will not be calculated. 
13 | #' @param rer_column The name (quoted) of the column containing the respiratory exchange ratio (RER) data. Default to `NULL`. If `NULL`, this parameter will not be calculated. 14 | #' @param average_method The average method to be used for VO2max calculation. One of `bin` or `rolling`. 15 | #' @param average_length The length, in seconds, of the average to be used. For example, if `average_method = bin`, and `average_length = 30`, it will perform a 30-s bin-average. 16 | #' @param detect_outliers A boolean indicating whether to detect outliers. Default to `TRUE`. 17 | #' @param mrt A boolean indicating whether to calculate the mean response time. To be implemented soon <- currently ignored. 18 | #' @param plot A boolean indicating whether to produce a plot with the summary results. Default to `TRUE`. 19 | #' @param verbose A boolean indicating whether messages should be printed in the console. Default to `TRUE`. 20 | #' @param ... Additional arguments passed onto `incremental_normalize()`, `detect_outliers()` if `detect_outliers = TRUE`, and `incremental_mrt()` if `mrt = TRUE`. 21 | #' 22 | #' @return a [tibble][tibble::tibble-package] containing one row and the following columns: 23 | #' \item{VO2max_absolute}{The absolute VO2max.} 24 | #' \item{VO2max_relative}{The relative VO2max.} 25 | #' \item{POpeak}{The peak power output.} 26 | #' \item{HRmax}{The maximal heart rate.} 27 | #' \item{RERmax}{The maximal RER.} 28 | #' \item{plot}{The plot, if `plot = TRUE`.} 29 | #' 30 | #' @details 31 | #' TODO 32 | #' 33 | #' @export 34 | #' 35 | #' @examples 36 | #' \dontrun{ 37 | #' ## get file path from example data 38 | #' path_example <- system.file("ramp_cosmed.xlsx", package = "whippr") 39 | #' 40 | #' ## read data from ramp test 41 | #' df <- read_data(path = path_example, metabolic_cart = "cosmed") 42 | #' 43 | #' ## normalize incremental test data 44 | #' ramp_normalized <- df %>% 45 | #' incremental_normalize( 46 | #' .data = ., 47 | #' incremental_type = "ramp", 48 | #' has_baseline = TRUE, 49 | #' baseline_length = 240, 50 | #' work_rate_magic = TRUE, 51 | #' baseline_intensity = 20, 52 | #' ramp_increase = 25 53 | #' ) 54 | #' 55 | #' ## detect outliers 56 | #' data_ramp_outliers <- detect_outliers( 57 | #' .data = ramp_normalized, 58 | #' test_type = "incremental", 59 | #' vo2_column = "VO2", 60 | #' cleaning_level = 0.95, 61 | #' method_incremental = "linear", 62 | #' verbose = TRUE 63 | #' ) 64 | #' 65 | #' ## analyze VO2max 66 | #' perform_max( 67 | #' .data = data_ramp_outliers, 68 | #' vo2_column = "VO2", 69 | #' vo2_relative_column = "VO2/Kg", 70 | #' heart_rate_column = "HR", 71 | #' rer_column = "R", 72 | #' average_method = "bin", 73 | #' average_length = 30, 74 | #' plot = TRUE, 75 | #' verbose = FALSE 76 | #' ) 77 | #' } 78 | #' 79 | vo2_max <- function( 80 | .data, 81 | vo2_column = "VO2", 82 | vo2_relative_column = NULL, 83 | heart_rate_column = NULL, 84 | rer_column = NULL, 85 | detect_outliers = TRUE, 86 | average_method = c("bin", "rolling"), 87 | average_length = 30, 88 | mrt, 89 | plot = TRUE, 90 | verbose = TRUE, 91 | ... 92 | ) { 93 | 94 | if(!attr(.data, "read_data")) 95 | stop("You need to read your data with `read_data()` first.", call. = FALSE) 96 | 97 | if(verbose) 98 | cli::cli_rule(center = cli::col_red(" * V\u0307O\u2082 max analysis * ")) 99 | 100 | if(verbose) 101 | cli::cli_alert_success("Normalizing incremental data...") 102 | 103 | ## normalize incremental test 104 | data_normalized <- incremental_normalize(.data = .data, ...) 
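  ## NOTE: the dots (`...`) forward the protocol arguments (e.g., `incremental_type`,
  ## `has_baseline`, `baseline_length`, `work_rate_magic`) to `incremental_normalize()`
  ## above, and the cleaning arguments (e.g., `test_type`, `cleaning_level`,
  ## `method_incremental`) to `detect_outliers()` below.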
105 | 106 | ## detect outliers 107 | if(detect_outliers) { 108 | 109 | data_normalized <- data_normalized %>% 110 | detect_outliers( 111 | .data = ., 112 | verbose = verbose, 113 | ... 114 | ) 115 | } 116 | 117 | ## perform VO2max 118 | out <- perform_max( 119 | .data = data_normalized, 120 | vo2_column = vo2_column, 121 | vo2_relative_column = vo2_relative_column, 122 | heart_rate_column = heart_rate_column, 123 | rer_column = rer_column, 124 | average_method = average_method, 125 | average_length = average_length, 126 | plot = plot 127 | ) 128 | 129 | out 130 | } 131 | 132 | #' Perform VO2max calculation 133 | #' 134 | #' It performs the calculation of VO2max, HRmax, and maximal RER. Additionally, it detects whether a plateau can be identified from your data. 135 | #' 136 | #' @param .data The data retrieved either from `incremental_normalize()` or `detect_outliers()`. 137 | #' @param vo2_column The name (quoted) of the column containing the absolute oxygen uptake (VO2) data. Default to `"VO2"`. 138 | #' @param vo2_relative_column The name (quoted) of the column containing the relative to body weight oxygen uptake (VO2) data. Default to `NULL`. 139 | #' @param heart_rate_column The name (quoted) of the column containing the heart rate (HR) data. Default to `NULL`. If `NULL`, this parameter will not be calculated. 140 | #' @param rer_column The name (quoted) of the column containing the respiratory exchange ratio (RER) data. Default to `NULL`. If `NULL`, this parameter will not be calculated. 141 | #' @param average_method The average method to be used for VO2max calculation. One of `bin` or `rolling`. 142 | #' @param average_length The length, in seconds, of the average to be used. For example, if `average_method = bin`, and `average_length = 30`, it will perform a 30-s bin-average. 143 | #' @param plot A boolean indicating whether to produce a plot with the summary results. Default to `TRUE`. 144 | #' @param verbose A boolean indicating whether messages should be printed in the console. Default to `TRUE`. 
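#'
#' @details
#' VO2max and RERmax are computed from the interpolated (second-by-second) and averaged data,
#' whereas the peak power output and the maximal heart rate are taken from the
#' breath-by-breath (non-averaged) data.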
145 | #' 146 | #' @return a tibble 147 | #' @export 148 | #' 149 | #' @examples 150 | #' \dontrun{ 151 | #' ## get file path from example data 152 | #' path_example <- system.file("ramp_cosmed.xlsx", package = "whippr") 153 | #' 154 | #' ## read data from ramp test 155 | #' df <- read_data(path = path_example, metabolic_cart = "cosmed") 156 | #' 157 | #' ## normalize incremental test data 158 | #' ramp_normalized <- df %>% 159 | #' incremental_normalize( 160 | #' .data = ., 161 | #' incremental_type = "ramp", 162 | #' has_baseline = TRUE, 163 | #' baseline_length = 240, 164 | #' work_rate_magic = TRUE, 165 | #' baseline_intensity = 20, 166 | #' ramp_increase = 25 167 | #' ) 168 | #' 169 | #' ## detect outliers 170 | #' data_ramp_outliers <- detect_outliers( 171 | #' .data = ramp_normalized, 172 | #' test_type = "incremental", 173 | #' vo2_column = "VO2", 174 | #' cleaning_level = 0.95, 175 | #' method_incremental = "linear", 176 | #' verbose = TRUE 177 | #' ) 178 | #' 179 | #' ## analyze VO2max 180 | #' perform_max( 181 | #' .data = data_ramp_outliers, 182 | #' vo2_column = "VO2", 183 | #' vo2_relative_column = "VO2/Kg", 184 | #' heart_rate_column = "HR", 185 | #' rer_column = "R", 186 | #' average_method = "bin", 187 | #' average_length = 30, 188 | #' plot = TRUE, 189 | #' verbose = FALSE 190 | #' ) 191 | #' } 192 | perform_max <- function( 193 | .data, 194 | vo2_column = "VO2", 195 | vo2_relative_column = NULL, 196 | heart_rate_column = NULL, 197 | rer_column = NULL, 198 | average_method = c("bin", "rolling"), 199 | average_length = 30, 200 | plot = TRUE, 201 | verbose = TRUE 202 | ) { 203 | 204 | ## check args 205 | average_method <- match.arg(average_method) 206 | 207 | if(average_length < 0) 208 | stop("You can't choose a negative number for averaging the data.", call. = FALSE) 209 | 210 | ## check if ramp was normalized 211 | if(is.null(attr(.data, "normalized"))) 212 | stop("It looks like you did not normalize your incremental data. Did you forget to use `incremental_normalize()`?", call. = FALSE) 213 | 214 | ## check column names 215 | if(!is.null(vo2_column)) 216 | if(!vo2_column %in% colnames(.data)) 217 | stop(glue::glue("It looks like the column {vo2_column} does not exist."), call. = FALSE) 218 | 219 | if(!is.null(vo2_relative_column)) 220 | if(!vo2_relative_column %in% colnames(.data)) 221 | stop(glue::glue("It looks like the column {vo2_relative_column} does not exist."), call. = FALSE) 222 | 223 | if(!is.null(heart_rate_column)) 224 | if(!heart_rate_column %in% colnames(.data)) 225 | stop(glue::glue("It looks like the column {heart_rate_column} does not exist."), call. = FALSE) 226 | 227 | if(!is.null(rer_column)) 228 | if(!rer_column %in% colnames(.data)) 229 | stop(glue::glue("It looks like the column {rer_column} does not exist."), call. = FALSE) 230 | 231 | data_normalized <- .data 232 | 233 | ## check if outliers were identified 234 | ## if not, throw a warning 235 | if(is.null(attr(data_normalized, "outliers_detected"))) 236 | warning("You did not identify any outliers prior to using this function. You should consider using `detect_outliers()` before.", call. 
= FALSE) 237 | 238 | ## make sure no outliers exist 239 | if(attr(data_normalized, "outliers_detected")) { 240 | 241 | if(verbose) 242 | cli::cli_alert_success("Filtering out outliers...") 243 | 244 | data_normalized_filtered <- dplyr::filter(data_normalized, outlier == "no") 245 | } 246 | 247 | if(verbose) { 248 | cli::cli_alert_success("Interpolating from breath-by-breath into second-by-second...") 249 | cli::cli_alert_success("Performing averages...") 250 | } 251 | 252 | ## interpolate and average data 253 | data_averaged <- data_normalized_filtered %>% 254 | ## interpolate data from breath-by-breath to second-by-second 255 | interpolate() %>% 256 | ## perform the chosen average method 257 | perform_average(type = "bin", bins = average_length) 258 | 259 | ## plateau detection 260 | ## TODO 261 | 262 | ## results 263 | ### a few notes: 264 | ### VO2 and RER are analyzed through the averaged data 265 | ### HR and PO are analyzed from the breath-by-breath data 266 | out <- dplyr::tibble( 267 | VO2max_absolute = max(data_averaged[[vo2_column]]), 268 | VO2max_relative = ifelse(is.null(vo2_relative_column), NA, max(data_averaged[[vo2_relative_column]])), 269 | POpeak = as.integer(max(data_normalized[["work_rate"]])), 270 | HRmax = ifelse(is.null(heart_rate_column), NA, max(data_normalized[[heart_rate_column]])), 271 | RERmax = ifelse(is.null(rer_column), NA, max(data_averaged[[rer_column]])) 272 | ) 273 | 274 | if(plot) { 275 | ## check if ggforce and ggtext are installed 276 | rlang::check_installed(c("ggforce", "ggtext")) 277 | label_graph <- out %>% 278 | tidyr::pivot_longer(cols = dplyr::everything()) %>% 279 | tidyr::drop_na() %>% 280 | dplyr::mutate( 281 | value = dplyr::case_when( 282 | name == "RERmax" ~ round(value, 2), 283 | name == "VO2max_relative" ~ round(value, 1), 284 | TRUE ~ round(value, ) 285 | ) 286 | ) %>% 287 | dplyr::mutate( 288 | units = dplyr::case_when( 289 | name == "VO2max_absolute" ~ "mL/min", 290 | name == "VO2max_relative" ~ "mL/kg/min", 291 | name == "POpeak" ~ "W", 292 | name == "HRmax" ~ "bpm", 293 | name == "RERmax" ~ "" 294 | ) 295 | ) %>% 296 | dplyr::summarise(label = paste(name, "=", value, units, collapse = "\n")) %>% 297 | dplyr::pull() 298 | 299 | ## plot 300 | p <- data_averaged %>% 301 | dplyr::filter(t > 0) %>% 302 | ggplot2::ggplot(ggplot2::aes(work_rate, VO2)) + 303 | ggplot2::geom_point(shape = 21, size = 4, color = "black", fill = "white") + 304 | ggforce::geom_mark_rect(ggplot2::aes(filter = work_rate > out$POpeak * 0.9, label = "Maximal values", description = label_graph), label.minwidth = ggplot2::unit(100, "mm")) + 305 | ggplot2::labs( 306 | title = "Incremental test", 307 | subtitle = "Summary results", 308 | x = "Work Rate (W)", 309 | y = "V̇O2" 310 | ) + 311 | theme_whippr() + 312 | ggplot2::theme(axis.title.y = ggtext::element_markdown()) 313 | 314 | out <- out %>% 315 | dplyr::mutate(plot = list(p)) 316 | } 317 | 318 | out 319 | } 320 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | # whippr 5 | 6 | 7 | 8 | [![Lifecycle: 9 | stable](https://img.shields.io/badge/lifecycle-stable-brightgreen.svg)](https://lifecycle.r-lib.org/articles/stages.html#stable) 10 | [![CRAN 11 | status](https://www.r-pkg.org/badges/version/whippr)](https://CRAN.R-project.org/package=whippr) 12 | [![Codecov test 13 | 
coverage](https://codecov.io/gh/fmmattioni/whippr/branch/master/graph/badge.svg)](https://app.codecov.io/gh/fmmattioni/whippr?branch=master) 14 | [![R-CMD-check](https://github.com/fmmattioni/whippr/actions/workflows/R-CMD-check.yaml/badge.svg)](https://github.com/fmmattioni/whippr/actions/workflows/R-CMD-check.yaml) 15 | 16 | 17 | The goal of `whippr` is to provide a set of tools for manipulating gas 18 | exchange data from cardiopulmonary exercise testing. 19 | 20 | ## Why `whippr`? 21 | 22 | The name of the package is in honor of [Prof. Brian J 23 | Whipp](https://erj.ersjournals.com/content/39/1/1) and his invaluable 24 | contribution to the field of exercise physiology. 25 | 26 | ## Installation 27 | 28 | You can install the development version of `whippr` from 29 | [Github](https://github.com/fmmattioni/whippr) with: 30 | 31 | ``` r 32 | # install.packages("remotes") 33 | remotes::install_github("fmmattioni/whippr") 34 | ``` 35 | 36 | ## Use 37 | 38 | ### Read data 39 | 40 | ``` r 41 | library(whippr) 42 | 43 | ## example file that comes with the package for demonstration purposes 44 | path_example <- system.file("example_cosmed.xlsx", package = "whippr") 45 | 46 | df <- read_data(path = path_example, metabolic_cart = "cosmed") 47 | 48 | df 49 | #> # Metabolic cart: COSMED 50 | #> # Data status: raw data 51 | #> # Time column: t 52 | #> # A tibble: 754 × 119 53 | #> t Rf VT VE VO2 VCO2 O2exp CO2exp `VE/VO2` `VE/VCO2` `VO2/Kg` 54 | #> 55 | #> 1 2 8.08 1.19 9.60 380. 301. 185. 52.9 25.3 31.9 4.58 56 | #> 2 4 23.2 0.915 21.2 864. 665. 141. 40.8 24.5 31.9 10.4 57 | #> 3 8 15.6 2.11 32.9 1317. 1075. 325. 97.2 25.0 30.6 15.9 58 | #> 4 11 20.6 1.18 24.4 894. 714. 188. 49.2 27.3 34.1 10.8 59 | #> 5 14 23.3 0.947 22.1 822. 647. 150. 39.4 26.9 34.1 9.90 60 | #> 6 18 14.7 2.28 33.6 1347. 1126. 351. 108. 24.9 29.8 16.2 61 | #> 7 23 11.2 2.32 26.1 980. 848. 364. 107. 26.6 30.7 11.8 62 | #> 8 28 13.2 2.18 28.8 1147. 981. 336. 105. 25.2 29.4 13.8 63 | #> 9 31 17.7 1.51 26.7 1048. 860. 234. 68.8 25.5 31.0 12.6 64 | #> 10 35 14.2 1.68 23.8 973. 794. 257. 79.3 24.5 30.0 11.7 65 | #> # ℹ 744 more rows 66 | #> # ℹ 108 more variables: R , FeO2 , FeCO2 , HR , 67 | #> # `VO2/HR` , Load1 , Load2 , Load3 , Phase , 68 | #> # Marker , FetO2 , FetCO2 , FiO2 , FiCO2 , Ti , 69 | #> # Te , Ttot , `Ti/Ttot` , IV , PetO2 , PetCO2 , 70 | #> # `P(a-et)CO2` , SpO2 , `VD(phys)` , `VD/VT` , 71 | #> # `Env. Temp.` , `Analyz. Temp.` , `Analyz. Press.` , … 72 | ``` 73 | 74 | ### Interpolate 75 | 76 | ``` r 77 | df %>% 78 | interpolate() 79 | #> # Metabolic cart: COSMED 80 | #> # Data status: interpolated data 81 | #> # Time column: t 82 | #> # A tibble: 2,159 × 114 83 | #> t Rf VT VE VO2 VCO2 O2exp CO2exp `VE/VO2` `VE/VCO2` `VO2/Kg` 84 | #> 85 | #> 1 2 8.08 1.19 9.60 380. 301. 185. 52.9 25.3 31.9 4.58 86 | #> 2 3 15.6 1.05 15.4 622. 483. 163. 46.8 24.9 31.9 7.50 87 | #> 3 4 23.2 0.915 21.2 864. 665. 141. 40.8 24.5 31.9 10.4 88 | #> 4 5 21.3 1.21 24.1 978. 767. 187. 54.9 24.6 31.6 11.8 89 | #> 5 6 19.4 1.51 27.1 1091. 870. 233. 69.0 24.8 31.3 13.1 90 | #> 6 7 17.5 1.81 30.0 1204. 973. 279. 83.1 24.9 30.9 14.5 91 | #> 7 8 15.6 2.11 32.9 1317. 1075. 325. 97.2 25.0 30.6 15.9 92 | #> 8 9 17.3 1.80 30.1 1176. 955. 279. 81.2 25.7 31.8 14.2 93 | #> 9 10 19.0 1.49 27.2 1035. 834. 233. 65.2 26.5 33.0 12.5 94 | #> 10 11 20.6 1.18 24.4 894. 714. 188. 
49.2 27.3 34.1 10.8 95 | #> # ℹ 2,149 more rows 96 | #> # ℹ 103 more variables: R , FeO2 , FeCO2 , HR , 97 | #> # `VO2/HR` , Load1 , Load2 , Load3 , Phase , 98 | #> # FetO2 , FetCO2 , FiO2 , FiCO2 , Ti , Te , 99 | #> # Ttot , `Ti/Ttot` , IV , PetO2 , PetCO2 , 100 | #> # `P(a-et)CO2` , SpO2 , `VD(phys)` , `VD/VT` , 101 | #> # `Env. Temp.` , `Analyz. Temp.` , `Analyz. Press.` , … 102 | ``` 103 | 104 | ### Perform averages 105 | 106 | #### Bin-average 107 | 108 | ``` r 109 | ## example of performing 30-s bin-averages 110 | df %>% 111 | interpolate() %>% 112 | perform_average(type = "bin", bins = 30) 113 | #> # Metabolic cart: COSMED 114 | #> # Data status: averaged data - 30-s bins 115 | #> # Time column: t 116 | #> # A tibble: 72 × 114 117 | #> t Rf VT VE VO2 VCO2 O2exp CO2exp `VE/VO2` `VE/VCO2` `VO2/Kg` 118 | #> 119 | #> 1 30 16.3 1.75 26.5 1032. 852. 272. 80.5 25.7 31.4 12.4 120 | #> 2 60 19.0 1.39 25.1 1046. 822. 211. 65.0 24.1 30.7 12.6 121 | #> 3 90 16.6 1.76 28.1 1164. 949. 268. 85.0 24.3 29.7 14.0 122 | #> 4 120 17.8 1.93 25.7 1054. 853. 296. 92.5 24.6 30.5 12.7 123 | #> 5 150 15.4 1.68 24.6 993. 823. 257. 80.4 24.8 29.9 12.0 124 | #> 6 180 18.1 1.38 25.1 1058. 833. 209. 65.4 24.0 30.4 12.7 125 | #> 7 210 22.3 1.37 29.1 1122. 935. 213. 63.4 26.0 31.3 13.5 126 | #> 8 240 16.6 1.91 24.9 966. 825. 301. 89.5 25.8 30.2 11.6 127 | #> 9 270 16.8 1.64 26.2 1044. 896. 252. 79.7 25.2 29.4 12.6 128 | #> 10 300 14.5 2.09 27.2 1097. 945. 322. 103. 24.6 28.8 13.2 129 | #> # ℹ 62 more rows 130 | #> # ℹ 103 more variables: R , FeO2 , FeCO2 , HR , 131 | #> # `VO2/HR` , Load1 , Load2 , Load3 , Phase , 132 | #> # FetO2 , FetCO2 , FiO2 , FiCO2 , Ti , Te , 133 | #> # Ttot , `Ti/Ttot` , IV , PetO2 , PetCO2 , 134 | #> # `P(a-et)CO2` , SpO2 , `VD(phys)` , `VD/VT` , 135 | #> # `Env. Temp.` , `Analyz. Temp.` , `Analyz. Press.` , … 136 | ``` 137 | 138 | #### Rolling-average 139 | 140 | ``` r 141 | ## example of performing 30-s rolling-averages 142 | df %>% 143 | interpolate() %>% 144 | perform_average(type = "rolling", rolling_window = 30) 145 | #> # Metabolic cart: COSMED 146 | #> # Data status: averaged data - 30-s rolling average 147 | #> # Time column: t 148 | #> # A tibble: 2,130 × 114 149 | #> t Rf VT VE VO2 VCO2 O2exp CO2exp `VE/VO2` `VE/VCO2` `VO2/Kg` 150 | #> 151 | #> 1 16.5 16.4 1.75 26.5 1033. 852. 271. 80.1 25.7 31.3 12.4 152 | #> 2 17.5 16.6 1.76 27.0 1054. 870. 273. 80.7 25.7 31.3 12.7 153 | #> 3 18.5 16.7 1.78 27.3 1067. 882. 276. 81.6 25.7 31.3 12.9 154 | #> 4 19.5 16.4 1.80 27.4 1071. 887. 280. 82.8 25.7 31.2 12.9 155 | #> 5 20.5 16.2 1.82 27.4 1071. 888. 282. 83.6 25.7 31.1 12.9 156 | #> 6 21.5 16.0 1.82 27.3 1068. 885. 282. 83.8 25.7 31.1 12.9 157 | #> 7 22.5 16.0 1.81 27.1 1062. 880. 280. 83.4 25.7 31.1 12.8 158 | #> 8 23.5 16.0 1.78 26.9 1052. 871. 277. 82.4 25.6 31.0 12.7 159 | #> 9 24.5 16.1 1.77 26.7 1048. 867. 274. 81.8 25.5 31.0 12.6 160 | #> 10 25.5 16.1 1.76 26.6 1050. 868. 273. 81.9 25.4 30.8 12.6 161 | #> # ℹ 2,120 more rows 162 | #> # ℹ 103 more variables: R , FeO2 , FeCO2 , HR , 163 | #> # `VO2/HR` , Load1 , Load2 , Load3 , Phase , 164 | #> # FetO2 , FetCO2 , FiO2 , FiCO2 , Ti , Te , 165 | #> # Ttot , `Ti/Ttot` , IV , PetO2 , PetCO2 , 166 | #> # `P(a-et)CO2` , SpO2 , `VD(phys)` , `VD/VT` , 167 | #> # `Env. Temp.` , `Analyz. Temp.` , `Analyz. 
Press.` , … 168 | ``` 169 | 170 | ### Perform VO2 kinetics analysis 171 | 172 | ``` r 173 | results_kinetics <- vo2_kinetics( 174 | .data = df, 175 | intensity_domain = "moderate", 176 | vo2_column = "VO2", 177 | protocol_n_transitions = 3, 178 | protocol_baseline_length = 360, 179 | protocol_transition_length = 360, 180 | cleaning_level = 0.95, 181 | cleaning_baseline_fit = c("linear", "exponential", "exponential"), 182 | fit_level = 0.95, 183 | fit_bin_average = 5, 184 | fit_phase_1_length = 20, 185 | fit_baseline_length = 120, 186 | fit_transition_length = 240, 187 | verbose = TRUE 188 | ) 189 | #> ────────────────────────── * V̇O₂ kinetics analysis * ───────────────────────── 190 | #> ✔ Detecting outliers 191 | #> • 14 outliers found in transition 1 192 | #> • 15 outliers found in transition 2 193 | #> • 13 outliers found in transition 3 194 | #> ✔ Processing data... 195 | #> ✔ └─ Removing outliers 196 | #> ✔ └─ Interpolating each transition 197 | #> ✔ └─ Ensemble-averaging transitions 198 | #> ✔ └─ Performing 5-s bin averages 199 | #> ✔ Fitting data... 200 | #> ✔ └─ Fitting baseline 201 | #> ✔ └─ Fitting transition 202 | #> ✔ └─ Calculating residuals 203 | #> ✔ └─ Preparing plots 204 | #> ────────────────────────────────── * DONE * ────────────────────────────────── 205 | ``` 206 | 207 | ### Perform VO2max analysis 208 | 209 | ``` r 210 | df_incremental <- read_data(path = system.file("ramp_cosmed.xlsx", package = "whippr"), metabolic_cart = "cosmed") 211 | 212 | vo2_max( 213 | .data = df_incremental, ## data from `read_data()` 214 | vo2_column = "VO2", 215 | vo2_relative_column = "VO2/Kg", 216 | heart_rate_column = "HR", 217 | rer_column = "R", 218 | detect_outliers = TRUE, 219 | average_method = "bin", 220 | average_length = 30, 221 | plot = TRUE, 222 | verbose = TRUE, 223 | ## arguments for `incremental_normalize()` 224 | incremental_type = "ramp", 225 | has_baseline = TRUE, 226 | baseline_length = 240, ## 4-min baseline 227 | work_rate_magic = TRUE, ## produce a work rate column 228 | baseline_intensity = 20, ## baseline was performed at 20 W 229 | ramp_increase = 25, ## 25 W/min ramp 230 | ## arguments for `detect_outliers()` 231 | test_type = "incremental", 232 | cleaning_level = 0.95, 233 | method_incremental = "linear" 234 | ) 235 | #> ──────────────────────────── * V̇O₂ max analysis * ──────────────────────────── 236 | #> ✔ Normalizing incremental data... 237 | #> ✔ Detecting outliers 238 | #> • 2 outlier(s) found in baseline 239 | #> • 15 outlier(s) found in ramp 240 | #> ✔ Filtering out outliers... 241 | #> ✔ Interpolating from breath-by-breath into second-by-second... 242 | #> ✔ Performing averages... 243 | #> # A tibble: 1 × 6 244 | #> VO2max_absolute VO2max_relative POpeak HRmax RERmax plot 245 | #> 246 | #> 1 3514. 45.8 303 193 1.13 247 | ``` 248 | 249 | ## Metabolic carts currently supported 250 | 251 | - [COSMED](https://www.cosmed.com/en/) 252 | - [CORTEX](https://cortex-medical.com/EN) 253 | - [NSpire](https://www.pressebox.de/pressemitteilung/nspire-health-gmbh/ZAN-100-Diagnostische-Spirometrie/boxid/745555) 254 | - Parvo Medics 255 | - [Geratherm 256 | Respiratory](https://www.geratherm-respiratory.com/product-groups/cpet/) 257 | - [CardioCoach](https://korr.com/go/cardiocoach/) 258 | 259 | ## Online app 260 | 261 | Would you like to perform VO2 kinetics analyses but don’t 262 | know R? No problem! 
You can use our online app: [VO2 Kinetics 263 | App](https://exphyslab.com/kinetics/) 264 | 265 | ## Code of Conduct 266 | 267 | Please note that this project is released with a [Contributor Code of 268 | Conduct](https://www.contributor-covenant.org/version/1/0/0/code-of-conduct.html). 269 | By participating in this project you agree to abide by its terms. 270 | 271 |
272 | 273 | Icons made by 274 | monkik 275 | from 276 | www.flaticon.com 277 | 278 |
279 | -------------------------------------------------------------------------------- /R/incremental.R: -------------------------------------------------------------------------------- 1 | #' Normalize incremental test data 2 | #' 3 | #' Detect protocol phases (baseline, ramp, steps), normalize work rate, and 4 | #' time-align baseline phase (baseline time becomes negative). 5 | #' 6 | #' @param .data Data retrieved from \code{read_data()}. 7 | #' @param incremental_type The type of the incremental test performed. Either "ramp" or "step". 8 | #' @param has_baseline A boolean to indicate whether the data contains a baseline phase. This is used for an incremental test only. Default to `TRUE`. 9 | #' @param baseline_length The baseline length (in seconds) performed. 10 | #' @param work_rate_magic A boolean indicating whether to perform the work rate calculations. When set to `TRUE`, 11 | #' it will calculate the work rate throughout a ramp or step test. In the case of a step test, it will also 12 | #' perform a linear transformation of the work rate. 13 | #' If set to `TRUE`, the arguments below should be given. Default to `FALSE`. 14 | #' @param baseline_intensity A numeric atomic vector indicating the work rate of the baseline. If the baseline was performed at rest, indicate `0`. 15 | #' @param ramp_increase A numeric atomic vector indicating the ramp increase in watts per minute (W/min). For example, if the ramp 16 | #' was `30 W/min`, then pass the number `30` to this argument. 17 | #' @param step_start In case your baseline was performed at rest, you can set in this parameter at which intensity 18 | #' the step test started. 19 | #' @param step_increase A numeric atomic vector indicating the step increase, in watts. For example, if the step increase was 20 | #' `25 W` at each step, then pass the number `25` to this argument. 21 | #' @param step_length A numeric atomic vector indicating the length (in seconds) of each step in the step incremental test. 22 | #' @param ... Additional arguments. Currently ignored. 
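#'
#' @details
#' When `work_rate_magic = TRUE`, a `work_rate` column is added to the returned data.
#' For a ramp test, the work rate during the ramp phase is expected to increase linearly from
#' `baseline_intensity` at the rate given by `ramp_increase` (W/min); see `work_rate_ramp()`
#' for the exact implementation. For a step test, the stepwise work rate is additionally
#' linearized; see `work_rate_step()`.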
23 | #' 24 | #' @return a [tibble][tibble::tibble-package] 25 | #' @export 26 | #' 27 | #' @examples 28 | #' \dontrun{ 29 | #' ## get file path from example data 30 | #' path_example <- system.file("ramp_cosmed.xlsx", package = "whippr") 31 | #' 32 | #' ## read data from ramp test 33 | #' df <- read_data(path = path_example, metabolic_cart = "cosmed") 34 | #' 35 | #' ## normalize incremental test data 36 | #' ramp_normalized <- df %>% 37 | #' incremental_normalize( 38 | #' .data = ., 39 | #' incremental_type = "ramp", 40 | #' has_baseline = TRUE, 41 | #' baseline_length = 240, 42 | #' work_rate_magic = TRUE, 43 | #' baseline_intensity = 20, 44 | #' ramp_increase = 25 45 | #' ) 46 | #' 47 | #' ## get file path from example data 48 | #' path_example_step <- system.file("step_cortex.xlsx", package = "whippr") 49 | #' 50 | #' ## read data from step test 51 | #' df_step <- read_data(path = path_example_step, metabolic_cart = "cortex") 52 | #' 53 | #' ## normalize incremental test data 54 | #' step_normalized <- df_step %>% 55 | #' incremental_normalize( 56 | #' .data = ., 57 | #' incremental_type = "step", 58 | #' has_baseline = TRUE, 59 | #' baseline_length = 120, 60 | #' work_rate_magic = TRUE, 61 | #' baseline_intensity = 0, 62 | #' step_start = 50, 63 | #' step_increase = 25, 64 | #' step_length = 180 65 | #' ) 66 | #' } 67 | incremental_normalize <- function( 68 | .data, 69 | incremental_type = c("ramp", "step"), 70 | has_baseline = TRUE, 71 | baseline_length = NULL, 72 | work_rate_magic = FALSE, 73 | baseline_intensity = NULL, 74 | ramp_increase = NULL, 75 | step_start = NULL, 76 | step_increase = NULL, 77 | step_length = NULL, 78 | ... 79 | ) { 80 | 81 | if(missing(.data)) 82 | stop("No data, no fun. Please, pass the data retrieved from 'read_data()' to the function.", call. = FALSE) 83 | 84 | if(is.null(attributes(.data)$read_data)) 85 | stop("It looks like you did not read your data with the `read_data()` function. Make sure you use it before continuing.", call. = FALSE) 86 | 87 | incremental_type <- match.arg(incremental_type) 88 | 89 | class(.data) <- incremental_type 90 | 91 | UseMethod("incremental_normalize", .data) 92 | } 93 | 94 | #' @export 95 | incremental_normalize.ramp <- function( 96 | .data, 97 | incremental_type = c("ramp", "step"), 98 | has_baseline = TRUE, 99 | baseline_length = NULL, 100 | work_rate_magic = FALSE, 101 | baseline_intensity = NULL, 102 | ramp_increase = NULL, 103 | step_start = NULL, 104 | step_increase = NULL, 105 | step_length = NULL, 106 | ... 107 | ) { 108 | 109 | if(has_baseline & missing(baseline_length)) 110 | stop("You indicated that your data contains a baseline phase, but you forgot to specify the length of the 111 | baseline. Please, indicate it in the 'baseline_length' argument.", call. 
= FALSE) 112 | 113 | # 1) time-align -------------------------------------------------------------- 114 | if(has_baseline) { 115 | data_time_aligned <- .data %>% 116 | dplyr::mutate(dplyr::across(.cols = 1, .fns = ~ .x - baseline_length)) 117 | } else { 118 | data_time_aligned <- .data 119 | } 120 | 121 | time_column <- attributes(.data)$time_column 122 | 123 | # 2) identify protocol phases ------------------------------------------------ 124 | if(has_baseline) { 125 | out <- data_time_aligned %>% 126 | dplyr::mutate(protocol_phase = dplyr::case_when( 127 | !!rlang::sym(time_column) <= 0 ~ "baseline", 128 | TRUE ~ "ramp" 129 | )) 130 | } else { 131 | out <- data_time_aligned %>% 132 | dplyr::mutate(protocol_phase = "ramp") 133 | } 134 | 135 | ## work rate magic 136 | if(work_rate_magic & any(c(missing(baseline_intensity), missing(ramp_increase)))) 137 | stop("For the work rate magic to work you need to specify the 'baseline_intensity' and 'ramp_increase' arguments.", call. = FALSE) 138 | 139 | if(work_rate_magic) 140 | out <- work_rate_ramp(.data = out, baseline_intensity = baseline_intensity, ramp_increase = ramp_increase) 141 | 142 | metadata <- attributes(.data) 143 | metadata$data_status <- "raw data - ramp normalized" 144 | metadata$test_type <- "incremental" 145 | metadata$incremental <- TRUE 146 | metadata$normalized <- TRUE 147 | metadata$incremental_type <- incremental_type 148 | metadata$has_baseline <- has_baseline 149 | metadata$baseline_length <- baseline_length 150 | metadata$baseline_intensity <- baseline_intensity 151 | metadata$ramp_increase <- ramp_increase 152 | 153 | out <- new_whippr_tibble(out, metadata) 154 | 155 | out 156 | } 157 | 158 | #' @export 159 | incremental_normalize.step <- function( 160 | .data, 161 | incremental_type = c("ramp", "step"), 162 | has_baseline = TRUE, 163 | baseline_length = NULL, 164 | work_rate_magic = FALSE, 165 | baseline_intensity = NULL, 166 | ramp_increase = NULL, 167 | step_start = NULL, 168 | step_increase = NULL, 169 | step_length = NULL, 170 | ... 171 | ) { 172 | 173 | if(any(is.null(step_increase), is.null(step_length))) 174 | stop("You need to specify the `step_increase` and `step_length` arguments for the step test.", call. = FALSE) 175 | 176 | if(has_baseline & missing(baseline_length)) 177 | stop("You indicated that your data contains a baseline phase, but you forgot to specify the length of the 178 | baseline. Please, indicate it in the 'baseline_length' argument.", call. = FALSE) 179 | 180 | # 1) time-align -------------------------------------------------------------- 181 | if(has_baseline) { 182 | data_time_aligned <- .data %>% 183 | dplyr::mutate(dplyr::across(.cols = 1, .fns = ~ .x - baseline_length)) 184 | } else { 185 | data_time_aligned <- .data 186 | } 187 | 188 | time_column <- attributes(.data)$time_column 189 | 190 | # 2) identify protocol phases ------------------------------------------------ 191 | if(has_baseline) { 192 | out <- data_time_aligned %>% 193 | dplyr::mutate(protocol_phase = dplyr::case_when( 194 | !!rlang::sym(time_column) <= 0 ~ "baseline", 195 | TRUE ~ "step" 196 | )) 197 | } else { 198 | out <- data_time_aligned %>% 199 | dplyr::mutate(protocol_phase = "step") 200 | } 201 | 202 | ## work rate magic 203 | if(work_rate_magic & any(c(missing(baseline_intensity), missing(step_increase), missing(step_length)))) 204 | stop("For the work rate magic to work you need to specify the 'baseline_intensity', 'step_increase', and 'step_length' arguments.", call. 
= FALSE) 205 | 206 | if(work_rate_magic) 207 | out <- work_rate_step( 208 | .data = out, 209 | baseline_intensity = baseline_intensity, 210 | step_start = step_start, 211 | step_increase = step_increase, 212 | step_length = step_length 213 | ) 214 | 215 | metadata <- attributes(.data) 216 | metadata$data_status <- "raw data - step normalized" 217 | metadata$test_type <- "incremental" 218 | metadata$incremental <- TRUE 219 | metadata$normalized <- TRUE 220 | metadata$incremental_type <- incremental_type 221 | metadata$has_baseline <- has_baseline 222 | metadata$baseline_length <- baseline_length 223 | metadata$baseline_intensity <- baseline_intensity 224 | metadata$step_start <- step_start 225 | metadata$step_increase <- step_increase 226 | metadata$step_length <- step_length 227 | 228 | out <- new_whippr_tibble(out, metadata) 229 | 230 | out 231 | 232 | } 233 | 234 | #' Plot incremental test work rate 235 | #' 236 | #' Visualize what was done during the process of deriving the work rate from the incremental test protocol 237 | #' 238 | #' @param .data data retrieved from `incremental_normalize()`. 239 | #' 240 | #' @return a ggplot object 241 | #' @export 242 | plot_incremental <- function(.data) { 243 | 244 | if(is.null(attr(.data, "normalized"))) 245 | stop("It looks like you did not normalized your incremental data yet with the `incremental_normalize()` function. 246 | Make sure you use it before continuing.", call. = FALSE) 247 | 248 | class(.data) <- attr(.data, "incremental_type") 249 | 250 | UseMethod("plot_incremental", .data) 251 | 252 | } 253 | 254 | #' @export 255 | plot_incremental.ramp <- function(.data) { 256 | ## check if ggforce is installed 257 | rlang::check_installed("ggforce") 258 | 259 | ## get time column 260 | time_column <- attr(.data, "time_column") 261 | 262 | df_labels <- .data %>% 263 | dplyr::group_by(protocol_phase) %>% 264 | dplyr::summarise(x = mean(t), y = mean(work_rate)) %>% 265 | dplyr::mutate(desc = glue::glue("{protocol_phase} period work rate")) 266 | 267 | out <- .data %>% 268 | ggplot2::ggplot(ggplot2::aes(!!rlang::sym(time_column), work_rate)) + 269 | ggplot2::geom_line() + 270 | ggforce::geom_mark_circle( 271 | data = df_labels, 272 | ggplot2::aes(x, y, label = protocol_phase, description = desc), 273 | expand = ggplot2::unit(2, "mm") 274 | ) + 275 | theme_whippr() 276 | 277 | out 278 | } 279 | 280 | #' @export 281 | plot_incremental.step <- function(.data) { 282 | 283 | ## get time column 284 | time_column <- attr(.data, "time_column") 285 | 286 | df_labels <- .data %>% 287 | dplyr::group_by(step, step_work_rate) %>% 288 | dplyr::summarise(t_step = mean(!!rlang::sym(time_column))) %>% 289 | dplyr::mutate( 290 | label = stringr::str_extract(string = step, pattern = "\\d.*"), 291 | label = glue::glue("Step {label}") 292 | ) 293 | 294 | df_seg <- .data %>% 295 | dplyr::filter(protocol_phase == "step") %>% 296 | dplyr::select(!!rlang::sym(time_column), work_rate) %>% 297 | dplyr::summarise( 298 | x = mean(!!rlang::sym(time_column)), 299 | y = mean(work_rate) 300 | ) %>% 301 | dplyr::mutate( 302 | label = "Continuous WR", 303 | desc = "Linearization of the steps" 304 | ) 305 | 306 | df_seg_2 <- .data %>% 307 | dplyr::filter(protocol_phase == "step") %>% 308 | dplyr::select(t, step_work_rate, step) %>% 309 | dplyr::group_by(step) %>% 310 | dplyr::summarise( 311 | x = min(!!rlang::sym(time_column)), 312 | y = min(step_work_rate) 313 | ) %>% 314 | dplyr::slice(nrow(.) 
/ 2) %>% 315 | dplyr::mutate( 316 | label = "Steps performed", 317 | desc = "Work rate of the given step" 318 | ) 319 | 320 | p <- .data %>% 321 | ggplot2::ggplot(ggplot2::aes(!!rlang::sym(time_column), step_work_rate)) + 322 | ggplot2::geom_path(color = "black") + 323 | ggplot2::geom_path(ggplot2::aes(t, work_rate), color = "darkred", lty = "dashed") + 324 | ggplot2::geom_text( 325 | data = df_labels, 326 | ggplot2::aes(t_step, step_work_rate + 10, label = label), 327 | fontface = "bold" 328 | ) + 329 | ggforce::geom_mark_circle( 330 | data = df_seg, 331 | ggplot2::aes(x, y, label = label, description = desc), 332 | label.colour = "darkred", 333 | expand = ggplot2::unit(2, "mm"), 334 | label.buffer = ggplot2::unit(70, "mm") 335 | ) + 336 | ggforce::geom_mark_circle( 337 | data = df_seg_2, 338 | ggplot2::aes(x, y, label = label, description = desc), 339 | expand = ggplot2::unit(2, "mm"), 340 | label.buffer = ggplot2::unit(30, "mm") 341 | ) + 342 | ggplot2::labs( 343 | title = "Step incremental test", 344 | subtitle = "Description of the linearization performed on the work rate", 345 | x = "time (s)", 346 | y = "Step work rate (W)" 347 | ) + 348 | theme_whippr() 349 | 350 | p 351 | } 352 | -------------------------------------------------------------------------------- /man/vo2_kinetics.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/kinetics.R 3 | \name{vo2_kinetics} 4 | \alias{vo2_kinetics} 5 | \title{VO2 kinetics} 6 | \usage{ 7 | vo2_kinetics( 8 | .data, 9 | intensity_domain = c("moderate", "heavy", "severe"), 10 | vo2_column = "VO2", 11 | protocol_n_transitions, 12 | protocol_baseline_length, 13 | protocol_transition_length, 14 | cleaning_level = 0.95, 15 | cleaning_baseline_fit, 16 | fit_level = 0.95, 17 | fit_bin_average, 18 | fit_phase_1_length, 19 | fit_baseline_length, 20 | fit_transition_length, 21 | verbose = TRUE, 22 | ... 23 | ) 24 | } 25 | \arguments{ 26 | \item{.data}{Data retrieved from \code{read_data()}.} 27 | 28 | \item{intensity_domain}{The exercise-intensity domain that the test was performed. Either \emph{moderate}, \emph{heavy}, or \emph{severe}.} 29 | 30 | \item{vo2_column}{The name (quoted) of the column containing the absolute oxygen uptake (VO2) data. Default to \code{"VO2"}.} 31 | 32 | \item{protocol_n_transitions}{Number of transitions performed.} 33 | 34 | \item{protocol_baseline_length}{The length of the baseline (in seconds).} 35 | 36 | \item{protocol_transition_length}{The length of the transition (in seconds).} 37 | 38 | \item{cleaning_level}{A numeric scalar between 0 and 1 giving the confidence level for the intervals to be calculated during the data cleaning process. Breaths lying outside the prediction bands will be excluded. Default to \code{0.95}.} 39 | 40 | \item{cleaning_baseline_fit}{A vector of the same length as the number in \code{protocol_n_transitions}, indicating what kind of fit to perform for each baseline. Either \emph{linear} or \emph{exponential}.} 41 | 42 | \item{fit_level}{A numeric scalar between 0 and 1 giving the confidence level for the parameter estimates in the final VO2 kinetics fit. Default to \code{0.95}.} 43 | 44 | \item{fit_bin_average}{The bin average to be performed for the final fit.} 45 | 46 | \item{fit_phase_1_length}{The length of the phase I that you wish to exclude from the final exponential fit, in seconds. 
See \verb{VO2 kinetics} section for more details.} 47 | 48 | \item{fit_baseline_length}{The length of the baseline used to perform the final linear fit, in seconds. See \verb{VO2 kinetics} section for more details.} 49 |
50 | \item{fit_transition_length}{The length of the transition to perform the final exponential fit, in seconds. See \verb{VO2 kinetics} section for more details.} 51 |
52 | \item{verbose}{A boolean indicating whether messages should be printed in the console. Default to \code{TRUE}.} 53 |
54 | \item{...}{Additional arguments passed to \code{perform_kinetics()} when fitting VO2 kinetics in the heavy- or severe-intensity domains. See \code{?perform_kinetics} for more details.} 55 | } 56 | \value{ 57 | a \link[tibble:tibble-package]{tibble} containing one row and the nested columns:
58 | \item{data_outliers}{The raw data containing additional columns that identify breaths as outliers.} 59 | \item{plot_outliers}{A \code{patchwork} object to display outliers from every transition.} 60 | \item{data_processed}{The processed data (time-aligned, ensemble-averaged, and bin-averaged).}
61 | \item{data_fitted}{The data containing the time and VO2 columns, as well as the fitted data and its residuals for each data point.} 62 | \item{model}{A \code{nls} object. The model used in the VO2 kinetics fitting.} 63 | \item{model_summary}{The tidied summary of the \code{model}.} 64 | \item{model_residuals}{The residuals of the \code{model}.} 65 | \item{plot_model}{The final plot of the fitted \code{model}.} 66 | \item{plot_residuals}{The residuals plot for the \code{model} diagnostics.} 67 | } 68 | \description{
69 | It performs the whole process of the VO2 kinetics data analysis, which includes: 70 | data cleaning (\code{detect_outliers()}); outlier removal, interpolation, ensemble-averaging of the transitions, and bin-averaging of the final dataset (\code{process_data()}); 71 | and modelling of VO2 kinetics (\code{perform_kinetics()}). This is a general function that calls these separate functions in sequence. 72 | You can also call each one of them separately if you want. 73 | } 74 | \details{
75 | The function is a wrapper around smaller functions, and its arguments are grouped as follows: 76 | \itemize{ 77 | \item \strong{protocol_} = sets arguments related to the protocol used. 78 | \item \strong{cleaning_} = sets arguments related to data cleaning. 79 | \item \strong{fit_} = sets arguments related to VO2 kinetics fitting. 80 | } 81 | 82 | The function works through the following sequence: 83 | 84 | \strong{\code{vo2_kinetics( )}}: 85 | \itemize{
86 | \item \code{detect_outliers( )} = separates the data into the number of transitions indicated, 87 | and fits each baseline and transition phase individually, retrieving the prediction bands for the level indicated. 88 | It then recognizes breaths lying outside the prediction bands and flags them as outliers.
89 | \item \code{plot_outliers( )} = plots each transition, identifying outliers.
90 | \item \code{process_data( )} = removes the outliers detected through \code{detect_outliers()}, interpolates each transition, 91 | ensemble-averages all the transitions into one, performs a bin-average, and normalizes the time column 92 | (time zero will indicate the end of baseline and the start of the transition phase).
93 | \item \code{perform_kinetics( )} = performs the VO2 kinetics fitting based on the \strong{fit_} parameters given. 94 | It also calculates the residuals, and plots the final fit as well as the residuals for model diagnostics.
95 | } 96 | } 97 | \section{VO2 kinetics}{ 98 | VO2 kinetics, described as the rate of adjustment of the oxidative energy system to an 99 | instantaneous increase in the energy demand, is exponential in nature, and it is described by the 100 | oxygen uptake (VO2) time-constant (\eqn{\tau}VO2) (Murias, Spencer and Paterson (2014); Poole and Jones (2011)). 101 |
102 | VO2 kinetics analysis provides understanding of the mechanisms that regulate the rate at which oxidative 103 | phosphorylation adapts to step changes in exercise intensities and ATP requirement. This is usually accomplished 104 | by performing step transitions from a baseline intensity to a higher work rate in either the \strong{moderate-}, \strong{heavy-}, or 105 | \strong{severe-intensity domain} (Murias et al., 2011). 106 |
107 | Three distinct phases may be observed in the VO2 response during on-transient exercise: 108 |
109 | \strong{Phase I}: also termed the cardiodynamic phase, it represents the circulatory transit delay 110 | in the VO2 response as a result of the increase in pulmonary blood flow that does not reflect the increase 111 | in oxygen extraction in the active muscles. The time window of Phase I is determined by the \strong{\code{fit_phase_1_length}} argument, which will be internally passed into the \code{perform_kinetics()} function. 112 |
113 | \strong{Phase II}: also termed the primary component, represents the exponential increase in VO2 114 | related to the continued increase in pulmonary and muscle blood flow. Phase II is described by the time-constant parameter (\eqn{\tau}) 115 | in the mono-exponential model (see below), and it is defined as the duration of time (in seconds) for the VO2 response 116 | to increase to 63\% of the required steady-state. 117 |
118 | \strong{Phase III}: represents the steady-state phase of the VO2 response 119 | during moderate-intensity exercise. 120 | \subsection{Moderate-intensity domain}{ 121 |
122 | The on-transient response from baseline to a transition within the \strong{moderate-intensity domain} 123 | is analyzed using a \strong{mono-exponential model}: 124 | \deqn{VO_{2\left(t\right)}=baseline+amplitude\cdot\left(1-e^{-\frac{\left(t-TD\right)}{\tau}}\right)}{% 125 | VO2(t) = baseline + amplitude * (1 - exp(-(t-TD)/tau))} 126 |
127 | where: 128 | \itemize{ 129 | \item \code{VO2(t)} = the oxygen uptake at any given time. 130 | \item \code{baseline} = the oxygen uptake associated with the baseline phase. 131 | \item \code{amplitude} = the steady-state increase in oxygen uptake above \code{baseline}. 132 | \item \code{TD} = the time delay. 133 | \item \eqn{\tau} = the time constant, defined as the duration of time for the oxygen uptake to increase to 63\% of the steady-state increase. 134 | } 135 |
136 | The baseline value in the mono-exponential model is a \strong{fixed} value, pre-determined 137 | as the mean of the VO2 response (i.e., a linear model with the slope set to zero) during the baseline phase. 138 | The time window of the baseline period is determined by the \strong{\code{fit_baseline_length}} argument, which will be internally passed into the \code{perform_kinetics()} function. 139 |
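For illustration, the mono-exponential model above can be evaluated directly in R
(the parameter values below are arbitrary and for demonstration only):

\preformatted{
## illustrative values: baseline = 1000 mL/min, amplitude = 1500 mL/min,
## TD = 15 s, tau = 30 s; predicted VO2 at t = 120 s:
1000 + 1500 * (1 - exp(-(120 - 15) / 30))
#> [1] 2454.704
}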
140 | Diverse exercise protocols exist to determine VO2 kinetics in the moderate-intensity domain. 141 | Usually, the protocol consists of multiple transitions (typically 3 or 4) from a baseline exercise intensity to an exercise intensity 142 | below the gas exchange threshold (typically the power output associated with 90\% of the gas exchange threshold). Baseline and 143 | transition phases are usually performed for 6 minutes each. Six minutes is used for each phase to give both enough time 144 | to reach a steady-state response: 145 |
146 | For example, for each multiple of the time-constant (\eqn{\tau}), VO2 increases by 63\% of the 147 | difference between the previous \eqn{\tau} and the required steady-state. 148 | This means: 149 | \itemize{ 150 | \item \code{1} \eqn{\tau} \verb{= 63\%} \eqn{\Delta}. 151 | \item \code{2} \eqn{\tau} \verb{= 86\%} \eqn{\Delta} \verb{[100\% - 63\% = 37\%; (37\% x 63\%) + 63\% = 86\%]}. 152 | \item \code{3} \eqn{\tau} \verb{= 95\%} \eqn{\Delta} \verb{[100\% - 86\% = 14\%; (14\% x 63\%) + 86\% = 95\%]}. 153 | \item \code{4} \eqn{\tau} \verb{= 98\%} \eqn{\Delta} \verb{[100\% - 95\% = 5\%; (5\% x 63\%) + 95\% = 98\%]}. 154 | } 155 |
156 | In practical terms, let's imagine that a given participant has a \strong{\eqn{\tau} = 60 seconds}. This means that this person 157 | would need \strong{240 seconds} (\verb{4 x 60}) to reach \strong{steady-state} (98\% of the response) in the \strong{moderate-intensity domain}. This would leave another 158 | 120 seconds (2 minutes) of transition, so the protocol of performing 6-min transitions makes sure enough time is given. 159 |
160 | Now let's imagine that another person has a \strong{\eqn{\tau} = 20 seconds}. This means that this person 161 | would need \strong{80 seconds} (\verb{4 x 20}) to reach \strong{steady-state} (98\% of the response) in the \strong{moderate-intensity domain}. 162 |
163 | Given that 6 minutes of transition is enough to reach a VO2 steady-state response, the final fit 164 | (performed on the cleaned, ensemble-averaged, and bin-averaged transitions) does not need to include the whole 6 minutes of the transition. 165 | This strategy avoids superfluous sections of the steady-state data, thus maximizing the quality of the fit during the exercise on-transient (Bell et al., 2001). 166 | This may be specified through the \strong{\code{fit_transition_length}} argument, which will be internally passed into the \code{perform_kinetics()} function. 167 |
168 | As for bin-averages in the final fit, the data are usually averaged into 5-s or 10-s bins, with 5-s bins being the most common (Keir et al., 2014). 169 | This may be specified through the \strong{\code{fit_bin_average}} argument, which will be internally passed into the \code{process_data()} function.
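As a quick numerical check of the percentages listed above, the fraction of the
steady-state amplitude reached after \code{n} time-constants is \eqn{1 - e^{-n}}:

\preformatted{
## fraction of the steady-state response reached after 1 to 4 time-constants
round(1 - exp(-(1:4)), 2)
#> [1] 0.63 0.86 0.95 0.98
}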
170 | } 171 | 172 | \subsection{Heavy- and severe-intensity domains}{ 173 | 174 | TODO 175 | } 176 | } 177 | 178 | \examples{ 179 | \dontrun{ 180 | ## get file path from example data 181 | path_example <- system.file("example_cosmed.xlsx", package = "whippr") 182 | 183 | ## read data 184 | df <- read_data(path = path_example, metabolic_cart = "cosmed", time_column = "t") 185 | 186 | ## VO2 kinetics analysis 187 | results_kinetics <- vo2_kinetics( 188 | .data = df, 189 | intensity_domain = "moderate", 190 | vo2_column = "VO2", 191 | protocol_n_transitions = 3, 192 | protocol_baseline_length = 360, 193 | protocol_transition_length = 360, 194 | cleaning_level = 0.95, 195 | cleaning_baseline_fit = c("linear", "exponential", "exponential"), 196 | fit_level = 0.95, 197 | fit_bin_average = 5, 198 | fit_phase_1_length = 20, 199 | fit_baseline_length = 120, 200 | fit_transition_length = 240, 201 | verbose = TRUE 202 | ) 203 | } 204 | } 205 | \references{ 206 | Bell, C., Paterson, D. H., Kowalchuk, J. M., Padilla, J., & Cunningham, D. A. (2001). A comparison of modelling techniques used to characterise oxygen uptake kinetics during the on-transient of exercise. Experimental Physiology, 86(5), 667-676. 207 | 208 | Keir, D. A., Murias, J. M., Paterson, D. H., & Kowalchuk, J. M. (2014). Breath‐by‐breath pulmonary O2 uptake kinetics: effect of data processing on confidence in estimating model parameters. Experimental physiology, 99(11), 1511-1522. 209 | 210 | Murias, J. M., Spencer, M. D., & Paterson, D. H. (2014). The critical role of O2 provision in the dynamic adjustment of oxidative phosphorylation. Exercise and sport sciences reviews, 42(1), 4-11. 211 | 212 | Murias, J. M., Spencer, M. D., Kowalchuk, J. M., & Paterson, D. H. (2011). Influence of phase I duration on phase II VO2 kinetics parameter estimates in older and young adults. American Journal of Physiology-regulatory, integrative and comparative physiology, 301(1), R218-R224. 213 | 214 | Poole, D. C., & Jones, A. M. (2011). Oxygen uptake kinetics. Comprehensive Physiology, 2(2), 933-996. 215 | } 216 | --------------------------------------------------------------------------------