├── .Rbuildignore ├── .git-tricks.md ├── .gitignore ├── .travis.yml ├── DESCRIPTION ├── NAMESPACE ├── NEWS.md ├── R ├── .Rapp.history ├── CD4-data.R ├── COVID19-data.R ├── DTI-data.R ├── DTI2-data.R ├── GLS_CS.R ├── Gibbs_CS_FPCA.R ├── Gibbs_CS_Wish.R ├── Gibbs_Mult_FPCA.R ├── Gibbs_Mult_Wish.R ├── OLS_CS.R ├── Omegas.R ├── PEER.Sim-data.R ├── VB_CS_FPCA.R ├── VB_CS_Wish.R ├── VB_Mult_FPCA.R ├── VB_Mult_Wish.R ├── XtSiginvX.R ├── af.R ├── af_old.R ├── amc.R ├── bayes_fosr.R ├── ccb.fpc.R ├── coefficients.pfr.R ├── content-data.R ├── create.prep.func.R ├── dt_basis.R ├── f_sum.R ├── f_sum2.R ├── f_sum4.R ├── f_trace.R ├── face.Cov.mfpca.R ├── fbps.R ├── fgam.R ├── fosr.R ├── fosr.perm.R ├── fosr.perm.fit.R ├── fosr.perm.test.R ├── fosr.vs.R ├── fosr2s.R ├── fpc.R ├── fpca.face.R ├── fpca.lfda.R ├── fpca.sc.R ├── fpca.ssvd.R ├── fpca2s.R ├── fpcr.R ├── fpcr.setup.R ├── gasoline-data.R ├── irreg2mat.R ├── lf.R ├── lf.vd.R ├── lf_old.R ├── lofocv.R ├── lpeer.R ├── lpfr.R ├── lw.test.R ├── mfpca.face.R ├── mfpca.sc.R ├── osplinepen2d.R ├── parse.predict.pfr.R ├── peer.R ├── peer_old.R ├── pffr-ff.R ├── pffr-ffpc.R ├── pffr-methods.R ├── pffr-pcre.R ├── pffr-robust.R ├── pffr-sff.R ├── pffr-utilities.R ├── pffr.R ├── pfr.R ├── pfr_old.R ├── pi_basis.R ├── plot.fosr.R ├── plot.fosr.perm.R ├── plot.fosr.vs.R ├── plot.fpcr.R ├── plot.lpeer.R ├── plot.peer.R ├── plot.pfr.R ├── poridge.R ├── postprocess.pfr.R ├── predict.fbps.R ├── predict.fgam.R ├── predict.fosr.R ├── predict.fosr.vs.R ├── predict.pfr.R ├── predict.pfr_old.R ├── preprocess.pfr.R ├── pspline.setting.R ├── pwcv.R ├── quadWeights.R ├── re.R ├── rlrt.pfr.R ├── select_knots.R ├── summary.pfr.R ├── vis.fgam.R └── vis.pfr.R ├── README.md ├── cran-comments.md ├── data ├── COVID19.rda ├── DTI.RData ├── DTI2.RData ├── PEER.Sim.RData ├── Q.RData ├── cd4.RData ├── content.rda ├── gasoline.RData └── sofa.RData ├── man ├── COVID19.Rd ├── DTI.Rd ├── DTI2.Rd ├── PEER.Sim.Rd ├── Predict.matrix.dt.smooth.Rd ├── 
Predict.matrix.fpc.smooth.Rd ├── Predict.matrix.pcre.random.effect.Rd ├── Predict.matrix.peer.smooth.Rd ├── Predict.matrix.pi.smooth.Rd ├── Xt_siginv_X.Rd ├── af.Rd ├── af_old.Rd ├── amc.Rd ├── bayes_fosr.Rd ├── ccb.fpc.Rd ├── cd4.Rd ├── cmdscale_lanczos.Rd ├── coef.pffr.Rd ├── coefboot.pffr.Rd ├── coefficients.pfr.Rd ├── content.Rd ├── create.prep.func.Rd ├── dot-smooth.spec.Rd ├── expand.call.Rd ├── f_sum.Rd ├── f_sum2.Rd ├── f_sum4.Rd ├── f_trace.Rd ├── fbps.Rd ├── ff.Rd ├── ffpc.Rd ├── ffpcplot.Rd ├── fgam.Rd ├── fosr.Rd ├── fosr.perm.Rd ├── fosr.vs.Rd ├── fosr2s.Rd ├── fpc.Rd ├── fpca.face.Rd ├── fpca.lfda.Rd ├── fpca.sc.Rd ├── fpca.ssvd.Rd ├── fpca2s.Rd ├── fpcr.Rd ├── gasoline.Rd ├── getTF.Rd ├── gibbs_cs_fpca.Rd ├── gibbs_cs_wish.Rd ├── gibbs_mult_fpca.Rd ├── gibbs_mult_wish.Rd ├── gls_cs.Rd ├── lf.Rd ├── lf.vd.Rd ├── lf_old.Rd ├── lofocv.Rd ├── lpeer.Rd ├── lpfr.Rd ├── mfpca.face.Rd ├── mfpca.sc.Rd ├── model.matrix.pffr.Rd ├── ols_cs.Rd ├── pco_predict_preprocess.Rd ├── pcre.Rd ├── peer.Rd ├── peer_old.Rd ├── pffr.Rd ├── pffr.check.Rd ├── pffrGLS.Rd ├── pffrSim.Rd ├── pfr.Rd ├── pfr_old.Rd ├── pfr_plot.gam.Rd ├── plot.fosr.Rd ├── plot.fosr.vs.Rd ├── plot.fpcr.Rd ├── plot.lpeer.Rd ├── plot.peer.Rd ├── plot.pffr.Rd ├── plot.pfr.Rd ├── predict.fbps.Rd ├── predict.fgam.Rd ├── predict.fosr.Rd ├── predict.fosr.vs.Rd ├── predict.pffr.Rd ├── predict.pfr.Rd ├── print.summary.pffr.Rd ├── pwcv.Rd ├── qq.pffr.Rd ├── quadWeights.Rd ├── re.Rd ├── residuals.pffr.Rd ├── rlrt.pfr.Rd ├── sff.Rd ├── smooth.construct.dt.smooth.spec.Rd ├── smooth.construct.fpc.smooth.spec.Rd ├── smooth.construct.pco.smooth.spec.Rd ├── smooth.construct.pcre.smooth.spec.Rd ├── smooth.construct.pi.smooth.spec.Rd ├── smooth.construct.pss.smooth.spec.Rd ├── sofa.Rd ├── summary.pffr.Rd ├── summary.pfr.Rd ├── vb_cs_fpca.Rd ├── vb_cs_wish.Rd ├── vb_mult_fpca.Rd ├── vb_mult_wish.Rd ├── vis.fgam.Rd └── vis.pfr.Rd ├── revdep └── checks.rds └── tests ├── testthat.R └── testthat ├── test-fosr.R ├── 
test-fpca.R ├── test-fpcr.R ├── test-lpfr.R ├── test-mfpca.R ├── test-pcre.R ├── test-peer.R └── test-pffr.R /.Rbuildignore: -------------------------------------------------------------------------------- 1 | ^\.Rproj$ 2 | ^\.Rproj\.user$ 3 | ^\.git$ 4 | ^\.gitignore$ 5 | ^\.travis\.yml$ 6 | .git-tricks.md 7 | refund.Rproj 8 | ^NEWS\.md$ 9 | ^cran-comments\.md$ 10 | ^README\.md$ 11 | revdep -------------------------------------------------------------------------------- /.git-tricks.md: -------------------------------------------------------------------------------- 1 | ### reset repo to state at a certain commit: 2 | 3 | git reset --hard 4 | git push -f origin master 5 | 6 | ### merge changes to a single file rather than commits: 7 | 8 | Need to merge just file f of branch B into file f of branch A 9 | (assumes that all changes are committed in both branches A and B): 10 | 11 | git checkout A 12 | git checkout --patch B f 13 | 14 | see http://stackoverflow.com/a/11593308/295025 15 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .DS_Store 2 | .Rproj.user 3 | .Rhistory 4 | .RData 5 | refund.Rproj 6 | *~ 7 | \#*\# -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: r 2 | sudo: required 3 | 4 | r: 5 | - oldrel 6 | - release 7 | - devel 8 | cache: packages 9 | 10 | # need this for current nlme, mgcv: 11 | r_packages: 12 | - nlme 13 | - mgcv 14 | - devtools 15 | 16 | #need this for nloptr? 
17 | apt_packages: 18 | - libnlopt-dev 19 | 20 | after_failure: 21 | - ./travis-tool.sh dump_logs 22 | 23 | notifications: 24 | email: 25 | recipients: 26 | - jw3134@cumc.columbia.edu 27 | - fabian.scheipl@stat.uni-muenchen.de 28 | - jeff.goldsmith@columbia.edu 29 | on_success: change 30 | on_failure: always 31 | 32 | -------------------------------------------------------------------------------- /R/.Rapp.history: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/refunders/refund/b399f0b452aef13af408e6753529b3217175ca4f/R/.Rapp.history -------------------------------------------------------------------------------- /R/CD4-data.R: -------------------------------------------------------------------------------- 1 | ##' Observed CD4 cell counts 2 | ##' 3 | ##' CD4 cell counts for 366 subjects between months -18 and 42 since 4 | ##' seroconversion. Each subject's observations are contained in a single row. 5 | ##' 6 | ##' @name cd4 7 | ##' @docType data 8 | ##' @format A data frame made up of a 366 x 61 matrix of CD4 cell counts 9 | ##' 10 | ##' @references Goldsmith, J., Greven, S., and Crainiceanu, C. (2013). 11 | ##' Corrected confidence bands for functional data using principal components. 12 | ##' \emph{Biometrics}, 69(1), 41--51. 
13 | ##' @keywords datasets 14 | NULL 15 | -------------------------------------------------------------------------------- /R/COVID19-data.R: -------------------------------------------------------------------------------- 1 | ##' The US weekly all-cause mortality and COVID19-associated deaths in 2020 2 | ##' 3 | ##' The COVID19 mortality data used in the "Functional Data Analysis with R" book 4 | ##' 5 | ##' @name COVID19 6 | ##' @docType data 7 | ##' 8 | ##' @usage data(COVID19) 9 | ##' 10 | ##' @format A list made up of \describe{ 11 | ##' \item{US_weekly_mort}{A numeric vector of length 207, which contains the 12 | ##' total number of weekly all-cause deaths in the US from January 14, 2017 to December 26, 2020;} 13 | ##' \item{US_weekly_mort_dates}{A vector of dates of length 207, which contains 14 | ##' the weeks corresponding to the US_weekly_mort vector;} 15 | ##' \item{US_weekly_mort_CV19}{A numeric vector of length 52, which contains the 16 | ##' total number of weekly COVID 19 deaths in the US from January 4, 2020 to December 26, 2020;} 17 | ##' \item{US_weekly_mort_CV19_dates}{A vector of dates of length 52, which contains 18 | ##' the weeks corresponding to the US_weekly_mort_CV19 vector;} 19 | ##' \item{US_weekly_excess_mort_2020}{A numeric vector of length 52, which contains 20 | ##' the US weekly excess mortality (total mortality in one week in 2020 minus 21 | ##' total mortality in the corresponding week of 2019) from January 4, 2020 to December 26, 2020;} 22 | ##' \item{US_weekly_excess_mort_2020_dates}{A vector dates of length 52, which contains 23 | ##' the weeks corresponding to the US_weekly_excess_mort_2020 vector.;} 24 | ##' \item{US_states_names}{A vector of strings containing the names of 52 US states 25 | ##' and territories in alphabetic order. 
These are the states for which all-cause 26 | ##' and Covid-19 data are available in this data set;} 27 | ##' \item{US_states_population}{A numeric vector containing the population of the 28 | ##' 52 states in the vector US_states_names estimated as of July 1, 2020. The 29 | ##' order of the vector US_states_population is the same as that of US_states_names;} 30 | ##' \item{States_excess_mortality}{A numeric 52 x 52 dimensional matrix that 31 | ##' contains the weekly US excess mortality in 52 states and territories. Each 32 | ##' row corresponds to one state in the same order as the vector US_states_names. 33 | ##' Each column corresponds to a week in 2020 corresponding to the order in the 34 | ##' vector US_weekly_excess_mort_2020_dates. The (i,j)th entry of the matrix is 35 | ##' the difference in all-cause mortality during the week j of 2020 and 2019 for state i;} 36 | ##' \item{States_excess_mortality_per_million}{A numeric 52 x 52 dimensional matrix 37 | ##' that contains the weekly US excess mortality in 52 states and territories 38 | ##' per one million individuals. This is obtained by dividing every row (corresponding 39 | ##' to a state) of States_excess_mortality by the population of that state stored 40 | ##' in US_states_population and multiplying by one million;} 41 | ##' \item{States_CV19_mortality}{A numeric 52 x 52 dimensional matrix that contains 42 | ##' the weekly US Covid-19 mortality in 52 states and territories. Each row 43 | ##' corresponds to one state in the same order as the vector US_states_names. Each 44 | ##' column corresponds to a week in 2020 corresponding to the order in the 45 | ##' vector US_weekly_excess_mort_2020_dates;} 46 | ##' \item{States_CV19_mortality_per_million}{A numeric 52 x 52 dimensional matrix 47 | ##' that contains the weekly US Covid-19 mortality in 52 states and territories 48 | ##' per one million individuals. 
This is obtained by dividing every row (corresponding 49 | ##' to a state) of States_CV19_mortality by the population of that state stored 50 | ##' in US_states_population and multiplying by one million.} 51 | ##' } 52 | ##' @references Crainiceanu, C., Goldsmith, J., Leroux, A., Cui, E. (2023). Functional 53 | ##' Data Analysis with R. \emph{Chapman & Hall/CRC Statistics} 54 | NULL 55 | -------------------------------------------------------------------------------- /R/DTI-data.R: -------------------------------------------------------------------------------- 1 | ##' Diffusion Tensor Imaging: tract profiles and outcomes 2 | ##' 3 | ##' Fractional anisotropy (FA) tract profiles for the corpus callosum (cca) and 4 | ##' the right corticospinal tract (rcst). Accompanying the tract profiles are 5 | ##' the subject ID numbers, visit number, total number of scans, multiple 6 | ##' sclerosis case status and Paced Auditory Serial Addition Test (pasat) 7 | ##' score. 8 | ##' 9 | ##' If you use this data as an example in written work, please include the 10 | ##' following acknowledgment: ``The MRI/DTI data were collected at Johns 11 | ##' Hopkins University and the Kennedy-Krieger Institute" 12 | ##' 13 | ##' DTI2 uses mean diffusivity of the the corpus callosum rather than FA, and 14 | ##' parallel diffusivity of the rcst rather than FA. Please see the 15 | ##' documentation for DTI2. 
16 | ##' 17 | ##' 18 | ##' @name DTI 19 | ##' @docType data 20 | ##' @format A data frame made up of \describe{ 21 | ##' \item{cca}{A 382 x 93 22 | ##' matrix of fractional anisotropy tract profiles from the corpus 23 | ##' callosum;} 24 | ##' \item{rcst}{A 382 x 55 matrix 25 | ##' of fractional anisotropy tract profiles from the right corticospinal 26 | ##' tract;} 27 | ##' \item{ID}{Numeric vector of subject ID numbers;} 28 | ##' \item{visit}{Numeric vector of the subject-specific visit 29 | ##' numbers;} 30 | ##' \item{visit.time}{Numeric vector of the subject-specific visit time, measured 31 | ##' in days since first visit;} 32 | ##' \item{Nscans}{Numeric vector indicating the total number of visits 33 | ##' for each subject;} 34 | ##' \item{case}{Numeric vector of multiple sclerosis case status: 0 - healthy control, 1 - MS case;} 35 | ##' \item{sex}{factor variable indicated subject's sex;} 36 | ##' 37 | ##' \item{pasat}{Numeric vector containing the PASAT score at 38 | ##' each visit.} 39 | ##' } 40 | ##' @references Goldsmith, J., Bobb, J., Crainiceanu, C., Caffo, B., and Reich, 41 | ##' D. (2011). Penalized Functional Regression. \emph{Journal of Computational 42 | ##' and Graphical Statistics}, 20, 830 - 851. 43 | ##' 44 | ##' Goldsmith, J., Crainiceanu, C., Caffo, B., and Reich, D. (2010). 45 | ##' Longitudinal Penalized Functional Regression for Cognitive Outcomes on 46 | ##' Neuronal Tract Measurements. \emph{Journal of the Royal Statistical 47 | ##' Society: Series C}, 61, 453 - 469. 48 | NULL 49 | -------------------------------------------------------------------------------- /R/DTI2-data.R: -------------------------------------------------------------------------------- 1 | ##' Diffusion Tensor Imaging: more fractional anisotropy profiles and outcomes 2 | ##' 3 | ##' A diffusion tensor imaging dataset used in Swihart et al. (2012). 
Mean 4 | ##' diffusivity profiles for the corpus callosum (cca) and parallel diffusivity 5 | ##' for the right corticospinal tract (rcst). Accompanying the profiles are the 6 | ##' subject ID numbers, visit number, and Paced Auditory Serial Addition Test 7 | ##' (pasat) score. We thank Dr. Daniel Reich for making this dataset available. 8 | ##' 9 | ##' If you use this data as an example in written work, please include the 10 | ##' following acknowledgment: ``The MRI/DTI data were collected at Johns 11 | ##' Hopkins University and the Kennedy-Krieger Institute" 12 | ##' 13 | ##' Note: DTI2 uses mean diffusivity of the the corpus callosum rather than 14 | ##' fractional anisotropy (FA), and parallel diffusivity of the rcst rather 15 | ##' than FA. Please see the documentation for DTI for more about the DTI 16 | ##' dataset. 17 | ##' 18 | ##' 19 | ##' @name DTI2 20 | ##' @docType data 21 | ##' @format A data frame made up of \describe{ 22 | ##' \item{cca}{a 340 x 93 23 | ##' matrix of fractional anisotropy profiles from the corpus callosum;} 24 | ##' \item{rcst}{a 340 x 55 matrix of fractional anisotropy 25 | ##' profiles from the right corticospinal tract;} 26 | ##' \item{id}{numeric vector of subject ID numbers;} 27 | ##' \item{visit}{numeric vector of the 28 | ##' subject-specific visit numbers;} 29 | ##' \item{pasat}{numeric vector 30 | ##' containing the PASAT score at each visit.} 31 | ##' } 32 | ##' @references Goldsmith, J., Bobb, J., Crainiceanu, C., Caffo, B., and Reich, 33 | ##' D. (2011). Penalized functional regression. \emph{Journal of Computational 34 | ##' and Graphical Statistics}, 20(4), 830--851. 35 | ##' 36 | ##' Goldsmith, J., Crainiceanu, C., Caffo, B., and Reich, D. (2012). 37 | ##' Longitudinal penalized functional regression for cognitive outcomes on 38 | ##' neuronal tract measurements. \emph{Journal of the Royal Statistical 39 | ##' Society: Series C}, 61(3), 453--469. 40 | ##' 41 | ##' Swihart, B. J., Goldsmith, J., and Crainiceanu, C. M. 
#' Penalty matrix for a B-spline basis via piecewise Newton-Cotes quadrature
#'
#' Computes t(B) %*% diag(w) %*% B, where B holds the ss-th derivatives of the
#' basis functions evaluated on a quadrature grid, i.e. the integral of the
#' cross-products of the ss-th derivatives of all pairs of basis functions.
#' The weight row is selected as 4 - ss, so ss is presumably intended to lie
#' in 0..3 (ss = 3 would make the per-interval step division by zero --
#' NOTE(review): confirm callers only pass ss in 0..2).
#'
#' @param bspline.basis a B-spline basis object from the fda package; the
#'   boundary knots are replicated 4 times below, consistent with order-4
#'   (cubic) splines.
#' @param ss derivative order whose cross-products are integrated.
#' @keywords internal
#' @importFrom fda eval.basis
Omegas = function(bspline.basis, ss) {
  k.n = length(bspline.basis$params)
  # Augmented knot sequence: each boundary knot replicated 4 times, interior
  # knots taken from the basis object.
  k = c(rep(bspline.basis$rangeval[1],4), bspline.basis$params, rep(bspline.basis$rangeval[2],4))
  # Step width within each knot span: each span is divided into 6 - 2*ss
  # subintervals for the composite quadrature rule.
  hh = rep(NA, k.n+7)
  for (i in 1:(k.n+7)) hh[i] = (k[i+1]-k[i])/(6-2*ss)

  # Newton-Cotes weight rows; row 4 - ss is used below.
  # Table 1 of Wand and Ormerod (2008)
  w.mat = rbind(c(1, NA, NA, NA, NA, NA, NA),
                c(1/3, 4/3, 1/3, NA, NA, NA, NA),
                c(14/45, 64/45, 8/15, 64/45, 14/45, NA, NA),
                c(41/140, 54/35, 27/140, 68/35, 27/140, 54/35, 41/140))
  # Assemble the full grid of quadrature abscissae (x) and weights (w),
  # 7 - 2*ss points per knot span.
  x = w = rep(NA, (7-2*ss)*(k.n+7))
  for (l in 1:(k.n+7))
    for (ll in 0:(6-2*ss)) {
      x[(7-2*ss)*(l-1)+ll+1]= k[l]+ll*hh[l]
      w[(7-2*ss)*(l-1)+ll+1]= hh[l]*w.mat[4-ss,ll+1]
    }
  # ss-th derivatives of all basis functions at the quadrature points.
  B = eval.basis(x, bspline.basis, ss)
  # Weighted cross-product = approximate integral of derivative products.
  t(B) %*% diag(w) %*% B
}
8 | ##' Accompanying the functional predictor and scalar response are the subject 9 | ##' ID numbers and time of measurements. 10 | ##' 11 | ##' \code{Q} represents the 7 x 100 matrix where each row provides structural 12 | ##' information about the functional predictor profile for data 13 | ##' \code{PEER.Sim}. For specific details about the simulation and Q matrix, 14 | ##' please refer to Kundu et. al. (2012). 15 | ##' 16 | ##' 17 | ##' @name PEER.Sim 18 | ##' @aliases PEER.Sim Q 19 | ##' @docType data 20 | ##' @format The data frame \code{PEER.Sim} is made up of subject ID 21 | ##' number(\code{id}), subject-specific time of measurement (\code{t}), 22 | ##' functional predictor profile (\code{W.1-W.100}) and scalar response 23 | ##' (\code{Y}) 24 | ##' @references Kundu, M. G., Harezlak, J., and Randolph, T. W. (2012). 25 | ##' Longitudinal functional models with structured penalties. (please contact 26 | ##' J. Harezlak at \email{harezlak@@iupui.edu}) 27 | NULL 28 | -------------------------------------------------------------------------------- /R/XtSiginvX.R: -------------------------------------------------------------------------------- 1 | #' Internal computation function 2 | #' 3 | #' Internal function used compute the products 4 | #' in cross-sectional VB algorithm and Gibbs sampler 5 | #' 6 | #' @param tx transpose of the X design matrix 7 | #' @param siginv inverse variance matrix 8 | #' @param y outcome matrix. if \code{NULL}, function computes 9 | #' first product; if not, function computes second product. 
#' Internal computation function
#'
#' Internal helper for the cross-sectional VB algorithm and Gibbs sampler.
#' Treating the columns of \code{tx} as consecutive blocks of width
#' \code{nrow(siginv)}, this accumulates either
#' \code{sum_i tx_i siginv t(tx_i)} (when \code{y} is \code{NULL}) or
#' \code{sum_i tx_i siginv y_i} (otherwise).
#'
#' @param tx transpose of the X design matrix
#' @param siginv inverse variance matrix
#' @param y outcome matrix. if \code{NULL}, function computes
#' first product; if not, function computes second product.
#'
#' @author Jeff Goldsmith \email{ajg2202@@cumc.columbia.edu}
#'
Xt_siginv_X = function(tx, siginv, y = NULL){

  block.size = dim(siginv)[1]
  n.blocks = dim(tx)[2] / block.size
  want.gram = is.null(y)

  # Accumulator: square when forming X' Siginv X, a column when forming
  # X' Siginv y.
  total = if (want.gram) {
    matrix(0, nrow = dim(tx)[1], ncol = dim(tx)[1])
  } else {
    matrix(0, nrow = dim(tx)[1], ncol = 1)
  }

  for (b in seq_len(n.blocks)) {
    cols = (block.size * (b - 1) + 1):(block.size * b)
    contrib = if (want.gram) {
      tx[, cols] %*% siginv %*% t(tx[, cols])
    } else {
      tx[, cols] %*% siginv %*% y[cols]
    }
    total = total + contrib
  }

  total
}
##' Additive model with constraints
##'
##' An internal function, called by \code{fosr()}, that fits additive models
##' with linear constraints via a call to \code{\link[mgcv]{gam}} or
##' \code{\link[mgcv]{bam}} in the \pkg{mgcv} package.  The model is fitted
##' with \code{gam} unless there are more than 10000 responses, in which case
##' \code{bam} is used.
##'
##' @param y response vector.
##' @param Xmat design matrix.
##' @param S list of penalty matrices.
##' @param gam.method smoothing parameter selection method: "REML" for
##' restricted maximum likelihood, "GCV.Cp" for generalized cross-validation.
##' @param C matrix of linear constraints.  Dimension should be number of
##' constraints times \code{ncol(Xmat)}.
##' @param lambda smoothing parameter value.  If \code{NULL}, the smoothing
##' parameter(s) will be estimated.
##' @param \dots other arguments, passed to \code{\link[mgcv]{gam}} or
##' \code{\link[mgcv]{bam}}.
##' @return A list with the following elements: \item{gam}{the \code{gam}
##' object returned by \code{gam} or \code{bam}.}
##' \item{coefficients}{coefficients with respect to design matrix \code{Xmat},
##' derived from the \code{gam()} fit.} \item{Vp, GinvXT}{outputs used by
##' \code{fosr}.} \item{method}{the \code{gam.method} argument of the call to
##' \code{amc}.}
##' @author Philip Reiss \email{phil.reiss@@nyumc.org}
##' @seealso \code{\link{fosr}}
##' @keywords internal
##' @importFrom mgcv bam gam
##' @importFrom MASS ginv
amc <- function(y, Xmat, S, gam.method='REML', C=NULL, lambda=NULL, ...) {
  n.p = length(S)
  stopifnot( is.null(lambda) | length(lambda)==n.p )
  if (!is.null(C)) {
    # Absorb the linear constraints C beta = 0 by reparameterizing in the
    # null space of C.  The following is based on Wood (2006), p. 186.
    n.con = dim(C)[1]
    Z. = qr.Q(qr(t(C)), complete=TRUE)[ , -(1:n.con)]
    Xmat. = Xmat %*% Z.
    S. = vector("list", n.p)
    for (i in 1:n.p) {
      # Project each penalty into the constrained parameterization; attach
      # the fixed smoothing parameter when one was supplied.
      if(is.null(lambda)) {
        S.[[i]] = list(crossprod(Z., S[[i]] %*% Z.))
      } else {
        S.[[i]] = list(crossprod(Z., S[[i]] %*% Z.), sp = lambda[i])
      }
    }
  } else {
    Z. = diag(ncol(Xmat))
    Xmat. = Xmat
    if (is.null(lambda)) {
      S. = list(list(S[[1]]))
    } else {
      S. = list(list(S[[1]], sp = lambda[1]))
    }
  }

  # bam() scales better than gam() for very large response vectors.
  # NOTE(review): paraPen passes only S.[[1]]; for multiple penalties with
  # constraints the remaining penalties appear unused in the fit -- confirm
  # against fosr()'s calling pattern.
  fitter = if (length(y) > 10000) bam else gam
  fitobj = fitter(y ~ Xmat.-1, paraPen=list(Xmat.=S.[[1]]), ...)

  lambdavec = if (!is.null(fitobj$full.sp)) fitobj$full.sp else fitobj$sp
  # Total penalty sum_i lambda_i * S_i.  (The original overwrote fullpen on
  # each iteration, so only the last penalty term was kept.)
  fullpen = 0
  for (i in 1:n.p) fullpen = fullpen + lambdavec[i] * S.[[i]][[1]]
  GinvXT <- try(Z. %*% solve(crossprod(Xmat.) + fullpen, t(Xmat.)), silent=TRUE)
  if (inherits(GinvXT, "try-error")) {
    warning(" 'X'X + penalty' is numerically rank-deficient.")
    # MASS::ginv() takes a single matrix (its second argument is a scalar
    # tolerance); the original passed t(Xmat.) as that tolerance.  The
    # pseudoinverse must be multiplied by t(Xmat.) explicitly.
    GinvXT <- Z. %*% ginv(crossprod(Xmat.) + fullpen) %*% t(Xmat.)
  }

  list(gam = fitobj,
       coefficients = Z. %*% fitobj$coef,
       Vp = Z. %*% fitobj$Vp %*% t(Z.),
       GinvXT = GinvXT,
       method = gam.method)
}
#' Sum computation 1
#'
#' Internal function used compute a sum in FPCA-based covariance updates.
#' For each subject, the Kronecker product of the second moment of the score
#' vector (mu mu' + Sigma) with the Gram matrix of the spline basis at that
#' subject's observed points is accumulated.
#'
#' @param mu.q.c current value of mu.q.c
#' @param sig.q.c current value of sig.q.c
#' @param theta spline basis
#' @param obspts.mat matrix indicating the points on which data is observed
#'
#' @author Jeff Goldsmith \email{ajg2202@@cumc.columbia.edu}
#'
f_sum = function(mu.q.c, sig.q.c, theta, obspts.mat){
  n.subj = dim(mu.q.c)[1]
  n.pc = dim(mu.q.c)[2]
  n.basis = dim(theta)[1]
  accum = matrix(0, n.pc * n.basis, n.pc * n.basis)

  for (subj in seq_len(n.subj)) {
    # Second moment of the subject's score vector: mu mu' + Sigma.
    mu.row = matrix(mu.q.c[subj, ], nrow = 1, ncol = n.pc)
    second.moment = t(mu.row) %*% mu.row + sig.q.c[[subj]]
    # Gram matrix of the basis restricted to the observed grid points.
    theta.obs = theta[, obspts.mat[subj, ]]
    accum = accum + kronecker(second.moment, theta.obs %*% t(theta.obs))
  }

  return(accum)
}

#' Sum computation 2
#'
#' Internal function used compute a sum in FPCA-based covariance updates.
#' Accumulates, over subjects, the product of the Kronecker-expanded basis at
#' observed points with the subject's residual (outcome minus fixed effects).
#'
#' @param y outcome matrix
#' @param fixef current estimate of fixed effects
#' @param mu.q.c current value of mu.q.c
#' @param kt number of basis functions
#' @param theta spline basis
#'
#' @author Jeff Goldsmith \email{ajg2202@@cumc.columbia.edu}
#'
f_sum2 = function(y, fixef, mu.q.c, kt, theta){
  n.subj = dim(mu.q.c)[1]
  n.pc = dim(mu.q.c)[2]
  accum = matrix(0, nrow = n.pc * kt, ncol = 1)

  for (subj in seq_len(n.subj)) {
    # Only grid points with non-missing outcomes contribute.
    observed = !is.na(y[subj, ])
    resid.vec = matrix(y[subj, observed] - fixef[subj, observed])
    accum = accum +
      kronecker((matrix(mu.q.c[subj, ])), theta[, observed]) %*% resid.vec
  }

  return(accum)
}
#' Sum computation 4
#'
#' Internal function used compute a sum in FPCA-based covariance updates.
#' (The original header said "Sum computation 2", copied from f_sum2.)
#'
#' @param mu.q.c current value of mu.q.c
#' @param sig.q.c current value of sig.q.c
#' @param mu.q.bpsi current value of mu.q.bpsi
#' @param sig.q.bpsi current value of sig.q.bpsi
#' @param theta current value of theta
#' @param obspts.mat matrix indicating where curves are observed
#'
#' @author Jeff Goldsmith \email{ajg2202@@cumc.columbia.edu}
#'
f_sum4 = function(mu.q.c, sig.q.c, mu.q.bpsi, sig.q.bpsi, theta, obspts.mat){
  I = dim(mu.q.c)[1]
  kp = dim(mu.q.c)[2]
  kt = dim(theta)[2]
  ret.sum = matrix(0, 1, 1)

  for(i in 1:I){
    # Basis evaluated at this subject's observed grid points (kt x n_obs).
    theta_i = t(theta)[,obspts.mat[i,]]
    # Trace matrix is identical in the first two terms; the original
    # recomputed f_trace() twice per iteration with the same arguments.
    trace_i = f_trace(Theta_i = theta_i, Sig_q_Bpsi = sig.q.bpsi, Kp = kp, Kt = kt)
    mu_i = matrix(mu.q.c[i,], kp, 1)
    temp =
      trace_i %*% mu_i %*% t(mu_i) +
      trace_i %*% sig.q.c[[i]] +
      t(mu.q.bpsi) %*% theta_i %*% t(theta_i) %*% mu.q.bpsi %*% sig.q.c[[i]]

    ret.sum = ret.sum + sum(diag(temp))
  }
  return(ret.sum)
}

#' Trace computation
#'
#' Internal function used compute a trace in FPCA-based covariance updates.
#' Entry (i, j) is tr(Theta_i' Theta_i %*% S_ij), where S_ij is the
#' (Kt x Kt) block of Sig_q_Bpsi for FPC pair (i, j).
#'
#' @param Theta_i basis functions on observed grid points
#' @param Sig_q_Bpsi variance of FPC basis coefficients
#' @param Kp number of FPCs
#' @param Kt number of spline basis functions
#'
#' @author Jeff Goldsmith \email{ajg2202@@cumc.columbia.edu}
f_trace = function(Theta_i, Sig_q_Bpsi, Kp, Kt){

  ret.mat = matrix(NA, nrow = Kp, ncol = Kp)
  # Gram matrix of the observed-point basis, shared by all (i, j) blocks.
  A = Theta_i %*% t(Theta_i)

  for(i in 1:Kp){
    for(j in 1:Kp){
      ret.mat[i,j] = sum(diag(A %*% Sig_q_Bpsi[((-1 + i)*Kt + 1):(i*Kt), ((-1 + j)*Kt + 1):(j*Kt)]))
    }
  }

  return(ret.mat)
}

#' Permutation test helper for fosr
#'
#' Computes the critical value of the max-F permutation distribution at the
#' requested level and flags the significant grid points.
#'
#' @export
#' @rdname fosr.perm
fosr.perm.test <-
function(x, level=.05) {
  # (Original message had an unbalanced quote: "...class 'fosr.perm")
  if (!inherits(x, "fosr.perm")) stop("First argument must be an object of class 'fosr.perm'")
  x$level = level
  # type = 1 gives the inverse of the empirical CDF of the per-permutation
  # maximum F statistic.
  critval = quantile(apply(x$F.perm, 1, max), 1-level, type=1)
  x$critval=critval
  if (length(level)==1) {
    x$signif = (x$F > critval)
    # Indices where significance switches on (n2s) and off (s2n).
    x$n2s = which(diff(c(0, x$signif))==1)
    x$s2n = which(diff(c(x$signif, 0))==-1)
  }
  x
}

#' Set-up for functional principal component regression
#'
#' Builds the design pieces used by fpcr(): the basis/penalty matrices for
#' 1D or 2D functional predictors, the scalar-covariate design X0, and the
#' decorrelated signal-basis product SB.
#'
#' @importFrom fda eval.fd norder create.bspline.basis getbasispenalty eval.basis
#' @importFrom stats lm
fpcr.setup <- function(y, xfuncs = NULL, nbasis = NULL, basismat = NULL, penmat = NULL, argvals = NULL,
                       covt = NULL, mean.signal.term = FALSE, spline.order = NULL, fdobj = NULL, pen.order = 2) {
  if (!is.null(fdobj)) {
    # Functional predictor supplied as an fd object: take its basis and
    # evaluate it on a grid to get a matrix of curves.
    basis <- fdobj$basis
    if (!is.null(nbasis)){
      if (length(nbasis) > 1){
        # (typos "considerred" and missing spaces in the original messages fixed)
        warning("nbasis = ", nbasis[-1], " will be ignored. Only the first nbasis, nbasis = ",
                nbasis[1], " will be considered")
      }
      if (nbasis[1] != basis$nbasis){
        warning(paste("'nbasis =", nbasis, "'overridden since the supplied 'fdobj' has basis dimension", basis$nbasis))
      }
    }
    if (!is.null(spline.order)) {
      if(spline.order != norder(basis)){
        warning(paste("'spline.order =", spline.order, "'overridden since the supplied 'fdobj' has a basis of order", norder(basis)))
      }
    }
    if (is.null(argvals)){
      argvals <- seq(basis$rangeval[1], basis$rangeval[2], length.out = 401)
    }
    xfuncs <- t(eval.fd(argvals, fdobj))
  }

  if (!is.array(xfuncs) || !length(dim(xfuncs)) %in% 2:3)
    stop("xfuncs must either be a 3D or 2D array")

  # 1 for 1D (curve) predictors, 2 for 2D (image) predictors.
  dim.sig <- length(dim(xfuncs)) - 1
  if (dim.sig == 1) {
    if (is.null(fdobj)){
      if (!is.null(nbasis) && length(nbasis) > 1) {
        warning("nbasis = ", nbasis[-1], " will be ignored. Only the first nbasis, nbasis = ",
                nbasis[1], " will be considered")
      }
      # Default grid, basis size, and cubic (order-4) B-splines.
      if (is.null(argvals)) argvals <- seq(0, 1, length.out = ncol(xfuncs))
      if (is.null(nbasis)) nbasis <- 40
      if (is.null(spline.order)) spline.order <- 4
      basis <- create.bspline.basis(rangeval = c(min(argvals), max(argvals)),
                                    nbasis = nbasis[1], norder = spline.order)
    }
    if (is.null(basismat)) basismat <- eval.basis(argvals, basis)
    if (is.null(penmat)) penmat <- getbasispenalty(basis, pen.order)
  }
  else {
    # 2D predictor: tensor-product basis over both image axes.
    d1 <- dim(xfuncs)[2]
    d2 <- dim(xfuncs)[3]
    if (is.null(argvals)) argvals <- list(seq(0, 1, length.out = d1), seq(0, 1, length.out = d2))
    if (is.null(nbasis)) {
      nbasis <- c(15, 15)
    } else if (length(nbasis) == 1){
      warning("nbasis = ", nbasis[1], " will be applied to both directions")
      nbasis <- c(nbasis, nbasis)
    } else if (length(nbasis) > 2){
      warning("only nbasis = ", nbasis[1:2], " will be considered")
      nbasis <- nbasis[1:2]
    }
    if (is.null(spline.order)) spline.order <- 4
    basis1 <- create.bspline.basis(rangeval = c(min(argvals[[1]]), max(argvals[[1]])),
                                   nbasis = nbasis[1], norder = spline.order)
    basis2 <- create.bspline.basis(rangeval = c(min(argvals[[2]]), max(argvals[[2]])),
                                   nbasis = nbasis[2], norder = spline.order)
    if (is.null(basismat)){
      # Kronecker product gives the tensor-product design over the image grid.
      basismat <- eval.basis(argvals[[2]], basis2) %x% eval.basis(argvals[[1]], basis1)
    }
    if (is.null(penmat)) penmat <- osplinepen2d(basis1, basis2)
    # Vectorize each image into a row.
    dim(xfuncs) <- c(dim(xfuncs)[1], d1 * d2)
  }
  # Scalar-covariate design: intercept, optional mean-signal term, optional
  # covariates.
  X0 <- if (mean.signal.term) cbind(1, apply(xfuncs, 1, mean))
  else matrix(1, length(y), 1)
  if (!is.null(covt)) X0 <- cbind(X0, as.matrix(covt))
  # Remove the part of the signals explained by X0 (just centering when X0
  # is only an intercept).
  sigs.decor <- if (ncol(X0) == 1) scale(xfuncs, center = TRUE, scale = FALSE)
  else lm(xfuncs ~ X0 - 1)$resid
  SB <- sigs.decor %*% basismat
  return(list(X0 = X0, SB = SB, penmat = penmat, basismat = basismat, xfuncs = xfuncs,
              nbasis = nbasis, argvals = argvals, dim.sig = dim.sig))
}
##' T., and Ogden, R. T. (2007). Functional principal component
##' regression and functional partial least squares. \emph{Journal of the
##' American Statistical Association}, 102, 984--996.
##'
##' Reiss, P. T., and Ogden, R. T. (2009). Smoothing parameter selection for a
##' class of semiparametric linear models. \emph{Journal of the Royal
##' Statistical Society, Series B}, 71(2), 505--523.
##' @source Kalivas, John H. (1997). Two data sets of near infrared spectra.
##' \emph{Chemometrics and Intelligent Laboratory Systems}, 37, 255--259.
##' @keywords datasets
NULL

# ---- file: R/irreg2mat.R ----

# Convert irregularly observed functional data in long format (columns
# .id, .index, .value) into a wide matrix with one row per curve, optionally
# binning the .index values down to at most `maxbins` columns.
#
# Returns a matrix with NA for unobserved (curve, index) combinations; the
# (possibly binned) index values are stored in the column names and in the
# "index" attribute.
irreg2mat <- function(ydata, binning = FALSE, maxbins = 1e3) {
  # TODO: arg checks

  ## drop any missings:
  ydata <- ydata[complete.cases(ydata), ]

  ## turn into row/column indices for new matrix
  nobs <- length(unique(ydata$.id))
  # make sure newid takes values 1:nobs
  newid <- as.numeric(as.factor(ydata$.id))

  ## bin y-index, if necessary
  bins <- sort(unique(ydata$.index))
  if (binning && (length(bins) > maxbins)) {
    # linear binning;
    # bin-borders go from just below min to just above max
    # TODO: quantile-based binning?
    binvalues <- seq((1 - .001 * sign(bins[1])) * bins[1],
                     (1 + .001 * sign(bins[length(bins)])) * bins[length(bins)],
                     length.out = maxbins + 1)
    bins <- binvalues
    # bin midpoints via a 2-point moving average; the filter leaves a
    # trailing NA which head(..., -1) drops.  stats::filter is called
    # explicitly so an attached dplyr cannot mask it.
    binvalues <- head(stats::filter(binvalues, c(.5, .5)), -1)
  } else {
    binvalues <- bins
    bins <- c((1 - .001 * sign(bins[1])) * bins[1],
              bins[-length(bins)],
              (1 + .001 * sign(bins[length(bins)])) * bins[length(bins)])
    # take care of edge cases: sign(0) == 0 would leave the border at 0
    if (bins[1] == 0) bins[1] <- -.001
    if (bins[length(bins)] == 0) bins[length(bins)] <- .001
  }
  newindex <- cut(ydata$.index, breaks = bins, include.lowest = TRUE)

  Y <- matrix(NA, nrow = nobs, ncol = nlevels(newindex))
  colnames(Y) <- binvalues
  attr(Y, "index") <- binvalues
  Y[cbind(newid, as.numeric(newindex))] <- ydata$.value
  return(Y)
}

# ---- file: R/lw.test.R ----

# Ledoit-Wolf sphericity test for high-dimensional data.
# Returns the standardized test statistic and a two-sided normal p-value.
lw.test = function(X) {
  n = nrow(X) - 1 # Fujikoshi et al., p. 219
  p = ncol(X)
  S = cov(X)
  trS = sum(diag(S))   # tr(S)
  trS2 = sum(S^2)      # tr(S^2), since S is symmetric
  stat = (n*p*trS2/trS^2 - n - p - 1) / 2
  pvalue = 2 * pnorm(-abs(stat))
  list(stat=stat, pvalue=pvalue)
}

# ---- file: R/osplinepen2d.R ----

# Second-order O'Sullivan-type roughness penalty for a tensor product of two
# B-spline bases: squared second derivatives in each direction plus twice the
# squared mixed partial (thin-plate-like penalty on a rectangle).
osplinepen2d = function(bbasis1, bbasis2) {
  omega20 = Omegas(bbasis1, 2) %x% Omegas(bbasis2, 0)
  omega11 = Omegas(bbasis1, 1) %x% Omegas(bbasis2, 1)
  omega02 = Omegas(bbasis1, 0) %x% Omegas(bbasis2, 2)
  omega20 + 2 * omega11 + omega02
}

# ---- file: R/parse.predict.pfr.R ----

# Unpack a fitted pfr object and a new.data list into the pieces needed by
# predict.pfr_old(), matching new subjects to the random intercepts estimated
# at fit time.
parse.predict.pfr <- function(pfr.obj, new.data){
  ## parse out new.data
  subj.new <- new.data$subj
  covariates.new <- new.data$covariates
  funcs.new <- new.data$funcs
  ## parse out pfr.obj
  funcs.old <- pfr.obj$funcs
  kb.old <- pfr.obj$kb
  kz.old <- pfr.obj$kz
  nbasis.old <- pfr.obj$nbasis
  alpha.old <- pfr.obj$beta.covariates[1]
  beta.old <- pfr.obj$beta.covariates[-1] ## what happens if covariates = NULL
  p.old <- length(beta.old)
  # scalar condition, so if/else rather than ifelse()
  N_subj.old <- if (is.null(subj.new)) 0 else ncol(pfr.obj$Z1)
  if (is.null(subj.new)) {
    rand.int.old <- Inf
  } else {
    rand.int.old <- matrix(pfr.obj$fit$coef[c((p.old+2):(N_subj.old+p.old+1))], ncol=1)
  }
  subj.old <- pfr.obj$subj
  W <- pfr.obj$BetaHat
  smooth.option.old <- pfr.obj$smooth.option

  ## need to manage old and new subjects for level 1 predictions
  ## this code chunk determines which subjects were in original
  ## fit, and which ones are new. This is important and we follow
  ## what lme() does: everyone can have a fixed effect level 0 predicted
  ## value that utilizes no random effects; however, for level 1 individual level
  ## predictions we can only utilize random effects if the individual that is in the
  ## prediction was first in the fitted set
  subj.old.key <- unique(subj.old)
  subj.new.key <- unique(subj.new)
  in.both <- subj.old.key %in% subj.new.key
  subj.ext.key <- subj.old.key[in.both]
  rand.int.ext <- rand.int.old[in.both]
  extracted <- subj.new.key %in% subj.ext.key
  # NOTE(review): this alignment assumes shared subjects appear in the same
  # relative order in subj.old.key and subj.new.key -- TODO confirm for
  # arbitrarily ordered new data.
  rand.int.new <- matrix(rep(NA, length(subj.new.key)), ncol=1)
  rand.int.new[extracted] <- rand.int.ext

  # single named list instead of parallel list + names() assignment
  list(subj.new = subj.new, covariates.new = covariates.new,
       funcs.new = funcs.new,
       kb.old = kb.old, kz.old = kz.old, nbasis.old = nbasis.old,
       alpha.old = alpha.old, beta.old = beta.old, p.old = p.old,
       N_subj.old = N_subj.old, rand.int.old = rand.int.old,
       subj.old = subj.old,
       W = W,
       rand.int.new = rand.int.new,
       funcs.old = funcs.old,
       smooth.option.old = smooth.option.old)
}

# ---- file: R/plot.fosr.R ----

##' Default plotting of function-on-scalar regression objects
##'
##' Plots the coefficient function estimates produced by \code{fosr()}.
##'
##'
##' @param x an object of class \code{"\link{fosr}"}.
##' @param split value, or vector of values, at which to divide the set of
##' coefficient functions into groups, each plotted on a different scale.
##' E.g., if set to 1, the first function is plotted on one scale, and all
##' others on a different (common) scale. If \code{NULL}, all functions are
##' plotted on the same scale.
##' @param titles character vector of titles for the plots produced, e.g.,
##' names of the corresponding scalar predictors.
##' @param xlabel label for the x-axes of the plots.
##' @param ylabel label for the y-axes of the plots.
##' @param set.mfrow logical value: if \code{TRUE}, the function will try to
##' set an appropriate value of the \code{mfrow} parameter for the plots.
##' Otherwise you may wish to set \code{mfrow} outside the function call.
##' @param \dots graphical parameters (see \code{\link{par}}) for the plot.
##' @author Philip Reiss \email{phil.reiss@@nyumc.org}
##' @seealso \code{\link{fosr}}, which includes examples.
##' @export
plot.fosr <-
  function(x, split=NULL, titles=NULL, xlabel="", ylabel="Coefficient function", set.mfrow=TRUE, ...) {
    n.funcs <- ncol(x$fd$coef)
    # lay out an approximately square grid of panels
    if (set.mfrow) {
      n.row <- floor(sqrt(n.funcs))
      n.col <- ceiling(n.funcs / n.row)
      par(mfrow = c(n.row, n.col))
    }
    # group boundaries: functions within one group share a common y-scale
    grp.start <- c(1, split + 1)
    grp.end <- c(split, n.funcs)
    n.grp <- length(grp.start)
    ylims <- matrix(NA, n.grp, 2)
    for (g in seq_len(n.grp)) {
      cols <- grp.start[g]:grp.end[g]
      lo <- x$est[, cols] - 2 * x$se[, cols]
      hi <- x$est[, cols] + 2 * x$se[, cols]
      ylims[g, ] <- c(min(lo), max(hi))
      for (k in cols) {
        # estimate with +/- 2 SE dotted bands and a zero reference line
        plot(x$argvals, x$est[, k], type='l', ylim=ylims[g, ],
             main=titles[k], xlab=xlabel, ylab=ylabel, ...)
        lines(x$argvals, x$est[, k] - 2 * x$se[, k], lty=3, lwd=1.5)
        lines(x$argvals, x$est[, k] + 2 * x$se[, k], lty=3, lwd=1.5)
        abline(h=0, col='grey')
      }
    }
  }

# ---- file: R/plot.fosr.perm.R ----

#' @export
#' @rdname fosr.perm
plot.fosr.perm <-
  function(x, level=.05, xlabel="", title=NULL,...)
  {
    # with no level supplied, x must already contain test results
    if (is.null(level)) {
      if (is.null(x$level)) stop("Must specify level at which to test")
      testobj = x
    } else {
      testobj = fosr.perm.test(x, level=level)
    }

    grid = testobj$argvals
    F.obs = testobj$F
    F.perm = testobj$F.perm

    # permutation F statistics in grey, critical value(s) dashed,
    # observed F statistic overlaid in blue
    matplot(grid, t(rbind(F.obs, F.perm)), type='l', col='grey', lty=1,
            ylab="F statistics", xlab=xlabel, main=title,...)
    abline(h=testobj$critval, col=1+1:length(testobj$level), lty=2)
    lines(grid, F.obs, col='blue')
  }

# ---- file: R/plot.fosr.vs.R ----

#' Plot for Function-on Scalar Regression with variable selection
#'
#' Given a "\code{\link{fosr.vs}}" object, produces a figure of estimated coefficient functions.
#'
#' @param x an object of class "\code{\link{fosr.vs}}".
#' @param ... additional arguments.
#'
#' @return a figure of estimated coefficient functions.
9 | #' 10 | #' @author Yakuan Chen \email{yc2641@@cumc.columbia.edu} 11 | #' @seealso \code{\link{fosr.vs}} 12 | #' @import ggplot2 13 | #' @export 14 | #' 15 | #' @examples 16 | #' \dontrun{ 17 | #' I = 100 18 | #' p = 20 19 | #' D = 50 20 | #' grid = seq(0, 1, length = D) 21 | #' 22 | #' beta.true = matrix(0, p, D) 23 | #' beta.true[1,] = sin(2*grid*pi) 24 | #' beta.true[2,] = cos(2*grid*pi) 25 | #' beta.true[3,] = 2 26 | #' 27 | #' psi.true = matrix(NA, 2, D) 28 | #' psi.true[1,] = sin(4*grid*pi) 29 | #' psi.true[2,] = cos(4*grid*pi) 30 | #' lambda = c(3,1) 31 | #' 32 | #' set.seed(100) 33 | #' 34 | #' X = matrix(rnorm(I*p), I, p) 35 | #' C = cbind(rnorm(I, mean = 0, sd = lambda[1]), rnorm(I, mean = 0, sd = lambda[2])) 36 | #' 37 | #' fixef = X%*%beta.true 38 | #' pcaef = C %*% psi.true 39 | #' error = matrix(rnorm(I*D), I, D) 40 | #' 41 | #' Yi.true = fixef 42 | #' Yi.pca = fixef + pcaef 43 | #' Yi.obs = fixef + pcaef + error 44 | #' 45 | #' data = as.data.frame(X) 46 | #' data$Y = Yi.obs 47 | #' fit.mcp = fosr.vs(Y~., data = data[1:80,], method="grMCP") 48 | #' plot(fit.mcp) 49 | #' } 50 | #' 51 | #' 52 | 53 | 54 | plot.fosr.vs <- function(x, ...){ 55 | p <- dim(coef(x))[1] 56 | D <- dim(coef(x))[2] 57 | df = as.data.frame(cbind(as.vector(sapply(1:p, function(y) seq(1,D)/D)), as.vector(t(coef(x))), as.vector(sapply(1:p, function(y) rep(y,D))))) 58 | ggplot(df, aes_string(x='V1', y='V2', group='V3', colour = 'factor(V3)')) + geom_path() + xlab("") + ylab("") + theme(legend.title = element_blank()) + scale_color_manual(values=1:p,labels=rownames(coef(x))) 59 | } -------------------------------------------------------------------------------- /R/plot.fpcr.R: -------------------------------------------------------------------------------- 1 | ##' Default plotting for functional principal component regression output 2 | ##' 3 | ##' Inputs an object created by \code{\link{fpcr}}, and plots the estimated 4 | ##' coefficient function. 
##'
##'
##' @param x an object of class \code{"\link{fpcr}"}.
##' @param se if \code{TRUE} (the default), upper and lower lines are added at
##' 2 standard errors (in the Bayesian sense; see Wood, 2006) above and below
##' the coefficient function estimate. If a positive number is supplied, the
##' standard error is instead multiplied by this number.
##' @param col color for the line(s). This should be either a number, or a
##' vector of length 3 for the coefficient function estimate, lower bound, and
##' upper bound, respectively.
##' @param lty line type(s) for the coefficient function estimate, lower bound,
##' and upper bound.
##' @param xlab,ylab x- and y-axis labels.
##' @param \dots other arguments passed to the underlying plotting function.
##' @return None; only a plot is produced.
##' @author Philip Reiss \email{phil.reiss@@nyumc.org}
##' @seealso \code{\link{fpcr}}, which includes an example.
##' @references Wood, S. N. (2006). \emph{Generalized Additive Models: An
##' Introduction with R}. Boca Raton, FL: Chapman & Hall.
##' @export
##' @importFrom graphics matplot
plot.fpcr = function(x, se=TRUE, col=1, lty=c(1,2,2), xlab="", ylab="Coefficient function", ...) {
  if (se) {
    # a numeric 'se' acts as the band multiplier; TRUE means 2 SEs;
    # never allow a negative multiplier
    band.mult <- if (is.numeric(se)) se else 2
    band.mult <- max(band.mult, 0)
    matplot(x$argvals,
            cbind(x$fhat, x$fhat - band.mult*x$se, x$fhat + band.mult*x$se),
            type="l", lty=lty, col=col, xlab=xlab, ylab=ylab, ...)
  } else {
    plot(x$argvals, x$fhat, type="l", col=col, xlab=xlab, ylab=ylab, ...)
  }
}

# ---- file: R/plot.lpeer.R ----

##' Plotting of estimated regression functions obtained through \code{lpeer()}
##'
##' Plots the estimate of components of estimated regression function obtained
##' from an \code{\link{lpeer}} object along with pointwise confidence bands.
##'
##' Pointwise confidence interval is displayed only if the user set \code{se=T}
##' in the call to \code{\link{lpeer}}, and does not reflect any multiplicity
##' correction.
##'
##' @param x object of class \code{"\link{lpeer}"}.
##' @param conf pointwise confidence level.
##' @param ... additional arguments passed to \code{\link{plot}}.
##' @author Madan Gopal Kundu \email{mgkundu@@iupui.edu}
##' @seealso \code{peer}, \code{lpeer}, \code{plot.peer}
##' @export
##' @importFrom graphics matplot
##' @importFrom stats qnorm
##' @references Kundu, M. G., Harezlak, J., and Randolph, T. W. (2012).
##' Longitudinal functional models with structured penalties. (Please contact
##' J. Harezlak at \email{harezlak@@iupui.edu}.)
##'
##' Randolph, T. W., Harezlak, J, and Feng, Z. (2012). Structured penalties for
##' functional linear models - partially empirical eigenvectors for regression.
##' \emph{Electronic Journal of Statistics}, 6, 323--353.
25 | ##' @examples 26 | ##' \dontrun{ 27 | ##' data(DTI) 28 | ##' cca = DTI$cca[which(DTI$case == 1),] 29 | ##' DTI = DTI[which(DTI$case == 1),] 30 | ##' fit.cca.lpeer1 = lpeer(Y=DTI$pasat, t=DTI$visit, subj=DTI$ID, funcs = cca) 31 | ##' plot(fit.cca.lpeer1) 32 | ##' } 33 | ### Function to plot estimated regression function 34 | plot.lpeer<- function(x, conf=0.95, ...){ 35 | if(!inherits(x, "lpeer")) return (cat("Error: The object is not an lpeer object.\n")) 36 | if(conf>0.99 | conf<0.70) return (cat("Error: Confidence level should be within 0.70 and 0.99\n")) 37 | d<- x$d 38 | status<- x$status 39 | if(d==0) par(mfrow=c(1,1)) 40 | if(d==1) par(mfrow=c(1,2)) 41 | if(d>1) par(mfrow=c(2,2)) 42 | for(i in 0:d) 43 | { 44 | est<- x$GammaHat[,(i+1)] 45 | if(status==0) matplot(est, type='l', main=paste('gamma', i, sep=''), ...) 46 | if(status==1){ 47 | ll<- x$GammaHat[,(i+1)] - qnorm(0.5+conf/2)*x$se.Gamma[,(i+1)] 48 | ul<- x$GammaHat[,(i+1)] + qnorm(0.5+conf/2)*x$se.Gamma[,(i+1)] 49 | matplot(est, type='l', ylim=range(est, ll, ul), 50 | main=paste('gamma', i, sep=''), ...) 51 | matplot(ll, type='l', add=T, lty=2, col=2) 52 | matplot(ul, type='l', add=T, lty=2, col=2) 53 | } 54 | abline(h=0) 55 | } 56 | } 57 | 58 | 59 | -------------------------------------------------------------------------------- /R/plot.peer.R: -------------------------------------------------------------------------------- 1 | ##' Plotting of estimated regression functions obtained through \code{peer()} 2 | ##' 3 | ##' Plots the estimate of components of estimated regression function obtained 4 | ##' from a \code{\link{peer}} object along with pointwise confidence bands. 5 | ##' 6 | ##' Pointwise confidence interval is displayed only if the user set \code{se=T} 7 | ##' in the call to \code{\link{peer}}, and does not reflect any multiplicity 8 | ##' correction. 9 | ##' 10 | ##' @param x object of class \code{"\link{peer}"}. 11 | ##' @param conf pointwise confidence level. 
##' @param ylab y-axis label.
##' @param main title for the plot.
##' @param ... additional arguments passed to \code{\link{plot}}.
##' @author Madan Gopal Kundu \email{mgkundu@@iupui.edu}
##' @seealso \code{peer}, \code{lpeer}, \code{plot.lpeer}
##' @importFrom graphics matplot
##' @importFrom stats qnorm
##' @export
##' @references Kundu, M. G., Harezlak, J., and Randolph, T. W. (2012).
##' Longitudinal functional models with structured penalties. (Please contact
##' J. Harezlak at \email{harezlak@@iupui.edu}.)
##'
##' Randolph, T. W., Harezlak, J, and Feng, Z. (2012). Structured penalties for
##' functional linear models - partially empirical eigenvectors for regression.
##' \emph{Electronic Journal of Statistics}, 6, 323--353.
##' @examples
##' # See example in peer()

### Function to plot estimated regression function
plot.peer<- function(x, conf=0.95, ylab='Estimated regression function', main=expression(gamma),...){
  if(!inherits(x, "peer")) return (cat("Error: The object is not an peer object.\n"))
  # scalar condition: short-circuit || instead of elementwise |
  if(conf>0.99 || conf<0.70) return (cat("Error: Confidence level should be within 0.70 and 0.99\n"))
  status<- x$status
  est<- x$GammaHat
  if(status==0) matplot(est, type='l', ylab=ylab,
                        main=main, ...)
  if(status==1){
    # pointwise normal confidence band (only available when peer() was
    # called with se=TRUE)
    ll<- est - qnorm(0.5+conf/2)*x$se.Gamma
    ul<- est + qnorm(0.5+conf/2)*x$se.Gamma
    matplot(est, type='l', ylim=range(est, ll, ul), ylab=ylab,
            main=main, ...)
    matplot(ll, type='l', add=TRUE, lty=2, col=2)  # TRUE, not T
    matplot(ul, type='l', add=TRUE, lty=2, col=2)
  }
  abline(h=0)
}

# ---- file: R/postprocess.pfr.R ----

# Assemble the return object for pfr(): fitted values at the population
# (level 0) and subject (level 1) levels, plus coefficient-function estimates
# with pointwise 95% confidence bounds for each of the N.Pred functional
# predictors.
postprocess.pfr <- function(fit=NULL, X=NULL, p=NULL, N_subj=NULL, phi=NULL, subj=NULL, N.Pred=NULL, kb=NULL){
  coefs = fit$coef
  fitted.vals <- as.matrix(X[, 1:length(coefs)]) %*% coefs
  beta.covariates = coefs[1:(p + 1)]
  ## assign level 0 (population) and level 1 (subject) fittings
  ## assigning depends on subj=NULL and equivalently N_subj = 0
  # scalar test: short-circuit && instead of elementwise &
  if(is.null(subj) && N_subj == 0){
    fitted.vals.level.0 <- fitted.vals
    fitted.vals.level.1 <- NULL
  }else{
    fitted.vals.level.1 <- fitted.vals
    ## for population level (level 0), remove subject specific columns and coefficients
    fitted.vals.level.0 <- as.matrix(X[,-1*c((p+2):(N_subj+p+1))])%*%fit$coef[-1*c((p+2):(N_subj+p+1))]
  }
  BetaHat <- varBeta <- varBetaHat <- Bounds <- list()
  for(i in 1:N.Pred){
    # coefficient-function estimate for predictor i on the basis phi[[i]]
    BetaHat[[i]] = phi[[i]] %*% coefs[-1*(1:(N_subj+p+1))][((i-1)*kb+1):(kb*i)]
    varBeta[[i]] = fit$Vp[-1*(1:(N_subj+p+1)),-1*(1:(N_subj+p+1))][((i-1)*kb+1):(kb*i),((i-1)*kb+1):(kb*i)]
    varBetaHat[[i]] = phi[[i]] %*% varBeta[[i]] %*% t(phi[[i]])
    # pointwise 95% bounds (upper column first, then lower)
    Bounds[[i]] = cbind(BetaHat[[i]] + 1.96 * (sqrt(diag(varBetaHat[[i]]))),
                        BetaHat[[i]] - 1.96 * (sqrt(diag(varBetaHat[[i]]))))
  }

  ## old pfr (v. XX.XX.Y) would not return C, J, or CJ. We do that here:
  # single named list instead of parallel list + names() assignment
  list(fit = fit, fitted.vals = fitted.vals,
       fitted.vals.level.0 = fitted.vals.level.0,
       fitted.vals.level.1 = fitted.vals.level.1,
       BetaHat = BetaHat, beta.covariates = beta.covariates,
       varBetaHat = varBetaHat, Bounds = Bounds)
}

# ---- file: R/predict.fosr.R ----

#' Prediction from a fitted bayes_fosr model
#'
#' Takes a fitted \code{fosr}-object produced by \code{\link{bayes_fosr}} and produces predictions given a
#' new set of values for the model covariates or the original values used for the model fit.
#'
#' @param object a fitted \code{fosr} object as produced by \code{\link{bayes_fosr}}
#' @param newdata a named list containing the values of the model covariates at which predictions
#' are required. If this is not provided then predictions corresponding to the original data are
#' returned. All variables provided to newdata should be in the format supplied to the model fitting
#' function.
#' @param ... additional (unused) arguments
#'
#' @return ...
14 | #' 15 | #' @author Jeff Goldsmith \email{jeff.goldsmith@@columbia.edu} 16 | #' @seealso \code{\link{bayes_fosr}} 17 | #' @export 18 | #' @examples 19 | #' \dontrun{ 20 | #' library(reshape2) 21 | #' library(dplyr) 22 | #' library(ggplot2) 23 | #' 24 | #' ##### Cross-sectional real-data example ##### 25 | #' 26 | #' ## organize data 27 | #' data(DTI) 28 | #' DTI = subset(DTI, select = c(cca, case, pasat)) 29 | #' DTI = DTI[complete.cases(DTI),] 30 | #' DTI$gender = factor(sample(c("male","female"), dim(DTI)[1], replace = TRUE)) 31 | #' DTI$status = factor(sample(c("RRMS", "SPMS", "PPMS"), dim(DTI)[1], replace = TRUE)) 32 | #' 33 | #' ## fit models 34 | #' VB = bayes_fosr(cca ~ pasat, data = DTI, Kp = 4, Kt = 10) 35 | #' 36 | #' ## obtain predictions 37 | #' pred = predict(VB, sample_n(DTI, 10)) 38 | #' } 39 | #' 40 | predict.fosr <- function (object, newdata, ...) { 41 | 42 | if (!missing(newdata)) { 43 | X.design = model.matrix(object$terms, newdata) 44 | y = X.design %*% object$beta.hat 45 | } 46 | 47 | else { 48 | y = object$Yhat 49 | } 50 | 51 | return(y) 52 | 53 | } 54 | 55 | -------------------------------------------------------------------------------- /R/predict.fosr.vs.R: -------------------------------------------------------------------------------- 1 | #' Prediction for Function-on Scalar Regression with variable selection 2 | #' 3 | #' Given a "\code{\link{fosr.vs}}" object and new data, produces fitted values. 4 | #' 5 | #' @param object an object of class "\code{\link{fosr.vs}}". 6 | #' @param newdata a data frame that contains the values of the model covariates at which predictors are required. 7 | #' @param ... additional arguments. 8 | #' 9 | #' @return fitted values. 
#'
#' @author Yakuan Chen \email{yc2641@@cumc.columbia.edu}
#' @seealso \code{\link{fosr.vs}}
#' @export
#'
#' @examples
#' \dontrun{
#' I = 100
#' p = 20
#' D = 50
#' grid = seq(0, 1, length = D)
#'
#' beta.true = matrix(0, p, D)
#' beta.true[1,] = sin(2*grid*pi)
#' beta.true[2,] = cos(2*grid*pi)
#' beta.true[3,] = 2
#'
#' psi.true = matrix(NA, 2, D)
#' psi.true[1,] = sin(4*grid*pi)
#' psi.true[2,] = cos(4*grid*pi)
#' lambda = c(3,1)
#'
#' set.seed(100)
#'
#' X = matrix(rnorm(I*p), I, p)
#' C = cbind(rnorm(I, mean = 0, sd = lambda[1]), rnorm(I, mean = 0, sd = lambda[2]))
#'
#' fixef = X%*%beta.true
#' pcaef = C %*% psi.true
#' error = matrix(rnorm(I*D), I, D)
#'
#' Yi.true = fixef
#' Yi.pca = fixef + pcaef
#' Yi.obs = fixef + pcaef + error
#'
#' data = as.data.frame(X)
#' data$Y = Yi.obs
#' fit.mcp = fosr.vs(Y~., data = data[1:80,], method="grMCP")
#' predicted.value = predict(fit.mcp, data[81:100,])
#'
#' }
#'

predict.fosr.vs <- function(object, newdata=NULL, ...){
  # design matrix for the new covariate values, times estimated coefficients
  x <- model.matrix(object$formula, newdata)
  y <- x %*% coef(object)
  return(y)
}

# ---- file: R/predict.pfr_old.R ----

#' @export
predict.pfr_old <- function(object, new.data=NULL, levels=NULL, ...){
  ## predict.pfr() will provide fitted values at the population and subject level for pfr objects
  ## predict.pfr() will also provide predictions for new.data at each level where applicable.
  ## if new.data is null, then return fitted.vals.level.0 and fitted.vals.level.1 from object
  if(is.null(new.data)){
    fitted.vals.level.0 <- object$fitted.vals.level.0
    fitted.vals.level.1 <- object$fitted.vals.level.1
  }else{
    ## if new.data is not null, then we need to do a few parsing steps and calculate predictions.
    par <- parse.predict.pfr(object, new.data)
    ## prep new.data for matrix multiplication; calculate subject specific scores and loadings for funcs.new
    pre <- preprocess.pfr(subj=par$subj.new,
                          covariates=par$covariates.new, funcs=par$funcs.old, kz=par$kz.old, kb=par$kb.old,
                          nbasis=par$nbasis.old,
                          funcs.new=par$funcs.new, smooth.option=par$smooth.option.old)
    ## calculate all the functional pieces into one.sum; need loop.
    one.sum <- rep(0, nrow(pre$C[[1]]))
    for(i in 1:length(pre$C)){
      one.sum <- one.sum + pre$C[[i]]%*%t(pre$psi[[i]])%*%(unlist(par$W[[i]]))
    }
    ## calc level 0 and level 1
    fitted.vals.level.0 <- if(is.null(par$covariates.new)){
      par$alpha.old + one.sum
    }else{
      par$alpha.old + par$covariates.new%*%par$beta.old + one.sum
    }
    ## for Z1, need for loop to assign NAs into Z1 and replace NA intercepts with 0
    ## this is so that the fitted.vals.level.1 will be NA if the subj.new was not in subj.old
    if(!is.null(par$subj.new)){
      for(i in 1:ncol(pre$Z1)){ pre$Z1[pre$Z1[,i]==1, i] <- par$rand.int.new[i]}
      # NA random intercepts propagate through rowSums to NA predictions
      rs <- rowSums(pre$Z1)
      fitted.vals.level.1 <- fitted.vals.level.0 + rs
    }else{
      fitted.vals.level.1 <- rep(NA, length(fitted.vals.level.0))
    }
  }
  list(fitted.vals.level.0 = fitted.vals.level.0,
       fitted.vals.level.1 = fitted.vals.level.1)
}

# ---- file: R/pspline.setting.R ----

# Set up the design matrix, difference penalty and associated spectral
# decompositions for univariate P-spline smoothing (optionally periodic,
# optionally weighted).  Returns the transformed design A, the raw design B,
# the penalty eigenvalues s, Sig^{-1/2}, the eigenvectors U and the penalty P.
pspline.setting <- function(x,knots=select_knots(x,35),p=3,m=2,periodicity=FALSE,weight=NULL){

  # x: the marginal data points
  # knots: the list of interior knots or the numbers of interior knots
  # p: degrees for B-splines, with defaults values 3
  # m: orders of difference penalty, with default values 2
  #require(splines)
  #require(Matrix)

  ### design matrix
  K = length(knots)-2*p-1
  B = spline.des(knots=knots, x=x, ord = p+1,outer.ok = TRUE)$design
  if(periodicity){
    # wrap the first/last p columns around so the fitted curve is periodic
    Bint = B[,-c(1:p,K+1:p)]
    Bleft = B[,1:p]
    Bright = B[,K+1:p]
    B = cbind(Bint,Bleft+Bright)
  }


  # m-th order difference operator matrix for K interior knots
  difference.penalty <-function(m,p,K,periodicity=periodicity){

    # parameter m: difference order
    # parameter p: degree of B-splines
    # parameter K: number of interior knots
    # signed binomial coefficients of the m-th difference operator
    c = rep(0,m+1)

    for(i in 0:m)
      c[i+1] = (-1)^(i+1)*factorial(m)/(factorial(i)*factorial(m-i))

    if(!periodicity){

      M = matrix(0,nrow=K+p-m,ncol=K+p)
      for(i in 1:(K+p-m)) M[i,i:(i+m)] = c
    }
    if(periodicity){

      # differences wrap around the boundary
      M = matrix(0,nrow=K,ncol=K)
      for(i in 1:(K-m)) M[i,i:(i+m)] = c
      for(i in (K-m+1):K) M[i,c(i:K,1:(m-K+i))] = c
    }

    return(M)
  }


  P = difference.penalty(m,p,K,periodicity)
  P1 = Matrix(P)
  P2 = Matrix(t(P))
  P = P2%*%P1          # P'P: the actual penalty matrix

  # scale rows (option=2) or columns (option=1) of A by the vector s
  MM <- function(A,s,option=1){
    if(option==2)
      return(A*(s%*%t(rep(1,dim(A)[2]))))
    if(option==1)
      return(A*(rep(1,dim(A)[1])%*%t(s)))
  }

  if(is.null(weight)) weight <- rep(1,length(x))


  B1 = Matrix(MM(t(B),weight))   # t(B) %*% diag(weight)
  B = Matrix(B)
  Sig = B1%*%B                   # t(B) W B -- symmetric by construction
  # symmetric=TRUE for consistency with the eigen() call below and to avoid
  # spurious complex eigenvalues from numerical asymmetry
  eSig = eigen(Sig, symmetric=TRUE)
  V = eSig$vectors
  E = eSig$values
  if(min(E)<=0.0000001) {
    # t(B)%*%B is (numerically) singular: ridge the eigenvalues slightly
    E <- E + 0.000001;

  }
  Sigi_sqrt = MM(V,1/sqrt(E))%*%t(V)   # Sig^{-1/2}

  #Sigi = V%*%diag(1/E)%*%t(V)
  # diagonalize the penalty in the whitened coordinate system
  tUPU = Sigi_sqrt%*%(P%*%Sigi_sqrt)
  Esig = eigen(tUPU,symmetric=TRUE)
  U = Esig$vectors
  s = Esig$values
  # the null space of the m-th order difference penalty is penalty-free
  if(!periodicity) s[(K+p-m+1):(K+p)]=0
  if(periodicity) s[K] = 0
  A = B%*%(Sigi_sqrt%*%U)

  List = list(
    "A" = A,
    "B" = B,
    "s" = s,
    "Sigi.sqrt"=Sigi_sqrt,
    "U" = U,
    "P" = P)

  return(List)
}

# ---- file: R/pwcv.R ----

##' Pointwise cross-validation for function-on-scalar regression
##'
##' Estimates prediction error for a function-on-scalar regression model by
##' leave-one-function-out cross-validation (CV), at each of a specified set of
##' points.
##'
##' Integrating the pointwise CV estimate over the function domain yields the
##' \emph{cross-validated integrated squared error}, the standard overall model
##' fit score returned by \code{\link{lofocv}}.
##'
##' It may be desirable to derive the value of \code{lambda} from an
##' appropriate call to \code{\link{fosr}}, as in the example below.
##'
##' @param fdobj a functional data object (class \code{fd}) giving the
##' functional responses.
##' @param Z the model matrix, whose columns represent scalar predictors.
##' @param L a row vector or matrix of linear contrasts of the coefficient
##' functions, to be restricted to equal zero.
##' @param lambda smoothing parameter: either a nonnegative scalar or a vector,
##' of length \code{ncol(Z)}, of nonnegative values.
##' @param eval.pts argument values at which the CV score is to be evaluated.
22 | ##' @param scale logical value or vector determining scaling of the matrix 23 | ##' \code{Z} (see \code{\link{scale}}, to which the value of this argument is 24 | ##' passed). 25 | ##' @return A vector of the same length as \code{eval.pts} giving the CV 26 | ##' scores. 27 | ##' @author Philip Reiss \email{phil.reiss@@nyumc.org} 28 | ##' @seealso \code{\link{fosr}}, \code{\link{lofocv}} 29 | ##' @references Reiss, P. T., Huang, L., and Mennes, M. (2010). Fast 30 | ##' function-on-scalar regression with penalized basis expansions. 31 | ##' \emph{International Journal of Biostatistics}, 6(1), article 28. Available 32 | ##' at \url{https://pubmed.ncbi.nlm.nih.gov/21969982/} 33 | ##' @export 34 | ##' @importFrom fda getbasispenalty eval.basis 35 | pwcv <- function(fdobj, Z, L=NULL, lambda, eval.pts=seq(min(fdobj$basis$range),max(fdobj$basis$range), length.out=201), scale=FALSE) { 36 | Z = scale(Z, center=FALSE, scale=scale) 37 | bss = fdobj$basis 38 | q = ncol(Z) 39 | 40 | J = getbasispenalty(bss, 0) 41 | svdJ = svd(J) 42 | J12 = svdJ$u %*% diag(sqrt(svdJ$d)) %*% t(svdJ$u) 43 | 44 | if (length(lambda) %in% c(1,q)) S = diag(lambda, q) %x% getbasispenalty(bss, 2) 45 | else stop("lambda must be either a scalar or a vector of length ncol(Z)") 46 | 47 | C = t(fdobj$coefs) 48 | N = NROW(C); K = NCOL(C) 49 | coefs.t = as.vector(J12 %*% t(C)) 50 | 51 | if (!is.null(L)) { 52 | constr = L %x% diag(bss$nbasis) 53 | n.con = dim(constr)[1] 54 | Z. = qr.Q(qr(t(constr)), complete=TRUE)[ , -(1:n.con)] 55 | X. = (Z %x% J12) %*% Z. 56 | S. = crossprod(Z., S %*% Z.) 57 | } 58 | else { 59 | X. = Z %x% J12 60 | S. = S 61 | } 62 | 63 | A = X. 
%*% solve(crossprod(X.)+S., t(X.)) 64 | resmat = t(matrix(coefs.t - A %*% coefs.t, K)) 65 | 66 | discreps = matrix(NA, K, N) 67 | for (i in 1:N) { 68 | ith = ((i-1)*K+1):(i*K) 69 | discreps[ , i] = solve(diag(K)-A[ith,ith], resmat[i, ]) 70 | } 71 | 72 | pw.discreps = eval.basis(eval.pts, bss) %*% solve(J12, discreps) 73 | pw.prederr = rowMeans(pw.discreps^2) 74 | pw.prederr 75 | } 76 | -------------------------------------------------------------------------------- /R/quadWeights.R: -------------------------------------------------------------------------------- 1 | #' Compute quadrature weights 2 | #' 3 | #' Utility function for numerical integration. 4 | #' @param argvals function arguments. 5 | #' @param method quadrature method. Can be either \code{trapezoidal} or \code{midpoint}. 6 | #' @return a vector of quadrature weights for the points supplied in \code{argvals}. 7 | #' @author Clara Happ, with modifications by Philip Reiss 8 | 9 | # TODO: check about 'midpoint' 10 | # TODO: have this function called by lf, af, etc. 11 | # TODO: harmonize with quadrature implemented elsewhere: 'simpson' and 'riemann' options? 12 | 13 | quadWeights<- function(argvals, method = "trapezoidal") 14 | { 15 | ret <- switch(method, 16 | trapezoidal = {D <- length(argvals) 17 | 1/2*c(argvals[2] - argvals[1], argvals[3:D] -argvals[1:(D-2)], argvals[D] - argvals[D-1])}, 18 | midpoint = c(0,diff(argvals)), # why is this called 'midpoint'??? 19 | stop("function quadWeights: choose either trapezoidal or midpoint quadrature rule")) 20 | 21 | return(ret) 22 | } 23 | -------------------------------------------------------------------------------- /R/re.R: -------------------------------------------------------------------------------- 1 | #' Random effects constructor for fgam 2 | #' 3 | #' Sets up a random effect for the levels of \code{x}. 4 | #' Use the \code{by}-argument to request random slopes. 5 | #' 6 | #' See \code{\link[mgcv]{random.effects}} in \pkg{mgcv}.
7 | #' 8 | #' @param x a grouping variable: must be a \code{factor} 9 | #' @param ... further arguments handed over to \code{\link[mgcv]{s}}, 10 | #' see \code{\link[mgcv]{random.effects}} 11 | #' @seealso \code{\link[mgcv]{random.effects}} 12 | #' @export 13 | re <- function(x, ...) { 14 | # TODO: add `cov`-arg, then call bs="mrf" to allow for correlated effects. 15 | data <- list(x) 16 | xsymbol <- substitute(x) 17 | names(data) <- deparse(xsymbol) 18 | call <- match.call() 19 | call[[1]] <- quote(s) 20 | call$bs <- "re" 21 | list(call = call, data = data) 22 | } -------------------------------------------------------------------------------- /R/select_knots.R: -------------------------------------------------------------------------------- 1 | 2 | select_knots <- function(t,knots=10,p=3,option="equally-spaced"){ 3 | 4 | qs <- seq(0,1,length=knots+1) 5 | 6 | if(option=="equally-spaced"){ 7 | knots <- (max(t)-min(t))*qs + min(t) 8 | } 9 | if(option=="quantile"){ 10 | knots <- as.vector(quantile(t,qs)) 11 | } 12 | 13 | K <- length(knots) 14 | knots_left <- 2*knots[1]-knots[p:1+1] 15 | knots_right <- 2*knots[K] - knots[K-(1:p)] 16 | 17 | return(c(knots_left,knots,knots_right)) 18 | } -------------------------------------------------------------------------------- /R/summary.pfr.R: -------------------------------------------------------------------------------- 1 | #' Summary for a pfr fit 2 | #' 3 | #' Take a fitted \code{pfr}-object and produce summaries from it. 4 | #' See \code{\link[mgcv]{summary.gam}()} for details. 5 | #' 6 | #' @param object a fitted \code{pfr}-object 7 | #' @param ... see \code{\link[mgcv]{summary.gam}()} for options. 8 | #' 9 | #' @return A list with summary information, see \code{\link[mgcv]{summary.gam}()} 10 | #' @method summary pfr 11 | #' 12 | #' @details 13 | #' This function currently simply strips the \code{"pfr"} class label and 14 | #' calls \code{\link[mgcv]{summary.gam}}. 
15 | #' 16 | #' @author Jonathan Gellar \email{JGellar@@mathematica-mpr.com}, Fabian Scheipl 17 | #' @export 18 | summary.pfr <- function (object, ...) { 19 | call <- match.call() 20 | call[[1]] <- mgcv::summary.gam 21 | ## drop "pfr" class and replace with changed value s.t. method dispatch 22 | ## works without glitches 23 | ## if we don't do this, summary.gam will call coef.pfr on the object and that 24 | ## doesn't work 25 | class(object) <- class(object)[!(class(object) %in% "pfr")] 26 | call$object <- as.name("object") 27 | eval(call) 28 | #TODO: modify term names to correspond to pfr formula notation, see summary.pffr 29 | } 30 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # `refund` 2 | [![](https://travis-ci.org/refunders/refund.svg?branch=master)](https://travis-ci.org/refunders/refund) 3 | 4 | [![](http://cranlogs.r-pkg.org/badges/refund)](https://cran.rstudio.com/web/packages/refund/index.html) 5 | 6 | ## Methods for regression with functional data 7 | 8 | These packages implement various approaches to functional data regression. 9 | 10 | Regression with scalar responses and functional predictors is implemented in functions `pfr`, `peer`, `lpeer`, `fpcr` and `fgam`. For regression with functional responses, see `pffr`, `fosr`, and `fosr2s`. 11 | 12 | Regularized covariance and FPC estimation is implemented in functions `fpca.sc`, 13 | `fpca.ssvd`, `fpca.face`, `fpca2s`. 14 | 15 | 16 | Shiny-based interactive graphics for visualizing results from `fpca` and regression methods in `refund` can be generated using the `plot_shiny()` function in the `refund.shiny` package. 17 | 18 | 19 | Wavelet-based functional regression methods with scalar responses and functional predictors can be found in the `wcr` and `wnet` functions in the `refund.wave` package. 
20 | 21 | --------------- 22 | 23 | ### Installation 24 | 25 | To install the latest patched version directly from Github, please use `devtools::install_github("refunders/refund")` for `refund` and `devtools::install_github("refunders/refund.shiny")` for `refund.shiny` and `devtools::install_github("refunders/refund.wave")` for `refund.wave`. 26 | 27 | To install the developer version with experimental features directly from Github, please use `devtools::install_github("refunders/refund", ref="devel")`. 28 | 29 | -------------------------------------------------------------------------------- /cran-comments.md: -------------------------------------------------------------------------------- 1 | ## CRAN submission 0.1-30 2 | 3 | Version: 0.1-29 4 | Check: S3 generic/method consistency 5 | 6 | ## CRAN submission 0.1-29 7 | 8 | * Fixed errors due to failure to find CanadianWeather data in examples 9 | 10 | ## CRAN submission 0.1-28 11 | 12 | The Date field is not in ISO 8601 yyyy-mm-dd format. 13 | 14 | * Fixed for resubmission 15 | 16 | 17 | 18 | ## Test environments 19 | * local windows 8 x64, R 3.1.3 20 | * win-builder (devel and release) 21 | 22 | ## R CMD check results 23 | There were no ERRORs or NOTEs. 24 | 25 | There is 1 WARNING: "'library' or 'require' call not declared from: ‘dtw’" this is because the package is called via the "method="dtw", window.type="sakoechiba"" options to dist(). 
26 | -------------------------------------------------------------------------------- /data/COVID19.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/refunders/refund/b399f0b452aef13af408e6753529b3217175ca4f/data/COVID19.rda -------------------------------------------------------------------------------- /data/DTI.RData: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/refunders/refund/b399f0b452aef13af408e6753529b3217175ca4f/data/DTI.RData -------------------------------------------------------------------------------- /data/DTI2.RData: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/refunders/refund/b399f0b452aef13af408e6753529b3217175ca4f/data/DTI2.RData -------------------------------------------------------------------------------- /data/PEER.Sim.RData: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/refunders/refund/b399f0b452aef13af408e6753529b3217175ca4f/data/PEER.Sim.RData -------------------------------------------------------------------------------- /data/Q.RData: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/refunders/refund/b399f0b452aef13af408e6753529b3217175ca4f/data/Q.RData -------------------------------------------------------------------------------- /data/cd4.RData: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/refunders/refund/b399f0b452aef13af408e6753529b3217175ca4f/data/cd4.RData -------------------------------------------------------------------------------- /data/content.rda: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/refunders/refund/b399f0b452aef13af408e6753529b3217175ca4f/data/content.rda -------------------------------------------------------------------------------- /data/gasoline.RData: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/refunders/refund/b399f0b452aef13af408e6753529b3217175ca4f/data/gasoline.RData -------------------------------------------------------------------------------- /data/sofa.RData: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/refunders/refund/b399f0b452aef13af408e6753529b3217175ca4f/data/sofa.RData -------------------------------------------------------------------------------- /man/COVID19.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/COVID19-data.R 3 | \docType{data} 4 | \name{COVID19} 5 | \alias{COVID19} 6 | \title{The US weekly all-cause mortality and COVID19-associated deaths in 2020} 7 | \format{ 8 | A list made up of \describe{ 9 | \item{US_weekly_mort}{A numeric vector of length 207, which contains the 10 | total number of weekly all-cause deaths in the US from January 14, 2017 to December 26, 2020;} 11 | \item{US_weekly_mort_dates}{A vector of dates of length 207, which contains 12 | the weeks corresponding to the US_weekly_mort vector;} 13 | \item{US_weekly_mort_CV19}{A numeric vector of length 52, which contains the 14 | total number of weekly COVID 19 deaths in the US from January 4, 2020 to December 26, 2020;} 15 | \item{US_weekly_mort_CV19_dates}{A vector of dates of length 52, which contains 16 | the weeks corresponding to the US_weekly_mort_CV19 vector;} 17 | \item{US_weekly_excess_mort_2020}{A numeric vector of length 52, which contains 18 | the US weekly excess mortality (total mortality in one week in 2020 minus 19 | total mortality in 
the corresponding week of 2019) from January 4, 2020 to December 26, 2020;} 20 | \item{US_weekly_excess_mort_2020_dates}{A vector of dates of length 52, which contains 21 | the weeks corresponding to the US_weekly_excess_mort_2020 vector;} 22 | \item{US_states_names}{A vector of strings containing the names of 52 US states 23 | and territories in alphabetic order. These are the states for which all-cause 24 | and Covid-19 data are available in this data set;} 25 | \item{US_states_population}{A numeric vector containing the population of the 26 | 52 states in the vector US_states_names estimated as of July 1, 2020. The 27 | order of the vector US_states_population is the same as that of US_states_names;} 28 | \item{States_excess_mortality}{A numeric 52 x 52 dimensional matrix that 29 | contains the weekly US excess mortality in 52 states and territories. Each 30 | row corresponds to one state in the same order as the vector US_states_names. 31 | Each column corresponds to a week in 2020 corresponding to the order in the 32 | vector US_weekly_excess_mort_2020_dates. The (i,j)th entry of the matrix is 33 | the difference in all-cause mortality during the week j of 2020 and 2019 for state i;} 34 | \item{States_excess_mortality_per_million}{A numeric 52 x 52 dimensional matrix 35 | that contains the weekly US excess mortality in 52 states and territories 36 | per one million individuals. This is obtained by dividing every row (corresponding 37 | to a state) of States_excess_mortality by the population of that state stored 38 | in US_states_population and multiplying by one million;} 39 | \item{States_CV19_mortality}{A numeric 52 x 52 dimensional matrix that contains 40 | the weekly US Covid-19 mortality in 52 states and territories. Each row 41 | corresponds to one state in the same order as the vector US_states_names.
Each 42 | column corresponds to a week in 2020 corresponding to the order in the 43 | vector US_weekly_excess_mort_2020_dates;} 44 | \item{States_CV19_mortality_per_million}{A numeric 52 x 52 dimensional matrix 45 | that contains the weekly US Covid-19 mortality in 52 states and territories 46 | per one million individuals. This is obtained by dividing every row (corresponding 47 | to a state) of States_CV19_mortality by the population of that state stored 48 | in US_states_population and multiplying by one million.} 49 | } 50 | } 51 | \usage{ 52 | data(COVID19) 53 | } 54 | \description{ 55 | The COVID19 mortality data used in the "Functional Data Analysis with R" book 56 | } 57 | \references{ 58 | Crainiceanu, C., Goldsmith, J., Leroux, A., Cui, E. (2023). Functional 59 | Data Analysis with R. \emph{Chapman & Hall/CRC Statistics} 60 | } 61 | -------------------------------------------------------------------------------- /man/DTI.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/DTI-data.R 3 | \docType{data} 4 | \name{DTI} 5 | \alias{DTI} 6 | \title{Diffusion Tensor Imaging: tract profiles and outcomes} 7 | \format{ 8 | A data frame made up of \describe{ 9 | \item{cca}{A 382 x 93 10 | matrix of fractional anisotropy tract profiles from the corpus 11 | callosum;} 12 | \item{rcst}{A 382 x 55 matrix 13 | of fractional anisotropy tract profiles from the right corticospinal 14 | tract;} 15 | \item{ID}{Numeric vector of subject ID numbers;} 16 | \item{visit}{Numeric vector of the subject-specific visit 17 | numbers;} 18 | \item{visit.time}{Numeric vector of the subject-specific visit time, measured 19 | in days since first visit;} 20 | \item{Nscans}{Numeric vector indicating the total number of visits 21 | for each subject;} 22 | \item{case}{Numeric vector of multiple sclerosis case status: 0 - healthy control, 1 - MS case;} 23 | \item{sex}{factor 
variable indicating subject's sex;} 24 | 25 | \item{pasat}{Numeric vector containing the PASAT score at 26 | each visit.} 27 | } 28 | } 29 | \description{ 30 | Fractional anisotropy (FA) tract profiles for the corpus callosum (cca) and 31 | the right corticospinal tract (rcst). Accompanying the tract profiles are 32 | the subject ID numbers, visit number, total number of scans, multiple 33 | sclerosis case status and Paced Auditory Serial Addition Test (pasat) 34 | score. 35 | } 36 | \details{ 37 | If you use this data as an example in written work, please include the 38 | following acknowledgment: ``The MRI/DTI data were collected at Johns 39 | Hopkins University and the Kennedy-Krieger Institute" 40 | 41 | DTI2 uses mean diffusivity of the corpus callosum rather than FA, and 42 | parallel diffusivity of the rcst rather than FA. Please see the 43 | documentation for DTI2. 44 | } 45 | \references{ 46 | Goldsmith, J., Bobb, J., Crainiceanu, C., Caffo, B., and Reich, 47 | D. (2011). Penalized Functional Regression. \emph{Journal of Computational 48 | and Graphical Statistics}, 20, 830 - 851. 49 | 50 | Goldsmith, J., Crainiceanu, C., Caffo, B., and Reich, D. (2010). 51 | Longitudinal Penalized Functional Regression for Cognitive Outcomes on 52 | Neuronal Tract Measurements. \emph{Journal of the Royal Statistical 53 | Society: Series C}, 61, 453 - 469.
54 | } 55 | -------------------------------------------------------------------------------- /man/DTI2.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/DTI2-data.R 3 | \docType{data} 4 | \name{DTI2} 5 | \alias{DTI2} 6 | \title{Diffusion Tensor Imaging: more fractional anisotropy profiles and outcomes} 7 | \format{ 8 | A data frame made up of \describe{ 9 | \item{cca}{a 340 x 93 10 | matrix of fractional anisotropy profiles from the corpus callosum;} 11 | \item{rcst}{a 340 x 55 matrix of fractional anisotropy 12 | profiles from the right corticospinal tract;} 13 | \item{id}{numeric vector of subject ID numbers;} 14 | \item{visit}{numeric vector of the 15 | subject-specific visit numbers;} 16 | \item{pasat}{numeric vector 17 | containing the PASAT score at each visit.} 18 | } 19 | } 20 | \description{ 21 | A diffusion tensor imaging dataset used in Swihart et al. (2012). Mean 22 | diffusivity profiles for the corpus callosum (cca) and parallel diffusivity 23 | for the right corticospinal tract (rcst). Accompanying the profiles are the 24 | subject ID numbers, visit number, and Paced Auditory Serial Addition Test 25 | (pasat) score. We thank Dr. Daniel Reich for making this dataset available. 26 | } 27 | \details{ 28 | If you use this data as an example in written work, please include the 29 | following acknowledgment: ``The MRI/DTI data were collected at Johns 30 | Hopkins University and the Kennedy-Krieger Institute" 31 | 32 | Note: DTI2 uses mean diffusivity of the corpus callosum rather than 33 | fractional anisotropy (FA), and parallel diffusivity of the rcst rather 34 | than FA. Please see the documentation for DTI for more about the DTI 35 | dataset. 36 | } 37 | \references{ 38 | Goldsmith, J., Bobb, J., Crainiceanu, C., Caffo, B., and Reich, 39 | D. (2011). Penalized functional regression.
\emph{Journal of Computational 40 | and Graphical Statistics}, 20(4), 830--851. 41 | 42 | Goldsmith, J., Crainiceanu, C., Caffo, B., and Reich, D. (2012). 43 | Longitudinal penalized functional regression for cognitive outcomes on 44 | neuronal tract measurements. \emph{Journal of the Royal Statistical 45 | Society: Series C}, 61(3), 453--469. 46 | 47 | Swihart, B. J., Goldsmith, J., and Crainiceanu, C. M. (2014). Restricted 48 | Likelihood Ratio Tests for Functional Effects in the Functional Linear Model. 49 | \emph{Technometrics}, 56, 483--493. 50 | } 51 | \keyword{datasets} 52 | -------------------------------------------------------------------------------- /man/PEER.Sim.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/PEER.Sim-data.R 3 | \docType{data} 4 | \name{PEER.Sim} 5 | \alias{PEER.Sim} 6 | \alias{Q} 7 | \title{Simulated longitudinal data with functional predictor and scalar response, 8 | and structural information associated with predictor function} 9 | \format{ 10 | The data frame \code{PEER.Sim} is made up of subject ID 11 | number(\code{id}), subject-specific time of measurement (\code{t}), 12 | functional predictor profile (\code{W.1-W.100}) and scalar response 13 | (\code{Y}) 14 | } 15 | \description{ 16 | \code{PEER.Sim} contains simulated observations from 100 subjects, each 17 | observed at 4 distinct timepoints. At each timepoint bumpy predictor 18 | profile is generated randomly and the scalar response variable is generated 19 | considering a time-varying regression function and subject intercept. 20 | Accompanying the functional predictor and scalar response are the subject 21 | ID numbers and time of measurements. 22 | } 23 | \details{ 24 | \code{Q} represents the 7 x 100 matrix where each row provides structural 25 | information about the functional predictor profile for data 26 | \code{PEER.Sim}. 
For specific details about the simulation and Q matrix, 27 | please refer to Kundu et. al. (2012). 28 | } 29 | \references{ 30 | Kundu, M. G., Harezlak, J., and Randolph, T. W. (2012). 31 | Longitudinal functional models with structured penalties. (please contact 32 | J. Harezlak at \email{harezlak@iupui.edu}) 33 | } 34 | -------------------------------------------------------------------------------- /man/Predict.matrix.dt.smooth.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/dt_basis.R 3 | \name{Predict.matrix.dt.smooth} 4 | \alias{Predict.matrix.dt.smooth} 5 | \title{Predict.matrix method for dt basis} 6 | \usage{ 7 | \method{Predict.matrix}{dt.smooth}(object, data) 8 | } 9 | \arguments{ 10 | \item{object}{a \code{dt.smooth} object created by 11 | \code{\link{smooth.construct.dt.smooth.spec}}, see 12 | \code{\link[mgcv]{smooth.construct}}} 13 | 14 | \item{data}{see \code{\link[mgcv]{smooth.construct}}} 15 | } 16 | \value{ 17 | design matrix for domain-transformed terms 18 | } 19 | \description{ 20 | Predict.matrix method for dt basis 21 | } 22 | \author{ 23 | Jonathan Gellar 24 | } 25 | -------------------------------------------------------------------------------- /man/Predict.matrix.fpc.smooth.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/fpc.R 3 | \name{Predict.matrix.fpc.smooth} 4 | \alias{Predict.matrix.fpc.smooth} 5 | \title{mgcv-style constructor for prediction of FPC terms} 6 | \usage{ 7 | \method{Predict.matrix}{fpc.smooth}(object, data) 8 | } 9 | \arguments{ 10 | \item{object}{a \code{fpc.smooth} object created by 11 | \code{{smooth.construct.fpc.smooth.spec}}, see 12 | \code{[mgcv]{smooth.construct}}} 13 | 14 | \item{data}{see \code{[mgcv]{smooth.construct}}} 15 | } 16 | \value{ 17 | design matrix for FPC 
terms 18 | } 19 | \description{ 20 | mgcv-style constructor for prediction of FPC terms 21 | } 22 | \author{ 23 | Jonathan Gellar 24 | } 25 | -------------------------------------------------------------------------------- /man/Predict.matrix.pcre.random.effect.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/pffr-pcre.R 3 | \name{Predict.matrix.pcre.random.effect} 4 | \alias{Predict.matrix.pcre.random.effect} 5 | \title{mgcv-style constructor for prediction of PC-basis functional random effects} 6 | \usage{ 7 | \method{Predict.matrix}{pcre.random.effect}(object, data) 8 | } 9 | \arguments{ 10 | \item{object}{a smooth specification object, see \code{\link[mgcv]{smooth.construct}}} 11 | 12 | \item{data}{see \code{\link[mgcv]{smooth.construct}}} 13 | } 14 | \value{ 15 | design matrix for PC-based functional random effects 16 | } 17 | \description{ 18 | mgcv-style constructor for prediction of PC-basis functional random effects 19 | } 20 | \author{ 21 | Fabian Scheipl; adapted from 'Predict.matrix.random.effect' by S.N. Wood. 
22 | } 23 | -------------------------------------------------------------------------------- /man/Predict.matrix.peer.smooth.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/peer.R 3 | \name{Predict.matrix.peer.smooth} 4 | \alias{Predict.matrix.peer.smooth} 5 | \title{mgcv-style constructor for prediction of PEER terms} 6 | \usage{ 7 | \method{Predict.matrix}{peer.smooth}(object, data) 8 | } 9 | \arguments{ 10 | \item{object}{a \code{peer.smooth} object created by 11 | \code{{.smooth.spec}}, see 12 | \code{[mgcv]{smooth.construct}}} 13 | 14 | \item{data}{see \code{[mgcv]{smooth.construct}}} 15 | } 16 | \value{ 17 | design matrix for PEER terms 18 | } 19 | \description{ 20 | mgcv-style constructor for prediction of PEER terms 21 | } 22 | \author{ 23 | Jonathan Gellar 24 | } 25 | -------------------------------------------------------------------------------- /man/Predict.matrix.pi.smooth.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/pi_basis.R 3 | \name{Predict.matrix.pi.smooth} 4 | \alias{Predict.matrix.pi.smooth} 5 | \title{Predict.matrix method for pi basis} 6 | \usage{ 7 | \method{Predict.matrix}{pi.smooth}(object, data) 8 | } 9 | \arguments{ 10 | \item{object}{a \code{pi.smooth} object created by 11 | \code{\link{smooth.construct.pi.smooth.spec}}, see 12 | \code{\link[mgcv]{smooth.construct}}} 13 | 14 | \item{data}{see \code{\link[mgcv]{smooth.construct}}} 15 | } 16 | \value{ 17 | design matrix for PEER terms 18 | } 19 | \description{ 20 | Predict.matrix method for pi basis 21 | } 22 | \author{ 23 | Jonathan Gellar 24 | } 25 | -------------------------------------------------------------------------------- /man/Xt_siginv_X.Rd: -------------------------------------------------------------------------------- 1 | % 
Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/XtSiginvX.R 3 | \name{Xt_siginv_X} 4 | \alias{Xt_siginv_X} 5 | \title{Internal computation function} 6 | \usage{ 7 | Xt_siginv_X(tx, siginv, y = NULL) 8 | } 9 | \arguments{ 10 | \item{tx}{transpose of the X design matrix} 11 | 12 | \item{siginv}{inverse variance matrix} 13 | 14 | \item{y}{outcome matrix. if \code{NULL}, function computes 15 | first product; if not, function computes second product.} 16 | } 17 | \description{ 18 | Internal function used to compute the products 19 | in cross-sectional VB algorithm and Gibbs sampler 20 | } 21 | \author{ 22 | Jeff Goldsmith \email{ajg2202@cumc.columbia.edu} 23 | } 24 | -------------------------------------------------------------------------------- /man/amc.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/amc.R 3 | \name{amc} 4 | \alias{amc} 5 | \title{Additive model with constraints} 6 | \usage{ 7 | amc(y, Xmat, S, gam.method = "REML", C = NULL, lambda = NULL, ...) 8 | } 9 | \arguments{ 10 | \item{y}{response vector.} 11 | 12 | \item{Xmat}{design matrix.} 13 | 14 | \item{S}{list of penalty matrices.} 15 | 16 | \item{gam.method}{smoothing parameter selection method: "REML" for 17 | restricted maximum likelihood, "GCV.Cp" for generalized cross-validation.} 18 | 19 | \item{C}{matrix of linear constraints. Dimension should be number of 20 | constraints times \code{ncol(Xmat)}.} 21 | 22 | \item{lambda}{smoothing parameter value.
If \code{NULL}, the smoothing 23 | parameter(s) will be estimated.} 24 | 25 | \item{\dots}{other arguments, passed to \code{\link[mgcv]{gam}} or 26 | \code{\link[mgcv]{bam}}.} 27 | } 28 | \value{ 29 | A list with the following elements: \item{gam}{the \code{gam} 30 | object returned by \code{gam} or \code{bam}.} 31 | \item{coefficients}{coefficients with respect to design matrix \code{Xmat}, 32 | derived from the \code{gam()} fit.} \item{Vp, GinvXt}{outputs used by 33 | \code{fosr}.} \item{method}{the \code{gam.method} argument of the call to 34 | \code{amc}.} 35 | } 36 | \description{ 37 | An internal function, called by \code{fosr()}, that fits additive models 38 | with linear constraints via a call to \code{\link[mgcv]{gam}} or 39 | \code{\link[mgcv]{bam}} in the \pkg{mgcv} package. 40 | } 41 | \details{ 42 | The additive model is fitted using \code{\link[mgcv]{gam}}, unless there 43 | are more than 10000 responses; in that case \code{\link[mgcv]{bam}} is 44 | used. 45 | } 46 | \seealso{ 47 | \code{\link{fosr}} 48 | } 49 | \author{ 50 | Philip Reiss \email{phil.reiss@nyumc.org} 51 | } 52 | \keyword{internal} 53 | -------------------------------------------------------------------------------- /man/cd4.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/CD4-data.R 3 | \docType{data} 4 | \name{cd4} 5 | \alias{cd4} 6 | \title{Observed CD4 cell counts} 7 | \format{ 8 | A data frame made up of a 366 x 61 matrix of CD4 cell counts 9 | } 10 | \description{ 11 | CD4 cell counts for 366 subjects between months -18 and 42 since 12 | seroconversion. Each subject's observations are contained in a single row. 13 | } 14 | \references{ 15 | Goldsmith, J., Greven, S., and Crainiceanu, C. (2013). 16 | Corrected confidence bands for functional data using principal components. 17 | \emph{Biometrics}, 69(1), 41--51. 
18 | } 19 | \keyword{datasets} 20 | -------------------------------------------------------------------------------- /man/cmdscale_lanczos.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/poridge.R 3 | \name{cmdscale_lanczos} 4 | \alias{cmdscale_lanczos} 5 | \title{Faster multi-dimensional scaling} 6 | \usage{ 7 | cmdscale_lanczos(d, k = 2, eig = FALSE, add = FALSE, x.ret = FALSE) 8 | } 9 | \arguments{ 10 | \item{d}{a distance structure as returned by \code{{dist}}, or a full 11 | symmetric matrix of distances or dissimilarities.} 12 | 13 | \item{k}{the maximum dimension of the space which the data are to be 14 | represented in; must be in \code{\{1, 2, ..., n-1\}}.} 15 | 16 | \item{eig}{logical indicating whether eigenvalues should be returned.} 17 | 18 | \item{add}{logical indicating if the additive constant of Cailliez (1983) 19 | should be computed, and added to the non-diagonal dissimilarities such that 20 | the modified dissimilarities are Euclidean.} 21 | 22 | \item{x.ret}{indicates whether the doubly centred symmetric distance matrix 23 | should be returned.} 24 | } 25 | \value{ 26 | as \code{{cmdscale}} 27 | } 28 | \description{ 29 | This is a modified version of \code{{cmdscale}} that uses the Lanczos 30 | procedure (\code{[mgcv]{slanczos}}) instead of \code{eigen}. Called by 31 | \code{{smooth.construct.pco.smooth.spec}}. 32 | } 33 | \references{ 34 | Cailliez, F. (1983). The analytical solution of the additive constant problem. 35 | \emph{Psychometrika}, 48, 343-349. 36 | } 37 | \seealso{ 38 | \code{{smooth.construct.pco.smooth.spec}} 39 | } 40 | \author{ 41 | David L Miller, based on code by R Core. 
42 | } 43 | -------------------------------------------------------------------------------- /man/coefboot.pffr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/pffr-robust.R 3 | \name{coefboot.pffr} 4 | \alias{coefboot.pffr} 5 | \title{Simple bootstrap CIs for pffr} 6 | \usage{ 7 | coefboot.pffr( 8 | object, 9 | n1 = 100, 10 | n2 = 40, 11 | n3 = 20, 12 | B = 100, 13 | ncpus = getOption("boot.ncpus", 1), 14 | parallel = c("no", "multicore", "snow"), 15 | cl = NULL, 16 | conf = c(0.9, 0.95), 17 | type = "percent", 18 | method = c("resample", "residual", "residual.c"), 19 | showProgress = TRUE, 20 | ... 21 | ) 22 | } 23 | \arguments{ 24 | \item{object}{a fitted \code{\link{pffr}}-model} 25 | 26 | \item{n1}{see \code{\link{coef.pffr}}} 27 | 28 | \item{n2}{see \code{\link{coef.pffr}}} 29 | 30 | \item{n3}{see \code{\link{coef.pffr}}} 31 | 32 | \item{B}{number of bootstrap replicates, defaults to (a measly) 100} 33 | 34 | \item{ncpus}{see \code{\link[boot]{boot}}. Defaults to \code{getOption("boot.ncpus", 1L)} (like \code{boot}).} 35 | 36 | \item{parallel}{see \code{\link[boot]{boot}}} 37 | 38 | \item{cl}{see \code{\link[boot]{boot}}} 39 | 40 | \item{conf}{desired levels of bootstrap CIs, defaults to 0.90 and 0.95} 41 | 42 | \item{type}{type of bootstrap interval, see \code{\link[boot]{boot.ci}}. 
Defaults to "percent" for percentile-based CIs.} 43 | 44 | \item{method}{either "resample" (default) to resample response trajectories, or "residual" to resample responses as fitted values 45 | plus residual trajectories or "residual.c" to resample responses as fitted values 46 | plus residual trajectories that are centered at zero for each gridpoint.} 47 | 48 | \item{showProgress}{TRUE/FALSE} 49 | 50 | \item{...}{not used} 51 | } 52 | \value{ 53 | a list with similar structure as the return value of \code{\link{coef.pffr}}, containing the 54 | original point estimates of the various terms along with their bootstrap CIs. 55 | } 56 | \description{ 57 | This function resamples observations in the data set to obtain approximate CIs for different 58 | terms and coefficient functions that correct for the effects of dependency and heteroskedasticity 59 | of the residuals along the index of the functional response, i.e., it aims for correct inference 60 | if the residuals along the index of the functional response are not i.i.d. 61 | } 62 | \author{ 63 | Fabian Scheipl 64 | } 65 | -------------------------------------------------------------------------------- /man/coefficients.pfr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/coefficients.pfr.R 3 | \name{coefficients.pfr} 4 | \alias{coefficients.pfr} 5 | \alias{coef.pfr} 6 | \title{Extract coefficient functions from a fitted pfr-object} 7 | \usage{ 8 | \method{coefficients}{pfr}( 9 | object, 10 | select = 1, 11 | coords = NULL, 12 | n = NULL, 13 | se = ifelse(length(object$smooth) & select, TRUE, FALSE), 14 | seWithMean = FALSE, 15 | useVc = TRUE, 16 | Qtransform = FALSE, 17 | ... 
18 | ) 19 | 20 | \method{coef}{pfr}( 21 | object, 22 | select = 1, 23 | coords = NULL, 24 | n = NULL, 25 | se = ifelse(length(object$smooth) & select, TRUE, FALSE), 26 | seWithMean = FALSE, 27 | useVc = TRUE, 28 | Qtransform = FALSE, 29 | ... 30 | ) 31 | } 32 | \arguments{ 33 | \item{object}{return object from \code{\link{pfr}}} 34 | 35 | \item{select}{integer indicating the index of the desired smooth term 36 | in \code{object$smooth}. Enter 0 to request the raw coefficients 37 | (i.e., \code{object$coefficients}) and standard errors (if \code{se==TRUE}).} 38 | 39 | \item{coords}{named list indicating the desired coordinates where the 40 | coefficient function is to be evaluated. Names must match the argument names 41 | in \code{object$smooth[[select]]$term}. If \code{NULL}, uses \code{n} 42 | to generate equally-spaced coordinates.} 43 | 44 | \item{n}{integer vector indicating the number of equally spaced coordinates 45 | for each argument. If length 1, the same number is used for each argument. 46 | Otherwise, the length must match \code{object$smooth[[select]]$dim}.} 47 | 48 | \item{se}{if \code{TRUE}, returns pointwise standard error estimates. Defaults 49 | to \code{FALSE} if raw coefficients are being returned; otherwise \code{TRUE}.} 50 | 51 | \item{seWithMean}{if \code{TRUE} the standard errors include uncertainty about 52 | the overall mean; if \code{FALSE}, they relate purely to the centered 53 | smooth itself. Marra and Wood (2012) suggests that \code{TRUE} results in 54 | better coverage performance for GAMs.} 55 | 56 | \item{useVc}{if \code{TRUE}, standard errors are calculated using a covariance 57 | matrix that has been corrected for smoothing parameter uncertainty. 
This 58 | matrix will only be available under ML or REML smoothing.} 59 | 60 | \item{Qtransform}{For additive functional terms, \code{TRUE} indicates the 61 | coefficient should be extracted on the quantile-transformed scale, whereas 62 | \code{FALSE} indicates the scale of the original data. Note this is 63 | different from the \code{Qtransform} argument of \code{af}, which specifies 64 | the scale on which the term is fit.} 65 | 66 | \item{...}{these arguments are ignored} 67 | } 68 | \value{ 69 | a data frame containing the evaluation points, 70 | coefficient function values and optionally the SE's for the term indicated 71 | by \code{select}. 72 | } 73 | \description{ 74 | This function is used to extract a coefficient from a fitted `pfr` model, in 75 | particular smooth functions resulting from including functional terms specified 76 | with \code{lf}, \code{af}, etc. It can also be used to extract smooths 77 | generated using \code{mgcv}'s \code{s}, \code{te}, or \code{t2}. 78 | } 79 | \references{ 80 | Marra, G and S.N. Wood (2012) Coverage Properties of Confidence Intervals for 81 | Generalized Additive Model Components. Scandinavian Journal of Statistics. 
82 | } 83 | \author{ 84 | Jonathan Gellar and Fabian Scheipl 85 | } 86 | -------------------------------------------------------------------------------- /man/content.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/content-data.R 3 | \docType{data} 4 | \name{content} 5 | \alias{content} 6 | \title{The CONTENT child growth study} 7 | \format{ 8 | A list made up of \describe{ 9 | \item{id}{Numeric vector of subject ID numbers;} 10 | \item{ma1fe0}{Numeric vector of the sex of the child, 1 for male and 0 for female;} 11 | \item{weightkg}{Numeric vector of the weight of the child measured in kilograms(kg);} 12 | \item{height}{Numeric vector of the height of the child measured in centimeters;} 13 | \item{agedays}{Numeric vector of the age of the child measured in days;} 14 | \item{cbmi}{Numeric vector of the BMI of the child;} 15 | \item{zlen}{Numeric vector of the height-for-age z-scores;} 16 | \item{zwei}{Numeric vector of the weight-for-age z-scores;} 17 | \item{zwfl}{Numeric vector of the weight-for-height z-scores;} 18 | \item{zbmi}{Numeric vector of the BMI-for-age z-scores;} 19 | } 20 | } 21 | \usage{ 22 | data(content) 23 | } 24 | \description{ 25 | The CONTENT child growth study was funded by the Sixth Framework Programme 26 | of the European Union, Project CONTENT (INCO-DEV-3-032136) and was led by 27 | Dr. William Checkley. The study was conducted between May 2007 and February 28 | 2011 in Las Pampas de San Juan Miraflores and Nuevo Paraiso, two peri-urban 29 | shanty towns with high population density located on the southern edge of 30 | Lima city in Peru. 31 | } 32 | \references{ 33 | Crainiceanu, C., Goldsmith, J., Leroux, A., Cui, E. (2023). Functional 34 | Data Analysis with R. 
\emph{Chapman & Hall/CRC Statistics} 35 | } 36 | -------------------------------------------------------------------------------- /man/create.prep.func.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/create.prep.func.R 3 | \name{create.prep.func} 4 | \alias{create.prep.func} 5 | \title{Construct a function for preprocessing functional predictors} 6 | \usage{ 7 | create.prep.func( 8 | X, 9 | argvals = seq(0, 1, length = ncol(X)), 10 | method = c("fpca.sc", "fpca.face", "fpca.ssvd", "bspline", "interpolate"), 11 | options = NULL 12 | ) 13 | } 14 | \arguments{ 15 | \item{X}{an \code{N} by \code{J=ncol(argvals)} matrix of function evaluations 16 | \eqn{X_i(t_{i1}),., X_i(t_{iJ}); i=1,.,N.} For FPCA-based processing methods, these functions are 17 | used to define the eigen decomposition used to preprocess current and future data (for example, in 18 | \code{\link{predict.pfr}})} 19 | 20 | \item{argvals}{matrix (or vector) of indices of evaluations of \eqn{X_i(t)}; i.e. a matrix with 21 | \emph{i}th row \eqn{(t_{i1},.,t_{iJ})}} 22 | 23 | \item{method}{character string indicating the preprocessing method. Options 24 | are \code{"fpca.sc"}, \code{"fpca.face"}, \code{"fpca.ssvd"}, \code{"bspline"}, 25 | and \code{"interpolate"}. 
The first three use the corresponding existing function; 26 | \code{"bspline"} uses an (unpenalized) cubic bspline smoother with \code{nbasis} basis 27 | functions; \code{"interpolate"} uses linear interpolation.} 28 | 29 | \item{options}{list of options passed to the preprocessing method; as an example, options for \code{fpca.sc} 30 | include \code{pve}, \code{nbasis}, and \code{npc}.} 31 | } 32 | \value{ 33 | a function that returns the preprocessed functional predictors, with arguments 34 | \item{newX}{The functional predictors to process} 35 | \item{argvals.}{Indices of evaluation of \code{newX}} 36 | \item{options.}{Any options needed to preprocess the predictor functions} 37 | } 38 | \description{ 39 | Prior to using functions \code{X} as predictors in a scalar-on-function regression, it is often 40 | necessary to presmooth curves to remove measurement error or interpolate to a common grid. This 41 | function creates a function to do this preprocessing depending on the method specified. 42 | } 43 | \seealso{ 44 | \code{\link{pfr}}, \code{\link{fpca.sc}}, \code{\link{fpca.face}}, \code{\link{fpca.ssvd}} 45 | } 46 | \author{ 47 | Jeff Goldsmith \email{ajg2202@cumc.columbia.edu} 48 | } 49 | -------------------------------------------------------------------------------- /man/dot-smooth.spec.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/peer.R 3 | \name{.smooth.spec} 4 | \alias{.smooth.spec} 5 | \title{Basis constructor for PEER terms} 6 | \usage{ 7 | .smooth.spec(object, data, knots) 8 | } 9 | \arguments{ 10 | \item{object}{a \code{peer.smooth.spec} object, usually generated by a 11 | term \code{s(x, bs="peer")}; see Details.} 12 | 13 | \item{data}{a list containing the data (including any \code{by} variable) 14 | required by this term, with names corresponding to \code{object$term} 15 | (and \code{object$by}). 
Only the first element of this list is used.} 16 | 17 | \item{knots}{not used, but required by the generic \code{smooth.construct}.} 18 | } 19 | \value{ 20 | An object of class \code{"peer.smooth"}. See 21 | \code{{smooth.construct}} for the elements that this object will 22 | contain. 23 | } 24 | \description{ 25 | Smooth basis constructor to define structured penalties (Randolph et al., 26 | 2012) for smooth terms. 27 | } 28 | \details{ 29 | The smooth specification object, defined using \code{s()}, should 30 | contain an \code{xt} element. \code{xt} will be a list that contains 31 | additional information needed to specify the penalty. The type of penalty 32 | is indicated by \code{xt$pentype}. There are four types of penalties 33 | available: 34 | \enumerate{ 35 | \item \code{xt$pentype=="RIDGE"} for a ridge penalty, the default 36 | \item \code{xt$pentype=="D"} for a difference penalty. The order of the 37 | difference penalty is specified by the \code{m} argument of 38 | \code{s()}. 39 | \item \code{xt$pentype=="DECOMP"} for a decomposition-based penalty, 40 | \eqn{bP_Q + a(I-P_Q)}, where \eqn{P_Q = Q^t(QQ^t)^{-1}Q}. The \eqn{Q} 41 | matrix must be specified by \code{xt$Q}, and the scalar \eqn{a} by 42 | \code{xt$phia}. The number of columns of \code{Q} must be equal to the 43 | length of the data. Each row represents a basis function where the 44 | functional predictor is expected to lie, according to prior belief. 45 | \item \code{xt$pentype=="USER"} for a user-specified penalty matrix 46 | \eqn{L}, supplied by \code{xt$L}. 47 | } 48 | } 49 | \references{ 50 | Randolph, T. W., Harezlak, J, and Feng, Z. (2012). Structured penalties for 51 | functional linear models - partially empirical eigenvectors for regression. 52 | \emph{Electronic Journal of Statistics}, 6, 323-353. 
53 | } 54 | \seealso{ 55 | \code{{peer}} 56 | } 57 | \author{ 58 | Madan Gopal Kundu \email{mgkundu@iupui.edu} and Jonathan Gellar 59 | } 60 | -------------------------------------------------------------------------------- /man/expand.call.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/pffr-utilities.R 3 | \name{expand.call} 4 | \alias{expand.call} 5 | \title{Return call with all possible arguments} 6 | \usage{ 7 | expand.call( 8 | definition = NULL, 9 | call = sys.call(sys.parent(1)), 10 | expand.dots = TRUE 11 | ) 12 | } 13 | \arguments{ 14 | \item{definition}{a function. See \code{\link[base]{match.call}}.} 15 | 16 | \item{call}{an unevaluated call to the function specified by definition. See \code{\link[base]{match.call}}.} 17 | 18 | \item{expand.dots}{logical. Should arguments matching ... in the call be included or left as a ... argument? See \code{\link[base]{match.call}}.} 19 | } 20 | \value{ 21 | An object of mode "\code{\link[base]{call}}". 22 | } 23 | \description{ 24 | Return a call in which all of the arguments which were supplied or have presets are specified by their full names and their supplied or default values. 
25 | } 26 | \seealso{ 27 | \code{\link[base]{match.call}} 28 | } 29 | \author{ 30 | Fabian Scheipl 31 | } 32 | -------------------------------------------------------------------------------- /man/f_sum.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/f_sum.R 3 | \name{f_sum} 4 | \alias{f_sum} 5 | \title{Sum computation 1} 6 | \usage{ 7 | f_sum(mu.q.c, sig.q.c, theta, obspts.mat) 8 | } 9 | \arguments{ 10 | \item{mu.q.c}{current value of mu.q.c} 11 | 12 | \item{sig.q.c}{current value of sig.q.c} 13 | 14 | \item{theta}{spline basis} 15 | 16 | \item{obspts.mat}{matrix indicating the points on which data is observed} 17 | } 18 | \description{ 19 | Internal function used to compute a sum in FPCA-based covariance updates 20 | } 21 | \author{ 22 | Jeff Goldsmith \email{ajg2202@cumc.columbia.edu} 23 | } 24 | -------------------------------------------------------------------------------- /man/f_sum2.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/f_sum2.R 3 | \name{f_sum2} 4 | \alias{f_sum2} 5 | \title{Sum computation 2} 6 | \usage{ 7 | f_sum2(y, fixef, mu.q.c, kt, theta) 8 | } 9 | \arguments{ 10 | \item{y}{outcome matrix} 11 | 12 | \item{fixef}{current estimate of fixed effects} 13 | 14 | \item{mu.q.c}{current value of mu.q.c} 15 | 16 | \item{kt}{number of basis functions} 17 | 18 | \item{theta}{spline basis} 19 | } 20 | \description{ 21 | Internal function used to compute a sum in FPCA-based covariance updates 22 | } 23 | \author{ 24 | Jeff Goldsmith \email{ajg2202@cumc.columbia.edu} 25 | } 26 | -------------------------------------------------------------------------------- /man/f_sum4.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit 
documentation in R/f_sum4.R 3 | \name{f_sum4} 4 | \alias{f_sum4} 5 | \title{Sum computation 4} 6 | \usage{ 7 | f_sum4(mu.q.c, sig.q.c, mu.q.bpsi, sig.q.bpsi, theta, obspts.mat) 8 | } 9 | \arguments{ 10 | \item{mu.q.c}{current value of mu.q.c} 11 | 12 | \item{sig.q.c}{current value of sig.q.c} 13 | 14 | \item{mu.q.bpsi}{current value of mu.q.bpsi} 15 | 16 | \item{sig.q.bpsi}{current value of sig.q.bpsi} 17 | 18 | \item{theta}{current value of theta} 19 | 20 | \item{obspts.mat}{matrix indicating where curves are observed} 21 | } 22 | \description{ 23 | Internal function used to compute a sum in FPCA-based covariance updates 24 | } 25 | \author{ 26 | Jeff Goldsmith \email{ajg2202@cumc.columbia.edu} 27 | } 28 | -------------------------------------------------------------------------------- /man/f_trace.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/f_trace.R 3 | \name{f_trace} 4 | \alias{f_trace} 5 | \title{Trace computation} 6 | \usage{ 7 | f_trace(Theta_i, Sig_q_Bpsi, Kp, Kt) 8 | } 9 | \arguments{ 10 | \item{Theta_i}{basis functions on observed grid points} 11 | 12 | \item{Sig_q_Bpsi}{variance of FPC basis coefficients} 13 | 14 | \item{Kp}{number of FPCs} 15 | 16 | \item{Kt}{number of spline basis functions} 17 | } 18 | \description{ 19 | Internal function used to compute a trace in FPCA-based covariance updates 20 | } 21 | \author{ 22 | Jeff Goldsmith \email{ajg2202@cumc.columbia.edu} 23 | } 24 | -------------------------------------------------------------------------------- /man/ffpcplot.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/pffr-ffpc.R 3 | \name{ffpcplot} 4 | \alias{ffpcplot} 5 | \title{Plot PC-based function-on-function regression terms} 6 | \usage{ 7 | ffpcplot( 8 | object, 9 | type = c("fpc+surf", "surf", "fpc"), 
10 | pages = 1, 11 | se.mult = 2, 12 | ticktype = "detailed", 13 | theta = 30, 14 | phi = 30, 15 | plot = TRUE, 16 | auto.layout = TRUE 17 | ) 18 | } 19 | \arguments{ 20 | \item{object}{a fitted \code{pffr}-model} 21 | 22 | \item{type}{one of "fpc+surf", "surf" or "fpc": "surf" shows a perspective plot of the coefficient surface implied 23 | by the estimated effect functions of the FPC scores, "fpc" shows three plots: 24 | 1) a scree-type plot of the estimated eigenvalues of the functional covariate, 2) the estimated eigenfunctions, 25 | and 3) the estimated coefficient functions associated with the FPC scores. Defaults to showing both.} 26 | 27 | \item{pages}{the number of pages over which to spread the output. Defaults to 1. (Irrelevant if \code{auto.layout=FALSE}.)} 28 | 29 | \item{se.mult}{display estimated coefficient functions associated with the FPC scores with plus/minus this number times the estimated standard error. 30 | Defaults to 2.} 31 | 32 | \item{ticktype}{see \code{\link[graphics]{persp}}.} 33 | 34 | \item{theta}{see \code{\link[graphics]{persp}}.} 35 | 36 | \item{phi}{see \code{\link[graphics]{persp}}.} 37 | 38 | \item{plot}{produce plots or only return plotting data? Defaults to \code{TRUE}.} 39 | 40 | \item{auto.layout}{should the function set a suitable layout automatically? Defaults to TRUE} 41 | } 42 | \value{ 43 | primarily produces plots, invisibly returns a list containing 44 | the data used for the plots. 45 | } 46 | \description{ 47 | Convenience function for graphical summaries of \code{ffpc}-terms from a 48 | \code{pffr} fit. 
49 | } 50 | \examples{ 51 | \dontrun{ 52 | #see ?ffpc 53 | } 54 | } 55 | \author{ 56 | Fabian Scheipl 57 | } 58 | -------------------------------------------------------------------------------- /man/fosr.vs.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/fosr.vs.R 3 | \name{fosr.vs} 4 | \alias{fosr.vs} 5 | \title{Function-on Scalar Regression with variable selection} 6 | \usage{ 7 | fosr.vs( 8 | formula, 9 | data, 10 | nbasis = 10, 11 | method = c("ls", "grLasso", "grMCP", "grSCAD"), 12 | epsilon = 1e-05, 13 | max.iter_num = 100 14 | ) 15 | } 16 | \arguments{ 17 | \item{formula}{an object of class "\code{{formula}}": an expression of the model to be fitted.} 18 | 19 | \item{data}{a data frame that contains the variables in the model.} 20 | 21 | \item{nbasis}{number of B-spline basis functions used.} 22 | 23 | \item{method}{group variable selection method to be used ("grLasso", "grMCP", "grSCAD" refer to group Lasso, group MCP and group SCAD, respectively) or "\code{ls}" for least squares estimation.} 24 | 25 | \item{epsilon}{the convergence criterion.} 26 | 27 | \item{max.iter_num}{maximum number of iterations.} 28 | } 29 | \value{ 30 | A fitted fosr.vs-object, which is a list with the following elements: 31 | \item{formula}{an object of class "\code{{formula}}": an expression of the model to be fitted.} 32 | \item{coefficients}{the estimated coefficient functions.} 33 | \item{fitted.values}{the fitted curves.} 34 | \item{residuals}{the residual curves.} 35 | \item{vcov}{the estimated variance-covariance matrix when convergence is achieved.} 36 | \item{method}{group variable selection method to be used or "\code{ls}" for least squares estimation.} 37 | } 38 | \description{ 39 | Implements an iterative algorithm for function-on-scalar regression with variable selection 40 | by alternatively updating the coefficients and covariance structure. 
41 | } 42 | \examples{ 43 | \dontrun{ 44 | set.seed(100) 45 | 46 | I = 100 47 | p = 20 48 | D = 50 49 | grid = seq(0, 1, length = D) 50 | 51 | beta.true = matrix(0, p, D) 52 | beta.true[1,] = sin(2*grid*pi) 53 | beta.true[2,] = cos(2*grid*pi) 54 | beta.true[3,] = 2 55 | 56 | psi.true = matrix(NA, 2, D) 57 | psi.true[1,] = sin(4*grid*pi) 58 | psi.true[2,] = cos(4*grid*pi) 59 | lambda = c(3,1) 60 | 61 | set.seed(100) 62 | 63 | X = matrix(rnorm(I*p), I, p) 64 | C = cbind(rnorm(I, mean = 0, sd = lambda[1]), rnorm(I, mean = 0, sd = lambda[2])) 65 | 66 | fixef = X\%*\%beta.true 67 | pcaef = C \%*\% psi.true 68 | error = matrix(rnorm(I*D), I, D) 69 | 70 | Yi.true = fixef 71 | Yi.pca = fixef + pcaef 72 | Yi.obs = fixef + pcaef + error 73 | 74 | data = as.data.frame(X) 75 | data$Y = Yi.obs 76 | fit.fosr.vs = fosr.vs(Y~., data = data, method="grMCP") 77 | plot(fit.fosr.vs) 78 | } 79 | 80 | 81 | } 82 | \references{ 83 | Chen, Y., Goldsmith, J., and Ogden, T. (2016). 84 | Variable selection in function-on-scalar regression. \emph{Stat} 5 88-101 85 | } 86 | \seealso{ 87 | \code{{grpreg}} 88 | } 89 | \author{ 90 | Yakuan Chen \email{yc2641@cumc.columbia.edu} 91 | } 92 | -------------------------------------------------------------------------------- /man/fosr2s.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/fosr2s.R 3 | \name{fosr2s} 4 | \alias{fosr2s} 5 | \title{Two-step function-on-scalar regression} 6 | \usage{ 7 | fosr2s( 8 | Y, 9 | X, 10 | argvals = seq(0, 1, , ncol(Y)), 11 | nbasis = 15, 12 | norder = 4, 13 | pen.order = norder - 2, 14 | basistype = "bspline" 15 | ) 16 | } 17 | \arguments{ 18 | \item{Y}{the functional responses, given as an \eqn{n\times d} matrix.} 19 | 20 | \item{X}{\eqn{n\times p} model matrix, whose columns represent scalar 21 | predictors. 
Should ordinarily include a column of 1s.} 22 | 23 | \item{argvals}{the \eqn{d} argument values at which the functional 24 | responses are evaluated, and at which the coefficient functions will be 25 | evaluated.} 26 | 27 | \item{nbasis}{number of basis functions used to represent the coefficient 28 | functions.} 29 | 30 | \item{norder}{norder of the spline basis, when \code{basistype="bspline"} 31 | (the default, 4, gives cubic splines).} 32 | 33 | \item{pen.order}{order of derivative penalty.} 34 | 35 | \item{basistype}{type of basis used. The basis is created by an appropriate 36 | constructor function from the \pkg{fda} package; see basisfd. Only \code{"bspline"} and \code{"fourier"} are 37 | supported.} 38 | } 39 | \value{ 40 | An object of class \code{fosr}, which is a list with the following 41 | elements: \item{fd}{object of class \code{"{fd}"} representing the 42 | estimated coefficient functions. Its main components are a basis and a 43 | matrix of coefficients with respect to that basis. } 44 | \item{raw.coef}{\eqn{d\times p} matrix of coefficient estimates from 45 | regressing on \code{X} separately at each point along the function. } 46 | \item{raw.se}{\eqn{d\times p} matrix of standard errors of the raw 47 | coefficient estimates. } \item{yhat}{\eqn{n\times d} matrix of fitted 48 | values. } \item{est.func}{\eqn{d\times p} matrix of coefficient function 49 | estimates, obtained by smoothing the columns of \code{raw.coef}. } 50 | \item{se.func}{\eqn{d\times p} matrix of coefficient function standard 51 | errors. } \item{argvals}{points at which the coefficient functions are 52 | evaluated. } \item{lambda}{smoothing parameters (chosen by REML) used to 53 | smooth the \eqn{p} coefficient functions with respect to the supplied 54 | basis. 
} 55 | } 56 | \description{ 57 | This function performs linear regression with functional responses and 58 | scalar predictors by (1) fitting a separate linear model at each point 59 | along the function, and then (2) smoothing the resulting coefficients to 60 | obtain coefficient functions. 61 | } 62 | \details{ 63 | Unlike \code{{fosr}} and \code{{pffr}}, which obtain smooth 64 | coefficient functions by minimizing a penalized criterion, this function 65 | introduces smoothing only as a second step. The idea was proposed by Fan 66 | and Zhang (2000), who employed local polynomials rather than roughness 67 | penalization for the smoothing step. 68 | } 69 | \references{ 70 | Fan, J., and Zhang, J.-T. (2000). Two-step estimation of 71 | functional linear models with applications to longitudinal data. 72 | \emph{Journal of the Royal Statistical Society, Series B}, 62(2), 303--322. 73 | } 74 | \seealso{ 75 | \code{{fosr}}, \code{{pffr}} 76 | } 77 | \author{ 78 | Philip Reiss \email{phil.reiss@nyumc.org} and Lan Huo 79 | } 80 | -------------------------------------------------------------------------------- /man/gasoline.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/gasoline-data.R 3 | \docType{data} 4 | \name{gasoline} 5 | \alias{gasoline} 6 | \title{Octane numbers and NIR spectra of gasoline} 7 | \format{ 8 | A data frame comprising \describe{ 9 | \item{octane}{a numeric 10 | vector of octane numbers for the 60 samples.} 11 | \item{NIR}{a 60 x 401 12 | matrix of NIR spectra.} 13 | } 14 | } 15 | \source{ 16 | Kalivas, John H. (1997). Two data sets of near infrared spectra. 17 | \emph{Chemometrics and Intelligent Laboratory Systems}, 37, 255--259. 18 | } 19 | \description{ 20 | Near-infrared reflectance spectra and octane numbers of 60 gasoline 21 | samples. 
Each NIR spectrum consists of log(1/reflectance) measurements at 22 | 401 wavelengths, in 2-nm intervals from 900 nm to 1700 nm. We thank Prof. 23 | John Kalivas for making this data set available. 24 | } 25 | \references{ 26 | For applications of functional principal component regression 27 | to this data set: 28 | 29 | Reiss, P. T., and Ogden, R. T. (2007). Functional principal component 30 | regression and functional partial least squares. \emph{Journal of the 31 | American Statistical Association}, 102, 984--996. 32 | 33 | Reiss, P. T., and Ogden, R. T. (2009). Smoothing parameter selection for a 34 | class of semiparametric linear models. \emph{Journal of the Royal 35 | Statistical Society, Series B}, 71(2), 505--523. 36 | } 37 | \seealso{ 38 | \code{\link{fpcr}} 39 | } 40 | \keyword{datasets} 41 | -------------------------------------------------------------------------------- /man/getTF.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/dt_basis.R 3 | \name{getTF} 4 | \alias{getTF} 5 | \title{Get recognized transformation function} 6 | \usage{ 7 | getTF(fname, nterm) 8 | } 9 | \description{ 10 | Get recognized transformation function 11 | } 12 | \keyword{internal} 13 | -------------------------------------------------------------------------------- /man/gibbs_cs_fpca.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/Gibbs_CS_FPCA.R 3 | \name{gibbs_cs_fpca} 4 | \alias{gibbs_cs_fpca} 5 | \title{Cross-sectional FoSR using a Gibbs sampler and FPCA} 6 | \usage{ 7 | gibbs_cs_fpca( 8 | formula, 9 | Kt = 5, 10 | Kp = 2, 11 | data = NULL, 12 | verbose = TRUE, 13 | N.iter = 5000, 14 | N.burn = 1000, 15 | SEED = NULL, 16 | sig2.me = 0.01, 17 | alpha = 0.1, 18 | Aw = NULL, 19 | Bw = NULL, 20 | Apsi = NULL, 21 | Bpsi = NULL 22 | ) 23 
| } 24 | \arguments{ 25 | \item{formula}{a formula indicating the structure of the proposed model.} 26 | 27 | \item{Kt}{number of spline basis functions used to estimate coefficient functions} 28 | 29 | \item{Kp}{number of FPCA basis functions to be estimated} 30 | 31 | \item{data}{an optional data frame, list or environment containing the 32 | variables in the model. If not found in data, the variables are taken from 33 | environment(formula), typically the environment from which the function is 34 | called.} 35 | 36 | \item{verbose}{logical defaulting to \code{TRUE} -- should updates on progress be printed?} 37 | 38 | \item{N.iter}{number of iterations used in the Gibbs sampler} 39 | 40 | \item{N.burn}{number of iterations discarded as burn-in} 41 | 42 | \item{SEED}{seed value to start the sampler; ensures reproducibility} 43 | 44 | \item{sig2.me}{starting value for measurement error variance} 45 | 46 | \item{alpha}{tuning parameter balancing second-derivative penalty and 47 | zeroth-derivative penalty (alpha = 0 is all second-derivative penalty)} 48 | 49 | \item{Aw}{hyperparameter for inverse gamma controlling variance of spline terms 50 | for population-level effects} 51 | 52 | \item{Bw}{hyperparameter for inverse gamma controlling variance of spline terms 53 | for population-level effects} 54 | 55 | \item{Apsi}{hyperparameter for inverse gamma controlling variance of spline terms 56 | for FPC effects} 57 | 58 | \item{Bpsi}{hyperparameter for inverse gamma controlling variance of spline terms 59 | for FPC effects} 60 | } 61 | \description{ 62 | Fitting function for function-on-scalar regression for cross-sectional data. 63 | This function estimates model parameters using a Gibbs sampler and estimates 64 | the residual covariance surface using FPCA. 65 | } 66 | \references{ 67 | Goldsmith, J., Kitago, T. (2016). 68 | Assessing Systematic Effects of Stroke on Motor Control using Hierarchical 69 | Function-on-Scalar Regression. 
\emph{Journal of the Royal Statistical Society: 70 | Series C}, 65 215-236. 71 | } 72 | \author{ 73 | Jeff Goldsmith \email{ajg2202@cumc.columbia.edu} 74 | } 75 | -------------------------------------------------------------------------------- /man/gibbs_cs_wish.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/Gibbs_CS_Wish.R 3 | \name{gibbs_cs_wish} 4 | \alias{gibbs_cs_wish} 5 | \title{Cross-sectional FoSR using a Gibbs sampler and Wishart prior} 6 | \usage{ 7 | gibbs_cs_wish( 8 | formula, 9 | Kt = 5, 10 | data = NULL, 11 | verbose = TRUE, 12 | N.iter = 5000, 13 | N.burn = 1000, 14 | alpha = 0.1, 15 | min.iter = 10, 16 | max.iter = 50, 17 | Aw = NULL, 18 | Bw = NULL, 19 | v = NULL, 20 | SEED = NULL 21 | ) 22 | } 23 | \arguments{ 24 | \item{formula}{a formula indicating the structure of the proposed model.} 25 | 26 | \item{Kt}{number of spline basis functions used to estimate coefficient functions} 27 | 28 | \item{data}{an optional data frame, list or environment containing the 29 | variables in the model. 
If not found in data, the variables are taken from 30 | environment(formula), typically the environment from which the function is 31 | called.} 32 | 33 | \item{verbose}{logical defaulting to \code{TRUE} -- should updates on progress be printed?} 34 | 35 | \item{N.iter}{number of iterations used in the Gibbs sampler} 36 | 37 | \item{N.burn}{number of iterations discarded as burn-in} 38 | 39 | \item{alpha}{tuning parameter balancing second-derivative penalty and 40 | zeroth-derivative penalty (alpha = 0 is all second-derivative penalty)} 41 | 42 | \item{min.iter}{minimum number of iterations} 43 | 44 | \item{max.iter}{maximum number of iterations} 45 | 46 | \item{Aw}{hyperparameter for inverse gamma controlling variance of spline terms 47 | for population-level effects} 48 | 49 | \item{Bw}{hyperparameter for inverse gamma controlling variance of spline terms 50 | for population-level effects} 51 | 52 | \item{v}{hyperparameter for inverse Wishart prior on residual covariance} 53 | 54 | \item{SEED}{seed value to start the sampler; ensures reproducibility} 55 | } 56 | \description{ 57 | Fitting function for function-on-scalar regression for cross-sectional data. 58 | This function estimates model parameters using a Gibbs sampler and estimates 59 | the residual covariance surface using a Wishart prior. 60 | } 61 | \references{ 62 | Goldsmith, J., Kitago, T. (2016). 63 | Assessing Systematic Effects of Stroke on Motor Control using Hierarchical 64 | Function-on-Scalar Regression. \emph{Journal of the Royal Statistical Society: 65 | Series C}, 65 215-236. 
66 | } 67 | \author{ 68 | Jeff Goldsmith \email{ajg2202@cumc.columbia.edu} 69 | } 70 | -------------------------------------------------------------------------------- /man/gibbs_mult_fpca.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/Gibbs_Mult_FPCA.R 3 | \name{gibbs_mult_fpca} 4 | \alias{gibbs_mult_fpca} 5 | \title{Multilevel FoSR using a Gibbs sampler and FPCA} 6 | \usage{ 7 | gibbs_mult_fpca( 8 | formula, 9 | Kt = 5, 10 | Kp = 2, 11 | data = NULL, 12 | verbose = TRUE, 13 | N.iter = 5000, 14 | N.burn = 1000, 15 | sig2.me = 0.01, 16 | alpha = 0.1, 17 | SEED = NULL 18 | ) 19 | } 20 | \arguments{ 21 | \item{formula}{a formula indicating the structure of the proposed model.} 22 | 23 | \item{Kt}{number of spline basis functions used to estimate coefficient functions} 24 | 25 | \item{Kp}{number of FPCA basis functions to be estimated} 26 | 27 | \item{data}{an optional data frame, list or environment containing the 28 | variables in the model. If not found in data, the variables are taken from 29 | environment(formula), typically the environment from which the function is 30 | called.} 31 | 32 | \item{verbose}{logical defaulting to \code{TRUE} -- should updates on progress be printed?} 33 | 34 | \item{N.iter}{number of iterations used in the Gibbs sampler} 35 | 36 | \item{N.burn}{number of iterations discarded as burn-in} 37 | 38 | \item{sig2.me}{starting value for measurement error variance} 39 | 40 | \item{alpha}{tuning parameter balancing second-derivative penalty and 41 | zeroth-derivative penalty (alpha = 0 is all second-derivative penalty)} 42 | 43 | \item{SEED}{seed value to start the sampler; ensures reproducibility} 44 | } 45 | \description{ 46 | Fitting function for function-on-scalar regression for longitudinal data. 
47 | This function estimates model parameters using a Gibbs sampler and estimates 48 | the residual covariance surface using FPCA. 49 | } 50 | \references{ 51 | Goldsmith, J., Kitago, T. (2016). 52 | Assessing Systematic Effects of Stroke on Motor Control using Hierarchical 53 | Function-on-Scalar Regression. \emph{Journal of the Royal Statistical Society: 54 | Series C}, 65 215-236. 55 | } 56 | \author{ 57 | Jeff Goldsmith \email{ajg2202@cumc.columbia.edu} 58 | } 59 | -------------------------------------------------------------------------------- /man/gibbs_mult_wish.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/Gibbs_Mult_Wish.R 3 | \name{gibbs_mult_wish} 4 | \alias{gibbs_mult_wish} 5 | \title{Multilevel FoSR using a Gibbs sampler and Wishart prior} 6 | \usage{ 7 | gibbs_mult_wish( 8 | formula, 9 | Kt = 5, 10 | data = NULL, 11 | verbose = TRUE, 12 | N.iter = 5000, 13 | N.burn = 1000, 14 | alpha = 0.1, 15 | Az = NULL, 16 | Bz = NULL, 17 | Aw = NULL, 18 | Bw = NULL, 19 | v = NULL, 20 | SEED = NULL 21 | ) 22 | } 23 | \arguments{ 24 | \item{formula}{a formula indicating the structure of the proposed model.} 25 | 26 | \item{Kt}{number of spline basis functions used to estimate coefficient functions} 27 | 28 | \item{data}{an optional data frame, list or environment containing the 29 | variables in the model. 
If not found in data, the variables are taken from 30 | environment(formula), typically the environment from which the function is 31 | called.} 32 | 33 | \item{verbose}{logical defaulting to \code{TRUE} -- should updates on progress be printed?} 34 | 35 | \item{N.iter}{number of iterations used in the Gibbs sampler} 36 | 37 | \item{N.burn}{number of iterations discarded as burn-in} 38 | 39 | \item{alpha}{tuning parameter balancing second-derivative penalty and 40 | zeroth-derivative penalty (alpha = 0 is all second-derivative penalty)} 41 | 42 | \item{Az}{hyperparameter for inverse gamma controlling variance of spline terms 43 | for subject-level effects} 44 | 45 | \item{Bz}{hyperparameter for inverse gamma controlling variance of spline terms 46 | for subject-level effects} 47 | 48 | \item{Aw}{hyperparameter for inverse gamma controlling variance of spline terms 49 | for population-level effects} 50 | 51 | \item{Bw}{hyperparameter for inverse gamma controlling variance of spline terms 52 | for population-level effects} 53 | 54 | \item{v}{hyperparameter for inverse Wishart prior on residual covariance} 55 | 56 | \item{SEED}{seed value to start the sampler; ensures reproducibility} 57 | } 58 | \description{ 59 | Fitting function for function-on-scalar regression for multilevel data. 60 | This function estimates model parameters using a Gibbs sampler and estimates 61 | the residual covariance surface using a Wishart prior. 62 | } 63 | \references{ 64 | Goldsmith, J., Kitago, T. (2016). 65 | Assessing Systematic Effects of Stroke on Motor Control using Hierarchical 66 | Function-on-Scalar Regression. \emph{Journal of the Royal Statistical Society: 67 | Series C}, 65 215-236. 
68 | } 69 | \author{ 70 | Jeff Goldsmith \email{ajg2202@cumc.columbia.edu} 71 | } 72 | -------------------------------------------------------------------------------- /man/gls_cs.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/GLS_CS.R 3 | \name{gls_cs} 4 | \alias{gls_cs} 5 | \title{Cross-sectional FoSR using GLS} 6 | \usage{ 7 | gls_cs( 8 | formula, 9 | data = NULL, 10 | Kt = 5, 11 | basis = "bs", 12 | sigma = NULL, 13 | verbose = TRUE, 14 | CI.type = "pointwise" 15 | ) 16 | } 17 | \arguments{ 18 | \item{formula}{a formula indicating the structure of the proposed model.} 19 | 20 | \item{data}{an optional data frame, list or environment containing the 21 | variables in the model. If not found in data, the variables are taken from 22 | environment(formula), typically the environment from which the function is 23 | called.} 24 | 25 | \item{Kt}{number of spline basis functions used to estimate coefficient functions} 26 | 27 | \item{basis}{basis type; options are "bs" for b-splines and "pbs" for periodic 28 | b-splines} 29 | 30 | \item{sigma}{optional covariance matrix used in GLS; if \code{NULL}, OLS will be 31 | used to estimate fixed effects, and the covariance matrix will be estimated from 32 | the residuals.} 33 | 34 | \item{verbose}{logical defaulting to \code{TRUE} -- should updates on progress be printed?} 35 | 36 | \item{CI.type}{Indicates CI type for coefficient functions; options are "pointwise" and 37 | "simultaneous"} 38 | } 39 | \description{ 40 | Fitting function for function-on-scalar regression for cross-sectional data. 41 | This function estimates model parameters using GLS: first, an OLS estimate of 42 | spline coefficients is estimated; second, the residual covariance is estimated 43 | using an FPC decomposition of the OLS residual curves; finally, a GLS estimate 44 | of spline coefficients is estimated. 
Although this is in the `BayesFoSR` package, 45 | there is nothing Bayesian about this FoSR. 46 | } 47 | \references{ 48 | Goldsmith, J., Kitago, T. (2016). 49 | Assessing Systematic Effects of Stroke on Motor Control using Hierarchical 50 | Function-on-Scalar Regression. \emph{Journal of the Royal Statistical Society: 51 | Series C}, 65, 215-236. 52 | } 53 | \author{ 54 | Jeff Goldsmith \email{ajg2202@cumc.columbia.edu} 55 | } 56 | -------------------------------------------------------------------------------- /man/lf_old.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/lf_old.R 3 | \name{lf_old} 4 | \alias{lf_old} 5 | \title{Construct an FLM regression term} 6 | \usage{ 7 | lf_old( 8 | X, 9 | argvals = seq(0, 1, l = ncol(X)), 10 | xind = NULL, 11 | integration = c("simpson", "trapezoidal", "riemann"), 12 | L = NULL, 13 | splinepars = list(bs = "ps", k = min(ceiling(n/4), 40), m = c(2, 2)), 14 | presmooth = TRUE 15 | ) 16 | } 17 | \arguments{ 18 | \item{X}{an \code{N} by \code{J=ncol(argvals)} matrix of function evaluations 19 | \eqn{X_i(t_{i1}), \ldots, X_i(t_{iJ}); i=1, \ldots, N.}} 20 | 21 | \item{argvals}{matrix (or vector) of indices of evaluations of \eqn{X_i(t)}; i.e. a matrix with 22 | \emph{i}th row \eqn{(t_{i1}, \ldots, t_{iJ})}} 23 | 24 | \item{xind}{same as argvals. It will not be supported in the next version of refund.} 25 | 26 | \item{integration}{method used for numerical integration. Defaults to \code{"simpson"}'s rule 27 | for calculating entries in \code{L}. Alternatively and for non-equidistant grids, 28 | \dQuote{\code{trapezoidal}} or \code{"riemann"}. 
\code{"riemann"} integration is always used if 29 | \code{L} is specified} 30 | 31 | \item{L}{an optional \code{N} by \code{ncol(argvals)} matrix giving the weights for the numerical 32 | integration over \code{t}} 33 | 34 | \item{splinepars}{optional arguments specifying options for representing and penalizing the 35 | functional coefficient \eqn{\beta(t)}. Defaults to a cubic B-spline with second-order difference 36 | penalties, i.e. \code{list(bs="ps", m=c(2, 2))}. See \code{te} or \code{s} for details} 37 | 38 | \item{presmooth}{logical; if true, the functional predictor is pre-smoothed prior to fitting. See 39 | \code{smooth.basisPar}} 40 | } 41 | \value{ 42 | a list with the following entries 43 | \enumerate{ 44 | \item \code{call} - a \code{call} to \code{te} (or \code{s}, \code{t2}) using the appropriately 45 | constructed covariate and weight matrices 46 | \item \code{argvals} - the \code{argvals} argument supplied to \code{lf} 47 | \item \code{L} - the matrix of weights used for the integration 48 | \item \code{xindname} - the name used for the functional predictor variable in the \code{formula} 49 | used by \code{mgcv} 50 | \item \code{tindname} - the name used for \code{argvals} variable in the \code{formula} used by \code{mgcv} 51 | \item \code{LXname} - the name used for the \code{L} variable in the \code{formula} used by \code{mgcv} 52 | \item \code{presmooth} - the \code{presmooth} argument supplied to \code{lf} 53 | \item \code{Xfd} - an \code{fd} object from presmoothing the functional predictors using 54 | \code{{smooth.basisPar}}. Only present if \code{presmooth=TRUE}. See \code{{fd}} 55 | } 56 | } 57 | \description{ 58 | Defines a term \eqn{\int_{T}\beta(t)X_i(t)dt} for inclusion in an \code{[mgcv]{gam}}-formula 59 | (or \code{bam} or \code{gamm} or \code{[gamm4]{gamm4}}) as constructed by 60 | \code{fgam}, where \eqn{\beta(t)} is an unknown coefficient function and \eqn{X_i(t)} 61 | is a functional predictor on the closed interval \eqn{T}. 
Defaults to a cubic B-spline with 62 | second-order difference penalties for estimating \eqn{\beta(t)}. The functional predictor must 63 | be fully observed on a regular grid. 64 | } 65 | \seealso{ 66 | \code{{fgam}}, \code{{af}}, mgcv's \code{{linear.functional.terms}}, 67 | \code{{fgam}} for examples 68 | } 69 | \author{ 70 | Mathew W. McLean \email{mathew.w.mclean@gmail.com} and Fabian Scheipl 71 | } 72 | -------------------------------------------------------------------------------- /man/lofocv.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/lofocv.R 3 | \name{lofocv} 4 | \alias{lofocv} 5 | \title{Leave-one-function-out cross-validation} 6 | \usage{ 7 | lofocv(Y, X, S1, argvals, lamvec = NULL, constr = NULL, maxlam = NULL) 8 | } 9 | \arguments{ 10 | \item{Y}{matrix of responses, e.g. with columns corresponding to basis 11 | function coefficients.} 12 | 13 | \item{X}{model matrix.} 14 | 15 | \item{S1}{penalty matrix.} 16 | 17 | \item{argvals}{values where the functions are evaluated} 18 | 19 | \item{lamvec}{vector of candidate smoothing parameter values. If 20 | \code{NULL}, smoothing parameter is chosen by \code{\link{optimize}}.} 21 | 22 | \item{constr}{matrix of linear constraints.} 23 | 24 | \item{maxlam}{maximum smoothing parameter value to consider (when 25 | \code{lamvec=NULL}).} 26 | } 27 | \value{ 28 | if \code{lamvec=NULL}, a list (returned by \code{optimize}) with 29 | elements \code{minimum} and \code{objective} giving, respectively, the 30 | chosen smoothing parameter and the associated cross-validation score. 31 | Otherwise a 2-column table with the candidate smoothing parameters in the 32 | first column and the corresponding cross-validation scores in the second. 
33 | } 34 | \description{ 35 | This internal function, called by \code{fosr()} when \code{method="OLS"}, 36 | performs efficient leave-one-function-out cross-validation using 37 | Demmler-Reinsch orthogonalization to choose the smoothing parameter. 38 | } 39 | \seealso{ 40 | \code{\link{fosr}}, \code{\link{pwcv}} 41 | } 42 | \author{ 43 | Philip Reiss \email{phil.reiss@nyumc.org} and Lei Huang 44 | } 45 | \keyword{internal} 46 | -------------------------------------------------------------------------------- /man/model.matrix.pffr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/pffr-methods.R 3 | \name{model.matrix.pffr} 4 | \alias{model.matrix.pffr} 5 | \title{Obtain model matrix for a pffr fit} 6 | \usage{ 7 | \method{model.matrix}{pffr}(object, ...) 8 | } 9 | \arguments{ 10 | \item{object}{a fitted \code{pffr}-object} 11 | 12 | \item{...}{other arguments, passed to \code{\link[mgcv]{predict.gam}}.} 13 | } 14 | \value{ 15 | A model matrix 16 | } 17 | \description{ 18 | Obtain model matrix for a pffr fit 19 | } 20 | \author{ 21 | Fabian Scheipl 22 | } 23 | -------------------------------------------------------------------------------- /man/ols_cs.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/OLS_CS.R 3 | \name{ols_cs} 4 | \alias{ols_cs} 5 | \title{Cross-sectional FoSR using GLS} 6 | \usage{ 7 | ols_cs(formula, data = NULL, Kt = 5, basis = "bs", verbose = TRUE) 8 | } 9 | \arguments{ 10 | \item{formula}{a formula indicating the structure of the proposed model.} 11 | 12 | \item{data}{an optional data frame, list or environment containing the 13 | variables in the model. 
If not found in data, the variables are taken from 14 | environment(formula), typically the environment from which the function is 15 | called.} 16 | 17 | \item{Kt}{number of spline basis functions used to estimate coefficient functions} 18 | 19 | \item{basis}{basis type; options are "bs" for b-splines and "pbs" for periodic 20 | b-splines} 21 | 22 | \item{verbose}{logical defaulting to \code{TRUE} -- should updates on progress be printed?} 23 | } 24 | \description{ 25 | Fitting function for function-on-scalar regression for cross-sectional data. 26 | This function estimates model parameters using GLS: first, an OLS estimate of 27 | spline coefficients is estimated; second, the residual covariance is estimated 28 | using an FPC decomposition of the OLS residual curves; finally, a GLS estimate 29 | of spline coefficients is estimated. Although this is in the `BayesFoSR` package, 30 | there is nothing Bayesian about this FoSR. 31 | } 32 | \references{ 33 | Goldsmith, J., Kitago, T. (2016). 34 | Assessing Systematic Effects of Stroke on Motor Control using Hierarchical 35 | Function-on-Scalar Regression. \emph{Journal of the Royal Statistical Society: 36 | Series C}, 65 215-236. 
37 | } 38 | \author{ 39 | Jeff Goldsmith \email{ajg2202@cumc.columbia.edu} 40 | } 41 | -------------------------------------------------------------------------------- /man/pco_predict_preprocess.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/poridge.R 3 | \name{pco_predict_preprocess} 4 | \alias{pco_predict_preprocess} 5 | \title{Make predictions using pco basis terms} 6 | \usage{ 7 | pco_predict_preprocess(model, newdata = NULL, dist_list) 8 | } 9 | \arguments{ 10 | \item{model}{a fitted \code{[mgcv]{gam}} model with at least one term of 11 | class "\code{pco.smooth}".} 12 | 13 | \item{newdata}{data frame including the new values for any 14 | non-\code{{pco}} terms in the original fit. If there were none, this 15 | can be left as \code{NULL}.} 16 | 17 | \item{dist_list}{a list of \code{n} \eqn{\times} \code{n*} matrices, one per 18 | \code{{pco}} term in the model, giving the distances from the 19 | \code{n*} prediction points to the \code{n} design points (original 20 | observations). List entry names should correspond to the names of the terms 21 | in the model (e.g., if the model includes a \code{s(x)} term, 22 | \code{dist_list} must include an element named "\code{x}").} 23 | } 24 | \value{ 25 | a \code{{data.frame}} with the coordinates for the new data 26 | inserted into principal coordinate space, in addition to the supplied 27 | \code{newdata} if this was non-\code{NULL}. This can be used as the 28 | \code{newdata} argument in a call to \code{[mgcv]{predict.gam}}. 29 | } 30 | \description{ 31 | This function performs the necessary preprocessing for making predictions 32 | with \code{[mgcv]{gam}} models that include \code{{pco}} basis 33 | terms. The function \code{pco_predict_preprocess} builds a \code{data.frame} 34 | (or augments an existing one) to be used with the usual \code{predict} 35 | function. 
36 | } 37 | \details{ 38 | Models with \code{{pco}} basis terms are fitted by inputting distances 39 | among the observations and then regressing (with a ridge penalty) on leading 40 | principal coordinates arising from these distances. To perform prediction, we 41 | must input the distances from the new data points to the original points, and 42 | then "insert" the former into the principal coordinate space by the 43 | interpolation method of Gower (1968) (see also Miller, 2012). 44 | 45 | An example of how to use this function in practice is shown in 46 | \code{{smooth.construct.pco.smooth.spec}}. 47 | } 48 | \references{ 49 | Gower, J. C. (1968). Adding a point to vector diagrams in 50 | multivariate analysis. Biometrika, 55(3), 582-585. 51 | 52 | Miller, D. L. (2012). On smooth models for complex domains and distances. PhD 53 | dissertation, Department of Mathematical Sciences, University of Bath. 54 | } 55 | \seealso{ 56 | \code{{smooth.construct.pco.smooth.spec}} 57 | } 58 | \author{ 59 | David L Miller 60 | } 61 | -------------------------------------------------------------------------------- /man/pcre.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/pffr-pcre.R 3 | \name{pcre} 4 | \alias{pcre} 5 | \title{pffr-constructor for functional principal component-based functional random intercepts.} 6 | \usage{ 7 | pcre(id, efunctions, evalues, yind, ...) 
8 | } 9 | \arguments{ 10 | \item{id}{grouping variable, a factor} 11 | 12 | \item{efunctions}{matrix of eigenfunction evaluations on gridpoints \code{yind} (number of gridpoints x number of eigenfunctions)} 13 | 14 | \item{evalues}{eigenvalues associated with \code{efunctions}} 15 | 16 | \item{yind}{vector of gridpoints on which \code{efunctions} are evaluated.} 17 | 18 | \item{...}{not used} 19 | } 20 | \value{ 21 | a list used internally for constructing an appropriate call to \code{mgcv::gam} 22 | } 23 | \description{ 24 | pffr-constructor for functional principal component-based functional random intercepts. 25 | } 26 | \section{Details}{ 27 | Fits functional random intercepts \eqn{B_i(t)} for a grouping variable \code{id} 28 | using as a basis the functions \eqn{\phi_m(t)} in \code{efunctions} with variances \eqn{\lambda_m} in \code{evalues}: 29 | \eqn{B_i(t) \approx \sum_m^M \phi_m(t)\delta_{im}} with 30 | independent \eqn{\delta_{im} \sim N(0, \sigma^2\lambda_m)}, where \eqn{\sigma^2} 31 | is (usually) estimated and controls the overall contribution of the \eqn{B_i(t)} while the relative importance 32 | of the \eqn{M} basisfunctions is controlled by the supplied variances \code{lambda_m}. 33 | Can be used to model smooth residuals if \code{id} is simply an index of observations. 34 | Differing from scalar random effects in \code{mgcv}, these effects are estimated under a "sum-to-zero-for-each-t"-constraint -- 35 | specifically \eqn{\sum_i \hat b_i(t) = 0} (not \eqn{\sum_i n_i \hat b_i(t) = 0}) where \eqn{n_i} is the number of observed curves for 36 | subject i, so the intercept curve for models with unbalanced group sizes no longer corresponds to the global mean function. 37 | 38 | \code{efunctions} and \code{evalues} are typically eigenfunctions and eigenvalues of an estimated 39 | covariance operator for the functional process to be modeled, i.e., they are 40 | a functional principal components basis. 
41 | } 42 | 43 | \examples{ 44 | \dontrun{ 45 | residualfunction <- function(t){ 46 | #generate quintic polynomial error functions 47 | drop(poly(t, 5)\%*\%rnorm(5, sd=sqrt(2:6))) 48 | } 49 | # generate data Y(t) = mu(t) + E(t) + white noise 50 | set.seed(1122) 51 | n <- 50 52 | T <- 30 53 | t <- seq(0,1, l=T) 54 | # E(t): smooth residual functions 55 | E <- t(replicate(n, residualfunction(t))) 56 | int <- matrix(scale(3*dnorm(t, m=.5, sd=.5) - dbeta(t, 5, 2)), byrow=T, n, T) 57 | Y <- int + E + matrix(.2*rnorm(n*T), n, T) 58 | data <- data.frame(Y=I(Y)) 59 | # fit model under independence assumption: 60 | summary(m0 <- pffr(Y ~ 1, yind=t, data=data)) 61 | # get first 5 eigenfunctions of residual covariance 62 | # (i.e. first 5 functional PCs of empirical residual process) 63 | Ehat <- resid(m0) 64 | fpcE <- fpca.sc(Ehat, npc=5) 65 | efunctions <- fpcE$efunctions 66 | evalues <- fpcE$evalues 67 | data$id <- factor(1:nrow(data)) 68 | # refit model with fpc-based residuals 69 | m1 <- pffr(Y ~ 1 + pcre(id=id, efunctions=efunctions, evalues=evalues, yind=t), yind=t, data=data) 70 | t1 <- predict(m1, type="terms") 71 | summary(m1) 72 | #compare squared errors 73 | mean((int-fitted(m0))^2) 74 | mean((int-t1[[1]])^2) 75 | mean((E-t1[[2]])^2) 76 | # compare fitted & true smooth residuals and fitted intercept functions: 77 | layout(t(matrix(1:4,2,2))) 78 | matplot(t(E), lty=1, type="l", ylim=range(E, t1[[2]])) 79 | matplot(t(t1[[2]]), lty=1, type="l", ylim=range(E, t1[[2]])) 80 | plot(m1, select=1, main="m1", ylim=range(Y)) 81 | lines(t, int[1,], col=rgb(1,0,0,.5)) 82 | plot(m0, select=1, main="m0", ylim=range(Y)) 83 | lines(t, int[1,], col=rgb(1,0,0,.5)) 84 | } 85 | } 86 | \author{ 87 | Fabian Scheipl 88 | } 89 | -------------------------------------------------------------------------------- /man/pffr.check.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation 
in R/pffr-methods.R 3 | \name{pffr.check} 4 | \alias{pffr.check} 5 | \title{Some diagnostics for a fitted pffr model} 6 | \usage{ 7 | pffr.check( 8 | b, 9 | old.style = FALSE, 10 | type = c("deviance", "pearson", "response"), 11 | k.sample = 5000, 12 | k.rep = 200, 13 | rep = 0, 14 | level = 0.9, 15 | rl.col = 2, 16 | rep.col = "gray80", 17 | ... 18 | ) 19 | } 20 | \arguments{ 21 | \item{b}{a fitted \code{\link{pffr}}-object} 22 | 23 | \item{old.style}{If you want old fashioned plots, exactly as in Wood, 2006, set to \code{TRUE}.} 24 | 25 | \item{type}{type of residuals, see \code{\link[mgcv]{residuals.gam}}, used in 26 | all plots.} 27 | 28 | \item{k.sample}{Above this k testing uses a random sub-sample of data.} 29 | 30 | \item{k.rep}{how many re-shuffles to do to get p-value for k testing.} 31 | 32 | \item{rep}{passed to \code{\link[mgcv]{qq.gam}} when \code{old.style} is \code{FALSE}.} 33 | 34 | \item{level}{passed to \code{\link[mgcv]{qq.gam}} when \code{old.style} is \code{FALSE}.} 35 | 36 | \item{rl.col}{passed to \code{\link[mgcv]{qq.gam}} when \code{old.style} is \code{FALSE}.} 37 | 38 | \item{rep.col}{passed to \code{\link[mgcv]{qq.gam}} when \code{old.style} is \code{FALSE}.} 39 | 40 | \item{...}{extra graphics parameters to pass to plotting functions.} 41 | } 42 | \description{ 43 | This is simply a wrapper for \code{\link[mgcv]{gam.check}()}. 44 | } 45 | -------------------------------------------------------------------------------- /man/pffrGLS.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/pffr-robust.R 3 | \name{pffrGLS} 4 | \alias{pffrGLS} 5 | \title{Penalized function-on-function regression with non-i.i.d. 
residuals} 6 | \usage{ 7 | pffrGLS( 8 | formula, 9 | yind, 10 | hatSigma, 11 | algorithm = NA, 12 | method = "REML", 13 | tensortype = c("te", "t2"), 14 | bs.yindex = list(bs = "ps", k = 5, m = c(2, 1)), 15 | bs.int = list(bs = "ps", k = 20, m = c(2, 1)), 16 | cond.cutoff = 500, 17 | ... 18 | ) 19 | } 20 | \arguments{ 21 | \item{formula}{a formula with special terms as for \code{\link[mgcv]{gam}}, with additional special terms \code{\link{ff}()} and \code{c()}. See \code{\link[refund]{pffr}}.} 22 | 23 | \item{yind}{a vector with length equal to the number of columns of the matrix of functional responses giving the vector of evaluation points \eqn{(t_1, \dots ,t_{G})}. 24 | see \code{\link[refund]{pffr}}} 25 | 26 | \item{hatSigma}{(an estimate of) the within-observation covariance (along the responses' index), evaluated at \code{yind}. See Details.} 27 | 28 | \item{algorithm}{the name of the function used to estimate the model. Defaults to \code{\link[mgcv]{gam}} if the matrix of functional responses has less than \code{2e5} data points 29 | and to \code{\link[mgcv]{bam}} if not. "gamm" (see \code{\link[mgcv]{gamm}}) and "gamm4" (see \code{\link[gamm4]{gamm4}}) are valid options as well.} 30 | 31 | \item{method}{See \code{\link[refund]{pffr}}} 32 | 33 | \item{tensortype}{See \code{\link[refund]{pffr}}} 34 | 35 | \item{bs.yindex}{See \code{\link[refund]{pffr}}} 36 | 37 | \item{bs.int}{See \code{\link[refund]{pffr}}} 38 | 39 | \item{cond.cutoff}{if the condition number of \code{hatSigma} is greater than this, \code{hatSigma} is 40 | made ``more'' positive-definite via \code{\link[Matrix]{nearPD}} to ensure a condition number equal to cond.cutoff. Defaults to 500.} 41 | 42 | \item{...}{additional arguments that are valid for \code{\link[mgcv]{gam}} or \code{\link[mgcv]{bam}}. See \code{\link[refund]{pffr}}.} 43 | } 44 | \value{ 45 | a fitted \code{pffr}-object, see \code{\link[refund]{pffr}}. 
46 | } 47 | \description{ 48 | Implements additive regression for functional and scalar covariates and functional responses. 49 | This function is a wrapper for \code{mgcv}'s \code{\link[mgcv]{gam}} and its siblings to fit models of the general form \cr 50 | \eqn{Y_i(t) = \mu(t) + \int X_i(s)\beta(s,t)ds + f(z_{1i}, t) + f(z_{2i}) + z_{3i} \beta_3(t) + \dots + E_i(t))}\cr 51 | with a functional (but not necessarily continuous) response \eqn{Y(t)}, 52 | (optional) smooth intercept \eqn{\mu(t)}, (multiple) functional covariates \eqn{X(t)} and scalar covariates 53 | \eqn{z_1}, \eqn{z_2}, etc. The residual functions \eqn{E_i(t) \sim GP(0, K(t,t'))} are assumed to be i.i.d. 54 | realizations of a Gaussian process. An estimate of the covariance operator \eqn{K(t,t')} evaluated on \code{yind} 55 | has to be supplied in the \code{hatSigma}-argument. 56 | } 57 | \section{Details}{ 58 | 59 | Note that \code{hatSigma} has to be positive definite. If \code{hatSigma} is close to positive \emph{semi-}definite or badly conditioned, 60 | estimated standard errors become unstable (typically much too small). \code{pffrGLS} will try to diagnose this and issue a warning. 61 | The danger is especially big if the number of functional observations is smaller than the number of gridpoints 62 | (i.e, \code{length(yind)}), since the raw covariance estimate will not have full rank.\cr 63 | Please see \code{\link[refund]{pffr}} for details on model specification and 64 | implementation. \cr THIS IS AN EXPERIMENTAL VERSION AND NOT WELL TESTED YET -- USE AT YOUR OWN RISK. 
65 | } 66 | 67 | \seealso{ 68 | \code{\link[refund]{pffr}}, \code{\link[refund]{fpca.sc}} 69 | } 70 | \author{ 71 | Fabian Scheipl 72 | } 73 | -------------------------------------------------------------------------------- /man/pffrSim.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/pffr-utilities.R 3 | \name{pffrSim} 4 | \alias{pffrSim} 5 | \title{Simulate example data for pffr} 6 | \usage{ 7 | pffrSim( 8 | scenario = "all", 9 | n = 100, 10 | nxgrid = 40, 11 | nygrid = 60, 12 | SNR = 10, 13 | propmissing = 0, 14 | limits = NULL 15 | ) 16 | } 17 | \arguments{ 18 | \item{scenario}{see Description} 19 | 20 | \item{n}{number of observations} 21 | 22 | \item{nxgrid}{number of evaluation points of functional covariates} 23 | 24 | \item{nygrid}{number of evaluation points of the functional response} 25 | 26 | \item{SNR}{the signal-to-noise ratio for the generated data: empirical 27 | variance of the additive predictor divided by variance of the errors.} 28 | 29 | \item{propmissing}{proportion of missing data in the response, default = 0. 30 | See Details.} 31 | 32 | \item{limits}{a function that defines an integration range, see 33 | \code{\link{ff}}} 34 | } 35 | \value{ 36 | a named list with the simulated data, and the true components of the 37 | predictor etc as attributes. 38 | } 39 | \description{ 40 | Simulates example data for \code{\link{pffr}} from a variety of terms. 41 | Scenario "all" generates data from a complex multivariate model \deqn{Y_i(t) 42 | = \mu(t) + \int X_{1i}(s)\beta_1(s,t)ds + xlin \beta_3(t) + f(xte1, xte2) + 43 | f(xsmoo, t) + \beta_4 xconst + f(xfactor, t) + \epsilon_i(t)}. Scenarios "int", "ff", "lin", 44 | "te", "smoo", "const", "factor", generate data from simpler models containing only the 45 | respective term(s) in the model equation given above. 
Specifying a 46 | vector-valued scenario will generate data from a combination of the 47 | respective terms. Sparse/irregular response trajectories can be generated by 48 | setting \code{propmissing} to something greater than 0 (and smaller than 1). 49 | The return object then also includes a \code{ydata}-item with the sparsified 50 | data. 51 | } 52 | \details{ 53 | See source code for details.\cr 54 | } 55 | -------------------------------------------------------------------------------- /man/pfr_plot.gam.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/plot.pfr.R 3 | \name{pfr_plot.gam} 4 | \alias{pfr_plot.gam} 5 | \alias{plot.mgcv.smooth} 6 | \alias{plot.random.effect} 7 | \title{Local version of \code{plot.gam}} 8 | \usage{ 9 | pfr_plot.gam( 10 | x, 11 | residuals = FALSE, 12 | rug = TRUE, 13 | se = TRUE, 14 | pages = 0, 15 | select = NULL, 16 | scale = -1, 17 | n = 100, 18 | n2 = 40, 19 | n3 = 3, 20 | theta = 30, 21 | phi = 30, 22 | jit = FALSE, 23 | xlab = NULL, 24 | ylab = NULL, 25 | main = NULL, 26 | ylim = NULL, 27 | xlim = NULL, 28 | too.far = 0.1, 29 | all.terms = FALSE, 30 | shade = FALSE, 31 | shade.col = "gray80", 32 | shift = 0, 33 | trans = I, 34 | seWithMean = FALSE, 35 | unconditional = FALSE, 36 | by.resids = FALSE, 37 | scheme = 0, 38 | ... 39 | ) 40 | 41 | \method{plot}{mgcv.smooth}( 42 | x, 43 | P = NULL, 44 | data = NULL, 45 | label = "", 46 | se1.mult = 1, 47 | se2.mult = 2, 48 | partial.resids = FALSE, 49 | rug = TRUE, 50 | se = TRUE, 51 | scale = -1, 52 | n = 100, 53 | n2 = 40, 54 | theta = 30, 55 | phi = 30, 56 | jit = FALSE, 57 | xlab = NULL, 58 | ylab = NULL, 59 | main = NULL, 60 | ylim = NULL, 61 | xlim = NULL, 62 | too.far = 0.1, 63 | shade = FALSE, 64 | shade.col = "gray80", 65 | shift = 0, 66 | trans = I, 67 | by.resids = FALSE, 68 | scheme = 0, 69 | ... 
70 | ) 71 | 72 | \method{plot}{random.effect}( 73 | x, 74 | P = NULL, 75 | data = NULL, 76 | label = "", 77 | se1.mult = 1, 78 | se2.mult = 2, 79 | partial.resids = FALSE, 80 | rug = TRUE, 81 | se = TRUE, 82 | scale = -1, 83 | n = 100, 84 | n2 = 40, 85 | n3 = 3, 86 | theta = 30, 87 | phi = 30, 88 | jit = FALSE, 89 | xlab = NULL, 90 | ylab = NULL, 91 | main = NULL, 92 | ylim = NULL, 93 | xlim = NULL, 94 | too.far = 0.1, 95 | shade = FALSE, 96 | shade.col = "gray80", 97 | shift = 0, 98 | trans = I, 99 | by.resids = FALSE, 100 | scheme = 0, 101 | ... 102 | ) 103 | } 104 | \description{ 105 | These internal functions were copied from Simon Wood's \code{mgcv} package, 106 | with some minor changes to allow for plotting \code{pfr} objects. 107 | } 108 | \seealso{ 109 | \code{\link[mgcv]{plot.gam}} 110 | } 111 | \keyword{internal} 112 | -------------------------------------------------------------------------------- /man/plot.fosr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/plot.fosr.R 3 | \name{plot.fosr} 4 | \alias{plot.fosr} 5 | \title{Default plotting of function-on-scalar regression objects} 6 | \usage{ 7 | \method{plot}{fosr}( 8 | x, 9 | split = NULL, 10 | titles = NULL, 11 | xlabel = "", 12 | ylabel = "Coefficient function", 13 | set.mfrow = TRUE, 14 | ... 15 | ) 16 | } 17 | \arguments{ 18 | \item{x}{an object of class \code{"\link{fosr}"}.} 19 | 20 | \item{split}{value, or vector of values, at which to divide the set of 21 | coefficient functions into groups, each plotted on a different scale. 22 | E.g., if set to 1, the first function is plotted on one scale, and all 23 | others on a different (common) scale. 
If \code{NULL}, all functions are 24 | plotted on the same scale.} 25 | 26 | \item{titles}{character vector of titles for the plots produced, e.g., 27 | names of the corresponding scalar predictors.} 28 | 29 | \item{xlabel}{label for the x-axes of the plots.} 30 | 31 | \item{ylabel}{label for the y-axes of the plots.} 32 | 33 | \item{set.mfrow}{logical value: if \code{TRUE}, the function will try to 34 | set an appropriate value of the \code{mfrow} parameter for the plots. 35 | Otherwise you may wish to set \code{mfrow} outside the function call.} 36 | 37 | \item{\dots}{graphical parameters (see \code{\link{par}}) for the plot.} 38 | } 39 | \description{ 40 | Plots the coefficient function estimates produced by \code{fosr()}. 41 | } 42 | \seealso{ 43 | \code{\link{fosr}}, which includes examples. 44 | } 45 | \author{ 46 | Philip Reiss \email{phil.reiss@nyumc.org} 47 | } 48 | -------------------------------------------------------------------------------- /man/plot.fosr.vs.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/plot.fosr.vs.R 3 | \name{plot.fosr.vs} 4 | \alias{plot.fosr.vs} 5 | \title{Plot for Function-on Scalar Regression with variable selection} 6 | \usage{ 7 | \method{plot}{fosr.vs}(x, ...) 8 | } 9 | \arguments{ 10 | \item{x}{an object of class "\code{\link{fosr.vs}}".} 11 | 12 | \item{...}{additional arguments.} 13 | } 14 | \value{ 15 | a figure of estimated coefficient functions. 16 | } 17 | \description{ 18 | Given a "\code{\link{fosr.vs}}" object, produces a figure of estimated coefficient functions. 
19 | } 20 | \examples{ 21 | \dontrun{ 22 | I = 100 23 | p = 20 24 | D = 50 25 | grid = seq(0, 1, length = D) 26 | 27 | beta.true = matrix(0, p, D) 28 | beta.true[1,] = sin(2*grid*pi) 29 | beta.true[2,] = cos(2*grid*pi) 30 | beta.true[3,] = 2 31 | 32 | psi.true = matrix(NA, 2, D) 33 | psi.true[1,] = sin(4*grid*pi) 34 | psi.true[2,] = cos(4*grid*pi) 35 | lambda = c(3,1) 36 | 37 | set.seed(100) 38 | 39 | X = matrix(rnorm(I*p), I, p) 40 | C = cbind(rnorm(I, mean = 0, sd = lambda[1]), rnorm(I, mean = 0, sd = lambda[2])) 41 | 42 | fixef = X\%*\%beta.true 43 | pcaef = C \%*\% psi.true 44 | error = matrix(rnorm(I*D), I, D) 45 | 46 | Yi.true = fixef 47 | Yi.pca = fixef + pcaef 48 | Yi.obs = fixef + pcaef + error 49 | 50 | data = as.data.frame(X) 51 | data$Y = Yi.obs 52 | fit.mcp = fosr.vs(Y~., data = data[1:80,], method="grMCP") 53 | plot(fit.mcp) 54 | } 55 | 56 | 57 | } 58 | \seealso{ 59 | \code{\link{fosr.vs}} 60 | } 61 | \author{ 62 | Yakuan Chen \email{yc2641@cumc.columbia.edu} 63 | } 64 | -------------------------------------------------------------------------------- /man/plot.fpcr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/plot.fpcr.R 3 | \name{plot.fpcr} 4 | \alias{plot.fpcr} 5 | \title{Default plotting for functional principal component regression output} 6 | \usage{ 7 | \method{plot}{fpcr}( 8 | x, 9 | se = TRUE, 10 | col = 1, 11 | lty = c(1, 2, 2), 12 | xlab = "", 13 | ylab = "Coefficient function", 14 | ... 15 | ) 16 | } 17 | \arguments{ 18 | \item{x}{an object of class \code{"\link{fpcr}"}.} 19 | 20 | \item{se}{if \code{TRUE} (the default), upper and lower lines are added at 21 | 2 standard errors (in the Bayesian sense; see Wood, 2006) above and below 22 | the coefficient function estimate. If a positive number is supplied, the 23 | standard error is instead multiplied by this number.} 24 | 25 | \item{col}{color for the line(s). 
This should be either a number, or a 26 | vector of length 3 for the coefficient function estimate, lower bound, and 27 | upper bound, respectively.} 28 | 29 | \item{lty}{line type(s) for the coefficient function estimate, lower bound, 30 | and upper bound.} 31 | 32 | \item{xlab, ylab}{x- and y-axis labels.} 33 | 34 | \item{\dots}{other arguments passed to the underlying plotting function.} 35 | } 36 | \value{ 37 | None; only a plot is produced. 38 | } 39 | \description{ 40 | Inputs an object created by \code{\link{fpcr}}, and plots the estimated 41 | coefficient function. 42 | } 43 | \references{ 44 | Wood, S. N. (2006). \emph{Generalized Additive Models: An 45 | Introduction with R}. Boca Raton, FL: Chapman & Hall. 46 | } 47 | \seealso{ 48 | \code{\link{fpcr}}, which includes an example. 49 | } 50 | \author{ 51 | Philip Reiss \email{phil.reiss@nyumc.org} 52 | } 53 | -------------------------------------------------------------------------------- /man/plot.lpeer.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/plot.lpeer.R 3 | \name{plot.lpeer} 4 | \alias{plot.lpeer} 5 | \title{Plotting of estimated regression functions obtained through \code{lpeer()}} 6 | \usage{ 7 | \method{plot}{lpeer}(x, conf = 0.95, ...) 8 | } 9 | \arguments{ 10 | \item{x}{object of class \code{"\link{lpeer}"}.} 11 | 12 | \item{conf}{pointwise confidence level.} 13 | 14 | \item{...}{additional arguments passed to \code{\link{plot}}.} 15 | } 16 | \description{ 17 | Plots the estimate of components of estimated regression function obtained 18 | from an \code{\link{lpeer}} object along with pointwise confidence bands. 19 | } 20 | \details{ 21 | Pointwise confidence interval is displayed only if the user set \code{se=T} 22 | in the call to \code{\link{lpeer}}, and does not reflect any multiplicity 23 | correction. 
24 | } 25 | \examples{ 26 | \dontrun{ 27 | data(DTI) 28 | cca = DTI$cca[which(DTI$case == 1),] 29 | DTI = DTI[which(DTI$case == 1),] 30 | fit.cca.lpeer1 = lpeer(Y=DTI$pasat, t=DTI$visit, subj=DTI$ID, funcs = cca) 31 | plot(fit.cca.lpeer1) 32 | } 33 | } 34 | \references{ 35 | Kundu, M. G., Harezlak, J., and Randolph, T. W. (2012). 36 | Longitudinal functional models with structured penalties. (Please contact 37 | J. Harezlak at \email{harezlak@iupui.edu}.) 38 | 39 | Randolph, T. W., Harezlak, J, and Feng, Z. (2012). Structured penalties for 40 | functional linear models - partially empirical eigenvectors for regression. 41 | \emph{Electronic Journal of Statistics}, 6, 323--353. 42 | } 43 | \seealso{ 44 | \code{peer}, \code{lpeer}, \code{plot.peer} 45 | } 46 | \author{ 47 | Madan Gopal Kundu \email{mgkundu@iupui.edu} 48 | } 49 | -------------------------------------------------------------------------------- /man/plot.peer.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/plot.peer.R 3 | \name{plot.peer} 4 | \alias{plot.peer} 5 | \title{Plotting of estimated regression functions obtained through \code{peer()}} 6 | \usage{ 7 | \method{plot}{peer}( 8 | x, 9 | conf = 0.95, 10 | ylab = "Estimated regression function", 11 | main = expression(gamma), 12 | ... 13 | ) 14 | } 15 | \arguments{ 16 | \item{x}{object of class \code{"\link{peer}"}.} 17 | 18 | \item{conf}{pointwise confidence level.} 19 | 20 | \item{ylab}{y-axis label.} 21 | 22 | \item{main}{title for the plot.} 23 | 24 | \item{...}{additional arguments passed to \code{\link{plot}}.} 25 | } 26 | \description{ 27 | Plots the estimate of components of estimated regression function obtained 28 | from a \code{\link{peer}} object along with pointwise confidence bands. 
29 | } 30 | \details{ 31 | Pointwise confidence interval is displayed only if the user set \code{se=T} 32 | in the call to \code{\link{peer}}, and does not reflect any multiplicity 33 | correction. 34 | } 35 | \examples{ 36 | # See example in peer() 37 | } 38 | \references{ 39 | Kundu, M. G., Harezlak, J., and Randolph, T. W. (2012). 40 | Longitudinal functional models with structured penalties. (Please contact 41 | J. Harezlak at \email{harezlak@iupui.edu}.) 42 | 43 | Randolph, T. W., Harezlak, J, and Feng, Z. (2012). Structured penalties for 44 | functional linear models - partially empirical eigenvectors for regression. 45 | \emph{Electronic Journal of Statistics}, 6, 323--353. 46 | } 47 | \seealso{ 48 | \code{peer}, \code{lpeer}, \code{plot.lpeer} 49 | } 50 | \author{ 51 | Madan Gopal Kundu \email{mgkundu@iupui.edu} 52 | } 53 | -------------------------------------------------------------------------------- /man/plot.pffr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/pffr-methods.R 3 | \name{plot.pffr} 4 | \alias{plot.pffr} 5 | \title{Plot a pffr fit} 6 | \usage{ 7 | \method{plot}{pffr}(x, ...) 8 | } 9 | \arguments{ 10 | \item{x}{a fitted \code{pffr}-object} 11 | 12 | \item{...}{arguments handed over to \code{\link[mgcv]{plot.gam}}} 13 | } 14 | \value{ 15 | This function only generates plots. 16 | } 17 | \description{ 18 | Plot a fitted pffr-object. Simply dispatches to \code{\link[mgcv]{plot.gam}}. 
19 | } 20 | \author{ 21 | Fabian Scheipl 22 | } 23 | -------------------------------------------------------------------------------- /man/plot.pfr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/plot.pfr.R 3 | \name{plot.pfr} 4 | \alias{plot.pfr} 5 | \title{Plot a pfr object} 6 | \usage{ 7 | \method{plot}{pfr}(x, Qtransform = FALSE, ...) 8 | } 9 | \arguments{ 10 | \item{x}{a fitted \code{pfr}-object} 11 | 12 | \item{Qtransform}{For additive functional terms, \code{TRUE} indicates the 13 | coefficient should be plotted on the quantile-transformed scale, whereas 14 | \code{FALSE} indicates the scale of the original data. Note this is 15 | different from the \code{Qtransform} argument of \code{af}, which specifies 16 | the scale on which the term is fit.} 17 | 18 | \item{...}{arguments handed over to \code{\link[mgcv]{plot.gam}}} 19 | } 20 | \value{ 21 | This function's main purpose is its side effect of generating plots. 22 | It also silently returns a list of the data used to produce the plots, which 23 | can be used to generate customized plots. 24 | } 25 | \description{ 26 | This function plots the smooth coefficients of a pfr object. These include 27 | functional coefficients as well as any smooths of scalar covariates. The 28 | function dispatches to \code{pfr_plot.gam}, which is our local copy of 29 | \code{\link[mgcv]{plot.gam}} with some minor changes. 
30 | } 31 | \seealso{ 32 | \code{\link{af}}, \code{\link{pfr}} 33 | } 34 | \author{ 35 | Jonathan Gellar 36 | } 37 | -------------------------------------------------------------------------------- /man/predict.fbps.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/predict.fbps.R 3 | \name{predict.fbps} 4 | \alias{predict.fbps} 5 | \title{Prediction for fast bivariate \emph{P}-spline (fbps)} 6 | \usage{ 7 | \method{predict}{fbps}(object, newdata, ...) 8 | } 9 | \arguments{ 10 | \item{object}{an object returned by \code{\link{fbps}}} 11 | 12 | \item{newdata}{a data frame or list consisting of x and z values for which predicted values are desired. 13 | vectors of x and z need to be of the same length.} 14 | 15 | \item{...}{additional arguments.} 16 | } 17 | \value{ 18 | A list with components \item{x}{a vector of x given in newdata} 19 | \item{z}{a vector of z given in newdata} \item{fitted.values}{a vector of 20 | fitted values corresponding to x and z given in newdata} 21 | } 22 | \description{ 23 | Produces predictions given a \code{\link{fbps}} object and new data 24 | } 25 | \examples{ 26 | ########################## 27 | #### True function ##### 28 | ########################## 29 | n1 <- 60 30 | n2 <- 80 31 | x <- (1: n1)/n1-1/2/n1 32 | z <- (1: n2)/n2-1/2/n2 33 | MY <- array(0,c(length(x),length(z))) 34 | sigx <- .3 35 | sigz <- .4 36 | for(i in 1: length(x)) 37 | for(j in 1: length(z)) 38 | { 39 | #MY[i,j] <- .75/(pi*sigx*sigz) *exp(-(x[i]-.2)^2/sigx^2-(z[j]-.3)^2/sigz^2) 40 | #MY[i,j] <- MY[i,j] + .45/(pi*sigx*sigz) *exp(-(x[i]-.7)^2/sigx^2-(z[j]-.8)^2/sigz^2) 41 | MY[i,j] = sin(2*pi*(x[i]-.5)^3)*cos(4*pi*z[j]) 42 | } 43 | 44 | ########################## 45 | #### Observed data ##### 46 | ########################## 47 | sigma <- 1 48 | Y <- MY + sigma*rnorm(n1*n2,0,1) 49 | 50 | ########################## 51 | #### Estimation ##### 52 | 
########################## 53 | est <- fbps(Y,list(x=x,z=z)) 54 | mse <- mean((est$Yhat-MY)^2) 55 | cat("mse of fbps is",mse,"\n") 56 | cat("The smoothing parameters are:",est$lambda,"\n") 57 | 58 | ######################################################################## 59 | ########## Compare the estimated surface with the true surface ######### 60 | ######################################################################## 61 | 62 | par(mfrow=c(1,2)) 63 | persp(x,z,MY,zlab="f(x,z)",zlim=c(-1,2.5), phi=30,theta=45,expand=0.8,r=4, 64 | col="blue",main="True surface") 65 | persp(x,z,est$Yhat,zlab="f(x,z)",zlim=c(-1,2.5),phi=30,theta=45, 66 | expand=0.8,r=4,col="red",main="Estimated surface") 67 | 68 | ########################## 69 | #### prediction ##### 70 | ########################## 71 | 72 | # 1. make prediction with predict.fbps() for all pairs of x and z given in the original data 73 | # ( it's expected to have same results as Yhat obtained using fbps() above ) 74 | newdata <- list(x= rep(x, length(z)), z = rep(z, each=length(x))) 75 | pred1 <- predict(est, newdata=newdata)$fitted.values 76 | pred1.mat <- matrix(pred1, nrow=length(x)) 77 | par(mfrow=c(1,2)) 78 | image(pred1.mat); image(est$Yhat) 79 | all.equal(as.numeric(pred1.mat), as.numeric(est$Yhat)) 80 | 81 | # 2. predict for pairs of first 10 x values and first 5 z values 82 | newdata <- list(x= rep(x[1:10], 5), z = rep(z[1:5], each=10)) 83 | pred2 <- predict(est, newdata=newdata)$fitted.values 84 | pred2.mat <- matrix(pred2, nrow=10) 85 | par(mfrow=c(1,2)) 86 | image(pred2.mat); image(est$Yhat[1:10,1:5]) 87 | all.equal(as.numeric(pred2.mat), as.numeric(est$Yhat[1:10,1:5])) 88 | # 3. predict for one pair 89 | newdata <- list(x=x[5], z=z[3]) 90 | pred3 <- predict(est, newdata=newdata)$fitted.values 91 | all.equal(as.numeric(pred3), as.numeric(est$Yhat[5,3])) 92 | } 93 | \references{ 94 | Xiao, L., Li, Y., and Ruppert, D. (2013). Fast bivariate 95 | \emph{P}-splines: the sandwich smoother. 
\emph{Journal of the Royal 96 | Statistical Society: Series B}, 75(3), 577--599. 97 | } 98 | \author{ 99 | Luo Xiao \email{lxiao@jhsph.edu} 100 | } 101 | -------------------------------------------------------------------------------- /man/predict.fgam.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/predict.fgam.R 3 | \name{predict.fgam} 4 | \alias{predict.fgam} 5 | \title{Prediction from a fitted FGAM model} 6 | \usage{ 7 | \method{predict}{fgam}( 8 | object, 9 | newdata, 10 | type = "response", 11 | se.fit = FALSE, 12 | terms = NULL, 13 | PredOutOfRange = FALSE, 14 | ... 15 | ) 16 | } 17 | \arguments{ 18 | \item{object}{a fitted \code{fgam} object as produced by \code{{fgam}}} 19 | 20 | \item{newdata}{a named list containing the values of the model covariates at which predictions 21 | are required. If this is not provided then predictions corresponding to the original data are 22 | returned. All variables provided to newdata should be in the format supplied to \code{{fgam}}, 23 | i.e., functional predictors must be supplied as matrices with each row corresponding to one 24 | observed function. Index variables for the functional covariates are reused from the fitted model 25 | object or alternatively can be supplied as attributes of the matrix of functional predictor values. 
26 | Any variables in the model not specified in newdata are set to their average values from the data 27 | supplied during fitting the model} 28 | 29 | \item{type}{character; see \code{{predict.gam}} for details} 30 | 31 | \item{se.fit}{logical; see \code{{predict.gam}} for details} 32 | 33 | \item{terms}{character see \code{{predict.gam}} for details} 34 | 35 | \item{PredOutOfRange}{logical; if this argument is true then any functional predictor values in 36 | newdata corresponding to \code{fgam} terms that are greater[less] than the maximum[minimum] of the 37 | domain of the marginal basis for the rows of the tensor product smooth are set to the maximum[minimum] 38 | of the domain. If this argument is false, attempting to predict a value of the functional predictor 39 | outside the range of this basis produces an error} 40 | 41 | \item{...}{additional arguments passed on to \code{{predict.gam}}} 42 | } 43 | \value{ 44 | If \code{type == "lpmatrix"}, the design matrix for the supplied covariate values in long 45 | format. If \code{se == TRUE}, a list with entries fit and se.fit containing fits and standard errors, 46 | respectively. If \code{type == "terms" or "iterms"} each of these lists is a list of matrices of the 47 | same dimension as the response for newdata containing the linear predictor and its se for each term 48 | } 49 | \description{ 50 | Takes a fitted \code{fgam}-object produced by \code{{fgam}} and produces predictions given a 51 | new set of values for the model covariates or the original values used for the model fit. 52 | Predictions can be accompanied by standard errors, based on the posterior distribution of the 53 | model coefficients. 
This is a wrapper function for \code{{predict.gam}}() 54 | } 55 | \examples{ 56 | ######### Octane data example ######### 57 | data(gasoline) 58 | N <- length(gasoline$octane) 59 | wavelengths = 2*450:850 60 | nir = matrix(NA, 60,401) 61 | test <- sample(60,20) 62 | for (i in 1:60) nir[i,] = gasoline$NIR[i, ] # changes class from AsIs to matrix 63 | y <- gasoline$octane 64 | #fit <- fgam(y~af(nir,xind=wavelengths,splinepars=list(k=c(6,6),m=list(c(2,2),c(2,2)))), 65 | # subset=(1:N)[-test]) 66 | #preds <- predict(fit,newdata=list(nir=nir[test,]),type='response') 67 | #plot(preds,y[test]) 68 | #abline(a=0,b=1) 69 | } 70 | \seealso{ 71 | \code{{fgam}}, \code{[mgcv]{predict.gam}} 72 | } 73 | \author{ 74 | Mathew W. McLean \email{mathew.w.mclean@gmail.com} and Fabian Scheipl 75 | } 76 | -------------------------------------------------------------------------------- /man/predict.fosr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/predict.fosr.R 3 | \name{predict.fosr} 4 | \alias{predict.fosr} 5 | \title{Prediction from a fitted bayes_fosr model} 6 | \usage{ 7 | \method{predict}{fosr}(object, newdata, ...) 8 | } 9 | \arguments{ 10 | \item{object}{a fitted \code{fosr} object as produced by \code{\link{bayes_fosr}}} 11 | 12 | \item{newdata}{a named list containing the values of the model covariates at which predictions 13 | are required. If this is not provided then predictions corresponding to the original data are 14 | returned. All variables provided to newdata should be in the format supplied to the model fitting 15 | function.} 16 | 17 | \item{...}{additional (unused) arguments} 18 | } 19 | \value{ 20 | ... 21 | } 22 | \description{ 23 | Takes a fitted \code{fosr}-object produced by \code{\link{bayes_fosr}} and produces predictions given a 24 | new set of values for the model covariates or the original values used for the model fit. 
25 | } 26 | \examples{ 27 | \dontrun{ 28 | library(reshape2) 29 | library(dplyr) 30 | library(ggplot2) 31 | 32 | ##### Cross-sectional real-data example ##### 33 | 34 | ## organize data 35 | data(DTI) 36 | DTI = subset(DTI, select = c(cca, case, pasat)) 37 | DTI = DTI[complete.cases(DTI),] 38 | DTI$gender = factor(sample(c("male","female"), dim(DTI)[1], replace = TRUE)) 39 | DTI$status = factor(sample(c("RRMS", "SPMS", "PPMS"), dim(DTI)[1], replace = TRUE)) 40 | 41 | ## fit models 42 | VB = bayes_fosr(cca ~ pasat, data = DTI, Kp = 4, Kt = 10) 43 | 44 | ## obtain predictions 45 | pred = predict(VB, sample_n(DTI, 10)) 46 | } 47 | 48 | } 49 | \seealso{ 50 | \code{\link{bayes_fosr}} 51 | } 52 | \author{ 53 | Jeff Goldsmith \email{jeff.goldsmith@columbia.edu} 54 | } 55 | -------------------------------------------------------------------------------- /man/predict.fosr.vs.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/predict.fosr.vs.R 3 | \name{predict.fosr.vs} 4 | \alias{predict.fosr.vs} 5 | \title{Prediction for Function-on Scalar Regression with variable selection} 6 | \usage{ 7 | \method{predict}{fosr.vs}(object, newdata = NULL, ...) 8 | } 9 | \arguments{ 10 | \item{object}{an object of class "\code{\link{fosr.vs}}".} 11 | 12 | \item{newdata}{a data frame that contains the values of the model covariates at which predictors are required.} 13 | 14 | \item{...}{additional arguments.} 15 | } 16 | \value{ 17 | fitted values. 18 | } 19 | \description{ 20 | Given a "\code{\link{fosr.vs}}" object and new data, produces fitted values. 
21 | } 22 | \examples{ 23 | \dontrun{ 24 | I = 100 25 | p = 20 26 | D = 50 27 | grid = seq(0, 1, length = D) 28 | 29 | beta.true = matrix(0, p, D) 30 | beta.true[1,] = sin(2*grid*pi) 31 | beta.true[2,] = cos(2*grid*pi) 32 | beta.true[3,] = 2 33 | 34 | psi.true = matrix(NA, 2, D) 35 | psi.true[1,] = sin(4*grid*pi) 36 | psi.true[2,] = cos(4*grid*pi) 37 | lambda = c(3,1) 38 | 39 | set.seed(100) 40 | 41 | X = matrix(rnorm(I*p), I, p) 42 | C = cbind(rnorm(I, mean = 0, sd = lambda[1]), rnorm(I, mean = 0, sd = lambda[2])) 43 | 44 | fixef = X\%*\%beta.true 45 | pcaef = C \%*\% psi.true 46 | error = matrix(rnorm(I*D), I, D) 47 | 48 | Yi.true = fixef 49 | Yi.pca = fixef + pcaef 50 | Yi.obs = fixef + pcaef + error 51 | 52 | data = as.data.frame(X) 53 | data$Y = Yi.obs 54 | fit.mcp = fosr.vs(Y~., data = data[1:80,], method="grMCP") 55 | predicted.value = predict(fit.mcp, data[81:100,]) 56 | 57 | } 58 | 59 | } 60 | \seealso{ 61 | \code{\link{fosr.vs}} 62 | } 63 | \author{ 64 | Yakuan Chen \email{yc2641@cumc.columbia.edu} 65 | } 66 | -------------------------------------------------------------------------------- /man/predict.pffr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/pffr-methods.R 3 | \name{predict.pffr} 4 | \alias{predict.pffr} 5 | \title{Prediction for penalized function-on-function regression} 6 | \usage{ 7 | \method{predict}{pffr}(object, newdata, reformat = TRUE, type = "link", se.fit = FALSE, ...) 8 | } 9 | \arguments{ 10 | \item{object}{a fitted \code{pffr}-object} 11 | 12 | \item{newdata}{A named list (or a \code{data.frame}) containing the values of the 13 | model covariates at which predictions are required. 14 | If no \code{newdata} is provided then predictions corresponding to the original data 15 | are returned. 
If \code{newdata} is provided then it must contain all the variables needed 16 | for prediction, in the format supplied to \code{pffr}, i.e., functional predictors must be 17 | supplied as matrices with each row corresponding to one observed function. 18 | See Details for more on index variables and prediction for models fit on 19 | irregular or sparse data.} 20 | 21 | \item{reformat}{logical, defaults to TRUE. Should predictions be returned in matrix form (default) or 22 | in the long vector shape returned by \code{predict.gam()}?} 23 | 24 | \item{type}{see \code{\link[mgcv]{predict.gam}()} for details. 25 | Note that \code{type == "lpmatrix"} will force \code{reformat} to FALSE.} 26 | 27 | \item{se.fit}{see \code{\link[mgcv]{predict.gam}()}} 28 | 29 | \item{...}{additional arguments passed on to \code{\link[mgcv]{predict.gam}()}} 30 | } 31 | \value{ 32 | If \code{type == "lpmatrix"}, the design matrix for the supplied covariate values in long format. 33 | If \code{se == TRUE}, a list with entries \code{fit} and \code{se.fit} containing fits and standard errors, respectively. 34 | If \code{type == "terms"} or \code{"iterms"} each of these lists is a list of matrices of the same dimension as the response for \code{newdata} 35 | containing the linear predictor and its se for each term. 36 | } 37 | \description{ 38 | Takes a fitted \code{pffr}-object produced by \code{\link{pffr}()} and produces 39 | predictions given a new set of values for the model covariates or the original 40 | values used for the model fit. Predictions can be accompanied by standard errors, 41 | based on the posterior distribution of the model coefficients. This is a wrapper 42 | function for \code{\link[mgcv]{predict.gam}()}. 43 | } 44 | \details{ 45 | Index variables (i.e., evaluation points) for the functional covariates are reused 46 | from the fitted model object and cannot be supplied with \code{newdata}. 
47 | Prediction is always for the entire index range of the responses as defined 48 | in the original fit. If the original fit was performed on sparse or irregular, 49 | non-gridded response data supplied via \code{pffr}'s \code{ydata}-argument 50 | and no \code{newdata} was supplied, this function will 51 | simply return fitted values for the original evaluation points of the response (in list form). 52 | If the original fit was performed on sparse or irregular data and \code{newdata} \emph{was} 53 | supplied, the function will return predictions on the grid of evaluation points given in 54 | \code{object$pffr$yind}. 55 | } 56 | \seealso{ 57 | \code{\link[mgcv]{predict.gam}()} 58 | } 59 | \author{ 60 | Fabian Scheipl 61 | } 62 | -------------------------------------------------------------------------------- /man/predict.pfr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/predict.pfr.R 3 | \name{predict.pfr} 4 | \alias{predict.pfr} 5 | \title{Prediction from a fitted pfr model} 6 | \usage{ 7 | \method{predict}{pfr}( 8 | object, 9 | newdata, 10 | type = "response", 11 | se.fit = FALSE, 12 | terms = NULL, 13 | PredOutOfRange = FALSE, 14 | ... 15 | ) 16 | } 17 | \arguments{ 18 | \item{object}{a fitted \code{pfr} object as produced by \code{{pfr}}} 19 | 20 | \item{newdata}{a named list containing the values of the model covariates at which predictions 21 | are required. If this is not provided then predictions corresponding to the original data are 22 | returned. All variables provided to newdata should be in the format supplied to \code{{pfr}}, 23 | i.e., functional predictors must be supplied as matrices with each row corresponding to one 24 | observed function. Index variables for the functional covariates are reused from the fitted model 25 | object or alternatively can be supplied as attributes of the matrix of functional predictor values. 
26 | Any variables in the model not specified in newdata are set to their average values from the data 27 | supplied during fitting the model} 28 | 29 | \item{type}{character; see \code{{predict.gam}} for details} 30 | 31 | \item{se.fit}{logical; see \code{{predict.gam}} for details} 32 | 33 | \item{terms}{character see \code{{predict.gam}} for details} 34 | 35 | \item{PredOutOfRange}{logical; if this argument is true then any functional predictor values in 36 | newdata corresponding to \code{pfr} terms that are greater[less] than the maximum[minimum] of the 37 | domain of the marginal basis for the rows of the tensor product smooth are set to the maximum[minimum] 38 | of the domain. If this argument is false, attempting to predict a value of the functional predictor 39 | outside the range of this basis produces an error} 40 | 41 | \item{...}{additional arguments passed on to \code{{predict.gam}}} 42 | } 43 | \value{ 44 | If \code{type == "lpmatrix"}, the design matrix for the supplied covariate values in long 45 | format. If \code{se == TRUE}, a list with entries fit and se.fit containing fits and standard errors, 46 | respectively. If \code{type == "terms" or "iterms"} each of these lists is a list of matrices of the 47 | same dimension as the response for newdata containing the linear predictor and its se for each term 48 | } 49 | \description{ 50 | Takes a fitted \code{pfr}-object produced by \code{{pfr}} and produces predictions given a 51 | new set of values for the model covariates or the original values used for the model fit. 52 | Predictions can be accompanied by standard errors, based on the posterior distribution of the 53 | model coefficients. 
This is a wrapper function for \code{{predict.gam}}() 54 | } 55 | \examples{ 56 | ######### Octane data example ######### 57 | data(gasoline) 58 | N <- length(gasoline$octane) 59 | wavelengths = 2*450:850 60 | nir = matrix(NA, 60,401) 61 | test <- sample(60,20) 62 | for (i in 1:60) nir[i,] = gasoline$NIR[i, ] # changes class from AsIs to matrix 63 | y <- gasoline$octane 64 | #fit <- pfr(y~af(nir,argvals=wavelengths,k=c(6,6), m=list(c(2,2),c(2,2))), 65 | # subset=(1:N)[-test]) 66 | #preds <- predict(fit,newdata=list(nir=nir[test,]),type='response') 67 | #plot(preds,y[test]) 68 | #abline(a=0,b=1) 69 | } 70 | \seealso{ 71 | \code{{pfr}}, \code{[mgcv]{predict.gam}} 72 | } 73 | \author{ 74 | Mathew W. McLean \email{mathew.w.mclean@gmail.com} and Fabian Scheipl 75 | } 76 | -------------------------------------------------------------------------------- /man/print.summary.pffr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/pffr-methods.R 3 | \name{print.summary.pffr} 4 | \alias{print.summary.pffr} 5 | \title{Print method for summary of a pffr fit} 6 | \usage{ 7 | \method{print}{summary.pffr}( 8 | x, 9 | digits = max(3, getOption("digits") - 3), 10 | signif.stars = getOption("show.signif.stars"), 11 | ... 12 | ) 13 | } 14 | \arguments{ 15 | \item{x}{a fitted \code{pffr}-object} 16 | 17 | \item{digits}{controls number of digits printed in output.} 18 | 19 | \item{signif.stars}{Should significance stars be printed alongside output?} 20 | 21 | \item{...}{not used} 22 | } 23 | \value{ 24 | A \code{\link{summary.pffr}} object 25 | } 26 | \description{ 27 | Pretty printing for a \code{summary.pffr}-object. 28 | See \code{\link[mgcv]{print.summary.gam}()} for details. 
29 | } 30 | \author{ 31 | Fabian Scheipl, adapted from \code{\link[mgcv]{print.summary.gam}()} by Simon Wood, Henric Nilsson 32 | } 33 | -------------------------------------------------------------------------------- /man/pwcv.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/pwcv.R 3 | \name{pwcv} 4 | \alias{pwcv} 5 | \title{Pointwise cross-validation for function-on-scalar regression} 6 | \usage{ 7 | pwcv( 8 | fdobj, 9 | Z, 10 | L = NULL, 11 | lambda, 12 | eval.pts = seq(min(fdobj$basis$range), max(fdobj$basis$range), length.out = 201), 13 | scale = FALSE 14 | ) 15 | } 16 | \arguments{ 17 | \item{fdobj}{a functional data object (class \code{fd}) giving the 18 | functional responses.} 19 | 20 | \item{Z}{the model matrix, whose columns represent scalar predictors.} 21 | 22 | \item{L}{a row vector or matrix of linear contrasts of the coefficient 23 | functions, to be restricted to equal zero.} 24 | 25 | \item{lambda}{smoothing parameter: either a nonnegative scalar or a vector, 26 | of length \code{ncol(Z)}, of nonnegative values.} 27 | 28 | \item{eval.pts}{argument values at which the CV score is to be evaluated.} 29 | 30 | \item{scale}{logical value or vector determining scaling of the matrix 31 | \code{Z} (see \code{\link{scale}}, to which the value of this argument is 32 | passed).} 33 | } 34 | \value{ 35 | A vector of the same length as \code{eval.pts} giving the CV 36 | scores. 37 | } 38 | \description{ 39 | Estimates prediction error for a function-on-scalar regression model by 40 | leave-one-function-out cross-validation (CV), at each of a specified set of 41 | points. 42 | } 43 | \details{ 44 | Integrating the pointwise CV estimate over the function domain yields the 45 | \emph{cross-validated integrated squared error}, the standard overall model 46 | fit score returned by \code{\link{lofocv}}. 
47 | 48 | It may be desirable to derive the value of \code{lambda} from an 49 | appropriate call to \code{\link{fosr}}, as in the example below. 50 | } 51 | \references{ 52 | Reiss, P. T., Huang, L., and Mennes, M. (2010). Fast 53 | function-on-scalar regression with penalized basis expansions. 54 | \emph{International Journal of Biostatistics}, 6(1), article 28. Available 55 | at \url{https://pubmed.ncbi.nlm.nih.gov/21969982/} 56 | } 57 | \seealso{ 58 | \code{\link{fosr}}, \code{\link{lofocv}} 59 | } 60 | \author{ 61 | Philip Reiss \email{phil.reiss@nyumc.org} 62 | } 63 | -------------------------------------------------------------------------------- /man/qq.pffr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/pffr-methods.R 3 | \name{qq.pffr} 4 | \alias{qq.pffr} 5 | \title{QQ plots for pffr model residuals} 6 | \usage{ 7 | qq.pffr( 8 | object, 9 | rep = 0, 10 | level = 0.9, 11 | s.rep = 10, 12 | type = c("deviance", "pearson", "response"), 13 | pch = ".", 14 | rl.col = 2, 15 | rep.col = "gray80", 16 | ... 17 | ) 18 | } 19 | \arguments{ 20 | \item{object}{a fitted \code{\link{pffr}}-object} 21 | 22 | \item{rep}{How many replicate datasets to generate to simulate quantiles 23 | of the residual distribution. \code{0} results in an efficient 24 | simulation free method for direct calculation, if this is possible for 25 | the object family.} 26 | 27 | \item{level}{If simulation is used for the quantiles, then reference intervals can be provided for the QQ-plot, this specifies the level. 28 | 0 or less for no intervals, 1 or more to simply plot the QQ plot for each replicate generated.} 29 | 30 | \item{s.rep}{how many times to randomize uniform quantiles to data under direct computation.} 31 | 32 | \item{type}{what sort of residuals should be plotted? See 33 | \code{\link[mgcv]{residuals.gam}}.} 34 | 35 | \item{pch}{plot character to use. 
19 is good.} 36 | 37 | \item{rl.col}{color for the reference line on the plot.} 38 | 39 | \item{rep.col}{color for reference bands or replicate reference plots.} 40 | 41 | \item{...}{extra graphics parameters to pass to plotting functions.} 42 | } 43 | \description{ 44 | This is simply a wrapper for \code{\link[mgcv]{qq.gam}()}. 45 | } 46 | -------------------------------------------------------------------------------- /man/quadWeights.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/quadWeights.R 3 | \name{quadWeights} 4 | \alias{quadWeights} 5 | \title{Compute quadrature weights} 6 | \usage{ 7 | quadWeights(argvals, method = "trapezoidal") 8 | } 9 | \arguments{ 10 | \item{argvals}{function arguments.} 11 | 12 | \item{method}{quadrature method. Can be either \code{trapedoidal} or \code{midpoint}.} 13 | } 14 | \value{ 15 | a vector of quadrature weights for the points supplied in \code{argvals}. 16 | } 17 | \description{ 18 | Utility function for numerical integration. 19 | } 20 | \author{ 21 | Clara Happ, with modifications by Philip Reiss 22 | } 23 | -------------------------------------------------------------------------------- /man/re.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/re.R 3 | \name{re} 4 | \alias{re} 5 | \title{Random effects constructor for fgam} 6 | \usage{ 7 | re(x, ...) 8 | } 9 | \arguments{ 10 | \item{x}{a grouping variable: must be a \code{factor}} 11 | 12 | \item{...}{further arguments handed over to \code{\link[mgcv]{s}}, 13 | see \code{\link[mgcv]{random.effects}}} 14 | } 15 | \description{ 16 | Sets up a random effect for the levels of \code{x}. 17 | Use the \code{by}-argument to request random slopes. 18 | } 19 | \details{ 20 | See \code{\link[mgcv]{random.effects}} in \pkg{mgcv}. 
21 | } 22 | \seealso{ 23 | \code{\link[mgcv]{random.effects}} 24 | } 25 | -------------------------------------------------------------------------------- /man/residuals.pffr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/pffr-methods.R 3 | \name{residuals.pffr} 4 | \alias{residuals.pffr} 5 | \alias{fitted.pffr} 6 | \title{Obtain residuals and fitted values for a pffr models} 7 | \usage{ 8 | \method{residuals}{pffr}(object, reformat = TRUE, ...) 9 | 10 | \method{fitted}{pffr}(object, reformat = TRUE, ...) 11 | } 12 | \arguments{ 13 | \item{object}{a fitted \code{pffr}-object} 14 | 15 | \item{reformat}{logical, defaults to TRUE. Should residuals be returned in 16 | \code{n x yindex} matrix form (regular grid data) or, respectively, in the 17 | shape of the originally supplied \code{ydata} argument (sparse/irregular 18 | data), or, if \code{FALSE}, simply as a long vector as returned by 19 | \code{resid.gam()}?} 20 | 21 | \item{...}{other arguments, passed to \code{\link[mgcv]{residuals.gam}}.} 22 | } 23 | \value{ 24 | A matrix or \code{ydata}-like \code{data.frame} or a vector of 25 | residuals / fitted values (see \code{reformat}-argument) 26 | } 27 | \description{ 28 | See \code{\link{predict.pffr}} for alternative options to extract estimated 29 | values from a \code{pffr} object. 30 | "Fitted values" here refers to the estimated additive predictor values, 31 | these will not be on the scale of the response for models with link functions. 
32 | } 33 | \author{ 34 | Fabian Scheipl 35 | } 36 | -------------------------------------------------------------------------------- /man/sff.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/pffr-sff.R 3 | \name{sff} 4 | \alias{sff} 5 | \title{Construct a smooth function-on-function regression term} 6 | \usage{ 7 | sff( 8 | X, 9 | yind, 10 | xind = seq(0, 1, l = ncol(X)), 11 | basistype = c("te", "t2", "s"), 12 | integration = c("simpson", "trapezoidal"), 13 | L = NULL, 14 | limits = NULL, 15 | splinepars = list(bs = "ps", m = c(2, 2, 2)) 16 | ) 17 | } 18 | \arguments{ 19 | \item{X}{an n by \code{ncol(xind)} matrix of function evaluations 20 | \eqn{X_i(s_{i1}),\dots, X_i(s_{iS})}; \eqn{i=1,\dots,n}.} 21 | 22 | \item{yind}{\emph{DEPRECATED} matrix (or vector) of indices of evaluations of 23 | \eqn{Y_i(t)}; i.e. matrix with rows \eqn{(t_{i1},\dots,t_{iT})}; no longer 24 | used.} 25 | 26 | \item{xind}{vector of indices of evaluations of \eqn{X_i(s)}, 27 | i.e, \eqn{(s_{1},\dots,s_{S})}} 28 | 29 | \item{basistype}{defaults to "\code{\link[mgcv]{te}}", i.e. a tensor product 30 | spline to represent \eqn{f(X_i(s), t)}. Alternatively, use \code{"s"} for 31 | bivariate basis functions (see \code{\link[mgcv]{s}}) or \code{"t2"} for an 32 | alternative parameterization of tensor product splines (see 33 | \code{\link[mgcv]{t2}}).} 34 | 35 | \item{integration}{method used for numerical integration. Defaults to 36 | \code{"simpson"}'s rule. 
Alternatively and for non-equidistant grids, 37 | \code{"trapezoidal"}.} 38 | 39 | \item{L}{optional: an n by \code{ncol(xind)} giving the weights for the 40 | numerical integration over \eqn{s}.} 41 | 42 | \item{limits}{defaults to NULL for integration across the entire range of 43 | \eqn{X(s)}, otherwise specifies the integration limits \eqn{s_{hi, i}, 44 | s_{lo, i}}: either one of \code{"s squared_diff(ef1[,i], - ef2[,i])){ 15 | ef2[,i] <- -ef2[,i] 16 | } 17 | } 18 | ef2 19 | } 20 | 21 | test_that("all fpca functions agree on toy example", { 22 | skip_on_cran() 23 | 24 | sc <- fpca.sc(Y) 25 | face <- fpca.face(Y) 26 | #ssvd <- fpca.ssvd(Y) 27 | #twos <- fpca2s(Y) 28 | 29 | expect_equal(sc$Yhat, unname(face$Yhat), tolerance=.01) 30 | #expect_equal(sc$Yhat, ssvd$Yhat, tolerance=.01) 31 | #expect_equal(sc$Yhat, twos$Yhat, tolerance=.01) 32 | 33 | #ssvd$efunctions <- flip_efunctions(sc$efunctions, ssvd$efunctions) 34 | #expect_equal(sc$efunctions, ssvd$efunctions, tolerance=.1) 35 | #expect_equal(sc$evalues, ssvd$evalues, tolerance=.1) 36 | 37 | #twos$efunctions <- flip_efunctions(sc$efunctions, twos$efunctions) 38 | #expect_equal(sc$efunctions, twos$efunctions, tolerance=.1) 39 | #expect_equal(sc$evalues, twos$evalues, tolerance=.1) 40 | 41 | if(FALSE){ 42 | ##TODO: - fix quadrature weights first 43 | ## - flip sign of efunctions if necessary 44 | expect_equal(sc$efunctions, face$efunctions, tolerance=.01) 45 | #expect_equal(sc$efunctions, twos$efunctions, tolerance=.01) 46 | expect_equal(sc$evalues, face$evalues, tolerance=.01) 47 | #expect_equal(sc$evalues, ssvd$evalues, tolerance=.01) 48 | #expect_equal(sc$evalues, twos$evalues, tolerance=.01) 49 | } 50 | }) 51 | 52 | test_that("fpca.sc options work", { 53 | skip_on_cran() 54 | 55 | sc <- fpca.sc(Y) 56 | sc_cov1 <- fpca.sc(Y, cov.est.method = 1) 57 | #sc_sym <- fpca.sc(Y, useSymm = TRUE) 58 | #sc_int <- fpca.sc(Y, random.int = TRUE) 59 | 60 | expect_equal(sc$Yhat, sc_cov1$Yhat, tolerance=.02) 61 | 
#expect_equal(sc$Yhat, sc_sym$Yhat, tolerance=.01) 62 | #expect_equal(sc$Yhat, sc_int$Yhat, tolerance=.01) 63 | }) 64 | 65 | 66 | # test_that("fpca.ssvd options work", { 67 | # skip_on_cran() 68 | # 69 | # expect_error(fpca.ssvd(Y = 1:10, ydata=data.frame()), "irregular data") 70 | # expect_warning(fpca.ssvd(Y = Y, argvals=sqrt(t)), "non-equidistant") 71 | # ssvd <- fpca.ssvd(Y) 72 | # ssvd_npc1 <- fpca.ssvd(Y, npc=1) 73 | # ssvd_d2 <- fpca.ssvd(Y, diffpen = 2) 74 | # expect_equal(ssvd_npc1$efunctions[,1], ssvd$efunctions[,1]) 75 | # expect_true(ncol(ssvd_npc1$efunctions) == 1) 76 | # expect_equal(ssvd_d2$efunctions, ssvd$efunctions, tol=.01) 77 | # }) 78 | # 79 | # test_that("fpca2s options work", { 80 | # skip_on_cran() 81 | # 82 | # expect_error(fpca2s(Y = 1:10, ydata=data.frame()), "irregular data") 83 | # expect_warning(fpca2s(Y = Y, argvals=sqrt(t)), "non-equidistant") 84 | # twos <- fpca2s(Y) 85 | # twos_npc1 <- fpca2s(Y, npc=1) 86 | # expect_equal(twos_npc1$efunctions[,1], twos$efunctions[,1]) 87 | # expect_true(ncol(twos_npc1$efunctions) == 1) 88 | # }) 89 | -------------------------------------------------------------------------------- /tests/testthat/test-fpcr.R: -------------------------------------------------------------------------------- 1 | context("Testing functional PCR") 2 | 3 | 4 | test_that("Check that all 3 fpcr calls yield essentially identical estimates", { 5 | skip_on_cran() 6 | 7 | data(gasoline) 8 | 9 | # Create the requisite functional data objects 10 | bbasis = create.bspline.basis(c(900, 1700), 40) 11 | wavelengths = 2*450:850 12 | nir <- t(gasoline$NIR) 13 | gas.fd = smooth.basisPar(wavelengths, nir, bbasis)$fd 14 | 15 | # Method 1: Call fpcr with fdobj argument 16 | gasmod1 = fpcr(gasoline$octane, fdobj = gas.fd, ncomp = 30) 17 | #plot(gasmod1, xlab="Wavelength") 18 | 19 | # Method 2: Call fpcr with explicit signal matrix 20 | gasmod2 = fpcr(gasoline$octane, xfuncs = gasoline$NIR, ncomp = 30) 21 | # Method 3: Call fpcr with 
explicit signal, basis, and penalty matrices 22 | gasmod3 = fpcr(gasoline$octane, xfuncs = gasoline$NIR, 23 | basismat = eval.basis(wavelengths, bbasis), 24 | penmat = getbasispenalty(bbasis), ncomp = 30) 25 | 26 | # Check that all 3 calls yield essentially identical estimates 27 | #expect_equal(gasmod1$fhat, gasmod2$fhat, gasmod3$fhat) 28 | tmp <- abs(c(gasmod1$fhat-gasmod2$fhat, gasmod1$fhat-gasmod3$fhat)) 29 | expect_lt(max(tmp), 1e-10) 30 | # But note that, in general, you'd have to specify argvals in Method 1 31 | # to get the same coefficient function values as with Methods 2 & 3. 32 | }) 33 | 34 | test_that("Cross-validation is working", { 35 | skip_on_cran() 36 | set.seed(8659) 37 | data(gasoline) 38 | cv.gas = fpcr(gasoline$octane, xfuncs = gasoline$NIR, 39 | nbasis=seq(20,40,5), ncomp = seq(10,20,5), store.cv = TRUE) 40 | expect_is(cv.gas, "fpcr") 41 | 42 | ## expect_equal_to_reference(cv.gas$cv.table, "fpcr.cv.rds") 43 | }) -------------------------------------------------------------------------------- /tests/testthat/test-lpfr.R: -------------------------------------------------------------------------------- 1 | context("Testing old lpfr") 2 | 3 | test_that("lpfr works with one predictor", { 4 | skip_on_cran() 5 | 6 | data(DTI) 7 | 8 | # subset data as needed for this example 9 | cca = DTI$cca[which(DTI$case == 1),] 10 | rcst = DTI$rcst[which(DTI$case == 1),] 11 | DTI = DTI[which(DTI$case == 1),] 12 | 13 | 14 | # note there is missingness in the functional predictors 15 | # apply(is.na(cca), 2, mean) 16 | # apply(is.na(rcst), 2, mean) 17 | 18 | 19 | # fit two models with single functional predictors and plot the results 20 | #fit.cca = lpfr(Y=DTI$pasat, subj=DTI$ID, funcs = cca, smooth.cov=FALSE) 21 | fit.rcst = lpfr(Y=DTI$pasat, subj=DTI$ID, funcs = rcst, smooth.cov=FALSE) 22 | ## expect_equal_to_reference(fit.cca$BetaHat, "lpfr.cca.coef.rds") 23 | expect_is(fit.rcst, "list") 24 | expect_equal(length(fit.rcst), 10) 25 | }) 26 | 27 | test_that("lpfr 
works two predictors", { 28 | skip_on_cran() 29 | 30 | data(DTI) 31 | 32 | # subset data as needed for this example 33 | cca = DTI$cca[which(DTI$case == 1),] 34 | rcst = DTI$rcst[which(DTI$case == 1),] 35 | DTI = DTI[which(DTI$case == 1),] 36 | 37 | # fit a model with two functional predictors and plot the results 38 | fit.cca.rcst = lpfr(Y=DTI$pasat, subj=DTI$ID, funcs = list(cca,rcst), 39 | smooth.cov=FALSE) 40 | expect_is(fit.cca.rcst, "list") 41 | ## expect_equal_to_reference(fit.cca.rcst, "lpfr.fit.rds") 42 | }) -------------------------------------------------------------------------------- /tests/testthat/test-mfpca.R: -------------------------------------------------------------------------------- 1 | context("Test mfpca") 2 | 3 | test_that("all mfpca functions work on the DTI example", { 4 | skip_on_cran() 5 | 6 | data(DTI) 7 | DTI <- subset(DTI, Nscans < 6) ## example where all subjects have 6 or fewer visits 8 | id <- DTI$ID 9 | Y <- DTI$cca 10 | 11 | mfpca.sc.DTI <- mfpca.sc(Y = Y, id = id, twoway = TRUE) 12 | mfpca.face.DTI <- mfpca.face(Y = Y, id = id, twoway = TRUE) 13 | 14 | expect_equal(dim(mfpca.face.DTI$Yhat)[1], dim(mfpca.sc.DTI$Yhat)[1]) 15 | expect_equal(dim(mfpca.face.DTI$Yhat.subject)[1], dim(mfpca.sc.DTI$Yhat.subject)[1]) 16 | expect_equal(dim(mfpca.face.DTI$scores$level1)[1], dim(mfpca.sc.DTI$scores$level1)[1]) 17 | expect_equal(dim(mfpca.face.DTI$scores$level2)[1], dim(mfpca.sc.DTI$scores$level2)[1]) 18 | }) 19 | 20 | test_that("mfpca.face options work", { 21 | skip_on_cran() 22 | 23 | data(DTI) 24 | DTI <- subset(DTI, Nscans < 6) ## example where all subjects have 6 or fewer visits 25 | id <- DTI$ID 26 | Y <- DTI$cca 27 | mfpca.base <- mfpca.face(Y = Y, id = id) 28 | 29 | # visit argument 30 | mfpca.visit <- mfpca.face(Y = Y, id = id, visit = DTI$visit) 31 | expect_equal(mfpca.base$npc$level1, mfpca.visit$npc$level1) 32 | 33 | # weight argument 34 | mfpca.weight <- mfpca.face(Y = Y, id = id, weight = "subj") 35 | 
expect_equal(dim(mfpca.base$scores$level1)[1], dim(mfpca.weight$scores$level1)[1]) 36 | 37 | # pve argument 38 | mfpca.pve <- mfpca.face(Y = Y, id = id, pve = 0.95) 39 | expect_equal(dim(mfpca.base$scores$level1)[1], dim(mfpca.pve$scores$level1)[1]) 40 | 41 | # npc argument 42 | mfpca.npc <- mfpca.face(Y = Y, id = id, npc = 5) 43 | expect_equal(mfpca.npc$npc$level1, 5) 44 | expect_equal(mfpca.npc$npc$level2, 5) 45 | }) 46 | 47 | 48 | -------------------------------------------------------------------------------- /tests/testthat/test-pcre.R: -------------------------------------------------------------------------------- 1 | context("Testing pcre") 2 | 3 | test_that("pcre works as expected", { 4 | skip_on_cran() 5 | 6 | residualfunction <- function(t){ 7 | #generate quintic polynomial error functions 8 | drop(poly(t, 5)%*%rnorm(5, sd=sqrt(2:6))) 9 | } 10 | # generate data Y(t) = mu(t) + E(t) + white noise 11 | set.seed(1122) 12 | n <- 50 13 | T <- 30 14 | t <- seq(0,1, l=T) 15 | # E(t): smooth residual functions 16 | E <- t(replicate(n, residualfunction(t))) 17 | int <- matrix(scale(3*dnorm(t, m=.5, sd=.5) - dbeta(t, 5, 2)), byrow=T, n, T) 18 | Y <- int + E + matrix(.2*rnorm(n*T), n, T) 19 | data <- data.frame(Y=I(Y)) 20 | # fit model under independence assumption: 21 | summary(m0 <- pffr(Y ~ 1, yind=t, data=data)) 22 | # get first 5 eigenfunctions of residual covariance 23 | # (i.e. 
first 5 functional PCs of empirical residual process) 24 | Ehat <- resid(m0) 25 | fpcE <- fpca.sc(Ehat, npc=5) 26 | ##expect_equal_to_reference(fpcE, "pcre.fpca.obj.rds") 27 | #expect_is(fpcE, "list") 28 | 29 | efunctions <- fpcE$efunctions 30 | evalues <- fpcE$evalues 31 | data$id <- factor(1:nrow(data)) 32 | # refit model with fpc-based residuals 33 | m1 <- pffr(Y ~ 1 + pcre(id=id, efunctions=efunctions, evalues=evalues, yind=t), yind=t, data=data) 34 | ## expect_equal_to_reference(m1, "pcre.pffr.obj.rds") 35 | expect_is(m1, "pffr") 36 | re <- predict(m1, type = "terms")[[2]] 37 | expect_equivalent(colSums(re), rep(0, T)) 38 | 39 | expect_is(summary(m1), "summary.pffr") 40 | }) 41 | 42 | test_that("pcre works for sparse", { 43 | skip_on_cran() 44 | 45 | residualfunction <- function(t){ 46 | #generate quintic polynomial error functions 47 | drop(poly(t, 5)%*%rnorm(5, sd=sqrt(2:6))) 48 | } 49 | # generate data Y(t) = mu(t) + E(t) + white noise 50 | set.seed(1122) 51 | n <- 50 52 | T <- 30 53 | t <- seq(0,1, l=T) 54 | # E(t): smooth residual functions 55 | E <- t(replicate(n, residualfunction(t))) 56 | int <- matrix(scale(3*dnorm(t, m=.5, sd=.5) - dbeta(t, 5, 2)), byrow=T, n, T) 57 | Y <- int + E + matrix(.2*rnorm(n*T), n, T) 58 | data <- data.frame(Y=I(Y)) 59 | # fit model under independence assumption: 60 | summary(m0 <- pffr(Y ~ 1, yind=t, data=data)) 61 | # get first 5 eigenfunctions of residual covariance 62 | # (i.e. 
first 5 functional PCs of empirical residual process) 63 | Ehat <- resid(m0) 64 | fpcE <- fpca.sc(Ehat, npc=5) 65 | ##expect_equal_to_reference(fpcE, "pcre.fpca.obj.rds") 66 | #expect_is(fpcE, "list") 67 | 68 | efunctions <- fpcE$efunctions 69 | evalues <- fpcE$evalues 70 | 71 | # make sparse data: 72 | propmissing <- .5 73 | nygrid <- T 74 | missing <- sample(c(rep(T, propmissing*n*nygrid), 75 | rep(F, n*nygrid-propmissing*n*nygrid))) 76 | 77 | ydata <- data.frame(.obs = rep(1:n, each=nygrid)[!missing], 78 | .index = rep(t, times=n)[!missing], 79 | .value = as.vector(t(data$Y))[!missing]) 80 | 81 | data <- data.frame(id = factor(1:nrow(data))) 82 | 83 | # refit model with fpc-based residuals 84 | m1 <- pffr(Y ~ 1 + pcre(id=id, efunctions=efunctions, evalues=evalues, yind=t), yind=t, 85 | data=data, ydata = ydata) 86 | ## expect_equal_to_reference(m1, "pcre.pffr.obj.rds") 87 | expect_is(m1, "pffr") 88 | 89 | t1 <- predict(m1, type="terms") 90 | ## expect_equal_to_reference(t1, "pcre.prediction.obj.rds") 91 | expect_is(t1, "list") 92 | 93 | re <- predict(m1, type = "terms", newdata = data)[[2]] 94 | expect_equivalent(colSums(re), rep(0, T)) 95 | 96 | expect_is(summary(m1), "summary.pffr") 97 | }) 98 | -------------------------------------------------------------------------------- /tests/testthat/test-peer.R: -------------------------------------------------------------------------------- 1 | # context("Testing pfr's peer()") 2 | # 3 | # test_that("peer with D2 penalty", { 4 | # skip_on_cran() 5 | # 6 | # data(DTI) 7 | # DTI = DTI[which(DTI$case == 1),] 8 | # fit.D2 <- pfr(pasat ~ peer(cca, pentype="D"), data=DTI) 9 | # expect_is(fit.D2, "pfr") 10 | # }) 11 | 12 | # test_that("peer with structured penalty works", { 13 | # skip_on_cran() 14 | # 15 | # data(PEER.Sim, Q) 16 | # 17 | # # Setting k to max possible value 18 | # fit.decomp <- pfr(Y ~ peer(W, pentype="Decomp", Q=Q, k=99), 19 | # data=subset(PEER.Sim, t==0)) 20 | # expect_is(fit.decomp, "pfr") 21 | # }) 22 | 
--------------------------------------------------------------------------------