├── .Rinstignore ├── DESCRIPTION ├── MD5 ├── NAMESPACE ├── R ├── ParetoTest.R ├── anova.R ├── boot.R ├── combos.R ├── crq.R ├── dynrq.R ├── khmal.R ├── kuantile.R ├── lprq.R ├── nlrq.R ├── qrisk.R ├── quantreg.R ├── rqss.R ├── sfn.R ├── table.R └── tools.R ├── README ├── build └── vignette.rds ├── data ├── Bosco.rda ├── CobarOre.rda ├── Mammals.rda ├── MelTemp.rda ├── Peirce.rda ├── barro.rda ├── engel.rda ├── gasprice.rda └── uis.rda ├── demo ├── 00Index ├── Frank.R ├── KMvCRQ.R ├── MCV.R ├── Mammals.R ├── Mel.R ├── Mel2.R ├── Panel.R ├── Polson.R ├── RB-r.R ├── arqss.R ├── cobar.R ├── combos.R ├── cpoint.R ├── crquis.R ├── engel1.R ├── engel2.R ├── hinged.R ├── panelfig.R ├── predemo.R ├── rqsslasso.R ├── stack.R └── subset.R ├── inst ├── ChangeLog ├── FAQ ├── TODO └── doc │ ├── crq.pdf │ ├── crq.pdf.asis │ ├── rq.pdf │ └── rq.pdf.asis ├── man ├── Bosco.Rd ├── CobarOre.Rd ├── FAQ.Rd ├── KhmaladzeTest.Rd ├── LassoLambdaHat.Rd ├── Mammals.Rd ├── MelTemp.Rd ├── Munge.Rd ├── ParetoTest.Rd ├── Peirce.Rd ├── QTECox.Rd ├── akj.Rd ├── anova.rq.Rd ├── bandwidth.rq.Rd ├── barro.Rd ├── boot.crq.Rd ├── boot.rq.Rd ├── boot.rq.pwxy.Rd ├── boot.rq.pxy.Rd ├── combos.Rd ├── critval.Rd ├── crq.Rd ├── dither.Rd ├── dynrq.Rd ├── engel.Rd ├── gasprice.Rd ├── kuantile.Rd ├── latex.Rd ├── latex.summary.rqs.Rd ├── latex.table.Rd ├── lm.fit.recursive.Rd ├── lprq.Rd ├── nlrq.Rd ├── nlrq.control.Rd ├── plot.KhmaladzeTest.Rd ├── plot.rq.process.Rd ├── plot.rqs.Rd ├── plot.rqss.Rd ├── plot.summary.rqs.Rd ├── predict.rq.Rd ├── predict.rqss.Rd ├── print.KhmaladzeTest.Rd ├── print.rq.Rd ├── print.summary.rq.Rd ├── q489.Rd ├── qrisk.Rd ├── qss.Rd ├── ranks.Rd ├── rearrange.Rd ├── residuals.nlrq.Rd ├── rq.Rd ├── rq.fit.Rd ├── rq.fit.br.Rd ├── rq.fit.conquer.Rd ├── rq.fit.fnb.Rd ├── rq.fit.fnc.Rd ├── rq.fit.hogg.Rd ├── rq.fit.lasso.Rd ├── rq.fit.pfn.Rd ├── rq.fit.pfnb.Rd ├── rq.fit.ppro.Rd ├── rq.fit.qfnb.Rd ├── rq.fit.scad.Rd ├── rq.fit.sfn.Rd ├── rq.fit.sfnc.Rd ├── rq.object.Rd ├── rq.process.object.Rd ├── rq.wfit.Rd ├── rqProcess.Rd ├── rqs.fit.Rd ├── rqss.Rd ├── rqss.object.Rd ├── sfn.control.Rd ├── srisk.Rd ├── summary.crq.Rd ├── summary.rq.Rd ├── summary.rqss.Rd ├── table.rq.Rd └── uis.Rd ├── src ├── Makevars ├── akj.f ├── boot.f ├── bound.f ├── boundc.f ├── brute.f ├── chlfct.f ├── cholesky.f ├── combos.f ├── crqf.f ├── crqfnb.f ├── dsel05.f ├── extract.f ├── frand.c ├── grexp.f ├── idmin.f ├── iswap.f ├── kuantile.f ├── kuantiles.f ├── linpack.f ├── mcmb.c ├── penalty.f ├── pfnb.f ├── powell.f ├── profnb.f ├── pwxy.f ├── qfnb.f ├── qselect.f ├── quantreg_init.c ├── ratfor │ ├── README │ ├── boot.r │ ├── brute.r │ ├── combos.r │ ├── crqfnb.r │ ├── grexp.r │ ├── kuantiles.r │ ├── penalty.r │ ├── pfnb.r │ ├── powell.r │ ├── pwxy.r │ ├── qfnb.r │ ├── qselect.r │ ├── rls.r │ ├── rqbr.r │ ├── rqfn.r │ ├── rqfnb.r │ ├── rqfnc.r │ ├── rqs.r │ └── sakj.r ├── rls.f ├── rq0.f ├── rq1.f ├── rqbr.f ├── rqfn.f ├── rqfnb.f ├── rqfnc.f ├── rqs.f ├── sakj.f ├── sparskit2.f ├── srqfn.f ├── srqfnc.f └── srtpai.f ├── tests ├── panel.R ├── rq.R ├── rq.fit.panel.R └── run-demos.R └── vignettes ├── crq.pdf.asis └── rq.pdf.asis /.Rinstignore: -------------------------------------------------------------------------------- 1 | doc/Makefile 2 | -------------------------------------------------------------------------------- /R/combos.R: -------------------------------------------------------------------------------- 1 | "combos" <- function(n,p){ 2 | if(length(n) != 1){ 3 | n <- n[1] 4 | warning("Using first element as n") 5 | } 6 | 
if(length(p) != 1){ 7 | p <- p[1] 8 | warning("Using first element as p") 9 | } 10 | if(n != as.integer(n)){ 11 | warning("Coercing n to integer") 12 | n <- as.integer(n) 13 | } 14 | if(p != as.integer(p)){ 15 | warning("Coercing p to integer") 16 | p <- as.integer(p) 17 | } 18 | if(p > n) stop("p is greater than n") 19 | m <- choose(n,p) 20 | z <- .Fortran("combin", 21 | as.integer(n), 22 | as.integer(p), 23 | as.integer(m), 24 | a = integer(p*m), 25 | integer(n), 26 | integer(n), 27 | integer(n)) 28 | matrix(z$a,p) 29 | } 30 | -------------------------------------------------------------------------------- /R/kuantile.R: -------------------------------------------------------------------------------- 1 | "kuantile"<- 2 | function(x, probs = seq(0,1, 0.25),na.rm = FALSE, 3 | names = TRUE, type = 7, ...) { 4 | if(na.rm) 5 | x <- x[!is.na(x)] 6 | else if(any(is.na(x))) 7 | stop("NA's and NaN's not allowed in 'x' if 'na.rm' is FALSE") 8 | if(any(is.na(probs))) 9 | stop("NA's and NaN's in 'probs' not allowed") 10 | if(any(probs < 0 | probs > 1)) 11 | stop("probs outside [0,1]") 12 | p <- probs 13 | op <- order(p) 14 | p <- p[op] 15 | n <- length(x) 16 | m <- length(p) 17 | g <- rep(.5,m) 18 | if(type == 1) #Hyndman-Fan Typology 19 | k <- j <- pmax(1,ceiling(p*n)) 20 | else if(type == 2){ 21 | j <- pmax(1,floor(p*n)) 22 | k <- sort(c(pmax(1,j),pmin(j+1,n))) 23 | g <- ifelse(p*n > j, 1, 0.5) 24 | } 25 | else if(type == 3) 26 | k <- j <- pmax(1,round(p*n)) 27 | else{ 28 | switch(type - 3, 29 | {a <- 0; b <- 1},#Type 4 30 | {a <- b <- 0.5}, #Type 5 31 | {a <- b <- 0}, #Type 6 32 | {a <- b <- 1}, #Type 7 33 | {a <- b <- 1/3}, #Type 8 34 | {a <- b <- 3/8}) #Type 9 35 | d <- a + p * (1 - a - b) 36 | j <- floor(p*n + d) 37 | g <- p*n + d - j 38 | k <- sort(c(pmax(1,j),pmin(j+1,n))) 39 | } 40 | uk <- kunique(k) 41 | uz <- kselect(x,uk$xU) 42 | z <- uz[uk$ix] 43 | if(type %in% c(1,3)) 44 | A <- matrix(z,m,2) 45 | else 46 | A <- t(matrix(z,2,m)) 47 | G <- cbind(1-g,g) 48 | y <- (A * G) %*% c(1,1) # <=> diag(crossprod(A,G)) 49 | y <- y[rank(probs)] 50 | if(names && m > 0){ 51 | dig <- max(2,getOption("digits")) 52 | names(y) <- paste(format(100*probs, trim = TRUE, digits = dig), "%", sep="") 53 | } 54 | class(y) <- "kuantile" 55 | return(y) 56 | } 57 | "kselect" <- 58 | function(x,k){ 59 | n <- length(x) 60 | m <- length(k) 61 | z <- .Fortran("kuantiles", 62 | k = as.integer(k), 63 | m = as.integer(m), 64 | n = as.integer(n), 65 | x = as.double(x)) 66 | return(z$x[z$k]) 67 | } 68 | "kunique" <- 69 | function (x, isuniq = !duplicated(x)) 70 | { # shamelessly plagarized from Martin Maechler's sfsmisc package 71 | need.sort <- is.unsorted(x) 72 | if (need.sort) { 73 | xs <- sort(x, index.return = TRUE) 74 | ixS <- xs$ix 75 | isuniq <- isuniq[ixS] 76 | x <- xs$x 77 | } 78 | ix <- as.integer(cumsum(isuniq)) 79 | if (need.sort) 80 | ix <- ix[sort.list(ixS)] 81 | list(ix = ix, xU = x[isuniq]) 82 | } 83 | q489 <- function(x, tau = .5){ 84 | n <- length(x) 85 | z <- .Fortran("qselect", 86 | as.integer(n), 87 | as.double(x), 88 | q = as.double(tau)) 89 | z$q 90 | } 91 | -------------------------------------------------------------------------------- /R/lprq.R: -------------------------------------------------------------------------------- 1 | lprq <- function(x, y, h, tau = .5, m = 50) 2 | { 3 | ## A toy routine to do locally polynomial quantile regression 4 | xx <- seq(min(x),max(x),length=m) 5 | fv <- xx 6 | dv <- xx 7 | for(i in 1:length(xx)) { 8 | z <- x - xx[i] 9 | wx <- dnorm(z/h) 10 | r <- rq(y~z, 
weights=wx, tau=tau, ci=FALSE) 11 | fv[i] <- r$coef[1.] 12 | dv[i] <- r$coef[2.] 13 | } 14 | list(xx = xx, fv = fv, dv = dv) 15 | } 16 | 17 | -------------------------------------------------------------------------------- /R/qrisk.R: -------------------------------------------------------------------------------- 1 | "qrisk" <- 2 | function(x, alpha=c(.1,.3), w = c(.7,.3), mu = .07, R = NULL, r = NULL, lambda = 10000){ 3 | # 4 | # find optimal Choquet-risk portfolios given: 5 | # 6 | # x (n by p) matrix of asset returns 7 | # alphas alphas defining a Choquet capacity risk function 8 | # w w defining weights for Choquet capacity risk function 9 | # R Matrix defining constraints on the parameters 10 | # r rhs defining constraints on the parameters 11 | # mu required mean rate of return 12 | # lambda Lagrange multiplier for RoR constraint 13 | # 14 | n <- nrow(x) 15 | p <- ncol(x) 16 | m <- length(alpha) 17 | if(length(w)!=m)stop("length of w doesn't match length of alpha") 18 | xbar <- apply(x,2,mean) 19 | y <- x[,1] 20 | r <- c(r,lambda*(xbar[1]-mu), -lambda*(xbar[1]-mu)) 21 | X <- x[,1]-x[,-1] 22 | R <- rbind(R,lambda*(xbar[1]-xbar[-1]), -lambda*(xbar[1]-xbar[-1])) 23 | R <- cbind(matrix(0,nrow(R),m),R) 24 | f <- rq.fit.hogg(X,y,taus=alpha,weights=w,R=R,r=r) 25 | fit <- f$coefficients 26 | pihat <- c(1-sum(fit[-(1:m)]),fit[-(1:m)]) 27 | x <- as.matrix(x) 28 | yhat <- x%*%pihat 29 | etahat <- quantile(yhat,alpha) 30 | muhat <- mean(yhat) 31 | qrisk <- 0 32 | for(i in 1:length(alpha)) 33 | qrisk <- qrisk + w[i]*sum(yhat[yhat eps) return("lambda too small?") 56 | yhat <- x%*%pihat 57 | muhat <- mean(x%*%pihat) 58 | sigma <- sqrt(var(x%*%pihat)) 59 | list(pihat = pihat, muhat = muhat, sigma = sigma) 60 | } 61 | -------------------------------------------------------------------------------- /R/tools.R: -------------------------------------------------------------------------------- 1 | "FAQ" <- function(pkg = "quantreg") 2 | file.show(file.path(system.file(package = pkg),"FAQ")) 3 | "ChangeLog" <- function(pkg = "quantreg") 4 | file.show(file.path(system.file(package = pkg),"ChangeLog")) 5 | -------------------------------------------------------------------------------- /README: -------------------------------------------------------------------------------- 1 | Versions of quantreg between 3.70 and 4.75 were removed from the CRAN 2 | archive due to uncertainties over the licensing status of the fortran 3 | code in src/cholesky.f. As of 9 March 2012, original authors of cholesky.f, 4 | Esmond Ng and Barry Peyton, have now, very kindly, given permission to 5 | use cholesky.f under an open source license. They have requested that 6 | their code be credited via the following two publications: 7 | 8 | Esmond G. Ng and Barry W. Peyton, "Block sparse Cholesky algorithms 9 | on advanced uniprocessor computers". SIAM J. Sci. Stat. Comput. 10 | 14 (1993), pp. 1034-1056. 11 | 12 | John R. Gilbert, Esmond G. Ng, and Barry W. Peyton, "An efficient 13 | algorithm to compute row and column counts for sparse Cholesky 14 | factorization". SIAM J. Matrix Anal. Appl. 15 (1994), pp. 1075-1091. 15 | 16 | Use of the sparse Cholesky code in cholesky.f in quantreg is limited to calls 17 | to rqss() or to rq with method = "sfn". 
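For example, a minimal sketch of the two entry points that reach cholesky.f
(illustrative only: it uses the engel data shipped with the package and an
arbitrary lambda value; depending on the installed version, rq() with
method = "sfn" may additionally require the Matrix/MatrixModels packages):

    require(quantreg)
    data(engel)
    ## rq() with the sparse Frisch-Newton method uses the Ng-Peyton Cholesky code
    f1 <- rq(foodexp ~ income, tau = 0.5, data = engel, method = "sfn")
    ## rqss() fits are solved through the same sparse Cholesky routines
    f2 <- rqss(foodexp ~ qss(income, lambda = 1), tau = 0.5, data = engel)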
18 | 19 | In the event that readers wish to access the intermediate versions of quantreg 20 | that have been removed from CRAN, they will be available at: 21 | 22 | http://www.econ.uiuc.edu/~roger/research/rq/quantreg 23 | 24 | Current versions are best accessed from CRAN and its many mirrors. 25 | 26 | -------------------------------------------------------------------------------- /build/vignette.rds: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cran/quantreg/43abb16645d5e78d1377896ebd19d760efd2ace9/build/vignette.rds -------------------------------------------------------------------------------- /data/Bosco.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cran/quantreg/43abb16645d5e78d1377896ebd19d760efd2ace9/data/Bosco.rda -------------------------------------------------------------------------------- /data/CobarOre.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cran/quantreg/43abb16645d5e78d1377896ebd19d760efd2ace9/data/CobarOre.rda -------------------------------------------------------------------------------- /data/Mammals.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cran/quantreg/43abb16645d5e78d1377896ebd19d760efd2ace9/data/Mammals.rda -------------------------------------------------------------------------------- /data/MelTemp.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cran/quantreg/43abb16645d5e78d1377896ebd19d760efd2ace9/data/MelTemp.rda -------------------------------------------------------------------------------- /data/Peirce.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cran/quantreg/43abb16645d5e78d1377896ebd19d760efd2ace9/data/Peirce.rda -------------------------------------------------------------------------------- /data/barro.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cran/quantreg/43abb16645d5e78d1377896ebd19d760efd2ace9/data/barro.rda -------------------------------------------------------------------------------- /data/engel.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cran/quantreg/43abb16645d5e78d1377896ebd19d760efd2ace9/data/engel.rda -------------------------------------------------------------------------------- /data/gasprice.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cran/quantreg/43abb16645d5e78d1377896ebd19d760efd2ace9/data/gasprice.rda -------------------------------------------------------------------------------- /data/uis.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cran/quantreg/43abb16645d5e78d1377896ebd19d760efd2ace9/data/uis.rda -------------------------------------------------------------------------------- /demo/00Index: -------------------------------------------------------------------------------- 1 | cobar Demo of the triogram fitting and rgl perspective plot for Cobar Ore data 2 | Frank Demo of nonlinear in parameters fitting of Frank copula model 3 | cobar Demo of contour plot of triogram fit of cobar model 
4 | combos Demo of combos function: ordered combinations 5 | cpoint Demo for a simple change point (broken-stick) problem 6 | hinged Demo for a simple rqss bivariate fitting of hinge function 7 | engel1 Demo of QR fitting of Engel curves 8 | engel2 Demo of estimated conditional quantile functions for Engel data. 9 | Mel Demo of conditional density estimation using Melbourne daily temperature. 10 | Mel2 Demo of Melbourne daily temperature. 11 | KMvCRQ Demo to compare Kaplan Meier and Portnoy's crq estimation in one-sample case. 12 | predemo Demo for prediction and confidence intervals 13 | rqsslasso Demo for lasso penalized rqss 14 | panelfig Demo for panel data example 15 | stack Demo with Stackloss data from Brownlee 16 | arqss Demo for automatic lambda selection 17 | Mammals Demo for rqss with running speed of mammals data 18 | crquis Demo for crq method using UIS data and Peng Huang method. 19 | RB-r Demo for testing R beta = r using anova[.rq] 20 | Polson Demo to compare rqss fit with Polson-Scott ADMM fit 21 | Panel Demo to illustrate fixed effect estimation for panel data 22 | subset Demo to test subset option for rqss 23 | MCV Demo to illustrate multifold cross validation lambda selection for rqss 24 | -------------------------------------------------------------------------------- /demo/Frank.R: -------------------------------------------------------------------------------- 1 | ## Demo of nonlinear quantile regression model based on Frank copula 2 | 3 | 4 | vFrank <- function(x, df, delta, u) 5 | -log(1-(1-exp(-delta))/(1+exp(-delta*pt(x,df))*((1/u)-1)))/delta 6 | 7 | FrankModel <- function(x, delta, mu,sigma, df, tau) { 8 | z <- qt(vFrank(x, df, delta, u = tau), df) 9 | mu + sigma*z 10 | } 11 | 12 | n <- 200 13 | df <- 8 14 | delta <- 8 15 | 16 | set.seed(1989) 17 | 18 | x <- sort(rt(n,df)) 19 | v <- vFrank(x, df, delta, u = runif(n)) 20 | y <- qt(v, df) 21 | plot(x, y, pch="o", col="blue", cex = .25) 22 | Dat <- data.frame(x = x, y = y) 23 | 24 | us <- c(.25,.5,.75) 25 | for(i in 1:length(us)){ 26 | v <- vFrank(x, df, delta, u = us[i]) 27 | lines(x, qt(v,df)) 28 | } 29 | 30 | cfMat <- matrix(0, 3, length(us)) 31 | 32 | trace <- TRUE # a bit noisy ... 33 | trace <- FALSE 34 | for(i in 1:length(us)) { 35 | tau <- us[i] 36 | cat("tau = ", format(tau), ".. ") 37 | fit <- nlrq(y ~ FrankModel(x, delta,mu,sigma, df = 8, tau = tau), 38 | data = Dat, tau = tau, 39 | start= list(delta=5, mu = 0, sigma = 1), 40 | trace = trace) 41 | lines(x, predict(fit, newdata=x), lty=2, col="red") 42 | cfMat[i,] <- coef(fit) 43 | cat("\n") 44 | } 45 | colnames(cfMat) <- names(coef(fit)) 46 | cfMat 47 | -------------------------------------------------------------------------------- /demo/KMvCRQ.R: -------------------------------------------------------------------------------- 1 | # Example Comparison of Kaplan-Meier vs crq fitting 2 | # The red crq estimate should overplot the black KM Survival Curve. 
3 | if (requireNamespace("survival", quietly = TRUE)){ 4 | n <- 100 5 | y <- rchisq(n,3) 6 | c <- rchisq(n,5) 7 | Y <- pmin(y,c) 8 | d <- (y < c) 9 | Surv <- survival::Surv 10 | plot(survival::survfit(Surv(Y,d)~1)) 11 | f <- crq(Surv(Y,d)~1, method = "Portnoy", grid = "pivot") 12 | x <- f$sol[2,] 13 | p <- 1-f$sol[1,] 14 | p <- c(p,p[length(p)]) 15 | par(col = "red") 16 | fs <- plot(stepfun(x, p),do.points = FALSE, add = TRUE) 17 | } 18 | -------------------------------------------------------------------------------- /demo/MCV.R: -------------------------------------------------------------------------------- 1 | 2 | MCV <- function(lambdas, formula, data, tau = 0.5, k = 10){ 3 | F <- Munge(formula, lambdas = lambdas) 4 | f <- rqss(F, data, tau = tau) 5 | n <- f$n 6 | m <- length(f$qss) 7 | y <- f$y[1:n] 8 | folds = sample(rep(1:k, length = n)) 9 | U = NULL 10 | for(i in 1:k){ 11 | s = which(folds != i) 12 | M = rqss(F, data = data[s,], tau = tau) 13 | nd = data[-s,] 14 | G = matrix(0,nrow(nd),m) 15 | for(j in 1:m){ #remove extrapolates, if any 16 | g = f$qss[[j]]$xyz[,1] 17 | G[,j] = (min(g[s]) < g[-s]) & (g[-s] < max(g[s])) 18 | } 19 | h = as.logical(apply(G,1,prod)) 20 | u = predict(M, newdata = nd[h,]) - (y[-s])[h] 21 | U = c(U,(u * (tau - (u < 0)))) 22 | } 23 | mean(U) 24 | } 25 | set.seed(1729) 26 | n <- 200 27 | x <- sort(runif(n, 0, 20)) 28 | g0 <- function(x, tau) 29 | log(x) + 0.2*(log(x))^3 + log(x) * qnorm(tau)/4 30 | y <- g0(x, runif(n)) 31 | D <- data.frame(y = y, x = x) 32 | lams <- mcvs <- seq(.02, 5, by = 0.2) 33 | for(i in 1:length(mcvs)) 34 | mcvs[i] <- MCV(lams[i], y ~ qss(x, lambda = lambdas[1]), D) 35 | par(mfrow = c(1,2)) 36 | plot(lams, mcvs, cex = .5, lwd = 2, type = 'l', 37 | xlab = expression(lambda), ylab = expression(MCV( lambda ))) 38 | lambdastar <- lams[which.min(mcvs)] 39 | 40 | plot(x, y, cex = .5, col = "grey") 41 | f <- rqss(y ~ qss(x, lambda = lambdastar), data = D) 42 | plot(f, add = TRUE, lwd = 2) 43 | lines(x,g0(x, 0.5), col = "red", lwd = 2) 44 | text(10, 1,bquote(lambda == ~ .(lambdastar))) 45 | 46 | -------------------------------------------------------------------------------- /demo/Mammals.R: -------------------------------------------------------------------------------- 1 | require(quantreg) 2 | data(Mammals) 3 | attach(Mammals) 4 | x <- log(weight) 5 | xx <- unique(x[order(x)]) 6 | y <- log(speed) 7 | plot(x,y, xlab="Weight in log(Kg)", ylab="Speed in log(Km/hour)",type="n") 8 | points(x[hoppers],y[hoppers],pch = "h", col="red") 9 | points(x[specials],y[specials],pch = "s", col="blue") 10 | others <- (!hoppers & !specials) 11 | points(x[others],y[others], col="black",cex = .75) 12 | taus <- c(.5, .9) 13 | for(i in 1:length(taus)){ 14 | fit <- rqss(y ~ qss(x, lambda = 1, constraint = "C"),tau = taus[i]) 15 | plot(fit,title = "Running Speed of Mammals", add = TRUE, col = i, lwd = 1.5) 16 | } 17 | legend(4,2,c("Median", "0.9 Quantile"), lty = 1, col = 1:2, lwd = 1.5) 18 | #Now plot confidence bands for the tau = .9 fit 19 | plot(fit,title = "Running Speed of Mammals", band = "both", col = i, lwd = 1.5) 20 | #Now plot slope of the tau = .9 line 21 | xy <- fit$qss[[1]]$xyz 22 | xx <- xy[,1] 23 | yhat <- fit$coef[1] + xy[,2] 24 | g <- diff(yhat)/diff(xx) 25 | plot(xx[-1], g, main = "Fitted Slopes of Running Speed", 26 | xlab="Weight in log(Kg)", ylab="dlog(Speed) /dlog(Weight)") 27 | -------------------------------------------------------------------------------- /demo/Mel.R: 
-------------------------------------------------------------------------------- 1 | #Analysis of the QAR(1) Melbourne Temperature Example 2 | require(splines) 3 | if(interactive()){ 4 | oldpar <- par(ask = TRUE) 5 | data(MelTemp) 6 | x <- MelTemp[-3650] 7 | y <- MelTemp[-1] 8 | s <- (x<40) #Delete a few (influential, ridiculously hot) days 9 | x <- x[s] 10 | y <- y[s] 11 | z <- seq(10,36,length=100) 12 | 13 | fit <- rq(y~ bs(x,knots=quantile(x,c(.05,.25,.5,.75,.95))), tau = 1:19/20) 14 | par(cex=1,pty="s") 15 | xlab <- "yesterday's max temperature" 16 | ylab <- "today's max temperature" 17 | plot(x,y,pch=".",xlab=xlab,ylab=ylab) 18 | matlines(z,predict(fit, newdata = data.frame(x = z)), lty = 1) 19 | abline(c(0,1),lty=3) 20 | title("Melbourne QAR Model") 21 | 22 | taus <- 1:199/200 23 | xs <- c(11,16,21,25,30,35) 24 | fit <- rq(y~ bs(x,knots=quantile(x,c(.05,.25,.5,.75,.95))), tau = taus) 25 | Qy <- predict(fit,newdata = data.frame(x = xs)) 26 | par(mfrow = c(2,3)) 27 | for(i in 1:length(xs)){ 28 | Qyi <- Qy[i,-1] 29 | fhat <- akj(Qyi,Qyi,diff(taus), h = 1)$dens 30 | xlab <- "today's max temperature" 31 | plot(Qyi,fhat,type="l",xlab=xlab,ylab="density") 32 | abline(v=xs[i], col="red") 33 | title(paste("Yesterday's Temp", format(round(xs[i])))) 34 | } 35 | par(oldpar) 36 | 37 | } 38 | 39 | -------------------------------------------------------------------------------- /demo/Mel2.R: -------------------------------------------------------------------------------- 1 | # Analysis of the QAR(1) Melbourne Temperature Example 2 | # Using new type = "fhat" option in predict.rqs 3 | require(splines) 4 | if(interactive()){ 5 | oldpar <- par(ask = TRUE) 6 | data(MelTemp) 7 | x <- MelTemp[-3650] 8 | y <- MelTemp[-1] 9 | s <- (x<40) #Delete a few (influential, ridiculously hot) days 10 | x <- x[s] 11 | y <- y[s] 12 | z <- seq(10,36,length=100) 13 | 14 | fit <- rq(y~ bs(x,knots=quantile(x,c(.05,.25,.5,.75,.95))), tau = 1:19/20) 15 | par(cex=1,pty="s") 16 | xlab <- "yesterday's max temperature" 17 | ylab <- "today's max temperature" 18 | plot(x,y,pch=".",xlab=xlab,ylab=ylab) 19 | matlines(z,predict(fit, newdata = data.frame(x = z)), lty = 1) 20 | abline(c(0,1),lty=3) 21 | title("Melbourne QAR Model") 22 | 23 | taus <- 1:199/200 24 | xs <- c(11,16,21,25,30,35) 25 | fit <- rq(y~ bs(x,knots=quantile(x,c(.05,.25,.5,.75,.95))), tau = taus) 26 | fhats <- predict(fit,newdata = data.frame(x = xs), type = "fhat", h = 1) 27 | par(mfrow = c(2,3)) 28 | for(i in 1:length(xs)){ 29 | fhat <- fhats[[i]] 30 | x <- environment(fhat)$x 31 | xlab <- "today's max temperature" 32 | plot(x,fhat(x),type="l",xlab=xlab,ylab="density") 33 | abline(v=xs[i], col="red") 34 | title(paste("Yesterday's Temp", format(round(xs[i])))) 35 | } 36 | par(oldpar) 37 | } 38 | -------------------------------------------------------------------------------- /demo/Panel.R: -------------------------------------------------------------------------------- 1 | library(quantreg) 2 | 3 | rq.fit.panel <- function(X,y,s,w=c(1/3,1/3,1/3),taus=c(0.25,0.5,0.75),lambda = 0){ 4 | # prototype function for fixed effect panel data fitting of QR models 5 | # the vector s is a strata indicator assumed (so far) to be a one-way layout 6 | # NB: 7 | # 1. The value of the shrinkage parameter lambda is an open research problem in 8 | # the simplest homogeneous settings it should be the ratio of the scale parameters 9 | # of the fixed effects and the idiosyncratic errors 10 | # 2. 
On return the coefficient vector has m*p + n elements where m is the number 11 | # quantiles being estimated, p is the number of columns of X, and n is the 12 | # number of distinct values of s. The first m*p coefficients are the 13 | # slope estimates, and the last n are the "fixed effects" 14 | # 3. Like all shrinkage (regularization) estimators, asymptotic inference is somewhat 15 | # problematic... so the bootstrap is the natural first resort. 16 | 17 | 18 | require(SparseM) 19 | require(quantreg) 20 | m <- length(w) 21 | if(m != length(taus)) 22 | stop("length of w and taus must match") 23 | X <- as.matrix(X) 24 | p <- ncol(X) 25 | n <- length(levels(as.factor(s))) 26 | N <- length(y) 27 | if(N != length(s) || N != nrow(X)) 28 | stop("dimensions of y,X,s must match") 29 | Z <- as.matrix.csr(model.matrix(~as.factor(s)-1)) 30 | Fidelity <- cbind(as(w,"matrix.diag.csr") %x% X, cbind(w) %x% Z) 31 | Penalty <- cbind(as.matrix.csr(0,n,m*p),lambda*as(n,"matrix.diag.csr")) 32 | D <- rbind(Fidelity,Penalty) 33 | y <- c(w %x% y,rep(0,n)) 34 | a <- c((w*(1-taus)) %x% (t(X)%*%rep(1,N)), 35 | sum(w*(1-taus)) * (t(Z) %*% rep(1,N)) + lambda * rep(1,n)) 36 | rq.fit.sfn(D,y,rhs=a) 37 | } 38 | 39 | n<-3 40 | T<-50 41 | nT<-n*T 42 | u1<-rnorm(T) 43 | u2<-rnorm(T) 44 | u3<-rnorm(T) 45 | x1<-rnorm(T,1,0.85) 46 | x2<-rnorm(T,4,1) 47 | x3<-rnorm(T,7,1) 48 | 49 | beta1<-1 50 | beta2<-0.5 51 | 52 | y1<- 0+beta1*x1+(beta2*x1)*u1 53 | y2<- 4+beta1*x2+(beta2*x2)*u2 54 | y3<- 8+beta1*x3+(beta2*x3)*u3 55 | 56 | 57 | plot(c(0,9), c(0,25), type='n', xlab=expression(x[it]), ylab=expression(y[it])) 58 | points(x1,y1,pch=15) 59 | points(x2,y2,pch=15,col="blue") 60 | points(x3,y3,pch=15,col="red") 61 | legend(1,17,paste("i = ",1:3,sep = ""),pch = 15, col = c("black","blue","red")) 62 | 63 | ya<-c(y1,y2,y3) 64 | xa<-c(x1,x2,x3) 65 | 66 | # Naive cross-section QR 67 | 68 | taus <- c(.25,0.5,.75) 69 | xx <- seq(min(xa),max(xa),0.25) 70 | f <- coef(rq(ya~xa,tau=taus)) 71 | yy <- cbind(1,xx)%*%f 72 | for(i in 1:3) 73 | lines(xx,yy[,i],col = "grey") 74 | 75 | # Fixed effect QR 76 | 77 | s <- rep(1:n,rep(T,n)) 78 | fp<-rq.fit.panel(xa,ya,s)$coef 79 | 80 | bhat <- fp[1:3] 81 | fehat <- fp[4:6] 82 | 83 | xx1 <- seq(min(x1),max(x1),0.25) 84 | for(i in 1:3){ 85 | yy1 <- fehat[1] + bhat[i] * xx1 86 | lines(xx1,yy1,col = "black") 87 | } 88 | 89 | xx2 <- seq(min(x2),max(x2),0.25) 90 | for(i in 1:3){ 91 | yy2 <- fehat[2] + bhat[i] * xx2 92 | lines(xx2,yy2,col = "blue") 93 | } 94 | 95 | xx3 <- seq(min(x3),max(x3),0.25) 96 | for(i in 1:3){ 97 | yy3 <- fehat[3] + bhat[i] * xx3 98 | lines(xx3,yy3,col = "red") 99 | } 100 | -------------------------------------------------------------------------------- /demo/Polson.R: -------------------------------------------------------------------------------- 1 | # Toy rqss example based on Figure 2 of Polson and Scott (2016, JRSSB) 2 | # NB: Solutions are piecewise linear, unlike those of Polson and Scott 3 | # whose ADMM procedure does only 30 iterations. 
4 | dgp <- function(n) { 5 | x <- 0:n/n 6 | y <- rnorm(n+1, 5 * sin(2 * pi * x), 0.5 + exp(1.5 * sin(4 * pi * x))) 7 | data.frame(x = x, y = y) 8 | } 9 | D <- dgp(1000) 10 | plot(D$x, D$y, cex = .5) 11 | taus <- 1:9/10 12 | for(i in 1:length(taus)) 13 | plot(rqss(y ~ qss(x, lambda = 1/10), tau = taus[i], data = D), 14 | rug = FALSE, add = TRUE) 15 | 16 | -------------------------------------------------------------------------------- /demo/RB-r.R: -------------------------------------------------------------------------------- 1 | # Convert H_0: R beta = r to an exclusion restriction a la Section 3.7.3 of QR book 2 | # Note the typo in the definition of yt in that source! 3 | 4 | if(require(MASS)){ # For Null 5 | require(quantreg) # For Null 6 | X <- cbind(1, matrix(rnorm(500),100,5)) 7 | y <- rnorm(100) 8 | R <- matrix(rnorm(18),3,6) 9 | r <- rep(1,3) 10 | R <- t(R) 11 | P <- MASS::Null(R) 12 | Xt <- t(lsfit(P,t(X),intercept = FALSE)$coef) 13 | Zt <- t(lsfit(R,t(X),intercept = FALSE)$coef) 14 | yt <- y - Zt %*% r 15 | f0 <- rq(yt ~ Xt - 1) 16 | f1 <- rq(yt ~ Xt + Zt - 1) 17 | T <- anova(f0,f1) 18 | } 19 | 20 | -------------------------------------------------------------------------------- /demo/arqss.R: -------------------------------------------------------------------------------- 1 | # A toy example to illustrate univariate smoothing with automatic lambda selection 2 | n <- 2000 3 | x <- 1:n/n 4 | noise <- rgamma(n,3,1) 5 | g0 <- function(x) sin(10*x) 6 | y <- g0(x)+noise 7 | arqss <- function(x,y,tau,g0 = NULL){ 8 | g <- function(lam,y,x,tau) AIC(rqss(y ~ qss(x, lambda = lam),tau = tau),k = -1) 9 | lamstar <- optimize(g, interval = c(0.01, .5), x = x, y = y, tau = tau) 10 | f <- rqss(y ~ qss(x, lambda = lamstar$min)) 11 | plot(f) 12 | lines(x,g0(x)+qgamma(tau,3,1),col = "red") 13 | text(.7,2,paste("lambda = ", round(lamstar$min,3))) 14 | } 15 | arqss(x,y,.5,g0) 16 | 17 | -------------------------------------------------------------------------------- /demo/cobar.R: -------------------------------------------------------------------------------- 1 | #### Demo for an rgl Animation of Cobar Ore fitting 2 | 3 | require(quantreg) 4 | 5 | if(requireNamespace("interp")){ 6 | ## Make sure the demo does not ``die'' when rgl is not available: 7 | do.rgl <- interactive() && require(rgl) 8 | 9 | data(CobarOre) 10 | 11 | ### Make an initial quite rough fit of the data 12 | fit <- rqss(z ~ qss(cbind(x,y), lambda = .01, ndum = 100), 13 | data = CobarOre) 14 | dummies <- fit$qss[[1]]$dummies 15 | zcol <- CobarOre$z 16 | 17 | if(do.rgl) { 18 | plot(fit, render = "rgl") 19 | cat("Now orient the plot as needed:", 20 | "Resize window,", 21 | "mouse button 1 to change viewpoint,", 22 | "mouse button 2 to zoom,", 23 | "and hit return when ready",sep="\n") 24 | scan() 25 | rgl.bg(color="8") 26 | } else { 27 | if(!interactive()) pdf(file = "cobar-demo.pdf") 28 | plot(fit) 29 | } 30 | 31 | for(i in 1:20) { 32 | fname <- paste("cobar",i,".png",sep="") 33 | lam <- 2*i/100 34 | fit <- rqss(z ~ qss(cbind(x,y), lambda = lam, dummies = dummies), 35 | data = CobarOre) 36 | if(do.rgl) { 37 | rgl.clear() 38 | plot(fit, render = "rgl", zcol = zcol) 39 | rgl.snapshot(fname) 40 | } else { 41 | plot(fit, zcol = zcol) 42 | } 43 | } 44 | } 45 | -------------------------------------------------------------------------------- /demo/combos.R: -------------------------------------------------------------------------------- 1 | # Demo of combos functions 2 | H <- combos(20,3) 3 | if(!require("rgl",quietly=TRUE)){ 4 | 
warning("The package rgl is needed for plotting") 5 | } else{ 6 | if(interactive()){ 7 | plot3d(t(H)) 8 | lines3d(t(H),col=rep(topo.colors(57),20)) 9 | } 10 | } 11 | -------------------------------------------------------------------------------- /demo/cpoint.R: -------------------------------------------------------------------------------- 1 | ## Demo for a simple change point (broken-stick) problem 2 | 3 | x <- runif(200,0,10) 4 | u <- rnorm(200)/5 5 | y <- 1 + x - .5 * (x-3) * (x > 3) + u 6 | plot(y ~ x, cex= .5, col = "grey") 7 | 8 | 9 | z <- rqss(y ~ qss(x,lambda = 10), tau= .50) 10 | plot(z, col = "dark blue") 11 | 12 | #Now plot the fitted points and jump points in derivative 13 | 14 | eps <- 0.00001 # Zero Tolerance 15 | Nz <- abs(z$resid[1:200]) < eps 16 | Nj <- abs(z$resid[201:398]) > eps 17 | xx <- z$qss[[1]]$xyz[,1] 18 | yy <- z$coef[1] + z$qss[[1]]$xyz[,2] 19 | xj <- xx[3:200] 20 | yj <- yy[3:200] 21 | points(xx[Nz],yy[Nz],col="green") 22 | points(xj[Nj],yj[Nj],col="red") 23 | print(paste("Number of zero residuals: ",sum(Nz))) 24 | print(paste("Number of jumps in slope: ",sum(Nj))) 25 | legend(6,3,c("Derivative Jumps", "Zero residuals"),pch = "o", col=c("red","green")) 26 | -------------------------------------------------------------------------------- /demo/crquis.R: -------------------------------------------------------------------------------- 1 | # UIS example for the crq Peng-Hwang method. 2 | 3 | #estimate the Peng and Huang model using log(TIME) AFT specification 4 | if(requireNamespace("survival", quietly = TRUE)) { 5 | data(uis) 6 | Surv <- survival::Surv 7 | fit <- crq(Surv(log(TIME), CENSOR) ~ ND1 + ND2 + IV3 + 8 | TREAT + FRAC + RACE + AGE * SITE, method = "Portnoy", data = uis) 9 | Sfit <- summary(fit,1:19/20) 10 | PHit <- survival::coxph(Surv(TIME, CENSOR) ~ ND1 + ND2 + IV3 + 11 | TREAT + FRAC + RACE + AGE * SITE, data = uis) 12 | plot(Sfit, CoxPHit = PHit) 13 | formula <- ~ ND1 + ND2 + IV3 + TREAT + FRAC + RACE + AGE * SITE -1 14 | X <- data.frame(model.matrix(formula,data=uis)) 15 | newd <- as.list(apply(X,2,median)) 16 | pred <- predict(fit, newdata=newd, stepfun = TRUE) 17 | plot(pred,do.points=FALSE,xlab = expression(tau), ylab = expression(Q(tau)), 18 | lwd = 1.5, main= "Quantiles at Median Covariate Values") 19 | plot(rearrange(pred),add=TRUE,do.points=FALSE,col.vert ="red", col.hor="red") 20 | legend(.10,10,c("Raw","Rearranged"),lty = rep(1,2),col=c("black","red")) 21 | } 22 | -------------------------------------------------------------------------------- /demo/engel1.R: -------------------------------------------------------------------------------- 1 | ## Demo for a Plot of Engel curve in sample space 2 | 3 | data(engel) 4 | 5 | plot(foodexp ~ income, data = engel, cex= .5, col = "blue", 6 | xlab = "Household Income", ylab = "Food Expenditure") 7 | 8 | z <- rq(foodexp ~ income, tau= .50, data = engel)# "median line": L1 - regression 9 | abline(z, col = "dark blue") 10 | abline(lm(foodexp ~ income, data = engel), lty=2, col="red") #the dreaded ols line 11 | 12 | taus <- c(.05,.1,.25,.75,.90,.95) 13 | nt <- length(taus) 14 | 15 | for( i in 1:length(taus)) { 16 | abline(rq(foodexp~income, tau=taus[i], data = engel), col="gray") 17 | } 18 | 19 | legend("bottomright", 20 | c("L1 (tau = .50)", "OLS", paste("tau= ", formatC(rev(taus)))), 21 | col = c("dark blue", "red", rep("gray", nt)), 22 | lty = c(1,2, rep(1, nt)), 23 | inset = 0.03) 24 | -------------------------------------------------------------------------------- /demo/engel2.R: 
-------------------------------------------------------------------------------- 1 | 2 | #### Demo for a plot of two quantile functions of food expenditure 3 | 4 | ###-- short version of the rq *VIGNETTE* --- use that! 5 | 6 | data(engel) 7 | ## do *NOT* attach() 8 | 9 | ## Poor is defined as at the .1 quantile of the sample distn 10 | ## Rich is defined as at the .9 quantile of the sample distn 11 | x.poor <- quantile(engel[,"income"], .10) 12 | x.rich <- quantile(engel[,"income"], .90) 13 | 14 | z <- rq(foodexp ~ income, tau= -1, data = engel) 15 | 16 | ps <- z$sol["tau",] 17 | coefs <- z$sol[4:5,] 18 | qs.poor <- c(c(1,x.poor) %*% coefs) 19 | qs.rich <- c(c(1,x.rich) %*% coefs) 20 | ## now plot the two quantile functions to compare 21 | par(mfrow = c(1,2)) 22 | plot(c(ps,ps),c(qs.poor,qs.rich),type="n",xlab=expression(tau),ylab="quantile") 23 | plot(stepfun(ps,c(qs.poor[1],qs.poor)),do.points=FALSE,add=TRUE) 24 | plot(stepfun(ps,c(qs.poor[1],qs.rich)),do.points=FALSE,add=TRUE, 25 | col.hor = "gray", col.vert = "gray") 26 | ## now plot associated conditional density estimates 27 | ## weights from ps (process) 28 | ps.wts <- (c(0,diff(ps)) + c(diff(ps),0)) / 2 29 | ap <- akj(qs.poor, z=qs.poor, p = ps.wts) 30 | ar <- akj(qs.rich, z=qs.rich, p = ps.wts) 31 | plot(c(qs.poor,qs.rich), c(ap$dens,ar$dens), type="n", 32 | xlab= "Food Expenditure", ylab= "Density") 33 | lines(qs.rich, ar$dens, col="gray") 34 | lines(qs.poor, ap$dens, col="black") 35 | legend("topright", c("poor","rich"), lty = c(1,1), col=c("black","gray")) 36 | 37 | 38 | -------------------------------------------------------------------------------- /demo/hinged.R: -------------------------------------------------------------------------------- 1 | # A Demo of simple bivariate rqss fitting of a hinge function 2 | 3 | require(quantreg) 4 | if(requireNamespace("interp")){ 5 | ## Make sure the demo does not ``die'' when rgl is not available: 6 | do.rgl <- interactive() && require(rgl) 7 | 8 | #generate the data 9 | 10 | n <- 1000 11 | x <- runif(n) 12 | y <- runif(n) 13 | z <- -abs(x-y) 14 | 15 | ### Make an initial quite rough fit of the data 16 | fit <- rqss(z ~ qss(cbind(x,y),lambda = .005)) 17 | print(summary(fit)$penalty) 18 | 19 | if(do.rgl) { 20 | plot(fit, render = "rgl") 21 | cat("Now orient the plot as desired:", 22 | "Resize window,", 23 | "mouse button 1 to change viewpoint,", 24 | "mouse button 2 to zoom,", 25 | "and hit return when ready",sep="\n") 26 | scan() 27 | rgl.bg(color="8") 28 | cat("To try another value of lambda:", 29 | "Type a positive number", 30 | "To quit hit return", sep="\n") 31 | repeat{ 32 | cat("lambda: ") 33 | lam <- scan(what = double(1)) 34 | if(length(lam)>0){ 35 | fit <- rqss(z ~ qss(cbind(x,y),lambda = lam)) 36 | rgl.clear() 37 | plot(fit, render = "rgl") 38 | } else 39 | break 40 | } 41 | } else { 42 | if(!interactive()) pdf(file = "hinge-demo.pdf") 43 | plot(fit) 44 | } 45 | } 46 | -------------------------------------------------------------------------------- /demo/panelfig.R: -------------------------------------------------------------------------------- 1 | library(quantreg) 2 | 3 | rq.fit.panel <- function(X,y,s,w=c(1/3,1/3,1/3),taus=c(0.25,0.5,0.75),lambda = 0){ 4 | # prototype function for fixed effect panel data fitting of QR models 5 | # the vector s is a strata indicator assumed (so far) to be a one-way layout 6 | # NB: 7 | # 1. 
The value of the shrinkage parameter lambda is an open research problem in 8 | # the simplest homogeneous settings it should be the ratio of the scale parameters 9 | # of the fixed effects and the idiosyncratic errors 10 | # 2. On return the coefficient vector has m*p + n elements where m is the number 11 | # quantiles being estimated, p is the number of columns of X, and n is the 12 | # number of distinct values of s. The first m*p coefficients are the 13 | # slope estimates, and the last n are the "fixed effects" 14 | # 3. Like all shrinkage (regularization) estimators, asymptotic inference is somewhat 15 | # problematic... so the bootstrap is the natural first resort. 16 | 17 | 18 | require(SparseM) 19 | require(quantreg) 20 | m <- length(w) 21 | if(m != length(taus)) 22 | stop("length of w and taus must match") 23 | X <- as.matrix(X) 24 | p <- ncol(X) 25 | n <- length(levels(as.factor(s))) 26 | N <- length(y) 27 | if(N != length(s) || N != nrow(X)) 28 | stop("dimensions of y,X,s must match") 29 | Z <- as.matrix.csr(model.matrix(~as.factor(s)-1)) 30 | Fidelity <- cbind(as(w,"matrix.diag.csr") %x% X, cbind(w) %x% Z) 31 | Penalty <- cbind(as.matrix.csr(0,n,m*p),lambda*as(n,"matrix.diag.csr")) 32 | D <- rbind(Fidelity,Penalty) 33 | y <- c(w %x% y,rep(0,n)) 34 | a <- c((w*(1-taus)) %x% (t(X)%*%rep(1,N)), 35 | sum(w*(1-taus)) * (t(Z) %*% rep(1,N)) + lambda * rep(1,n)) 36 | rq.fit.sfn(D,y,rhs=a) 37 | } 38 | 39 | n<-3 40 | T<-50 41 | nT<-n*T 42 | u1<-rnorm(T) 43 | u2<-rnorm(T) 44 | u3<-rnorm(T) 45 | x1<-rnorm(T,1,0.85) 46 | x2<-rnorm(T,4,1) 47 | x3<-rnorm(T,7,1) 48 | 49 | beta1<-1 50 | beta2<-0.5 51 | 52 | y1<- 0+beta1*x1+(beta2*x1)*u1 53 | y2<- 4+beta1*x2+(beta2*x2)*u2 54 | y3<- 8+beta1*x3+(beta2*x3)*u3 55 | 56 | 57 | plot(c(0,9), c(0,25), type='n', xlab=expression(x[it]), ylab=expression(y[it])) 58 | points(x1,y1,pch=15) 59 | points(x2,y2,pch=15,col="blue") 60 | points(x3,y3,pch=15,col="red") 61 | legend(1,17,paste("i = ",1:3,sep = ""),pch = 15, col = c("black","blue","red")) 62 | 63 | ya<-c(y1,y2,y3) 64 | xa<-c(x1,x2,x3) 65 | 66 | # Naive cross-section QR 67 | 68 | taus <- c(.25,0.5,.75) 69 | xx <- seq(min(xa),max(xa),0.25) 70 | f <- coef(rq(ya~xa,tau=taus)) 71 | yy <- cbind(1,xx)%*%f 72 | for(i in 1:3) 73 | lines(xx,yy[,i],col = "grey") 74 | 75 | # Fixed effect QR 76 | 77 | s <- rep(1:n,rep(T,n)) 78 | fp<-rq.fit.panel(xa,ya,s)$coef 79 | 80 | bhat <- fp[1:3] 81 | fehat <- fp[4:6] 82 | 83 | xx1 <- seq(min(x1),max(x1),0.25) 84 | for(i in 1:3){ 85 | yy1 <- fehat[1] + bhat[i] * xx1 86 | lines(xx1,yy1,col = "black") 87 | } 88 | 89 | xx2 <- seq(min(x2),max(x2),0.25) 90 | for(i in 1:3){ 91 | yy2 <- fehat[2] + bhat[i] * xx2 92 | lines(xx2,yy2,col = "blue") 93 | } 94 | 95 | xx3 <- seq(min(x3),max(x3),0.25) 96 | for(i in 1:3){ 97 | yy3 <- fehat[3] + bhat[i] * xx3 98 | lines(xx3,yy3,col = "red") 99 | } 100 | -------------------------------------------------------------------------------- /demo/rqsslasso.R: -------------------------------------------------------------------------------- 1 | #Toy rqss example with lasso shrinkage of linear covariate effects 2 | 3 | n <- 100 4 | p <- 9 5 | q <- 3 6 | beta <- c( rep(1,q), rep(0,p-q)) 7 | w <- matrix(rnorm(n*p),n,p) 8 | x <- runif(n,0,10) 9 | z <- runif(n,0,10) 10 | y <- w %*% beta + sin(x) + (z^2)/50 + rnorm(n)/5 11 | d <- data.frame(w,x,y,z) 12 | f <- rqss(y ~ w + qss(x,lambda = 3) + qss(z,lambda = 2), 13 | method = "lasso", lambda = 3, data = d) 14 | plot(f, bands = "both", bcol = c("lightsteelblue", "lightsteelblue4")) 15 | 
-------------------------------------------------------------------------------- /demo/stack.R: -------------------------------------------------------------------------------- 1 | # log likelihood for stackloss fit 2 | require(quantreg) 3 | data(stackloss) 4 | logLik.rq.process <- function(fit){ 5 | y <- model.response(model.frame(fit)) 6 | fhat <- predict(fit, type = "fhat") 7 | fy <- mapply(function(f,y) f(y), fhat, y) 8 | sum(log(fy)) 9 | } 10 | # First try with full process estimates 11 | f0 <- rq(stack.loss ~ 1, tau=-1) 12 | f1 <- rq(stack.loss ~ stack.x, tau=-1) 13 | l0 <- logLik(f0) 14 | l1 <- logLik(f1) 15 | # Now try with discrete process estimates 16 | f0 <- rq(stack.loss ~ 1, tau=1:19/20) 17 | f1 <- rq(stack.loss ~ stack.x, tau=1:19/20) 18 | l0 <- logLik(f0) 19 | l1 <- logLik(f1) 20 | 21 | -------------------------------------------------------------------------------- /demo/subset.R: -------------------------------------------------------------------------------- 1 | # Test case for the new subset argument 2 | require(quantreg) 3 | n <- 200 4 | x <- sort(rchisq(n,4)) 5 | z <- rnorm(n) 6 | s <- sample(1:n, n/2) 7 | y <- log(x) + rnorm(n)/5 8 | D = data.frame(y = y, x = x, z = z, s = (1:n) %in% s) 9 | plot(x, y, cex = .5, col = "grey") 10 | points(x[s], y[s],col = "pink", cex = .5) 11 | lam = 0.2 12 | f0 <- rqss(y ~ qss(x,lambda = lam) + z, subset = s) 13 | f1 <- rqss(y ~ qss(x, lambda = lam) + z, subset = s, data = D) 14 | plot(f0, add = TRUE, col = 2, lwd = 3) 15 | plot(f1, add = TRUE, col = 4, lwd = 3) 16 | -------------------------------------------------------------------------------- /inst/ChangeLog: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cran/quantreg/43abb16645d5e78d1377896ebd19d760efd2ace9/inst/ChangeLog -------------------------------------------------------------------------------- /inst/TODO: -------------------------------------------------------------------------------- 1 | # To Do List 2 | 3 | 1. Continue forensic investigation of KhmaladzeTest. 4 | -------------------------------------------------------------------------------- /inst/doc/crq.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cran/quantreg/43abb16645d5e78d1377896ebd19d760efd2ace9/inst/doc/crq.pdf -------------------------------------------------------------------------------- /inst/doc/crq.pdf.asis: -------------------------------------------------------------------------------- 1 | %\VignetteIndexEntry{quantreg: crq} 2 | %\VignetteEngine{R.rsp::asis} 3 | 4 | -------------------------------------------------------------------------------- /inst/doc/rq.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/cran/quantreg/43abb16645d5e78d1377896ebd19d760efd2ace9/inst/doc/rq.pdf -------------------------------------------------------------------------------- /inst/doc/rq.pdf.asis: -------------------------------------------------------------------------------- 1 | %\VignetteIndexEntry{quantreg: rq} 2 | %\VignetteEngine{R.rsp::asis} 3 | 4 | -------------------------------------------------------------------------------- /man/Bosco.Rd: -------------------------------------------------------------------------------- 1 | \name{Bosco} 2 | \alias{Bosco} 3 | \title{Boscovich Data} 4 | \description{ 5 | Boscovich data used to estimate the ellipticity of the earth. 
6 | There are five measurements of the arc length of one degree of 7 | latitude taken at 5 different latitudes. See Koenker (2005) for 8 | further details and references. 9 | } 10 | \usage{data(Bosco)} 11 | \format{A data frame containing 5 observations on 2 variables 12 | \describe{ 13 | \item{x}{sine squared of latitude measured in degrees} 14 | \item{y}{arc length of one degree of latitude measured in toise - 56,700, 15 | one toise approximately equals 1.95 meters. } 16 | } 17 | } 18 | \references{ 19 | Koenker, R. (2005), "Quantile Regression", Cambridge. 20 | } 21 | \examples{ 22 | data(Bosco) 23 | plot(0:10/10,0:10*100,xlab="sin^2(latitude)", 24 | ylab="arc-length of 1 degree of latitude",type="n") 25 | points(Bosco) 26 | text(Bosco, pos = 3, rownames(Bosco)) 27 | z <- rq(y ~ x, tau = -1, data = Bosco) 28 | title("Boscovitch Ellipticity of the Earth Example") 29 | xb <- c(.85,.9,.6,.6) 30 | yb <- c(400,600,450,600) 31 | for(i in 1:4){ 32 | abline(c(z$sol[4:5,i])) 33 | interval <- paste("t=(",format(round(z$sol[1,i],2)),",", 34 | format(round(z$sol[1,i+1],2)),")",delim="") 35 | text(xb[i],yb[i],interval) 36 | } 37 | } 38 | \keyword{datasets} 39 | -------------------------------------------------------------------------------- /man/CobarOre.Rd: -------------------------------------------------------------------------------- 1 | \name{CobarOre} 2 | \alias{CobarOre} 3 | \docType{data} 4 | \title{ Cobar Ore data } 5 | \description{ 6 | Cobar Ore data from Green and Silverman (1994). 7 | The data consists of measurements on the "true width" 8 | of an ore-bearing rock layer from a mine in Cobar, Australia. 9 | } 10 | \usage{data(CobarOre)} 11 | \format{ 12 | A data frame with 38 observations on the following 3 variables. 13 | \describe{ 14 | \item{x}{x-coordinate of location of mine site} 15 | \item{y}{y-coordinate of location of mine site} 16 | \item{z}{ore thickness} 17 | } 18 | } 19 | \source{ 20 | Green, P.J. and B.W. Silverman (1994) Nonparametric Regression Generalized Linear Models: 21 | A roughness penalty approach, Chapman Hall. 22 | } 23 | \examples{ 24 | data(CobarOre) 25 | plot(CobarOre) 26 | } 27 | \keyword{datasets} 28 | -------------------------------------------------------------------------------- /man/FAQ.Rd: -------------------------------------------------------------------------------- 1 | \name{FAQ} 2 | \alias{FAQ} 3 | \alias{ChangeLog} 4 | \title{FAQ and ChangeLog of a package} 5 | \description{ Show the FAQ or ChangeLog of a specified package } 6 | \usage{ 7 | FAQ(pkg = "quantreg") 8 | ChangeLog(pkg = "quantreg") 9 | } 10 | \arguments{ 11 | \item{pkg}{ Package Name } 12 | } 13 | \details{ 14 | Assumes that the FAQ and/or ChangeLog files exist in the proper "inst" directory. 15 | } 16 | \value{ 17 | Has only the side effect of showing the files on the screen. 18 | } 19 | \keyword{ documentation } 20 | -------------------------------------------------------------------------------- /man/KhmaladzeTest.Rd: -------------------------------------------------------------------------------- 1 | \name{KhmaladzeTest} 2 | \alias{KhmaladzeTest} 3 | \title{ Tests of Location and Location Scale Shift Hypotheses for Linear Models} 4 | \description{Tests of the hypothesis that a linear model specification 5 | is of the location shift or location-scale shift form. 
The tests are based 6 | on the Doob-Meyer Martingale transformation approach proposed by Khmaladze(1981) 7 | for general goodness of fit problems as adapted to quantile regression by 8 | Koenker and Xiao (2002).} 9 | 10 | \usage{ 11 | KhmaladzeTest(formula, data = NULL, taus = 1:99/100, nullH = "location" , 12 | trim = c(0.05, 0.95), h = 1, ...) 13 | } 14 | \arguments{ 15 | \item{formula}{a formula specifying the model to fit by \code{\link{rqProcess}}} 16 | \item{data}{a data frame within which to interpret the formula} 17 | \item{taus}{An equally spaced grid of points on which to evaluate the 18 | quantile regression process, if any taus fall outside (0,1) then the full 19 | process is computed.} 20 | \item{nullH}{a character vector indicating whether the "location" shift hypothesis 21 | (default) or the "location-scale" shift hypothesis should be tested. } 22 | \item{trim}{ a vector indicating the lower and upper bound of the quantiles to 23 | included in the computation of the test statistics (only, not 24 | estimates). } 25 | \item{h}{an initial bandwidth for the call to \code{\link{akj}}.} 26 | \item{...}{other arguments to be passed to \code{\link{summary.rq}.}} 27 | } 28 | \value{ 29 | an object of class KhmaladzeTest is returned containing: 30 | 31 | \item{nullH}{ The form of the null hypothesis.} 32 | 33 | \item{Tn}{ 34 | Joint test statistic of the hypothesis that all the slope 35 | parameters of the model satisfy the hypothesis. 36 | } 37 | \item{THn}{ 38 | Vector of test statistics testing whether individual slope 39 | parameters satisfy the null hypothesis. 40 | } 41 | } 42 | 43 | \examples{ 44 | data(barro) 45 | T = KhmaladzeTest( y.net ~ lgdp2 + fse2 + gedy2 + Iy2 + gcony2, 46 | data = barro, taus = seq(.05,.95,by = .01)) 47 | plot(T) 48 | } 49 | \keyword{htest} 50 | \references{ 51 | Khmaladze, E. (1981) ``Martingale Approach in the Theory of 52 | Goodness-of-fit Tests,'' \emph{Theory of Prob. and its Apps}, 26, 53 | 240--257. 54 | 55 | Koenker, Roger and Zhijie Xiao (2002), ``Inference on the Quantile 56 | Regression Process'', \emph{Econometrica}, 81, 1583--1612. 57 | \url{http://www.econ.uiuc.edu/~roger/research/inference/inference.html} 58 | } 59 | -------------------------------------------------------------------------------- /man/LassoLambdaHat.Rd: -------------------------------------------------------------------------------- 1 | \name{LassoLambdaHat} 2 | \alias{LassoLambdaHat} 3 | \title{Lambda selection for QR lasso problems} 4 | \description{ 5 | Default procedure for selection of lambda in lasso constrained 6 | quantile regression as proposed by Belloni and Chernozhukov (2011) 7 | } 8 | \usage{ 9 | LassoLambdaHat(X, R = 1000, tau = 0.5, C = 1, alpha = 0.95) 10 | } 11 | \arguments{ 12 | \item{X}{Design matrix} 13 | \item{R}{Number of replications} 14 | \item{tau}{quantile of interest} 15 | \item{C}{Cosmological constant} 16 | \item{alpha}{Interval threshold} 17 | } 18 | \value{ 19 | vector of default lambda values of length p, the column dimension of X. 20 | } 21 | \details{ 22 | As proposed by Belloni and Chernozhukov, a reasonable default lambda 23 | would be the upper quantile of the simulated values. The procedure is based 24 | on idea that a simulated gradient can be used as a pivotal statistic. 25 | Elements of the default vector are standardized by the respective standard deviations 26 | of the covariates. Note that the sqrt(tau(1-tau)) factor cancels in their (2.4) (2.6). 27 | In this formulation even the intercept is penalized. 
If the lower limit of the 28 | simulated interval is desired one can specify \code{alpha = 0.05}. 29 | 30 | } 31 | \references{ 32 | Belloni, A. and V. Chernozhukov. (2011) l1-penalized quantile regression 33 | in high-dimensional sparse models. \emph{Annals of Statistics}, 39 82 - 130. 34 | } 35 | \examples{ 36 | n <- 200 37 | p <- 10 38 | x <- matrix(rnorm(n*p), n, p) 39 | b <- c(1,1, rep(0, p-2)) 40 | y <- x \%*\% b + rnorm(n) 41 | f <- rq(y ~ x, tau = 0.8, method = "lasso") 42 | # See f$lambda to see the default lambda selection 43 | } 44 | -------------------------------------------------------------------------------- /man/Mammals.Rd: -------------------------------------------------------------------------------- 1 | \name{Mammals} 2 | \alias{Mammals} 3 | \docType{data} 4 | \title{Garland(1983) Data on Running Speed of Mammals} 5 | \description{ 6 | Observations on the maximal running speed of mammal species 7 | and their body mass. 8 | } 9 | \usage{data(Mammals)} 10 | \format{ 11 | A data frame with 107 observations on the following 4 variables. 12 | \describe{ 13 | \item{weight}{Body mass in Kg for "typical adult sizes"} 14 | \item{speed}{Maximal running speed (fastest sprint velocity on record)} 15 | \item{hoppers}{logical variable indicating animals that ambulate 16 | by hopping, e.g. kangaroos} 17 | \item{specials}{logical variable indicating special animals with 18 | "lifestyles in which speed does not figure as an important 19 | factor": Hippopotamus, raccoon (Procyon), badger (Meles), 20 | coati (Nasua), skunk (Mephitis), man (Homo), porcupine 21 | (Erithizon), oppossum (didelphis), and sloth (Bradypus) 22 | } 23 | } 24 | } 25 | \examples{ 26 | data(Mammals) 27 | attach(Mammals) 28 | x <- log(weight) 29 | y <- log(speed) 30 | plot(x,y, xlab="Weight in log(Kg)", ylab="Speed in log(Km/hour)",type="n") 31 | points(x[hoppers],y[hoppers],pch = "h", col="red") 32 | points(x[specials],y[specials],pch = "s", col="blue") 33 | others <- (!hoppers & !specials) 34 | points(x[others],y[others], col="black",cex = .75) 35 | fit <- rqss(y ~ qss(x, lambda = 1),tau = .9) 36 | plot(fit) 37 | } 38 | 39 | \details{ 40 | Used by Chappell (1989) and Koenker, Ng and Portnoy (1994) to 41 | illustrate the fitting of piecewise linear curves. 42 | } 43 | \source{ 44 | Garland, T. (1983) The relation between maximal running speed and body 45 | mass in terrestrial mammals, \emph{J. Zoology}, 199, 1557-1570. 46 | } 47 | \references{ 48 | Koenker, R., P. Ng and S. Portnoy, (1994) Quantile Smoothing Splines'' 49 | \emph{Biometrika}, 81, 673-680. 50 | 51 | Chappell, R. (1989) Fitting Bent Lines to Data, with Applications ot 52 | Allometry, \emph{J. Theo. Biology}, 138, 235-256. 53 | 54 | } 55 | \seealso{\code{\link{rqss}}} 56 | \keyword{datasets} 57 | -------------------------------------------------------------------------------- /man/MelTemp.Rd: -------------------------------------------------------------------------------- 1 | \name{MelTemp} 2 | \alias{MelTemp} 3 | \docType{data} 4 | \title{Daily maximum temperatures in Melbourne, Australia} 5 | \description{ 6 | Daily maximum temperatures in Melbourne, Australia, from 7 | 1981-1990. Leap days have been omitted. 8 | } 9 | \usage{data(MelTemp)} 10 | \format{Time series of frequency 365} 11 | 12 | \source{ 13 | Hyndman, R.J., Bashtannyk, D.M. and Grunwald, G.K. (1996) 14 | "Estimating and visualizing conditional densities". _Journal of 15 | Computational and Graphical Statistics_, *5*, 315-336. 
16 | } 17 | \examples{ 18 | data(MelTemp) 19 | demo(Mel) 20 | } 21 | \keyword{datasets} 22 | -------------------------------------------------------------------------------- /man/Munge.Rd: -------------------------------------------------------------------------------- 1 | \name{Munge} 2 | \alias{Munge} 3 | \title{ 4 | Munge rqss formula 5 | } 6 | \description{ 7 | function to recursively substitute arguments into rqss formula 8 | } 9 | \usage{ 10 | Munge(formula, ...) 11 | } 12 | \arguments{ 13 | \item{formula}{ 14 | A rqss formula 15 | } 16 | \item{\dots}{ 17 | Arguments to be substituted into formula 18 | } 19 | } 20 | \details{ 21 | Intended (originally) for use with \code{demo(MCV)}. 22 | Based on an R-help suggestion of Gabor Grothendieck. 23 | } 24 | \value{ 25 | A new formula after substitution 26 | } 27 | \seealso{ 28 | \code{demo(MCV)} 29 | } 30 | \examples{ 31 | lams <- c(1.3, 3.3) 32 | f <- y ~ qss(x, lambda = lams[1]) + qss(z, lambda = lams[2]) + s 33 | ff <- Munge(f, lams = lams) 34 | } 35 | \keyword{~manip} 36 | -------------------------------------------------------------------------------- /man/ParetoTest.Rd: -------------------------------------------------------------------------------- 1 | \name{ParetoTest} 2 | \alias{ParetoTest} 3 | \alias{Hill} 4 | \alias{Hill.fit} 5 | \alias{print.Hill} 6 | \alias{summary.Hill} 7 | \alias{print.summary.Hill} 8 | \alias{Pickands.fit} 9 | \alias{Pickands} 10 | \alias{print.Pickands} 11 | \alias{summary.Pickands} 12 | \alias{print.summary.Pickands} 13 | \alias{Pickands.fit} 14 | \title{Estimation and Inference on the Pareto Tail Exponent for Linear Models} 15 | \description{Estimation and inference about the tail behavior of the response in 16 | linear models are based on the adaptation of the univariate Hill (1975) 17 | and Pickands (1975) estimators for quantile regression by Chernozhukov, 18 | Fernandez-Val and Kaji (2018).} 19 | 20 | \usage{ 21 | ParetoTest(formula, tau = 0.1, data = NULL, flavor = "Hill", m = 2, cicov = .9, ...) 22 | } 23 | \arguments{ 24 | \item{formula}{a formula specifying the model to fit by \code{\link{rq}}} 25 | \item{tau}{A threshold on which to base the estimation} 26 | \item{data}{a data frame within which to interpret the formula} 27 | \item{flavor}{Currently limited to either "Hill" or "Pickands"} 28 | \item{m}{a tuning parameter for the Pickands method .} 29 | \item{cicov}{Desired coverage probability of confidence interval.} 30 | \item{...}{other arguments to be passed to \code{\link{summary.rq}}. 31 | by default the summary method is the usual xy bootstrap, with 32 | \code{B = 200} replications.} 33 | } 34 | \value{ 35 | an object of class ParetoTest is returned containing: 36 | 37 | \item{z}{ A named vector with components: the estimate, a bias 38 | corrected estimate, a lower bound of the confidence interval, 39 | an upper bound of the confidence interval, and a Bootstrap 40 | Standard Error estimate.} 41 | 42 | \item{tau}{ 43 | The tau threshold used to compute the estimate 44 | } 45 | } 46 | 47 | \examples{ 48 | n = 500 49 | x = rnorm(n) 50 | y = x + rt(n,2) 51 | Z = ParetoTest(y ~ x, .9, flavor = "Pickands") 52 | } 53 | 54 | \keyword{htest} 55 | \references{ 56 | Chernozhukov, Victor, Ivan Fernandez-Val, and Tetsuya Kaji, (2018) 57 | Extremal Quantile Regression, in Handbook of Quantile Regression, 58 | Eds. Roger Koenker, Victor Chernozhukov, Xuming He, Limin Peng, 59 | CRC Press. 60 | 61 | Hill, B. M. (1975). A simple general approach to inference about the tail of a distribution. 
62 | The Annals of Statistics 3(5), 1163-1174. 63 | 64 | Pickands, J. (1975). Statistical inference using extreme order statistics. 65 | The Annals of Statistics 3(1), 119-131. 66 | } 67 | 68 | -------------------------------------------------------------------------------- /man/QTECox.Rd: -------------------------------------------------------------------------------- 1 | \name{QTECox} 2 | \alias{QTECox} 3 | \title{Function to obtain QTE from a Cox model} 4 | \description{Computes quantile treatment effects comparable to those of 5 | crq model from a coxph object.} 6 | \usage{ 7 | QTECox(x, smooth = TRUE) 8 | } 9 | \arguments{ 10 | \item{x}{An object of class coxph produced by \code{coxph}.} 11 | \item{smooth}{Logical indicator if TRUE (default) 12 | then Cox survival function is smoothed.} 13 | } 14 | \details{ Estimates of the Cox QTE, \eqn{\frac{dQ(t|x)}{dx_{j}}}{(d/dx_j) Q( t | x ) } 15 | at \eqn{x=\bar{x}}{x=xbar}, can be expressed as a function of t as follows: 16 | 17 | \deqn{\frac{dQ(t|x)}{dx_{j}}=\frac{dt}{dx_{j}}\frac{dQ(t|x)}{dt}}{ 18 | (d/dx_j) Q( t | x ) = (d/dx_j)t * (d/dt) Q(t | x)} 19 | 20 | The Cox survival function, \eqn{S(y|x)=\exp \{-H_{0}(y)\exp (b^{\prime 21 | }x)\}}{S( y | x ) = exp{ - H_o(y) exp(b'x) }} 22 | 23 | \deqn{\frac{dS(y|x)}{dx_{j}}=S(y|x)log \{S(y|x)\}b_{j}}{(d/dx_j) 24 | S( y | x ) = S( y | x ) log(S( y | x )) b_j} 25 | 26 | 27 | where \eqn{\frac{dQ(t|x)}{dx_{j}}}{ (d/dt) Q(t | x) } 28 | can be estimated by \eqn{\frac{\Delta (t)}{\Delta (S)} 29 | (1-t)}{- (diff(t)/diff(S) (1-t)} 30 | where $S$ and $t$ denote the \code{surv} and \code{time} components 31 | of the \code{survfit} object. 32 | Note that since \eqn{t=1-S(y|x)}{t = 1 - S( y | x )}, the above is the 33 | value corresponding to the argument $(1-t)$; and furthermore 34 | 35 | \deqn{\frac{dt}{dx_{j}}=-\frac{dS(y|x)}{dx_{j}}=-(1-t) log (1-t)b_{j}}{ 36 | (d/dx_j)t = - (d/dx_j) S( y | x ) = - (1-t) log(1-t) b_j} 37 | 38 | Thus the QTE at the mean of x's is: 39 | 40 | \deqn{(1-S)= \frac{\Delta (t)}{\Delta (S)}S ~log 41 | (S)b_{j}}{(1 - S) = (diff(t)/diff(S) S log(S) b_j} 42 | 43 | 44 | Since \eqn{\Delta S}{diff(S)} is negative and $log (S)$ is also negative 45 | this has the same sign as \eqn{b_{j}} 46 | The crq model fits the usual AFT form Surv(log(Time),Status), then 47 | 48 | \deqn{\frac{d log (Q(t|x))}{dx_{j}}=\frac{dQ(t|x)}{dx_{j}}/ 49 | Q(t|x)}{(d/dx_j) log(Q( t | x )) = (d/dx_j) Q( t | x ) / Q( t | x )} 50 | 51 | This is the matrix form returned. 52 | } 53 | 54 | \value{ 55 | \item{taus }{points of evaluation of the QTE.} 56 | \item{QTE}{matrix of QTEs, the ith column contains the QTE for the 57 | ith covariate effect. Note that there is no intercept effect. 58 | see \code{plot.summary.crqs} for usage.} 59 | } 60 | 61 | \references{Koenker, R. and Geling, O. (2001). Reappraising Medfly 62 | longevity: a quantile regression survival analysis, J. Amer. Statist. 63 | Assoc., 96, 458-468} 64 | 65 | \author{Roger Koenker Stephen Portnoy & Tereza Neocleous} 66 | \seealso{\code{\link{crq}}} 67 | 68 | \keyword{survival} 69 | -------------------------------------------------------------------------------- /man/akj.Rd: -------------------------------------------------------------------------------- 1 | \name{akj} 2 | \alias{akj} 3 | \title{Density Estimation using Adaptive Kernel method} 4 | \description{ 5 | Univariate \emph{adaptive} kernel density estimation a la Silverman. 6 | As used by Portnoy and Koenker (1989). 
7 | } 8 | \usage{ 9 | akj(x, z =, p =, h = -1, alpha = 0.5, kappa = 0.9, iker1 = 0) 10 | } 11 | \arguments{ 12 | \item{x}{points used for centers of kernel assumed to be sorted.} 13 | \item{z}{points at which density is calculated; defaults to an 14 | equispaced sequence covering the range of x.} 15 | \item{p}{vector of probabilities associated with \code{x}s; defaults 16 | to 1/n for each x.} 17 | \item{h}{initial window size (overall); defaults to Silverman's normal 18 | reference.} 19 | \item{alpha}{a sensitivity parameter that determines the sensitivity of 20 | the local bandwidth to variations in the pilot density; defaults to .5.} 21 | \item{kappa}{constant multiplier for initial (default) window width} 22 | \item{iker1}{integer kernel indicator: 0 for normal kernel (default) 23 | while 1 for Cauchy kernel (\code{\link{dcauchy}}).} 24 | } 25 | \value{ 26 | a \code{\link{list}} structure is with components 27 | \item{dens}{the vector of estimated density values \eqn{f(z)}} 28 | \item{psi}{a vector of \eqn{\psi=-f'/f} function values.} 29 | \item{score}{a vector of score \eqn{\psi' = (f'/f)^2 - f''/f} function 30 | values.} 31 | \item{h}{same as the input argument h} 32 | } 33 | \note{ 34 | if the \code{score} function values are of interest, the Cauchy kernel 35 | may be preferable. 36 | } 37 | \references{ 38 | Portnoy, S and R Koenker, (1989) 39 | Adaptive L Estimation of Linear Models; 40 | \emph{Annals of Statistics} \bold{17}, 362--81. 41 | 42 | Silverman, B. (1986) 43 | \emph{Density Estimation}, pp 100--104. 44 | } 45 | \examples{ 46 | set.seed(1) 47 | x <- c(rnorm(600), 2 + 2*rnorm(400)) 48 | xx <- seq(-5, 8, length=200) 49 | z <- akj(x, xx) 50 | plot(xx, z$dens, ylim=range(0,z$dens), type ="l", col=2) 51 | abline(h=0, col="gray", lty=3) 52 | plot(xx, z$psi, type ="l", col=2, main = expression(hat(psi(x)))) 53 | plot(xx, z$score, type ="l", col=2, 54 | main = expression("score " * hat(psi) * "'" * (x))) 55 | 56 | if(require("nor1mix")) { 57 | m3 <- norMix(mu= c(-4, 0, 3), sigma = c(1/3, 1, 2), 58 | w = c(.1,.5,.4)) 59 | plot(m3, p.norm = FALSE) 60 | set.seed(11) 61 | x <- rnorMix(1000, m3) 62 | z2 <- akj(x, xx) 63 | lines(xx, z2$dens, col=2) 64 | z3 <- akj(x, xx, kappa = 0.5, alpha = 0.88) 65 | lines(xx, z3$dens, col=3) 66 | } 67 | } 68 | \keyword{smooth} 69 | -------------------------------------------------------------------------------- /man/bandwidth.rq.Rd: -------------------------------------------------------------------------------- 1 | \name{bandwidth.rq} 2 | \alias{bandwidth.rq} 3 | \title{ bandwidth selection for rq functions } 4 | \description{ 5 | function to compute bandwidth for sparsity estimation 6 | } 7 | \usage{ 8 | bandwidth.rq(p, n, hs=TRUE, alpha=0.05) 9 | } 10 | \arguments{ 11 | \item{p}{ quantile(s) of interest } 12 | \item{n}{ sample size } 13 | \item{hs}{ flag for hall-sheather method } 14 | \item{alpha}{ alpha level for intended confidence intervals } 15 | } 16 | \details{ If hs=TRUE (default) then the Hall-Sheather(1988) rule \eqn{O(n^{-1/3})} 17 | is used, if hs=FALSE then the Bofinger \eqn{O(n^{-1/5})} is used. 18 | } 19 | \value{ 20 | returns a vector of bandwidths corresponding to the argument p. 21 | } 22 | \references{ Hall and Sheather(1988, JRSS(B)),Bofinger (1975, Aus. J. 
Stat)} 23 | \author{ Roger Koenker rkoenker@uiuc.edu} 24 | \keyword{ regression } 25 | -------------------------------------------------------------------------------- /man/barro.Rd: -------------------------------------------------------------------------------- 1 | \name{barro} 2 | \alias{barro} 3 | \title{Barro Data} 4 | \description{ 5 | Version of the Barro Growth Data used in Koenker and Machado(1999). 6 | This is a regression data set consisting of 161 observations on determinants 7 | of cross country GDP growth rates. There are 13 covariates with dimnames 8 | corresponding to the original Barro and Lee source. See 9 | https://www.nber.org/pub/barro.lee/. The first 71 observations are on 10 | the period 1965-75, remainder on 1987-85. 11 | } 12 | \usage{data(barro)} 13 | \format{A data frame containing 161 observations on 14 variables: 14 | \tabular{rl}{ 15 | [,1] \tab "Annual Change Per Capita GDP"\cr 16 | [,2] \tab "Initial Per Capita GDP"\cr 17 | [,3] \tab "Male Secondary Education"\cr 18 | [,4] \tab "Female Secondary Education"\cr 19 | [,5] \tab "Female Higher Education"\cr 20 | [,6] \tab "Male Higher Education"\cr 21 | [,7] \tab "Life Expectancy"\cr 22 | [,8] \tab "Human Capital"\cr 23 | [,9] \tab "Education/GDP"\cr 24 | [,10] \tab "Investment/GDP"\cr 25 | [,11] \tab "Public Consumption/GDP"\cr 26 | [,12] \tab "Black Market Premium"\cr 27 | [,13] \tab "Political Instability"\cr 28 | [,14] \tab "Growth Rate Terms Trade"} 29 | 30 | } 31 | \references{ 32 | Koenker, R. and J.A.F. Machado (1999) Goodness of Fit and Related Inference Processes for Quantile Regression, JASA, 1296-1310.} 33 | \keyword{datasets} 34 | -------------------------------------------------------------------------------- /man/boot.crq.Rd: -------------------------------------------------------------------------------- 1 | \name{boot.crq} 2 | \alias{boot.crq} 3 | \title{ Bootstrapping Censored Quantile Regression} 4 | \description{ 5 | Functions used to estimated standard errors, confidence 6 | intervals and tests of hypotheses for censored quantile regression models 7 | using the Portnoy and Peng-Huang methods. } 8 | \usage{ 9 | boot.crq(x, y, c, taus, method, ctype = "right", R = 100, mboot, bmethod = "jack", ...) 10 | } 11 | \arguments{ 12 | \item{x}{ The regression design matrix} 13 | \item{y}{ The regression response vector} 14 | \item{c}{ The censoring indicator} 15 | \item{taus}{ The quantiles of interest} 16 | \item{method}{ The fitting method: either "P" for Portnoy or "PH" for Peng and Huang.} 17 | \item{ctype}{ Either "right" or "left"} 18 | \item{R}{ The number of bootstrap replications} 19 | \item{bmethod}{ The bootstrap method to be employed. There are (as yet) three 20 | options: method = "jack" uses the delete-d jackknife method 21 | described by Portnoy (2013), method = "xy-pair" uses the xy-pair method, 22 | that is the usual multinomial resampling of xy-pairs, while method 23 | = "Bose" uses the Bose and Chatterjee (2003) weighted resampling 24 | method with exponential weights. The "jack" method is now the default.} 25 | \item{mboot}{ optional argument for the bootstrap method: for bmethod = "jack" 26 | it specifies the number, d, of the delete-d jackknife, for 27 | method = "xy-pair" it specifies the size of the bootstrap samples, 28 | that permits subsampling (m out of n) bootstrap. By default in the 29 | former case it is set to 2 [sqrt(n)], for the latter the default is 30 | n. 
Obviously mboot should be substantially larger than the column dimension 31 | of x, and should be less than the sample size in both cases.} 32 | \item{...}{ Optional further arguments to control bootstrapping} 33 | } 34 | \details{ 35 | There are several refinements that are still unimplemented. Percentile 36 | methods should be incorporated, and extensions of the methods to be used 37 | in anova.rq should be made. Note that bootstrapping for the Powell 38 | method "Powell" is done via \code{\link{boot.rq}}. For problems with 39 | \code{n > 3000} a message is printed indicated progress in the resampling. 40 | } 41 | \value{ 42 | A matrix of dimension R by p is returned with the R resampled 43 | estimates of the vector of quantile regression parameters. When 44 | mofn < n for the "xy" method this matrix has been deflated by 45 | the factor sqrt(m/n) 46 | } 47 | \references{ 48 | Bose, A. and S. Chatterjee, (2003) Generalized bootstrap for estimators 49 | of minimizers of convex functions, \emph{J. Stat. Planning and Inf}, 117, 50 | 225-239. 51 | Portnoy, S. (2013) The Jackknife's Edge: Inference for Censored Quantile Regression, 52 | \emph{CSDA}, forthcoming. 53 | 54 | } 55 | 56 | \author{ Roger Koenker } 57 | \seealso{ \code{\link{summary.crq}}} 58 | \keyword{ regression} 59 | -------------------------------------------------------------------------------- /man/boot.rq.pwxy.Rd: -------------------------------------------------------------------------------- 1 | \name{boot.rq.pwxy} 2 | \alias{boot.rq.pwxy} 3 | \title{ 4 | Preprocessing weighted bootstrap method 5 | } 6 | \description{ 7 | Bootstrap method exploiting preprocessing strategy to reduce 8 | computation time for large problem. In contrast to 9 | \code{\link{boot.rq.pxy}} which uses the classical multinomial 10 | sampling scheme and is coded in R, this uses the exponentially 11 | weighted bootstrap scheme and is coded in fortran and consequently 12 | is considerably faster in larger problems. 13 | } 14 | \usage{ 15 | boot.rq.pwxy(x, y, tau, coef, R = 200, m0 = NULL, eps = 1e-06, ...) 16 | } 17 | \arguments{ 18 | \item{x}{ 19 | Design matrix 20 | } 21 | \item{y}{ 22 | response vector 23 | } 24 | \item{tau}{ 25 | quantile of interest 26 | } 27 | \item{coef}{ 28 | point estimate of fitted object 29 | } 30 | \item{R}{ 31 | the number of bootstrap replications desired. 32 | } 33 | \item{m0}{ 34 | constant to determine initial sample size, defaults to sqrt(n*p) 35 | but could use some further tuning... 36 | } 37 | \item{eps}{ 38 | tolerance for convergence of fitting algorithm 39 | } 40 | \item{...}{ 41 | other parameters not yet envisaged. 42 | } 43 | } 44 | \details{ 45 | The fortran implementation is quite similar to the R code for 46 | \code{\link{boot.rq.pxy}} except that there is no multinomial sampling. 47 | Instead \code{rexp(n)} weights are used. 48 | } 49 | \value{ 50 | returns a list with elements: 51 | \item{coefficients}{a matrix of dimension ncol(x) by R} 52 | \item{nit}{a 5 by m matrix of iteration counts} 53 | \item{info}{an m-vector of convergence flags} 54 | } 55 | \references{ 56 | Chernozhukov, V. I. Fernandez-Val and B. Melly, 57 | Fast Algorithms for the Quantile Regression Process, 2019, 58 | arXiv, 1909.05782, 59 | 60 | Portnoy, S. and R. 
Koenker, The Gaussian Hare and the Laplacian 61 | Tortoise, Statistical Science, (1997) 279-300 62 | } 63 | \author{ 64 | Blaise Melly and Roger Koenker 65 | } 66 | \seealso{ 67 | \code{\link{boot.rq.pxy}} 68 | } 69 | \keyword{bootstrap} 70 | -------------------------------------------------------------------------------- /man/boot.rq.pxy.Rd: -------------------------------------------------------------------------------- 1 | \name{boot.rq.pxy} 2 | \alias{boot.rq.pxy} 3 | \title{ 4 | Preprocessing bootstrap method 5 | } 6 | \description{ 7 | Bootstrap method exploiting preprocessing strategy to reduce 8 | computation time for large problem. 9 | } 10 | \usage{ 11 | boot.rq.pxy(x, y, s, tau = 0.5, coef, method = "fn", Mm.factor = 3) 12 | } 13 | \arguments{ 14 | \item{x}{ 15 | Design matrix 16 | } 17 | \item{y}{ 18 | response vector 19 | } 20 | \item{s}{ 21 | matrix of multinomial draws for xy bootstrap 22 | } 23 | \item{tau}{ 24 | quantile of interest 25 | } 26 | \item{coef}{ 27 | point estimate of fitted object 28 | } 29 | \item{method}{ 30 | fitting method for bootstrap 31 | } 32 | \item{Mm.factor}{ 33 | constant to determine initial sample size 34 | } 35 | } 36 | \details{ 37 | See references for further details. 38 | } 39 | \value{ 40 | Returns matrix of bootstrap estimates. 41 | } 42 | \references{ 43 | Chernozhukov, V. I. Fernandez-Val and B. Melly, 44 | Fast Algorithms for the Quantile Regression Process, 2019, 45 | arXiv, 1909.05782, 46 | 47 | Portnoy, S. and R. Koenker, The Gaussian Hare and the Laplacian 48 | Tortoise, Statistical Science, (1997) 279-300 49 | } 50 | \author{ 51 | Blaise Melly and Roger Koenker 52 | } 53 | \seealso{ 54 | \code{\link{rq.fit.ppro}} 55 | } 56 | \keyword{bootstrap} 57 | -------------------------------------------------------------------------------- /man/combos.Rd: -------------------------------------------------------------------------------- 1 | \name{combos} 2 | \alias{combos} 3 | \title{Ordered Combinations} 4 | \description{ 5 | All m combinations of the first n integers taken p at a time 6 | are computed and return as an p by m matrix. The columns 7 | of the matrix are ordered so that adjacent columns differ 8 | by only one element. This is just a reordered version of 9 | \code{combn} in base R, but the ordering is useful for some 10 | applications. 11 | } 12 | \usage{ 13 | combos(n,p) 14 | } 15 | \arguments{ 16 | \item{n}{The n in n choose p} 17 | \item{p}{The p in n choose p} 18 | } 19 | \value{ 20 | a \code{matrix} of dimension p by \code{choose(n,p)} 21 | } 22 | \note{ 23 | Implementation based on a Pascal algorithm of Limin Xiang 24 | and Kazuo Ushijima (2001) translated to ratfor for R. 25 | If you have \pkg{rgl} installed you might try \code{demo("combos")} 26 | for a visual impression of how this works. 27 | } 28 | \references{ 29 | Limin Xiang and Kazuo Ushijima (2001) 30 | "On O(1) Time Algorithms for Combinatorial Generation," 31 | \emph{Computer Journal}, 44(4), 292-302. 
32 | } 33 | \examples{ 34 | H <- combos(20,3) 35 | } 36 | \keyword{utilities} 37 | -------------------------------------------------------------------------------- /man/critval.Rd: -------------------------------------------------------------------------------- 1 | 2 | \name{critval} 3 | \alias{critval} 4 | \title{ 5 | Hotelling Critical Values 6 | } 7 | \description{ 8 | Critical values for uniform confidence bands for rqss fitting 9 | } 10 | \usage{ 11 | critval(kappa, alpha = 0.05, rdf = 0) 12 | } 13 | \arguments{ 14 | \item{kappa}{ 15 | length of the tube 16 | } 17 | \item{alpha}{ 18 | desired non-coverage of the band, intended coverage is 1 - alpha 19 | } 20 | \item{rdf}{ 21 | "residual" degrees of freedom of the fitted object. If \code{rdf=0} 22 | then the Gaussian version of the critical value is computed, otherwise 23 | the value is based on standard Student t theory. 24 | } 25 | } 26 | \value{ 27 | A scalar critical value that acts as a multiplier for the uniform 28 | confidence band construction. 29 | } 30 | \details{ 31 | The Hotelling tube approach to inference has a long and illustrious 32 | history. See Johansen and Johnstone (1989) for an overview. The implementation 33 | here is based on Sun and Loader (1994) and Loader's \pkg{locfit} package, although 34 | a simpler root finding approach is substituted for the iterative method used 35 | there. At this stage, only univariate bands may be constructed. 36 | } 37 | \references{ 38 | Hotelling, H. (1939): ``Tubes and Spheres in $n$-spaces, and a class 39 | of statistical problems,'' \emph{Am J. Math}, 61, 440--460. 40 | 41 | Johansen, S., I.M. Johnstone (1990): ``Hotelling's 42 | Theorem on the Volume of Tubes: Some Illustrations in Simultaneous 43 | Inference and Data Analysis,'' \emph{The Annals of Statistics}, 18, 652--684. 44 | 45 | Sun, J. and C.V. Loader: (1994) ``Simultaneous Confidence Bands for Linear Regression 46 | and smoothing,'' \emph{The Annals of Statistics}, 22, 1328--1345. 47 | } 48 | \seealso{ 49 | \code{\link{plot.rqss}} 50 | } 51 | \keyword{regression} 52 | -------------------------------------------------------------------------------- /man/dither.Rd: -------------------------------------------------------------------------------- 1 | 2 | \name{dither} 3 | \alias{dither} 4 | \title{ Function to randomly perturb a vector} 5 | \description{ 6 | With malice aforethought, dither adds a specified random perturbation to each element 7 | of the input vector, usually employed as a device to mitigate the effect of ties. 8 | } 9 | 10 | \usage{ 11 | dither(x, type = "symmetric", value = NULL) 12 | } 13 | \arguments{ 14 | \item{x}{\code{x} a numeric vector } 15 | \item{type}{\code{type} is either 'symmetric' or 'right' } 16 | \item{value}{\code{value} scale of dequantization } 17 | } 18 | \details{ 19 | The function \code{dither} operates slightly differently than the function 20 | \code{jitter} in base R, permitting strictly positive perturbations with 21 | the option \code{type = "right"} and using somewhat different default schemes 22 | for the scale of the perturbation. Dithering the response variable is 23 | frequently a useful option in quantile regression fitting to avoid deleterious 24 | effects of degenerate solutions. See, e.g. Machado and Santos Silva (2005). 25 | For a general introduction and some etymology see the Wikipedia article on "dither". 26 | For integer data it is usually advisable to use \code{value = 1}. 
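One may also reduce the dependence on any single perturbation by averaging estimates over
several independent dithers; a minimal sketch (assuming a count response \code{y} and a
covariate \code{x}; this is an illustration, not code from the package):
\preformatted{
    ## average coefficients over 10 independent right-dithers (sketch)
    B <- replicate(10, coef(rq(dither(y, type = "right", value = 1) ~ x)))
    rowMeans(B)
}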
27 | When 'x' is a matrix or array dither treats all elements as a vector but returns 28 | an object of the original class. 29 | } 30 | \value{ 31 | A dithered version of the input vector 'x'. 32 | } 33 | 34 | \references{ 35 | Machado, J.A.F. and Santos Silva, J.M.C. (2005), Quantiles for Counts, Journal of the American Statistical Association, vol. 100, no. 472, pp. 1226-1237. 36 | } 37 | \author{ R. Koenker } 38 | \note{ Some further generality might be nice, for example something other than 39 | uniform noise would be desirable in some circumstances. Note that when dithering 40 | you are entering into the "state of sin" that John von Neumann famously attributed 41 | to anyone considering "arithmetical methods of producing random digits." If you 42 | need to preserve reproducibility, then \code{set.seed} is your friend. 43 | } 44 | \seealso{ \code{\link{jitter}} } 45 | \examples{ 46 | x <- rlnorm(40) 47 | y <- rpois(40, exp(.5 + log(x))) 48 | f <- rq(dither(y, type = "right", value = 1) ~ x) 49 | } 50 | \keyword{ manip } 51 | 52 | -------------------------------------------------------------------------------- /man/engel.Rd: -------------------------------------------------------------------------------- 1 | \name{engel} 2 | \alias{engel} 3 | \title{Engel Data} 4 | \description{ 5 | Engel food expenditure data used in Koenker and Bassett(1982). 6 | This is a regression data set consisting of 235 observations on 7 | income and expenditure on food for Belgian working class households. 8 | } 9 | \usage{data(engel)} 10 | \format{A data frame containing 235 observations on 2 variables 11 | \describe{ 12 | \item{income}{annual household income in Belgian francs} 13 | \item{foodexp}{annual household food expenditure in Belgian francs} 14 | } 15 | } 16 | \references{ 17 | Koenker, R. and Bassett, G (1982) 18 | Robust Tests of Heteroscedasticity based on Regression Quantiles; 19 | \emph{Econometrica} \bold{50}, 43--61. 20 | } 21 | \examples{ 22 | ## See also demo("engel1") 23 | ## -------------- 24 | 25 | data(engel) 26 | plot(engel, log = "xy", 27 | main = "'engel' data (log - log scale)") 28 | plot(log10(foodexp) ~ log10(income), data = engel, 29 | main = "'engel' data (log10 - transformed)") 30 | taus <- c(.15, .25, .50, .75, .95, .99) 31 | rqs <- as.list(taus) 32 | for(i in seq(along = taus)) { 33 | rqs[[i]] <- rq(log10(foodexp) ~ log10(income), tau = taus[i], data = engel) 34 | lines(log10(engel$income), fitted(rqs[[i]]), col = i+1) 35 | } 36 | legend("bottomright", paste("tau = ", taus), inset = .04, 37 | col = 2:(length(taus)+1), lty=1) 38 | } 39 | \keyword{datasets} 40 | -------------------------------------------------------------------------------- /man/gasprice.Rd: -------------------------------------------------------------------------------- 1 | \name{gasprice} 2 | \alias{gasprice} 3 | \docType{data} 4 | \title{Time Series of US Gasoline Prices 5 | } 6 | \description{ Time Series of Weekly US Gasoline Prices: 1990:8 -- 2003:26 7 | } 8 | \usage{data("gasprice")} 9 | \examples{ 10 | data(gasprice) 11 | } 12 | \keyword{datasets} 13 | -------------------------------------------------------------------------------- /man/kuantile.Rd: -------------------------------------------------------------------------------- 1 | \name{kuantile} 2 | \alias{kuantile} 3 | \alias{kselect} 4 | \alias{kunique} 5 | \title{Quicker Sample Quantiles } 6 | \description{ 7 | The function 'kuantile' computes sample quantiles corresponding 8 | to the specified probabilities. 
The intent is to mimic the generic 9 | (base) function 'quantile' but using a variant of the Floyd and 10 | Rivest (1975) algorithm which is somewhat quicker, especially for 11 | large sample sizes. 12 | } 13 | \usage{ 14 | kuantile(x, probs = seq(0, 1, .25), na.rm = FALSE, names = TRUE, type = 7, ...) 15 | } 16 | \arguments{ 17 | \item{x}{numeric vector whose sample quantiles are wanted.} 18 | \item{probs}{numeric vector of probabilities with values in [0,1].} 19 | \item{type}{ an integer between 1 and 9 selecting one of the nine quantile 20 | algorithms detailed below to be used.} 21 | \item{na.rm}{logical: if true, any 'NA' and 'NaN''s are removed from 'x' 22 | before the quantiles are computed.} 23 | \item{names}{logical: if true, the result has a 'names' attribute. } 24 | \item{...}{further arguments passed to or from other methods.} 25 | } 26 | \details{ A vector of length 'length(p)' is returned. See the documentation 27 | for 'quantile' for further details on the types. The algorithm was written 28 | by K.C. Kiwiel. It is a modified version of the (algol 68) SELECT procedure of 29 | Floyd and Rivest (1975), incorporating modifications of Brown(1976). 30 | The algorithm has linear growth in the number of comparisons required as 31 | sample size grows. For the median, average case behavior requires 32 | \eqn{1.5 n + O((n log n)^{1/2})} comparisons. See Kiwiel (2005) and Knuth (1998) 33 | for further details. When the number of required elements of p is large, it 34 | may be preferable to revert to a full sort.} 35 | \value{ 36 | A vector of quantiles of the same length as the vector p. 37 | } 38 | \references{ 39 | R.W. Floyd and R.L. Rivest: "Algorithm 489: The Algorithm 40 | SELECT---for Finding the $i$th Smallest of $n$ Elements", 41 | Comm. ACM 18, 3 (1975) 173, 42 | 43 | T. Brown: "Remark on Algorithm 489", ACM Trans. Math. 44 | Software 3, 2 (1976), 301-304. 45 | 46 | K.C. Kiwiel: On Floyd and Rivest's SELECT Algorithm, Theoretical 47 | Computer Sci. 347 (2005) 214-238. 48 | 49 | D. Knuth, The Art of Computer Programming, Volume 3, Sorting and 50 | Searching, 2nd Ed., (1998), Addison-Wesley. 51 | } 52 | \author{ K.C. Kiwiel, R interface: Roger Koenker } 53 | \seealso{\code{\link{quantile}}} 54 | \examples{ 55 | kuantile(x <- rnorm(1001))# Extremes & Quartiles by default 56 | 57 | ### Compare different types 58 | p <- c(0.1,0.5,1,2,5,10,50)/100 59 | res <- matrix(as.numeric(NA), 9, 7) 60 | for(type in 1:9) res[type, ] <- y <- kuantile(x, p, type=type) 61 | dimnames(res) <- list(1:9, names(y)) 62 | ktiles <- res 63 | 64 | ### Compare different types 65 | p <- c(0.1,0.5,1,2,5,10,50)/100 66 | res <- matrix(as.numeric(NA), 9, 7) 67 | for(type in 1:9) res[type, ] <- y <- quantile(x, p, type=type) 68 | dimnames(res) <- list(1:9, names(y)) 69 | qtiles <- res 70 | 71 | max(abs(ktiles - qtiles)) 72 | 73 | 74 | } 75 | \keyword{univar} 76 | -------------------------------------------------------------------------------- /man/latex.Rd: -------------------------------------------------------------------------------- 1 | \name{latex} 2 | \alias{latex} 3 | \title{ Make a latex version of an R object } 4 | \description{ 5 | Generic function for converting an \R object into a latex file. 6 | } 7 | \usage{ 8 | latex(x, ...) 
9 | } 10 | \arguments{ 11 | \item{x}{ \code{x} is an \R object } 12 | \item{\dots}{ \code{\dots} optional arguments } 13 | } 14 | \seealso{ \code{\link{latex.table}}, \code{\link{latex.summary.rqs}} 15 | } 16 | \keyword{IO} 17 | 18 | -------------------------------------------------------------------------------- /man/latex.summary.rqs.Rd: -------------------------------------------------------------------------------- 1 | \name{latex.summary.rqs} 2 | \alias{latex.summary.rqs} 3 | \title{ Make a latex table from a table of rq results} 4 | \description{ 5 | Produces a file with latex commands for a table of rq results. 6 | } 7 | \usage{\method{latex}{summary.rqs}(x, transpose = FALSE, caption = "caption goes here.", 8 | digits = 3, file = as.character(substitute(x)), ...) 9 | } 10 | \arguments{ 11 | \item{x}{\code{x} is an object of class \code{summary.rqs}} 12 | \item{transpose}{if \code{TRUE} transpose table so that 13 | rows are quantiles and columns are covariates. } 14 | \item{caption}{ caption for the table} 15 | \item{digits}{ decimal precision of table entries.} 16 | \item{file}{ name of file } 17 | \item{\dots}{ optional arguments for \code{latex.table} } 18 | } 19 | \details{ 20 | Calls \code{latex.table}. 21 | } 22 | \value{ 23 | Returns invisibly after writing the file. 24 | } 25 | \author{ R. Koenker} 26 | \seealso{ \code{\link{summary.rqs}}, \code{\link{latex.table}}} 27 | \keyword{IO} 28 | -------------------------------------------------------------------------------- /man/lm.fit.recursive.Rd: -------------------------------------------------------------------------------- 1 | \name{lm.fit.recursive} 2 | \alias{lm.fit.recursive} 3 | \title{ Recursive Least Squares } 4 | \description{ 5 | This function fits a linear model by recursive least squares. It is 6 | a utility routine for the \code{\link{KhmaladzeTest}} function of the quantile regression 7 | package. 8 | } 9 | \usage{ 10 | lm.fit.recursive(X, y, int=TRUE) 11 | } 12 | \arguments{ 13 | \item{X}{ Design Matrix } 14 | \item{y}{ Response Variable} 15 | \item{int}{ if TRUE then append intercept to X} 16 | } 17 | \value{ 18 | return p by n matrix of fitted parameters, where p. The 19 | ith column gives the solution up to "time" i. 20 | } 21 | \references{ A. Harvey, (1993) Time Series Models, MIT } 22 | \author{ R. Koenker } 23 | 24 | \keyword{methods} 25 | -------------------------------------------------------------------------------- /man/lprq.Rd: -------------------------------------------------------------------------------- 1 | \name{lprq} 2 | \alias{lprq} 3 | \title{ locally polynomial quantile regression } 4 | \description{ 5 | This is a toy function to illustrate how to do locally polynomial 6 | quantile regression univariate smoothing. 7 | } 8 | \usage{ 9 | lprq(x, y, h, tau = .5, m = 50) 10 | } 11 | \arguments{ 12 | \item{x}{ The conditioning covariate} 13 | \item{y}{ The response variable } 14 | \item{h}{ The bandwidth parameter } 15 | \item{tau}{ The quantile to be estimated } 16 | \item{m}{ The number of points at which the function is to be estimated } 17 | } 18 | \details{ 19 | The function obviously only does locally linear fitting but can be easily 20 | adapted to locally polynomial fitting of higher order. The author doesn't 21 | really approve of this sort of smoothing, being more of a spline person, 22 | so the code is left is its (almost) most trivial form. 
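A locally quadratic variant requires only adding a squared term to the local model; the
following sketch (an illustration, not a function exported by the package) indicates one
way to do this:
\preformatted{
    ## locally quadratic version of lprq (sketch)
    lprq2 <- function(x, y, h, tau = .5, m = 50) {
        xx <- seq(min(x), max(x), length = m)
        fv <- dv <- xx
        for(i in 1:length(xx)) {
            z <- x - xx[i]
            wx <- dnorm(z/h)                 # Gaussian kernel weights
            r <- rq(y ~ z + I(z^2), weights = wx, tau = tau, ci = FALSE)
            fv[i] <- r$coef[1]               # local level estimate
            dv[i] <- r$coef[2]               # local slope estimate
        }
        list(xx = xx, fv = fv, dv = dv)
    }
}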
23 | } 24 | \value{ 25 | The function compute a locally weighted linear quantile regression fit 26 | at each of the m design points, and returns: 27 | \item{xx}{The design points at which the evaluation occurs} 28 | \item{fv}{The estimated function values at these design points} 29 | \item{dev}{The estimated first derivative values at the design points} 30 | } 31 | \references{ Koenker, R. (2004) Quantile Regression } 32 | \author{R. Koenker } 33 | \note{One can also consider using B-spline expansions see \code{bs}.} 34 | 35 | \seealso{ \code{rqss} for a general approach to oonparametric QR fitting. } 36 | \examples{ 37 | require(MASS) 38 | data(mcycle) 39 | attach(mcycle) 40 | plot(times,accel,xlab = "milliseconds", ylab = "acceleration (in g)") 41 | hs <- c(1,2,3,4) 42 | for(i in hs){ 43 | h = hs[i] 44 | fit <- lprq(times,accel,h=h,tau=.5) 45 | lines(fit$xx,fit$fv,lty=i) 46 | } 47 | legend(50,-70,c("h=1","h=2","h=3","h=4"),lty=1:length(hs)) 48 | } 49 | \keyword{smooth} 50 | \keyword{robust} 51 | -------------------------------------------------------------------------------- /man/nlrq.control.Rd: -------------------------------------------------------------------------------- 1 | \name{nlrq.control} 2 | \alias{nlrq.control} 3 | \title{ Set control parameters for nlrq } 4 | \description{ 5 | Set algorithmic parameters for nlrq (nonlinear quantile regression function) 6 | } 7 | \usage{ 8 | nlrq.control(maxiter=100, k=2, InitialStepSize = 1, big=1e+20, eps=1e-07, beta=0.97) 9 | } 10 | \arguments{ 11 | \item{maxiter}{maximum number of allowed iterations} 12 | \item{k}{the number of iterations of the Meketon algorithm to be calculated 13 | in each step, usually 2 is reasonable, occasionally it may be helpful 14 | to set k=1 } 15 | \item{InitialStepSize}{ Starting value in \code{optim} to determine the step 16 | length of iterations. The default value of 1 is sometimes too optimistic. 17 | In such cases, the value 0 forces optim to just barely stick its toe in 18 | the water.} 19 | \item{big}{ a large scalar} 20 | \item{eps}{ tolerance for convergence of the algorithm } 21 | \item{beta}{ a shrinkage parameter which controls the recentering process 22 | in the interior point algorithm. } 23 | } 24 | \seealso{ \code{\link{nlrq}} } 25 | 26 | \keyword{ environment} 27 | -------------------------------------------------------------------------------- /man/plot.KhmaladzeTest.Rd: -------------------------------------------------------------------------------- 1 | 2 | \name{plot.KhmaladzeTest} 3 | \alias{plot.KhmaladzeTest} 4 | \title{ Plot a KhmaladzeTest object} 5 | \description{ 6 | Plot an object generated by KhmaladzeTest 7 | } 8 | \usage{ 9 | \method{plot}{KhmaladzeTest}(x, ...) 10 | } 11 | \arguments{ 12 | \item{x}{ 13 | Object returned from KhmaladzeTest representing the fit of the model. 14 | } 15 | \item{...}{ 16 | Optional arguments. 17 | } 18 | } 19 | \seealso{ \code{\link{KhmaladzeTest}}} 20 | 21 | \keyword{ regression} 22 | -------------------------------------------------------------------------------- /man/plot.rq.process.Rd: -------------------------------------------------------------------------------- 1 | \name{plot.rq} 2 | \alias{plot.rq.process} 3 | \title{ plot the coordinates of the quantile regression process} 4 | \description{ Function to plot quantile regression process. } 5 | \usage{ 6 | \method{plot}{rq.process}(x, nrow=3, ncol=2, ...) 
} 7 | \arguments{ 8 | \item{x}{ an object produced by rq() fitting } 9 | \item{nrow}{ rows in mfrow } 10 | \item{ncol}{ columns in mfrow} 11 | \item{...}{ optional arguments to plot} 12 | } 13 | \author{ Roger Koenker rkoenker@uiuc.edu} 14 | \seealso{ \code{\link{rq}}} 15 | \keyword{hplot} 16 | -------------------------------------------------------------------------------- /man/plot.rqs.Rd: -------------------------------------------------------------------------------- 1 | \name{plot.rqs} 2 | \alias{plot.rqs} 3 | 4 | \title{Visualizing sequences of quantile regressions} 5 | 6 | \description{A sequence of coefficient estimates for quantile 7 | regressions with varying \code{tau} parameters is visualized.} 8 | 9 | \usage{ 10 | \method{plot}{rqs}(x, parm = NULL, ols = TRUE, 11 | mfrow = NULL, mar = NULL, ylim = NULL, main = NULL, col = 1:2, lty = 1:2, 12 | cex = 0.5, pch = 20, type = "b", xlab = "", ylab = "", \dots) 13 | } 14 | 15 | \arguments{ 16 | \item{x}{an object of class \code{"rqs"} as produce by \code{\link{rq}} 17 | (with a vector of \code{tau} values).} 18 | \item{parm}{a specification of which parameters are to be plotted, 19 | either a vector of numbers or a vector of names. By default, all 20 | parameters are considered.} 21 | \item{ols}{logical. Should a line for the OLS coefficient (as estimated 22 | by \code{\link[stats]{lm}}) be added?} 23 | \item{mfrow, mar, ylim, main}{graphical parameters. Suitable defaults are chosen 24 | based on the coefficients to be visualized.} 25 | \item{col, lty}{graphical parameters. For each parameter, the first 26 | element corresponds to the \code{rq} coefficients and the second to 27 | the \code{lm} coefficients.} 28 | \item{cex, pch, type, xlab, ylab, \dots}{further graphical parameters 29 | passed.} 30 | } 31 | 32 | \details{The \code{plot} method for \code{"rqs"} objects visualizes the 33 | coefficients only, confidence bands can be added by using the \code{plot} 34 | method for the associated \code{"summary.rqs"} object.} 35 | 36 | \seealso{\code{\link{rq}}, \code{\link{plot.summary.rqs}}} 37 | 38 | \value{A matrix with all coefficients visualized is returned invisibly.} 39 | 40 | \examples{ 41 | ## fit Engel models (in levels) for tau = 0.1, ..., 0.9 42 | data("engel") 43 | fm <- rq(foodexp ~ income, data = engel, tau = 1:9/10) 44 | 45 | ## visualizations 46 | plot(fm) 47 | plot(fm, parm = 2, mar = c(5.1, 4.1, 2.1, 2.1), main = "", xlab = "tau", 48 | ylab = "income coefficient", cex = 1, pch = 19) 49 | } 50 | 51 | \keyword{hplot} 52 | -------------------------------------------------------------------------------- /man/plot.summary.rqs.Rd: -------------------------------------------------------------------------------- 1 | \name{plot.summary.rqs} 2 | \alias{plot.summary.rqs} 3 | \alias{plot.summary.rq} 4 | 5 | \title{Visualizing sequences of quantile regression summaries} 6 | 7 | \description{A sequence of coefficient estimates for quantile 8 | regressions with varying \code{tau} parameters is visualized 9 | along with associated confidence bands.} 10 | 11 | \usage{ 12 | \method{plot}{summary.rqs}(x, parm = NULL, level = 0.9, ols = TRUE, 13 | mfrow = NULL, mar = NULL, ylim = NULL, main = NULL, 14 | col = gray(c(0, 0.75)), border = NULL, lcol = 2, lty = 1:2, 15 | cex = 0.5, pch = 20, type = "b", xlab = "", ylab = "", \dots) 16 | } 17 | 18 | \arguments{ 19 | \item{x}{an object of class \code{"summary.rqs"} as produce by 20 | applying the \code{summary} method to a \code{\link{rq}} object 21 | (with a vector of \code{tau} values).} 22 | 
\item{parm}{a specification of which parameters are to be plotted, 23 | either a vector of numbers or a vector of names. By default, all 24 | parameters are considered.} 25 | \item{level}{Confidence level of bands. When using 26 | the rank based confidence intervals in summary, which is the default 27 | method for sample sizes under 1000, you will need to control the level 28 | of the intervals by passing the parameter alpha to 29 | \code{\link{summary.rq}}, prior to calling 30 | \code{\link{plot.summary.rqs}}. Note also that alpha = 1 - level.} 31 | \item{ols}{logical. Should a line for the OLS coefficient and their confidence 32 | bands (as estimated by \code{\link[stats]{lm}}) be added?} 33 | \item{mfrow, mar, ylim, main}{graphical parameters. Suitable defaults are chosen 34 | based on the coefficients to be visualized. It can be useful to use a common 35 | vertical scale when plotting as a way of comparing confidence bands constructed 36 | by different methods. For this purpose one can specify a \code{ylim} as a 37 | 2 by \code{length(parm)} matrix.} 38 | \item{col}{vector of color specification for \code{rq} coefficients 39 | and the associated confidence polygon.} 40 | \item{border}{color specification for the confidence polygon. By default, 41 | the second element of \code{col} is used.} 42 | \item{lcol, lty}{color and line type specification for OLS coefficients 43 | and their confidence bounds.} 44 | \item{cex, pch, type, xlab, ylab, \dots}{further graphical parameters 45 | passed to \code{\link[graphics]{points}}.} 46 | } 47 | 48 | \details{The \code{plot} method for \code{"summary.rqs"} objects visualizes 49 | the coefficients along with their confidence bands. The bands can be 50 | omitted by using the \code{plot} method for \code{"rqs"} objects directly.} 51 | 52 | \seealso{\code{\link{rq}}, \code{\link{plot.rqs}}} 53 | 54 | \value{A list with components \code{z}, an array with all coefficients visualized 55 | (and associated confidence bands), and \code{Ylim}, a 2 by p matrix containing 56 | the y plotting limits. The latter component may be useful for establishing a 57 | common scale for two or more similar plots. The list is returned invisibly.} 58 | 59 | \examples{ 60 | ## fit Engel models (in levels) for tau = 0.1, ..., 0.9 61 | data("engel") 62 | fm <- rq(foodexp ~ income, data = engel, tau = 1:9/10) 63 | sfm <- summary(fm) 64 | 65 | ## visualizations 66 | plot(sfm) 67 | plot(sfm, parm = 2, mar = c(5.1, 4.1, 2.1, 2.1), main = "", xlab = "tau", 68 | ylab = "income coefficient", cex = 1, pch = 19) 69 | } 70 | 71 | \keyword{hplot} 72 | -------------------------------------------------------------------------------- /man/predict.rqss.Rd: -------------------------------------------------------------------------------- 1 | \name{predict.rqss} 2 | \alias{predict.rqss} 3 | \alias{predict.qss1} 4 | \alias{predict.qss2} 5 | \title{Predict from fitted nonparametric quantile regression smoothing spline models} 6 | \description{ 7 | Additive models for nonparametric quantile regression using total 8 | variation penalty methods can be fit with the \code{\link{rqss}} 9 | function. Univarariate and bivariate components can be predicted 10 | using these functions. 11 | } 12 | \usage{ 13 | \method{predict}{rqss}(object, newdata, interval = "none", level = 0.95, ...) 14 | \method{predict}{qss1}(object, newdata, ...) 15 | \method{predict}{qss2}(object, newdata, ...) 
16 | } 17 | \arguments{ 18 | \item{object}{ is a fitted object produced by \code{\link{rqss}} } 19 | \item{newdata}{ a data frame describing the observations at which 20 | prediction is to be made. For qss components, newdata should 21 | lie in strictly within the convex hull of the fitting data. Newdata 22 | corresponding to the partially linear component of the model 23 | may require caution concerning the treatment of factor levels, if any.} 24 | \item{interval}{If set to \code{confidence} then a \code{level} confidence interval 25 | for the predictions is returned.} 26 | \item{level}{intended coverage probability for the confidence intervals} 27 | \item{\dots}{ optional arguments } 28 | } 29 | \details{ 30 | For both univariate and bivariate prediction linear interpolation is 31 | done. In the bivariate case, this involves computing barycentric 32 | coordinates of the new points relative to their enclosing triangles. 33 | It may be of interest to plot individual components of fitted rqss 34 | models: this is usually best done by fixing the values of other 35 | covariates at reference values typical of the sample data and 36 | predicting the response at varying values of one qss term at a 37 | time. Direct use of the \code{predict.qss1} and \code{predict.qss2} functions 38 | is discouraged since it usually corresponds to predicted values 39 | at absurd reference values of the other covariates, i.e. zero. 40 | } 41 | \value{ 42 | A vector of predictions, or in the case that \code{interval = "confidence")} 43 | a matrix whose first column is the vector of predictions and whose second and 44 | third columns are the lower and upper confidence limits for each prediction. 45 | } 46 | \author{ R. Koenker } 47 | \seealso{ \code{\link{rqss}} 48 | } 49 | \examples{ 50 | n <- 200 51 | lam <- 2 52 | x <- sort(rchisq(n,4)) 53 | z <- exp(rnorm(n)) + x 54 | y <- log(x)+ .1*(log(x))^2 + z/4 + log(x)*rnorm(n)/4 55 | plot(x,y - z/4 + mean(z)/4) 56 | Ifit <- rqss(y ~ qss(x,constraint="I") + z) 57 | sfit <- rqss(y ~ qss(x,lambda = lam) + z) 58 | xz <- data.frame(z = mean(z), 59 | x = seq(min(x)+.01,max(x)-.01,by=.25)) 60 | lines(xz[["x"]], predict(Ifit, xz), col=2) 61 | lines(xz[["x"]], predict(sfit, xz), col=3) 62 | legend(10,2,c("Increasing","Smooth"),lty = 1, col = c(2,3)) 63 | title("Predicted Median Response at Mean Value of z") 64 | %%keep objects for inspection : do not rm(x,y,z,xz,fit) 65 | 66 | ## Bivariate example -- loads pkg "interp" 67 | if(requireNamespace("interp")){ 68 | if(requireNamespace("interp")){ 69 | data(CobarOre) 70 | fit <- rqss(z ~ qss(cbind(x,y), lambda=.08), 71 | data= CobarOre) 72 | plot(fit, col="grey", 73 | main = "CobarOre data -- rqss(z ~ qss(cbind(x,y)))") 74 | T <- with(CobarOre, interp::tri.mesh(x, y)) 75 | set.seed(77) 76 | ndum <- 100 77 | xd <- with(CobarOre, runif(ndum, min(x), max(x))) 78 | yd <- with(CobarOre, runif(ndum, min(y), max(y))) 79 | table(s <- interp::in.convex.hull(T, xd, yd)) 80 | pred <- predict(fit, data.frame(x = xd[s], y = yd[s])) 81 | contour(interp::interp(xd[s],yd[s], pred), 82 | col="red", add = TRUE) 83 | }}} 84 | \keyword{regression} 85 | \keyword{smooth} 86 | \keyword{robust} 87 | -------------------------------------------------------------------------------- /man/print.KhmaladzeTest.Rd: -------------------------------------------------------------------------------- 1 | \name{print.KhmaladzeTest} 2 | \alias{print.KhmaladzeTest} 3 | \title{ Print a KhmaladzeTest object} 4 | \description{ 5 | Print an object generated by KhmaladzeTest 6 | } 
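In typical interactive use the method is invoked implicitly by typing the object's name;
a minimal sketch (the \code{barro} column names used here are an assumption about the data
shipped with the package, check \code{names(barro)}):
\preformatted{
    data(barro)
    kt <- KhmaladzeTest(y.net ~ lgdp2 + fse2, data = barro,
                        taus = seq(.1, .9, by = .1))
    kt    # same as print(kt)
}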
7 | \usage{ 8 | \method{print}{KhmaladzeTest}(x, ...) 9 | } 10 | \arguments{ 11 | \item{x}{ 12 | Object returned from KhmaladzeTest representing the fit of the model. 13 | } 14 | \item{...}{ 15 | Optional arguments. 16 | } 17 | } 18 | \seealso{ \code{\link{KhmaladzeTest}}} 19 | 20 | \keyword{ regression} 21 | -------------------------------------------------------------------------------- /man/print.rq.Rd: -------------------------------------------------------------------------------- 1 | \name{print.rq} 2 | \alias{print.rq} 3 | \alias{print.rqs} 4 | \title{ Print an rq object} 5 | \description{ 6 | Print an object generated by rq 7 | } 8 | \usage{ 9 | \method{print}{rq}(x, ...) 10 | \method{print}{rqs}(x, ...) 11 | } 12 | \arguments{ 13 | \item{x}{ 14 | Object returned from rq representing the fit of the model. 15 | } 16 | \item{...}{ 17 | Optional arguments. 18 | } 19 | } 20 | \seealso{ \code{\link{rq}}} 21 | 22 | \keyword{ regression} 23 | -------------------------------------------------------------------------------- /man/print.summary.rq.Rd: -------------------------------------------------------------------------------- 1 | \name{print.summary.rq} 2 | \alias{print.summary.rq} 3 | \alias{print.summary.rqs} 4 | \title{ Print Quantile Regression Summary Object } 5 | \usage{ 6 | \method{print}{summary.rq}(x, digits=max(5, .Options$digits - 2), ...) 7 | \method{print}{summary.rqs}(x, ...) 8 | } 9 | \arguments{ 10 | \item{x}{ 11 | This is an object of class \code{"summary.rq"} produced by a call to 12 | \code{summary.rq()}. 13 | } 14 | \item{digits}{ 15 | Significant digits reported in the printed table. 16 | } 17 | \item{...}{ 18 | Optional arguments passed to printing function 19 | } 20 | } 21 | \description{Print summary of quantile regression object} 22 | \seealso{ \code{\link{summary.rq}} } 23 | 24 | \keyword{ regression } 25 | -------------------------------------------------------------------------------- /man/q489.Rd: -------------------------------------------------------------------------------- 1 | \name{q489} 2 | \alias{q489} 3 | \title{Even Quicker Sample Quantiles } 4 | \description{ 5 | The function \code{q489} computes a single sample quantile using a 6 | fortran implementation of the Floyd and Rivest (1975) algorithm. 7 | In contrast to the more elaborate function \code{kuantile} that uses 8 | the Kiweil (2005) implementation it does not attempt to replicate the 9 | nine varieties of quantiles as documented in the base function. 10 | \code{quantile} 11 | } 12 | \usage{ 13 | q489(x, tau = .5) 14 | } 15 | \arguments{ 16 | \item{x}{numeric vector} 17 | \item{tau}{the quantile of intereste.} 18 | } 19 | \details{ This is a direct translation of the Algol 68 implementation of 20 | Floyd and Rivest (1975), implemented in Ratfor. For the median, average 21 | case behavior requires \eqn{1.5 n + O((n log n)^{1/2})} comparisons. 22 | In preliminary experiments it seems to be somewhat faster in large samples 23 | than the implementation \code{kuantile} of Kiwiel (2005). See Knuth (1998) 24 | for further details. No provision is made for non-uniqueness of the quantile. 25 | so, when \eqn{\tau n} is an integer there may be some discrepancy.} 26 | \value{ 27 | A scalar quantile of the same length as the vector p. 28 | } 29 | \references{ 30 | R.W. Floyd and R.L. Rivest: "Algorithm 489: The Algorithm 31 | SELECT---for Finding the $i$th Smallest of $n$ Elements", 32 | Comm. ACM 18, 3 (1975) 173, 33 | 34 | K.C. 
Kiwiel: On Floyd and Rivest's SELECT Algorithm, Theoretical 35 | Computer Sci. 347 (2005) 214-238. 36 | 37 | D. Knuth, The Art of Computer Programming, Volume 3, Sorting and 38 | Searching, 2nd Ed., (1998), Addison-Wesley. 39 | } 40 | \author{ R.W.Floyd and R.L.Rivest, R implementation: Roger Koenker } 41 | \seealso{\code{\link{quantile}}} 42 | \examples{ 43 | medx <- q489(rnorm(1001)) 44 | } 45 | \keyword{univar} 46 | -------------------------------------------------------------------------------- /man/qrisk.Rd: -------------------------------------------------------------------------------- 1 | \name{qrisk} 2 | \alias{qrisk} 3 | \title{ Function to compute Choquet portfolio weights} 4 | \description{ 5 | This function solves a weighted quantile regression problem to find the 6 | optimal portfolio weights minimizing a Choquet risk criterion described 7 | in Bassett, Koenker, and Kordas (2002). 8 | } 9 | \usage{ 10 | qrisk(x, alpha = c(0.1, 0.3), w = c(0.7, 0.3), mu = 0.07, 11 | R = NULL, r = NULL, lambda = 10000) 12 | } 13 | \arguments{ 14 | \item{x}{n by q matrix of historical or simulated asset returns } 15 | \item{alpha}{vector of alphas receiving positive weights in the Choquet criterion} 16 | \item{w}{weights associated with alpha in the Choquet criterion } 17 | \item{mu}{targeted rate of return for the portfolio} 18 | \item{R}{matrix of constraints on the parameters of the quantile regression, see below} 19 | \item{r}{rhs vector of the constraints described by R} 20 | \item{lambda}{Lagrange multiplier associated with the constraints} 21 | } 22 | \details{ 23 | The function calls \code{rq.fit.hogg} which in turn calls the constrained Frisch 24 | Newton algorithm. The constraints Rb=r are intended to apply only to the slope 25 | parameters, not the intercept parameters. The user is completely responsible to 26 | specify constraints that are consistent, ie that have at least one feasible point. 27 | See examples for imposing non-negative portfolio weights. 28 | } 29 | \value{ 30 | \item{pihat}{the optimal portfolio weights} 31 | \item{muhat }{the in-sample mean return of the optimal portfolio} 32 | \item{qrisk}{the in-sample Choquet risk of the optimal portfolio} 33 | } 34 | \references{ 35 | \url{http://www.econ.uiuc.edu/~roger/research/risk/risk.html} 36 | 37 | Bassett, G., R. Koenker, G Kordas, (2004) Pessimistic Portfolio Allocation and 38 | Choquet Expected Utility, J. of Financial Econometrics, 2, 477-492. 39 | } 40 | \author{ R. Koenker } 41 | \examples{ 42 | #Fig 1: ... of Choquet paper 43 | mu1 <- .05; sig1 <- .02; mu2 <- .09; sig2 <- .07 44 | x <- -10:40/100 45 | u <- seq(min(c(x)),max(c(x)),length=100) 46 | f1 <- dnorm(u,mu1,sig1) 47 | F1 <- pnorm(u,mu1,sig1) 48 | f2 <- dchisq(3-sqrt(6)*(u-mu1)/sig1,3)*(sqrt(6)/sig1) 49 | F2 <- pchisq(3-sqrt(6)*(u-mu1)/sig1,3) 50 | f3 <- dnorm(u,mu2,sig2) 51 | F3 <- pnorm(u,mu2,sig2) 52 | f4 <- dchisq(3+sqrt(6)*(u-mu2)/sig2,3)*(sqrt(6)/sig2) 53 | F4 <- pchisq(3+sqrt(6)*(u-mu2)/sig2,3) 54 | plot(rep(u,4),c(f1,f2,f3,f4),type="n",xlab="return",ylab="density") 55 | lines(u,f1,lty=1,col="blue") 56 | lines(u,f2,lty=2,col="red") 57 | lines(u,f3,lty=3,col="green") 58 | lines(u,f4,lty=4,col="brown") 59 | legend(.25,25,paste("Asset ",1:4),lty=1:4,col=c("blue","red","green","brown")) 60 | #Now generate random sample of returns from these four densities. 
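# (each series below is first standardized to mean zero and unit variance,
#  then rescaled to the mean and standard deviation of its target density)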
61 | n <- 1000 62 | if(TRUE){ #generate a new returns sample if TRUE 63 | x1 <- rnorm(n) 64 | x1 <- (x1-mean(x1))/sqrt(var(x1)) 65 | x1 <- x1*sig1 + mu1 66 | x2 <- -rchisq(n,3) 67 | x2 <- (x2-mean(x2))/sqrt(var(x2)) 68 | x2 <- x2*sig1 +mu1 69 | x3 <- rnorm(n) 70 | x3 <- (x3-mean(x3))/sqrt(var(x3)) 71 | x3 <- x3*sig2 +mu2 72 | x4 <- rchisq(n,3) 73 | x4 <- (x4-mean(x4))/sqrt(var(x4)) 74 | x4 <- x4*sig2 +mu2 75 | } 76 | library(quantreg) 77 | x <- cbind(x1,x2,x3,x4) 78 | qfit <- qrisk(x) 79 | sfit <- srisk(x) 80 | # Try new distortion function 81 | qfit1 <- qrisk(x,alpha = c(.05,.1), w = c(.9,.1),mu = 0.09) 82 | # Constrain portfolio weights to be non-negative 83 | qfit2 <- qrisk(x,alpha = c(.05,.1), w = c(.9,.1),mu = 0.09, 84 | R = rbind(rep(-1,3), diag(3)), r = c(-1, rep(0,3))) 85 | } 86 | \keyword{regression} 87 | \keyword{robust} 88 | \seealso{\code{\link{rq.fit.hogg}}, \code{\link{srisk}}} 89 | -------------------------------------------------------------------------------- /man/ranks.Rd: -------------------------------------------------------------------------------- 1 | \name{ranks} 2 | \alias{ranks} 3 | \title{ 4 | Quantile Regression Ranks 5 | } 6 | \description{ 7 | Function to compute ranks from the dual (regression rankscore) process. 8 | } 9 | \usage{ 10 | ranks(v, score="wilcoxon", tau=0.5, trim = NULL) 11 | } 12 | \arguments{ 13 | \item{v}{ 14 | object of class \code{"rq.process"} generated by \code{rq()} 15 | } 16 | \item{score}{ 17 | The score function desired. Currently implemented score functions 18 | are \code{"wilcoxon"}, \code{"normal"}, and \code{"sign"} 19 | which are asymptotically optimal for 20 | the logistic, Gaussian and Laplace location shift models respectively. 21 | The "normal" score function is also sometimes called van der Waerden scores. 22 | Also implemented are the \code{"tau"} which generalizes sign scores to an 23 | arbitrary quantile, \code{"interquartile"} which is appropriate 24 | for tests of scale shift, \code{normalscale} for Gaussian scale shift, 25 | \code{halfnormalscale} for Gaussian scale shift only to the right of the median, 26 | and \code{lehmann} for Lehmann local alternatives. See Koenker (2010) for 27 | further details on the last three of these scores. 28 | } 29 | \item{tau}{ 30 | the optional value of \code{tau} if the \code{"tau"} score function is used. 31 | } 32 | \item{trim}{optional trimming proportion parameter(s) -- only applicable for the 33 | Wilcoxon score function -- when one value is provided there is symmetric 34 | trimming of the score integral to the interval \code{(trim, 1-trim)}, when 35 | there are two values provided, then the trimming restricts the integration 36 | to \code{(trim[1], trim[2])}.} 37 | } 38 | \value{ 39 | The function returns two components. One is the ranks, the 40 | other is a scale factor which is the \eqn{L_2} norm of the score 41 | function. All score functions should be normalized to have mean zero. 42 | } 43 | \details{ 44 | See GJKP(1993) for further details. 45 | } 46 | \references{ 47 | Gutenbrunner, C., J. Jureckova, Koenker, R. and Portnoy, 48 | S. (1993) Tests of linear hypotheses based on regression 49 | rank scores, \emph{Journal of Nonparametric Statistics}, (2), 307--331. 50 | 51 | Koenker, R. Rank Tests for Heterogeneous Treatment Effects with Covariates, preprint. 
52 | } 53 | \seealso{ 54 | \code{\link{rq}}, \code{\link{rq.test.rank}} \code{\link{anova}} 55 | } 56 | \examples{ 57 | data(stackloss) 58 | ranks(rq(stack.loss ~ stack.x, tau=-1)) 59 | } 60 | \keyword{regression} 61 | -------------------------------------------------------------------------------- /man/rearrange.Rd: -------------------------------------------------------------------------------- 1 | \name{rearrange} 2 | \alias{rearrange} 3 | \title{Rearrangement} 4 | \description{ Monotonize a step function by rearrangement } 5 | \usage{ rearrange(f,xmin,xmax) } 6 | \arguments{ 7 | \item{f}{ object of class stepfun } 8 | \item{xmin}{minimum of the support of the rearranged f} 9 | \item{xmax}{maximum of the support of the rearranged f} 10 | 11 | } 12 | \details{ 13 | Given a stepfunction \eqn{Q(u)}, not necessarily monotone, let 14 | \eqn{F(y) = \int \{ Q(u) \le y \} du} denote the associated cdf 15 | obtained by randomly evaluating \eqn{Q} at \eqn{U \sim U[0,1]}. The 16 | rearranged version of \eqn{Q} is \eqn{\tilde Q (u) = \inf \{ 17 | u: F(y) \ge u \}. The rearranged function inherits the right 18 | or left continuity of original stepfunction.} 19 | } 20 | \value{ Produces transformed stepfunction that is monotonic increasing. } 21 | \references{ 22 | Chernozhukov, V., I. Fernandez-Val, and A. Galichon, (2006) Quantile and Probability 23 | Curves without Crossing, Econometrica, forthcoming. 24 | 25 | Chernozhukov, V., I. Fernandez-Val, and A. Galichon, (2009) Improving Estimates of 26 | Monotone Functions by Rearrangement, Biometrika, 96, 559--575. 27 | 28 | Hardy, G.H., J.E. Littlewood, and G. Polya (1934) Inequalities, Cambridge U. Press. 29 | } 30 | \author{R. Koenker} 31 | \seealso{ \code{\link{rq}} \code{\link{rearrange}}} 32 | \examples{ 33 | data(engel) 34 | z <- rq(foodexp ~ income, tau = -1,data =engel) 35 | zp <- predict(z,newdata=list(income=quantile(engel$income,.03)),stepfun = TRUE) 36 | plot(zp,do.points = FALSE, xlab = expression(tau), 37 | ylab = expression(Q ( tau )), main="Engel Food Expenditure Quantiles") 38 | plot(rearrange(zp),do.points = FALSE, add=TRUE,col.h="red",col.v="red") 39 | legend(.6,300,c("Before Rearrangement","After Rearrangement"),lty=1,col=c("black","red")) 40 | } 41 | \keyword{regression} 42 | -------------------------------------------------------------------------------- /man/residuals.nlrq.Rd: -------------------------------------------------------------------------------- 1 | \name{residuals.nlrq} 2 | \alias{residuals.nlrq} 3 | \title{ Return residuals of an nlrq object } 4 | \description{ 5 | Set algorithmic parameters for nlrq (nonlinear quantile regression function) 6 | } 7 | \usage{ 8 | \method{residuals}{nlrq}(object, type = c("response", "rho"), ...) 9 | } 10 | \arguments{ 11 | \item{object}{an `nlrq' object as returned by function `nlrq'} 12 | \item{type}{the type of residuals to return: "response" is the distance 13 | between observed and predicted values; "rho" is the weighted distance used 14 | to calculate the objective function in the minimisation algorithm as 15 | tau * pmax(resid, 0) + (1 - tau) * pmin(resid, 0), where resid are the 16 | simple residuals as above (with type="response"). 
17 | } 18 | \item{...}{further arguments passed to or from other methods.} 19 | } 20 | \seealso{ \code{\link{nlrq}} } 21 | 22 | \keyword{models} 23 | \keyword{regression} 24 | \keyword{nonlinear} 25 | -------------------------------------------------------------------------------- /man/rq.fit.Rd: -------------------------------------------------------------------------------- 1 | \name{rq.fit} 2 | \alias{rq.fit} 3 | \title{Function to choose method for Quantile Regression } 4 | \usage{ 5 | rq.fit(x, y, tau=0.5, method="br", ...) 6 | } 7 | \arguments{ 8 | \item{x}{ 9 | the design matrix 10 | } 11 | \item{y}{ 12 | the response variable 13 | } 14 | \item{tau}{ 15 | the quantile desired, if tau lies outside (0,1) the whole process 16 | is estimated. 17 | } 18 | \item{method}{ 19 | method of computation: "br" is Barrodale and Roberts exterior point 20 | "fn" is the Frisch-Newton interior point method. 21 | } 22 | \item{...}{ 23 | Optional arguments passed to fitting routine. 24 | } 25 | } 26 | 27 | \description{Function to choose method for quantile regression} 28 | \seealso{ \code{\link{rq}} \code{\link{rq.fit.br}} \code{\link{rq.fit.fnb}}} 29 | 30 | \keyword{ regression } 31 | -------------------------------------------------------------------------------- /man/rq.fit.br.Rd: -------------------------------------------------------------------------------- 1 | \name{rq.fit.br} 2 | \alias{rq.fit.br} 3 | \title{ 4 | Quantile Regression Fitting by Exterior Point Methods 5 | } 6 | \description{ 7 | This function controls the details of QR fitting by the simplex approach 8 | embodied in the algorithm of Koenker and d'Orey based on the median 9 | regression algorithm of Barrodale and Roberts. Typically, options 10 | controlling the construction of the confidence intervals would be passed 11 | via the \code{\dots{}} argument of \code{rq()}. 12 | } 13 | \usage{ 14 | rq.fit.br(x, y, tau=0.5, alpha=0.1, ci=FALSE, iid=TRUE, interp=TRUE, tcrit=TRUE) 15 | } 16 | \arguments{ 17 | \item{x}{ 18 | the design matrix 19 | } 20 | \item{y}{ 21 | the response variable 22 | } 23 | \item{tau}{ 24 | the quantile desired, if tau lies outside (0,1) the whole process 25 | is estimated. 26 | } 27 | \item{alpha}{ 28 | the nominal noncoverage probability for the confidence intervals, i.e. 1-alpha 29 | is the nominal coverage probability of the intervals. 30 | } 31 | \item{ci}{ 32 | logical flag if T then compute confidence intervals for the parameters 33 | using the rank inversion method of Koenker (1994). See \code{rq()} for more 34 | details. If F then return only the estimated coefficients. Note that 35 | for large problems the default option ci = TRUE can be rather slow. 36 | Note also that rank inversion only works for p>1, an error message is 37 | printed in the case that ci=T and p=1. 38 | } 39 | \item{iid}{ 40 | logical flag if T then the rank inversion is based on an assumption of 41 | iid error model, if F then it is based on an nid error assumption. 42 | See Koenker and Machado (1999) for further details on this distinction. 43 | } 44 | \item{interp}{ 45 | As with typical order statistic type confidence intervals the test 46 | statistic is discrete, so it is reasonable to consider intervals that 47 | interpolate between values of the parameter just below the specified 48 | cutoff and values just above the specified cutoff. If \code{interp = 49 | F} then 50 | the 2 ``exact'' values above and below on which the interpolation would 51 | be based are returned. 
52 | } 53 | \item{tcrit}{ 54 | Logical flag if T - Student t critical values are used, if F then normal 55 | values are used. 56 | } 57 | } 58 | \value{ 59 | Returns an object of class \code{"rq"} 60 | for tau in (0,1), or else of class \code{"rq.process"}. 61 | Note that \code{rq.fit.br} when called for a single tau value 62 | will return the vector of optimal dual variables. 63 | See \code{\link{rq.object}} and \code{\link{rq.process.object}} 64 | for further details. 65 | } 66 | \details{ 67 | If tau lies in (0,1) then an object of class \code{"rq"} is 68 | returned with various 69 | related inference apparatus. If tau lies outside [0,1] then an object 70 | of class \code{rq.process} is returned. In this case parametric programming 71 | methods are used to find all of the solutions to the QR problem for 72 | tau in (0,1), the p-variate resulting process is then returned as the 73 | array sol containing the primal solution and dsol containing the dual 74 | solution. There are roughly \eqn{O(n \log n))}{O(n log n)} distinct 75 | solutions, so users should 76 | be aware that these arrays may be large and somewhat time consuming to 77 | compute for large problems. 78 | } 79 | \references{ 80 | Koenker, R. and J.A.F. Machado, (1999) Goodness of fit and related inference 81 | processes for quantile regression, 82 | \emph{J. of Am Stat. Assoc.}, 94, 1296-1310. 83 | } 84 | \seealso{ 85 | \code{\link{rq}}, \code{\link{rq.fit.fnb}} 86 | } 87 | \examples{ 88 | data(stackloss) 89 | rq.fit.br(stack.x, stack.loss, tau=.73 ,interp=FALSE) 90 | } 91 | \keyword{regression} 92 | -------------------------------------------------------------------------------- /man/rq.fit.conquer.Rd: -------------------------------------------------------------------------------- 1 | \name{rq.fit.conquer} 2 | \alias{rq.fit.conquer} 3 | \title{Optional Fitting Method for Quantile Regression} 4 | \description{ 5 | This fitting method provides a link to the gradient descent 6 | for convolution smoothed quantile regression problem implemented 7 | in the \pkg{conquer} package of He et al (2020).} 8 | \usage{ 9 | rq.fit.conquer (x, y, tau=0.5, kernel = c("Gaussian", "uniform", 10 | "parabolic", "triangular"), h = 0, tol = 1e-04, 11 | iteMax = 5000, ci = FALSE, alpha = 0.05, B = 200) 12 | } 13 | \arguments{ 14 | \item{x}{design matrix usually supplied via rq(), expected to 15 | have a intercept as the first column } 16 | \item{y}{ response vector usually supplied via rq() } 17 | \item{tau}{ quantile of interest } 18 | \item{kernel}{A character string specifying the choice of 19 | kernel function. Default is "Gaussian". Other choices are 20 | "uniform", "parabolic" or "triangular".} 21 | \item{h}{The bandwidth parameter for kernel smoothing of the QR 22 | objective function. Default is \eqn{max{((log(n) + p) / n)^0.4, 0.05}}. 23 | The default is used if the input value is less than 0.05.} 24 | \item{tol}{Tolerance level of the gradient descent 25 | algorithm. The gradient descent algorithm terminates when the 26 | maximal entry of the gradient is less than "tol". Default is 27 | 1e-05.} 28 | \item{iteMax}{Maximum number of iterations. Default is 5000.} 29 | \item{ci}{A logical flag. Default is FALSE. If "ci = 30 | TRUE", then three types of confidence intervals (percentile, 31 | pivotal and normal) will be constructed via multiplier 32 | bootstrap. 
This option is subsumed in normal use by the 33 | \code{summary.rq} functionality.} 34 | \item{alpha}{Nominal level for confidence intervals, may be passed 35 | via the call to \code{summary}} 36 | \item{B}{Number of bootstrap replications. May be passed via summary.} 37 | } 38 | \details{ 39 | See documentation in the \pkg{conquer} package. 40 | } 41 | \value{ 42 | Returns an object of class "rq". 43 | } 44 | \references{ 45 | Xuming He and Xiaoou Pan and Kean Ming Tan and Wen-Xin Zhou, (2020) 46 | conquer: Convolution-Type Smoothed Quantile Regression, 47 | \url{https://CRAN.R-project.org/package=conquer}} 48 | 49 | \seealso{\code{\link{rq}}} 50 | 51 | \keyword{regression} 52 | -------------------------------------------------------------------------------- /man/rq.fit.fnb.Rd: -------------------------------------------------------------------------------- 1 | \name{rq.fit.fnb} 2 | \alias{rq.fit.fnb} 3 | \title{ 4 | Quantile Regression Fitting via Interior Point Methods 5 | } 6 | \description{ 7 | This is a lower level routine called by \code{rq()} to compute quantile 8 | regression methods using the Frisch-Newton algorithm. 9 | } 10 | \usage{ 11 | rq.fit.fnb(x, y, tau=0.5, rhs = (1-tau)*apply(x,2,sum), beta=0.99995, eps=1e-06) 12 | } 13 | \arguments{ 14 | \item{x}{ 15 | The design matrix 16 | } 17 | \item{y}{ 18 | The response vector 19 | } 20 | \item{tau}{ 21 | The quantile of interest, must lie in (0,1) 22 | } 23 | \item{rhs}{ 24 | The right hand size of the dual equality constraint, modify at your own risk. 25 | } 26 | \item{beta}{ 27 | technical step length parameter -- alter at your own risk! 28 | } 29 | \item{eps}{ 30 | tolerance parameter for convergence. In cases of multiple optimal solutions 31 | there may be some discrepancy between solutions produced by method 32 | \code{"fn"} and method \code{"br"}. This is due to the fact that 33 | \code{"fn"} tends to converge to a point near the centroid of the 34 | solution set, while \code{"br"} stops at a vertex of the set. 35 | } 36 | } 37 | \value{ 38 | returns an object of class \code{"rq"}, which can be passed to 39 | \code{\link{summary.rq}} to obtain standard errors, etc. 40 | } 41 | \details{ 42 | The details of the algorithm are explained in Koenker and Portnoy (1997). 43 | The basic idea can be traced back to the log-barrier methods proposed by 44 | Frisch in the 1950's for constrained optimization. But the current 45 | implementation is based on proposals by Mehrotra and others in the 46 | recent (explosive) literature on interior point methods for solving linear 47 | programming problems. This function replaces an earlier one \code{rq.fit.fn}, 48 | which required the initial dual values to be feasible. This version allows the 49 | user to specify an infeasible starting point for the dual problem, that 50 | is one that may not satisfy the dual equality constraints. It still 51 | assumes that the starting value satisfies the upper and lower bounds. 52 | } 53 | \references{ 54 | Koenker, R. and S. Portnoy (1997). 55 | The Gaussian Hare and the Laplacian Tortoise: 56 | Computability of squared-error vs. absolute-error estimators, with discussion, 57 | \emph{Statistical Science}, \bold{12}, 279-300. 
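A small sketch, assuming the stackloss data used elsewhere in these pages, of calling the fitter directly with an explicit intercept column rather than through rq():

library(quantreg)
data(stackloss)
X <- cbind(1, stack.x)                                  # explicit intercept column
fit.fnb <- rq.fit.fnb(X, stack.loss, tau = 0.25)
fit.rq  <- rq(stack.loss ~ stack.x, tau = 0.25, method = "fn")
cbind(fnb = fit.fnb$coefficients, rq = coef(fit.rq))    # agreement up to tolerance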
58 | } 59 | \seealso{ 60 | \code{\link{rq}}, \code{\link{rq.fit.br}}, 61 | \code{\link{rq.fit.pfn}} 62 | } 63 | \keyword{regression} 64 | -------------------------------------------------------------------------------- /man/rq.fit.fnc.Rd: -------------------------------------------------------------------------------- 1 | \name{rq.fit.fnc} 2 | \alias{rq.fit.fnc} 3 | \title{ 4 | Quantile Regression Fitting via Interior Point Methods 5 | } 6 | \description{ 7 | This is a lower level routine called by \code{rq()} to compute quantile 8 | regression methods using the Frisch-Newton algorithm. It allows the 9 | call to specify linear inequality constraints to which the fitted 10 | coefficients will be subjected. The constraints are assumed to be 11 | formulated as Rb >= r. 12 | } 13 | \usage{ 14 | rq.fit.fnc(x, y, R, r, tau=0.5, beta=0.9995, eps=1e-06) 15 | } 16 | \arguments{ 17 | \item{x}{ 18 | The design matrix 19 | } 20 | \item{y}{ 21 | The response vector 22 | } 23 | \item{R}{ 24 | The matrix describing the inequality constraints 25 | } 26 | \item{r}{ 27 | The right hand side vector of inequality constraints 28 | } 29 | \item{tau}{ 30 | The quantile of interest, must lie in (0,1) 31 | } 32 | \item{beta}{ 33 | technical step length parameter -- alter at your own risk! 34 | } 35 | \item{eps}{ 36 | tolerance parameter for convergence. In cases of multiple optimal solutions 37 | there may be some discrepancy between solutions produced by method 38 | \code{"fn"} and method \code{"br"}. This is due to the fact that 39 | \code{"fn"} tends to converge to a point near the centroid of the 40 | solution set, while \code{"br"} stops at a vertex of the set. 41 | } 42 | } 43 | \value{ 44 | returns an object of class \code{"rq"}, which can be passed to 45 | \code{\link{summary.rq}} to obtain standard errors, etc. 46 | } 47 | \details{ 48 | The details of the algorithm are explained in Koenker and Ng (2002). 49 | The basic idea can be traced back to the log-barrier methods proposed by 50 | Frisch in the 1950's for constrained optimization. But the current 51 | implementation is based on proposals by Mehrotra and others in the 52 | recent (explosive) literature on interior point methods for solving linear 53 | programming problems. See \code{"rq"} helpfile for an example. 54 | It is an open research problem to provide an inference apparatus for 55 | inequality constrained quantile regression. 56 | } 57 | \references{ 58 | Koenker, R. and S. Portnoy (1997). 59 | The Gaussian Hare and the Laplacian Tortoise: 60 | Computability of squared-error vs. absolute-error estimators, with discussion, 61 | \emph{Statistical Science}, \bold{12}, 279-300. 62 | 63 | Koenker, R. and P. Ng(2005). 64 | Inequality Constrained Quantile Regression, \emph{Sankya}, 418-440. 65 | } 66 | \seealso{ 67 | \code{\link{rq}}, \code{\link{rq.fit.br}}, 68 | \code{\link{rq.fit.pfn}} 69 | } 70 | \keyword{regression} 71 | -------------------------------------------------------------------------------- /man/rq.fit.hogg.Rd: -------------------------------------------------------------------------------- 1 | \name{rq.fit.hogg} 2 | \alias{rq.fit.hogg} 3 | \title{weighted quantile regression fitting} 4 | \description{ 5 | Function to estimate a regression mmodel by minimizing the weighted sum of several 6 | quantile regression functions. See Koenker(1984) for an asymptotic look at these 7 | estimators. 
This is a slightly generalized version of what Zou and Yuan (2008) call 8 | composite quantile regression in that it permits weighting of the components of the 9 | objective function and also allows further linear inequality constraints on the coefficients. 10 | } 11 | \usage{ 12 | rq.fit.hogg(x, y, taus = c(0.1, 0.3, 0.5), weights = c(0.7, 0.2, 0.1), 13 | R = NULL, r = NULL, beta = 0.99995, eps = 1e-06) 14 | } 15 | \arguments{ 16 | \item{x}{design matrix} 17 | \item{y}{response vector } 18 | \item{taus}{quantiles getting positive weight} 19 | \item{weights}{weights assigned to the quantiles } 20 | \item{R}{optional matrix describing linear inequality constraints} 21 | \item{r}{optional vector describing linear inequality constraints} 22 | \item{beta}{step length parameter of the Frisch Newton Algorithm} 23 | \item{eps}{tolerance parameter for the Frisch Newton Algorithm} 24 | } 25 | \details{ 26 | Mimimizes a weighted sum of quantile regression objective functions using 27 | the specified taus. The model permits distinct intercept parameters at 28 | each of the specified taus, but the slope parameters are constrained to 29 | be the same for all taus. This estimator was originally suggested to 30 | the author by Bob Hogg in one of his famous blue notes of 1979. 31 | The algorithm used to solve the resulting linear programming problems 32 | is either the Frisch Newton algorithm described in Portnoy and Koenker (1997), 33 | or the closely related algorithm described in Koenker and Ng(2002) that 34 | handles linear inequality constraints. See \code{\link{qrisk}} for illustration 35 | of its use in portfolio allocation. 36 | 37 | Linear inequality constraints of the form \eqn{Rb \geq r} can be imposed with 38 | the convention that \eqn{b} is a \eqn{m+p} where \eqn{m} is the \code{length(taus)} 39 | and \eqn{p} is the column dimension of \code{x} without the intercept. 40 | } 41 | \value{ 42 | \item{coefficients}{estimated coefficients of the model} 43 | } 44 | \references{ 45 | Zou, Hui and and Ming Yuan (2008) Composite quantile regression and the 46 | Oracle model selection theory, Annals of Statistics, 36, 1108--11120. 47 | 48 | Koenker, R. (1984) A note on L-estimates for linear models, 49 | Stat. and Prob Letters, 2, 323-5. 50 | 51 | Portnoy, S. and Koenker, R. (1997) The Gaussian Hare and the 52 | Laplacean Tortoise: Computability of Squared-error vs Absolute Error Estimators, 53 | (with discussion). Statistical Science, (1997) 12, 279-300. 54 | 55 | Koenker, R. and Ng, P (2003) Inequality Constrained Quantile Regression, preprint. 56 | } 57 | \author{ Roger Koenker } 58 | 59 | 60 | \seealso{ \code{\link{qrisk}}} 61 | 62 | \keyword{regression} 63 | \keyword{ robust } 64 | -------------------------------------------------------------------------------- /man/rq.fit.lasso.Rd: -------------------------------------------------------------------------------- 1 | \name{rq.fit.lasso} 2 | \alias{rq.fit.lasso} 3 | \title{ 4 | Lasso Penalized Quantile Regression 5 | } 6 | \description{ 7 | The fitting method implements the lasso penalty for 8 | fitting quantile regression models. When the argument \code{lambda} 9 | is a scalar the penalty function is the l1 10 | norm of the last (p-1) coefficients, under the presumption that the 11 | first coefficient is an intercept parameter that should not be subject 12 | to the penalty. 
When \code{lambda} is a vector it should have length 13 | equal the column dimension of the matrix \code{x} and then defines a 14 | coordinatewise specific vector of lasso penalty parameters. In this 15 | case \code{lambda} entries of zero indicate covariates that are not 16 | penalized. If \code{lambda} is not specified, a default value is 17 | selected according to the proposal of Belloni and Chernozhukov (2011). 18 | See \code{LassoLambdaHat} for further details. 19 | There should be a sparse version of this, but isn't (yet). 20 | There should also be a preprocessing version, but isn't (yet). 21 | } 22 | \usage{ 23 | rq.fit.lasso(x, y, tau = 0.5, lambda = NULL, beta = .99995, eps = 1e-06) 24 | } 25 | \arguments{ 26 | \item{x}{ 27 | the design matrix 28 | } 29 | \item{y}{ 30 | the response variable 31 | } 32 | \item{tau}{ 33 | the quantile desired, defaults to 0.5. 34 | } 35 | \item{lambda}{ 36 | the value of the penalty parameter(s) that determine how much shrinkage is done. 37 | This should be either a scalar, or a vector of length equal to the column dimension 38 | of the \code{x} matrix. If unspecified, a default value is chosen according to 39 | the proposal of Belloni and Chernozhukov (2011). 40 | } 41 | \item{beta}{ 42 | step length parameter for Frisch-Newton method. 43 | } 44 | \item{eps}{ 45 | tolerance parameter for convergence. 46 | } 47 | } 48 | \value{ 49 | Returns a list with a coefficient, residual, tau and lambda components. 50 | When called from \code{"rq"} (as intended) the returned object 51 | has class "lassorqs". 52 | } 53 | \references{ 54 | Koenker, R. (2005) \emph{Quantile Regression}, CUP. 55 | 56 | Belloni, A. and V. Chernozhukov. (2011) l1-penalized quantile regression 57 | in high-dimensional sparse models. \emph{Annals of Statistics}, 39 82 - 130. 58 | } 59 | \author{R. Koenker} 60 | \seealso{ 61 | \code{\link{rq}}} 62 | \examples{ 63 | n <- 60 64 | p <- 7 65 | rho <- .5 66 | beta <- c(3,1.5,0,2,0,0,0) 67 | R <- matrix(0,p,p) 68 | for(i in 1:p){ 69 | for(j in 1:p){ 70 | R[i,j] <- rho^abs(i-j) 71 | } 72 | } 73 | set.seed(1234) 74 | x <- matrix(rnorm(n*p),n,p) \%*\% t(chol(R)) 75 | y <- x \%*\% beta + rnorm(n) 76 | 77 | f <- rq(y ~ x, method="lasso",lambda = 30) 78 | g <- rq(y ~ x, method="lasso",lambda = c(rep(0,4),rep(30,4))) 79 | } 80 | \keyword{regression} 81 | -------------------------------------------------------------------------------- /man/rq.fit.pfn.Rd: -------------------------------------------------------------------------------- 1 | \name{rq.fit.pfn} 2 | \alias{rq.fit.pfn} 3 | \title{ Preprocessing Algorithm for Quantile Regression} 4 | \description{ 5 | A preprocessing algorithm for the Frisch Newton algorithm 6 | for quantile regression. This is one possible method for rq().} 7 | \usage{ 8 | rq.fit.pfn(x, y, tau=0.5, Mm.factor=0.8, max.bad.fixups=3, eps=1e-06) 9 | } 10 | \arguments{ 11 | \item{x}{design matrix usually supplied via rq() } 12 | \item{y}{ response vector usually supplied via rq() } 13 | \item{tau}{ quantile of interest } 14 | \item{Mm.factor}{ constant to determine sub sample size m} 15 | \item{max.bad.fixups}{ number of allowed mispredicted signs of residuals } 16 | \item{eps}{ convergence tolerance } 17 | } 18 | \details{ 19 | Preprocessing algorithm to reduce the effective sample size for QR 20 | problems with (plausibly) iid samples. The preprocessing relies 21 | on subsampling of the original data, so situations in which the 22 | observations are not plausibly iid, are likely to cause problems. 
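A hedged sketch of the usual way to invoke this preprocessing method, through rq() with method = "pfn"; the simulated data below are purely illustrative:

library(quantreg)
set.seed(2)
n <- 100000                                  # preprocessing pays off for large n
x <- rnorm(n)
y <- 1 + x + rnorm(n)
fit <- rq(y ~ x, tau = 0.5, method = "pfn")
coef(fit)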
23 | The tolerance eps may be relaxed somewhat. 24 | } 25 | \value{ 26 | Returns an object of type rq 27 | } 28 | \references{ Portnoy and Koenker, Statistical Science, (1997) 279-300} 29 | \author{ Roger Koenker } 30 | \seealso{ \code{\link{rq}}} 31 | 32 | \keyword{ regression } 33 | -------------------------------------------------------------------------------- /man/rq.fit.pfnb.Rd: -------------------------------------------------------------------------------- 1 | \name{rq.fit.pfnb} 2 | \alias{rq.fit.pfnb} 3 | \title{ 4 | Quantile Regression Fitting via Interior Point Methods 5 | } 6 | \description{ 7 | This is a lower level routine called by \code{rq()} to compute quantile 8 | regression parameters using the Frisch-Newton algorithm. It uses a form 9 | of preprocessing to accelerate the computations for situations in which 10 | several taus are required for the same model specification. 11 | } 12 | \usage{ 13 | rq.fit.pfnb(x, y, tau, m0 = NULL, eps = 1e-06) 14 | } 15 | \arguments{ 16 | \item{x}{ 17 | The design matrix 18 | } 19 | \item{y}{ 20 | The response vector 21 | } 22 | \item{tau}{ 23 | The quantiles of interest, must lie in (0,1), be sorted and preferably equally 24 | spaced. 25 | } 26 | \item{m0}{ 27 | An initial reduced sample size by default is set to be 28 | \code{round((n * (log(p) + 1) )^(2/3)} this could be explored further 29 | to aid performance in extreme cases. 30 | } 31 | \item{eps}{A tolerance parameter intended to bound the confidence band entries 32 | away from zero.} 33 | } 34 | \value{ 35 | returns a list with elements consisting of 36 | \item{coefficients}{a matrix of dimension ncol(x) by length(taus) 37 | } 38 | \item{nit}{a 5 by m matrix of iteration counts: first two coordinates 39 | of each column are the number of interior point iterations, the third is the 40 | number of observations in the final globbed sample size, and the last two 41 | are the number of fixups and bad-fixups respectively. This is intended to 42 | aid fine tuning of the initial sample size, m0.} 43 | \item{info}{an m-vector of convergence flags} 44 | } 45 | \details{ 46 | The details of the Frisch-Newton algorithm are explained in Koenker and Portnoy (1997), 47 | as is the preprocessing idea which is related to partial sorting and the algorithms 48 | such as \code{kuantile} for univariate quantiles that operate in time O(n). 49 | The preprocessing idea of exploiting nearby quantile solutions to accelerate 50 | estimation of adjacent quantiles is proposed in Chernozhukov et al (2020). 51 | This version calls a fortran version of the preprocessing algorithm that accepts 52 | multiple taus. The preprocessing approach is also implemented for a single tau 53 | in \code{rq.fit.pfn} which may be regarded as a prototype for this function since 54 | it is written entirely in R and therefore is easier to experiment with. 55 | } 56 | \references{ 57 | Koenker, R. and S. Portnoy (1997). 58 | The Gaussian Hare and the Laplacian Tortoise: 59 | Computability of squared-error vs. absolute-error estimators, with discussion, 60 | \emph{Statistical Science}, \bold{12}, 279-300. 61 | 62 | Chernozhukov, V., I., Fernandez-Val, and Melly, B. (2020), `Fast algorithms for 63 | the quantile regression process', Empirical Economics, forthcoming. 
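A hedged sketch (the simulated design is illustrative, not from the help page) of fitting a whole grid of quantiles in one call:

library(quantreg)
set.seed(1)
n <- 5000; p <- 4
X <- cbind(1, matrix(rnorm(n * p), n, p))
y <- drop(X %*% rep(1, p + 1)) + rt(n, df = 3)
taus <- 1:9/10                               # sorted and equally spaced, as recommended
fit <- rq.fit.pfnb(X, y, tau = taus)
dim(fit$coefficients)                        # ncol(X) by length(taus)
fit$nit                                      # iteration counts, useful when tuning m0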
64 | } 65 | \seealso{ 66 | \code{\link{rq}}, \code{\link{rq.fit.br}}, 67 | \code{\link{rq.fit.pfn}} 68 | } 69 | \keyword{regression} 70 | -------------------------------------------------------------------------------- /man/rq.fit.ppro.Rd: -------------------------------------------------------------------------------- 1 | \name{rq.fit.ppro} 2 | \alias{rq.fit.ppro} 3 | \title{ 4 | Preprocessing fitting method for QR 5 | } 6 | \description{ 7 | Preprocessing method for fitting quantile regression models that 8 | exploits the fact that adjacent tau's should have nearly the same 9 | sign vectors for residuals. 10 | } 11 | \usage{ 12 | rq.fit.ppro(x, y, tau, weights = NULL, Mm.factor = 0.8, eps = 1e-06, ...) 13 | } 14 | \arguments{ 15 | \item{x}{ 16 | Design matrix 17 | } 18 | \item{y}{ 19 | Response vector 20 | } 21 | \item{tau}{ 22 | quantile vector of interest 23 | } 24 | \item{weights}{ 25 | case weights 26 | } 27 | \item{Mm.factor}{ 28 | constant determining initial sample size 29 | } 30 | \item{eps}{ 31 | Convergence tolerance 32 | } 33 | \item{\dots}{ 34 | Other arguments 35 | } 36 | } 37 | \details{ 38 | See references for further details. 39 | } 40 | \value{ 41 | Returns a list with components: 42 | \item{coefficients}{Matrix of coefficient estimates} 43 | \item{residuals}{Matrix of residual estimates} 44 | \item{rho}{vector of objective function values} 45 | \item{weights}{vector of case weights} 46 | } 47 | \references{ 48 | Chernozhukov, V. I. Fernandez-Val and B. Melly, 49 | Fast Algorithms for the Quantile Regression Process, 2020, 50 | Empirical Economics., 51 | 52 | Portnoy, S. and R. Koenker, The Gaussian Hare and the Laplacian 53 | Tortoise, Statistical Science, (1997) 279-300 54 | } 55 | \author{ 56 | Blaise Melly and Roger Koenker 57 | } 58 | \seealso{ 59 | \code{\link{rq.fit.pfn}}, \code{\link{boot.rq.pxy}} 60 | } 61 | \keyword{regression} 62 | -------------------------------------------------------------------------------- /man/rq.fit.qfnb.Rd: -------------------------------------------------------------------------------- 1 | \name{rq.fit.qfnb} 2 | \alias{rq.fit.qfnb} 3 | \title{ 4 | Quantile Regression Fitting via Interior Point Methods 5 | } 6 | \description{ 7 | This is a lower level routine called by \code{rq()} to compute quantile 8 | regression parameters using the Frisch-Newton algorithm. In contrast to 9 | method "fn" it computes solutions for all the specified taus inside a 10 | fortran loop. See \code{\link{rq.fit.pfnb}} for further details on a more 11 | efficient preprocessing method. 12 | } 13 | \usage{ 14 | rq.fit.qfnb(x, y, tau) 15 | } 16 | \arguments{ 17 | \item{x}{ 18 | The design matrix 19 | } 20 | \item{y}{ 21 | The response vector 22 | } 23 | \item{tau}{ 24 | The quantiles of interest, must lie in (0,1), be sorted and preferably equally 25 | spaced. 26 | } 27 | } 28 | \value{ 29 | returns a list with elements consisting of 30 | \item{coefficients}{a matrix of dimension ncol(x) by length(taus) 31 | } 32 | \item{nit}{a 3-vector of iteration counts} 33 | \item{info}{a convergence flag} 34 | } 35 | \details{ 36 | The details of the Frisch-Newton algorithm are explained in Koenker and Portnoy (1997). 37 | The basic idea can be traced back to the log-barrier methods proposed by 38 | Frisch in the 1950's for linear programming. But the current 39 | implementation is based on proposals by Mehrotra and others in the 40 | recent (explosive) literature on interior point methods for solving linear 41 | programming problems. 
This function replaces an earlier one \code{rq.fit.fn}, 42 | which required the initial dual values to be feasible. The current version allows the 43 | user to specify an infeasible starting point for the dual problem, that 44 | is one that may not satisfy the dual equality constraints. It still 45 | assumes that the starting value satisfies the upper and lower bounds. 46 | } 47 | \references{ 48 | Koenker, R. and S. Portnoy (1997). 49 | The Gaussian Hare and the Laplacian Tortoise: 50 | Computability of squared-error vs. absolute-error estimators, with discussion, 51 | \emph{Statistical Science}, \bold{12}, 279-300. 52 | } 53 | \seealso{ 54 | \code{\link{rq}}, \code{\link{rq.fit.br}}, 55 | \code{\link{rq.fit.pfn}} 56 | } 57 | \keyword{regression} 58 | -------------------------------------------------------------------------------- /man/rq.fit.scad.Rd: -------------------------------------------------------------------------------- 1 | \name{rq.fit.scad} 2 | \alias{rq.fit.scad} 3 | \title{ 4 | SCADPenalized Quantile Regression 5 | } 6 | \description{ 7 | The fitting method implements the smoothly clipped absolute deviation 8 | penalty of Fan and Li for fitting quantile regression models. 9 | When the argument \code{lambda} 10 | is a scalar the penalty function is the scad modified l1 11 | norm of the last (p-1) coefficients, under the presumption that the 12 | first coefficient is an intercept parameter that should not be subject 13 | to the penalty. When \code{lambda} is a vector it should have length 14 | equal the column dimension of the matrix \code{x} and then defines a 15 | coordinatewise specific vector of scad penalty parameters. In this 16 | case \code{lambda} entries of zero indicate covariates that are not 17 | penalized. There should be a sparse version of this, but isn't (yet). 18 | } 19 | \usage{ 20 | rq.fit.scad(x, y, tau = 0.5, alpha = 3.2, lambda = 1, start="rq", 21 | beta = .9995, eps = 1e-06) 22 | } 23 | \arguments{ 24 | \item{x}{ 25 | the design matrix 26 | } 27 | \item{y}{ 28 | the response variable 29 | } 30 | \item{tau}{ 31 | the quantile desired, defaults to 0.5. 32 | } 33 | \item{alpha}{ 34 | tuning parameter of the scad penalty. 35 | } 36 | \item{lambda}{ 37 | the value of the penalty parameter that determines how much shrinkage is done. 38 | This should be either a scalar, or a vector of length equal to the column dimension 39 | of the \code{x} matrix. 40 | 41 | } 42 | \item{start}{ 43 | starting method, default method 'rq' uses the unconstrained rq estimate, while 44 | method 'lasso' uses the corresponding lasso estimate with the specified lambda. 45 | } 46 | \item{beta}{ 47 | step length parameter for Frisch-Newton method. 48 | } 49 | \item{eps}{ 50 | tolerance parameter for convergence. 51 | } 52 | } 53 | \value{ 54 | Returns a list with a coefficient, residual, tau and lambda components. 55 | When called from \code{"rq"} as intended the returned object 56 | has class "scadrqs". 57 | } 58 | \details{The algorithm is an adaptation of the "difference convex algorithm" 59 | described in Wu and Liu (2008). It solves a sequence of (convex) QR problems 60 | to approximate solutions of the (non-convex) scad problem.} 61 | \references{ 62 | Wu, Y. and Y. Liu (2008) Variable Selection in Quantile Regression, \emph{Statistica 63 | Sinica}, to appear. 64 | } 65 | \author{R. 
Koenker} 66 | \seealso{ 67 | \code{\link{rq}}} 68 | \examples{ 69 | n <- 60 70 | p <- 7 71 | rho <- .5 72 | beta <- c(3,1.5,0,2,0,0,0) 73 | R <- matrix(0,p,p) 74 | for(i in 1:p){ 75 | for(j in 1:p){ 76 | R[i,j] <- rho^abs(i-j) 77 | } 78 | } 79 | set.seed(1234) 80 | x <- matrix(rnorm(n*p),n,p) \%*\% t(chol(R)) 81 | y <- x \%*\% beta + rnorm(n) 82 | 83 | f <- rq(y ~ x, method="scad",lambda = 30) 84 | g <- rq(y ~ x, method="scad", start = "lasso", lambda = 30) 85 | } 86 | \keyword{regression} 87 | -------------------------------------------------------------------------------- /man/rq.fit.sfn.Rd: -------------------------------------------------------------------------------- 1 | \name{rq.fit.sfn} 2 | \alias{rq.fit.sfn} 3 | \alias{sfnMessage} 4 | \title{Sparse Regression Quantile Fitting} 5 | \description{ 6 | Fit a quantile regression model using a sparse implementation of the 7 | Frisch-Newton interior-point algorithm. 8 | } 9 | \usage{ 10 | rq.fit.sfn(a, y, tau = 0.5, rhs = (1-tau)*c(t(a) \%*\% rep(1,length(y))), control) 11 | } 12 | \arguments{ 13 | \item{a}{structure of the design matrix X stored in csr format} 14 | \item{y}{response vector} 15 | \item{tau}{desired quantile} 16 | \item{rhs}{the right-hand-side of the dual problem; regular users 17 | shouldn't need to specify this, but in special cases can be quite 18 | usefully altered to meet special needs. See e.g. Section 6.8 of 19 | Koenker (2005).} 20 | \item{control}{control parameters for fitting routines: see \code{sfn.control}} 21 | } 22 | \details{ 23 | This is a sparse implementation of the Frisch-Newton algorithm for quantile 24 | regression described in Portnoy and Koenker (1997). The sparse matrix 25 | linear algebra is implemented through the functions available in the R 26 | package \pkg{SparseM}. 27 | } 28 | \value{ 29 | \item{coef}{Regression quantile coefficients} 30 | \item{ierr}{Error code for the internal Fortran routine \code{srqfnc}: 31 | \describe{ 32 | \item{1:}{ insufficient work space in call to \code{extract}} 33 | \item{2:}{ nnzd > nnzdmax} 34 | \item{3:}{ insufficient storage in iwork when calling ordmmd} 35 | \item{4:}{ insufficient storage in iwork when calling sfinit} 36 | \item{5:}{ nnzl > nnzlmax when calling sfinit} 37 | \item{6:}{ nsub > nsubmax when calling sfinit} 38 | \item{7:}{ insufficient work space in iwork when calling symfct} 39 | \item{8:}{ inconsistancy in input when calling symfct} 40 | \item{9:}{ tmpsiz > tmpmax when calling bfinit; increase tmpmax} 41 | \item{10:}{ nonpositive diagonal encountered, not positive definite} 42 | \item{11:}{ insufficient work storage in tmpvec when calling blkfct} 43 | \item{12:}{ insufficient work storage in iwork when calling blkfct} 44 | \item{17:}{ tiny diagonals replaced with Inf when calling blkfct} 45 | } 46 | } 47 | \item{it}{Iteration count} 48 | \item{time}{Amount of time used in the computation} 49 | } 50 | \references{ 51 | Portnoy, S. and R. Koenker (1997) The Gaussian Hare and the Laplacean Tortoise: 52 | Computability of Squared-error vs Absolute Error Estimators, (with discussion). 53 | \emph{Statistical Science}, 12, 279-300. 54 | 55 | Koenker, R and Ng, P. (2003). SparseM: A Sparse Matrix Package for \R, 56 | \emph{J. of Stat. Software}, 8, 1--9. 57 | 58 | Koenker, R. (2005) \emph{Quantile Regression}, Cambridge U. Press. 
59 | } 60 | \author{Pin Ng} 61 | \seealso{\code{rq.fit.sfnc} for the constrained version, 62 | \code{SparseM} for a sparse matrix package for \R 63 | } 64 | \examples{ 65 | ## An artificial example : 66 | n <- 200 67 | p <- 50 68 | set.seed(101) 69 | X <- rnorm(n*p) 70 | X[abs(X) < 2.0] <- 0 71 | X <- cbind(1, matrix(X, n, p)) 72 | y <- 0.5 * apply(X,1,sum) + rnorm(n) ## true beta = (0.5, 0.5, ...) 73 | 74 | sX <- as.matrix.csr(X) 75 | try(rq.o <- rq.fit.sfn(sX, y)) #-> not enough tmp memory 76 | (tmpmax <- floor(1e5 + exp(-12.1)*(sX@ia[p+1]-1)^2.35)) 77 | ## now ok: 78 | rq.o <- rq.fit.sfn(sX, y, control = list(tmpmax= tmpmax)) 79 | } 80 | \keyword{regression} 81 | -------------------------------------------------------------------------------- /man/rq.fit.sfnc.Rd: -------------------------------------------------------------------------------- 1 | \name{rq.fit.sfnc} 2 | \alias{rq.fit.sfnc} 3 | \title{Sparse Constrained Regression Quantile Fitting} 4 | \description{ 5 | Fit constrained regression quantiles using a sparse implementation of 6 | the Frisch-Newton Interior-point algorithm. 7 | } 8 | \usage{ 9 | rq.fit.sfnc(x, y, R, r, tau = 0.5, 10 | rhs = (1-tau)*c(t(x) \%*\% rep(1,length(y))),control) 11 | } 12 | \arguments{ 13 | \item{x}{structure of the design matrix X stored in csr format} 14 | \item{y}{response vector} 15 | \item{R}{constraint matrix stored in csr format} 16 | \item{r}{right-hand-side of the constraint} 17 | \item{tau}{desired quantile} 18 | \item{rhs}{the right-hand-side of the dual problem; regular users 19 | shouldn't need to specify this.} 20 | \item{control}{control paramters for fitting see \code{sfn.control}} 21 | } 22 | \details{ 23 | This is a sparse implementation of the Frisch-Newton algorithm for 24 | constrained quantile regression described in Koenker and Portnoy (1996). 25 | The sparse matrix linear algebra is implemented through the functions 26 | available in the R package \pkg{SparseM}. 27 | } 28 | \value{ 29 | \item{coef}{Regression quantile coefficients} 30 | \item{ierr}{Error code for the internal Fortran routine \code{srqfn}: 31 | \describe{ 32 | \item{1:}{ insufficient work space in call to \code{extract}} 33 | \item{3:}{ insufficient storage in iwork when calling ordmmd} 34 | \item{4:}{ insufficient storage in iwork when calling sfinit} 35 | \item{5:}{ nnzl > nnzlmax when calling sfinit} 36 | \item{6:}{ nsub > nsubmax when calling sfinit} 37 | \item{7:}{ insufficient work space in iwork when calling symfct} 38 | \item{8:}{ inconsistancy in input when calling symfct} 39 | \item{9:}{ tmpsiz > tmpmax when calling symfct; increase tmpmax} 40 | \item{10:}{ nonpositive diagonal encountered when calling blkfct} 41 | \item{11:}{ insufficient work storage in tmpvec when calling blkfct} 42 | \item{12:}{ insufficient work storage in iwork when calling blkfct} 43 | \item{13:}{ nnzd > nnzdmax in e,je when calling amub} 44 | \item{14:}{ nnzd > nnzdmax in g,jg when calling amub} 45 | \item{15:}{ nnzd > nnzdmax in h,jh when calling aplb} 46 | \item{15:}{ tiny diagonals replaced with Inf when calling blkfct} 47 | } 48 | } 49 | \item{it}{Iteration count} 50 | \item{time}{Amount of time used in the computation} 51 | } 52 | \references{ 53 | Koenker, R and Ng, P. (2002). SparseM: A Sparse Matrix Package for \R; 54 | \url{https://CRAN.R-project.org/package=SparseM} 55 | 56 | Koenker, R. and P. Ng(2005). 57 | Inequality Constrained Quantile Regression, \emph{Sankya}, 418-440. 
58 | } 59 | \author{Pin Ng} 60 | \seealso{ 61 | \code{\link{rq.fit.sfn}} for the unconstrained version, 62 | \pkg{SparseM} for the underlying sparse matrix \R package. 63 | } 64 | \examples{ 65 | ## An artificial example : 66 | n <- 200 67 | p <- 50 68 | set.seed(17) 69 | X <- rnorm(n*p) 70 | X[abs(X) < 2.0] <- 0 71 | X <- cbind(1,matrix(X,n,p)) 72 | y <- 0.5 * apply(X,1,sum) + rnorm(n) ## true beta = (0.5, 0.5, ...) 73 | R <- rbind(diag(p+1), -diag(p+1)) 74 | r <- c(rep( 0, p+1), rep(-1, p+1)) 75 | 76 | sX <- as.matrix.csr(X) 77 | sR <- as.matrix.csr(R) 78 | try(rq.o <- rq.fit.sfnc(sX, y, sR, r)) #-> not enough tmp memory 79 | 80 | (tmpmax <- floor(1e5 + exp(-12.1)*(sX@ia[p+1]-1)^2.35)) 81 | ## now ok: 82 | rq.o <- rq.fit.sfnc(sX, y, sR, r, control = list(tmpmax = tmpmax)) 83 | } 84 | \keyword{regression} 85 | -------------------------------------------------------------------------------- /man/rq.process.object.Rd: -------------------------------------------------------------------------------- 1 | \name{rq.process.object} 2 | \alias{rq.process.object} 3 | \title{ 4 | Linear Quantile Regression Process Object 5 | } 6 | \description{ 7 | These are objects of class \code{rq.process.} 8 | They represent the fit of a linear conditional quantile function model. 9 | } 10 | \section{Generation}{ 11 | This class of objects is returned from the \code{rq} 12 | function 13 | to represent a fitted linear quantile regression model. 14 | } 15 | \section{Methods}{ 16 | The \code{"rq.process"} class of objects has 17 | methods for the following generic 18 | functions: 19 | \code{effects}, \code{formula} 20 | , \code{labels} 21 | , \code{model.frame} 22 | , \code{model.matrix} 23 | , \code{plot} 24 | , \code{predict} 25 | , \code{print} 26 | , \code{print.summary} 27 | , \code{summary} 28 | } 29 | \section{Structure}{ 30 | The following components must be included in a legitimate \code{rq.process} 31 | object. 32 | 33 | \describe{ 34 | \item{\code{sol}}{ 35 | The primal solution array. This is a (p+3) by J matrix whose 36 | first row contains the 'breakpoints' 37 | \eqn{tau_1, tau_2, \dots, tau_J}, 38 | of the quantile function, i.e. the values in [0,1] at which the 39 | solution changes, row two contains the corresponding quantiles 40 | evaluated at the mean design point, i.e. the inner product of 41 | xbar and \eqn{b(tau_i)}, the third row contains the value of the objective 42 | function evaluated at the corresponding \eqn{tau_j}, and the last p rows 43 | of the matrix give \eqn{b(tau_i)}. The solution \eqn{b(tau_i)} prevails from 44 | \eqn{tau_i} to \eqn{tau_i+1}. Portnoy (1991) shows that 45 | \eqn{J=O_p(n \log n)}{J=O_p(n log n)}. 46 | } 47 | \item{\code{dsol}}{ 48 | The dual solution array. This is a 49 | n by J matrix containing the dual solution corresponding to sol, 50 | the ij-th entry is 1 if \eqn{y_i > x_i b(tau_j)}, is 0 if \eqn{y_i < x_i 51 | b(tau_j)}, and is between 0 and 1 otherwise, i.e. if the 52 | residual is zero. See Gutenbrunner and Jureckova(1991) 53 | for a detailed discussion of the statistical 54 | interpretation of dsol. The use of dsol in inference is described 55 | in Gutenbrunner, Jureckova, Koenker, and Portnoy (1994). 56 | } 57 | } 58 | } 59 | \details{ 60 | These arrays are computed by parametric linear programming methods 61 | using using the exterior point (simplex-type) methods of the 62 | Koenker--d'Orey algorithm based on Barrodale and Roberts median 63 | regression algorithm. 64 | } 65 | \references{ 66 | [1] Koenker, R. W. and Bassett, G. W. (1978). 
Regression quantiles, 67 | \emph{Econometrica}, \bold{46}, 33--50. 68 | 69 | [2] Koenker, R. W. and d'Orey (1987, 1994). 70 | Computing Regression Quantiles. 71 | \emph{Applied Statistics}, \bold{36}, 383--393, and \bold{43}, 410--414. 72 | 73 | [3] Gutenbrunner, C. Jureckova, J. (1991). 74 | Regression quantile and regression rank score process in the 75 | linear model and derived statistics, \emph{Annals of Statistics}, 76 | \bold{20}, 305--330. 77 | 78 | [4] Gutenbrunner, C., Jureckova, J., Koenker, R. and 79 | Portnoy, S. (1994) Tests of linear hypotheses based on regression 80 | rank scores. \emph{Journal of Nonparametric Statistics}, 81 | (2), 307--331. 82 | 83 | [5] Portnoy, S. (1991). Asymptotic behavior of the number of regression 84 | quantile breakpoints, \emph{SIAM Journal of Scientific 85 | and Statistical Computing}, \bold{12}, 867--883. 86 | } 87 | \seealso{ 88 | \code{\link{rq}}. 89 | } 90 | \keyword{regression} 91 | -------------------------------------------------------------------------------- /man/rq.wfit.Rd: -------------------------------------------------------------------------------- 1 | \name{rq.wfit} 2 | \alias{rq.wfit} 3 | \title{Function to choose method for Weighted Quantile Regression } 4 | \description{ Weight the data and then call the chosen fitting algorithm. } 5 | \usage{ 6 | rq.wfit(x, y, tau=0.5, weights, method="br", ...) 7 | } 8 | \arguments{ 9 | \item{x}{ 10 | the design matrix 11 | } 12 | \item{y}{ 13 | the response variable 14 | } 15 | \item{tau}{ 16 | the quantile desired, if tau lies outside (0,1) the whole process 17 | is estimated. 18 | } 19 | \item{weights}{ 20 | weights used in the fitting 21 | } 22 | \item{method}{ 23 | method of computation: "br" is Barrodale and Roberts exterior point 24 | "fn" is the Frisch-Newton interior point method. 25 | } 26 | \item{...}{ 27 | Optional arguments passed to fitting routine. 28 | } 29 | } 30 | 31 | \seealso{ \code{\link{rq}} \code{\link{rq.fit.br}} \code{\link{rq.fit.fnb}}} 32 | 33 | \keyword{ regression } 34 | -------------------------------------------------------------------------------- /man/rqProcess.Rd: -------------------------------------------------------------------------------- 1 | \name{rqProcess} 2 | \alias{rqProcess} 3 | \title{ Compute Standardized Quantile Regression Process } 4 | \description{ 5 | Computes a standardize quantile regression process for the model 6 | specified by the formula, on the partition of [0,1] specified by the 7 | taus argument, and standardized according to the argument nullH. 8 | Intended for use in \code{\link{KhmaladzeTest}}.} 9 | \usage{ 10 | rqProcess(formula, data, taus, nullH = "location", ...) 11 | } 12 | \arguments{ 13 | \item{formula}{model formula } 14 | \item{data}{data frame to be used to interpret formula } 15 | \item{taus}{ quantiles at which the process is to be evaluated, if any 16 | of the taus lie outside (0,1) then the full process is computed 17 | for all distinct solutions.} 18 | \item{nullH}{Null hypothesis to be used for standardization} 19 | \item{...}{optional arguments passed to \code{\link{summary.rq}}} 20 | } 21 | \details{ 22 | The process computes standardized estimates based on the 23 | hypothesis specified in the \code{nullH} argument. 24 | The Vhat component is rescaled by the Cholesky 25 | decomposition of the tau specific covariance matrix, the vhat component is 26 | rescaled by the marginal standard errors. 
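Since rqProcess is essentially an internal helper, a hedged sketch of its intended use through KhmaladzeTest may be useful; the barro data and the model below are illustrative choices, not prescriptions from this page:

library(quantreg)
data(barro)
kt <- KhmaladzeTest(y.net ~ lgdp2 + fse2 + gedy2, data = barro,
                    taus = seq(0.05, 0.95, by = 0.05), nullH = "location")
kt$Tn     # joint test statistic
kt$THn    # componentwise statistics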
The nature of the covariance 27 | matrix used for the standardization is controlled arguments passed via 28 | the \code{...} argument to \code{\link{summary.rq}}. If the full 29 | process is estimated then these covariance options aren't available 30 | and only a simple iid-error form of the covariance matrix is used. 31 | } 32 | \value{ 33 | \item{taus}{The points of evaluation of the process} 34 | \item{qtaus}{Values of xbar'betahat(taus)} 35 | \item{Vhat}{Joint parametric QR process} 36 | \item{vhat}{Marginal parametric QR processes} 37 | } 38 | \author{R. Koenker} 39 | \seealso{\code{\link{KhmaladzeTest}}} 40 | \keyword{regression} 41 | -------------------------------------------------------------------------------- /man/rqs.fit.Rd: -------------------------------------------------------------------------------- 1 | \name{rqs.fit} 2 | \alias{rqs.fit} 3 | \title{Function to fit multiple response quantile regression models} 4 | \usage{ 5 | rqs.fit(x, y, tau=0.5, tol = 0.0001) 6 | } 7 | \arguments{ 8 | \item{x}{ 9 | the design matrix an n by p matrix. 10 | } 11 | \item{y}{ 12 | the response variable as a n by m matrix 13 | } 14 | \item{tau}{ 15 | the quantile desired, if tau lies outside (0,1) 16 | } 17 | \item{tol}{ 18 | tolerance parameter for Barrodale and Roberts exterior point method. 19 | } 20 | } 21 | 22 | \description{Function intended for multiple response quantile regression 23 | called from \code{boot.rq} for wild bootstrap option.} 24 | \seealso{ \code{\link{boot.rq}} } 25 | 26 | \keyword{ regression } 27 | -------------------------------------------------------------------------------- /man/sfn.control.Rd: -------------------------------------------------------------------------------- 1 | \name{sfn.control} 2 | \alias{sfn.control} 3 | \title{Set Control Parameters for Sparse Fitting} 4 | \description{ 5 | Auxiliary function for setting storage dimensions and other parameters 6 | for \code{\link{rq.fit.sfn}()}, \code{\link{rq.fit.sfnc}()} and \code{\link{rqss}()}. 7 | } 8 | \usage{ 9 | sfn.control(nsubmax = NULL, tmpmax = NULL, nnzlmax = NULL, cachsz = 64, 10 | small = 1e-06, maxiter = 100, 11 | tiny = 1e-30, Large = 1e128, 12 | warn.mesg = TRUE) 13 | } 14 | \arguments{ 15 | \item{nsubmax}{upper bound for dimension of lindx} 16 | \item{tmpmax}{upper bound for dimension of tmpvec} 17 | \item{nnzlmax}{upper bound for non-zero entries of L stored in lnz, including diagonal} 18 | \item{cachsz}{size of cache in kbytes on target machine} 19 | \item{small}{convergence tolerance for interior point algorithm} 20 | \item{maxiter}{maximal number of interior point iterations} 21 | \item{tiny}{a tiny positive number; values below \code{tiny * max(diag)} are replaced by \code{Large}; 22 | originally was \eqn{10^{-30}} hardcoded in Fortran code.} 23 | \item{Large}{a large number, practically \dQuote{Infinite} to replace 24 | \code{tiny} diagonal entries in Cholesky; was \eqn{10^{128}}, hardcoded in 25 | compiled code.} 26 | \item{warn.mesg}{logical flag controlling printing of warnings.} 27 | } 28 | \details{ 29 | Sparse fitting requires a number of temporary storage arrays whose size depends 30 | on problem specific features in somewhat mysterious ways, parameters controlling 31 | these sizes and some other fitting aspects can be controlled by specifying elements 32 | of this control object. 33 | } 34 | \value{ 35 | A \code{\link{list}} with components named as the arguments given above. 
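A brief hedged illustration of passing a control list to the sparse fitters; the enlarged storage bounds here are arbitrary illustrative values, and the rq.fit.sfn help page shows a data-driven heuristic for tmpmax:

library(quantreg)
ctrl <- sfn.control(tmpmax = 1e6, nnzlmax = 5e5, warn.mesg = FALSE)
## fit <- rq.fit.sfn(sX, y, control = ctrl)   # sX in csr format, y the response vector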
36 | } 37 | 38 | \author{ 39 | Roger Koenker 40 | } 41 | 42 | \seealso{ 43 | \code{\link{rq.fit.sfn}}, 44 | \code{\link{rq.fit.sfnc}}, 45 | and \code{\link{rqss}} from which \code{sfn.control()} is called. 46 | } 47 | \keyword{ utilities } 48 | -------------------------------------------------------------------------------- /man/srisk.Rd: -------------------------------------------------------------------------------- 1 | \name{srisk} 2 | \alias{srisk} 3 | \title{ Markowitz (Mean-Variance) Portfolio Optimization} 4 | \description{ 5 | This function estimates optimal mean-variance portfolio weights from a matrix 6 | of historical or simulated asset returns. 7 | } 8 | \usage{ 9 | srisk(x, mu = 0.07, lambda = 1e+08, alpha = 0.1, eps = 1e-04) 10 | } 11 | \arguments{ 12 | \item{x}{ Matrix of asset returns } 13 | \item{mu}{Required mean rate of return for the portfolio } 14 | \item{lambda}{Lagrange multiplier associated with mean return constraint} 15 | \item{alpha}{Choquet risk parameter, unimplemented } 16 | \item{eps}{ tolerance parameter for mean return constraint} 17 | } 18 | \details{ 19 | The portfolio weights are estimated by solving a constrained least squares problem. 20 | } 21 | \value{ 22 | \item{pihat}{Optimal portfolio weights} 23 | \item{muhat }{Mean return in sample} 24 | \item{sighat }{Standard deviation of returns in sample} 25 | } 26 | \author{ R. Koenker } 27 | 28 | \seealso{ \code{\link{qrisk}}} 29 | 30 | \keyword{ regression } 31 | -------------------------------------------------------------------------------- /man/summary.rqss.Rd: -------------------------------------------------------------------------------- 1 | \name{summary.rqss} 2 | \alias{summary.rqss} 3 | \alias{print.summary.rqss} 4 | \title{Summary of rqss fit} 5 | \description{ Summary Method for a fitted rqss model. } 6 | \usage{\method{summary}{rqss}(object, cov = FALSE, ztol = 1e-5, ...) } 7 | \arguments{ 8 | \item{object}{an object returned from \code{rqss} fitting, describing 9 | an additive model estimating a conditional quantile function. 10 | See \code{\link{qss}} for details on how to specify these terms.} 11 | \item{cov}{if TRUE return covariance matrix for the parametric components 12 | as \code{Vcov} and a list of covariance matrices for the nonparametric 13 | components as \code{Vqss}} 14 | \item{ztol}{Zero tolerance parameter used to determine the number of zero 15 | residuals indicating the estimated parametric dimension of the model, 16 | the so-called effective degrees of freedom.} 17 | \item{...}{additional arguments} 18 | } 19 | \details{ This function is intended to explore 20 | inferential methods for rqss fitting. The function is modeled after 21 | \code{summary.gam} in Simon Wood's (2006) \pkg{mgcv} package. (Of course, 22 | Simon should not be blamed for any deficiencies in the current implementation. 23 | The basic idea is to condition on the lambda selection and construct 24 | quasi-Bayesian credibility intervals based on normal approximation of 25 | the "posterior," as computed using the Powell kernel estimate of the 26 | usual quantile regression sandwich. See \code{\link{summary.rq}} for 27 | further details and references. 28 | The function produces a conventional coefficient table with standard errors 29 | t-statistics and p-values for the coefficients on the parametric part of the 30 | model, and another table for additive nonparametric effects. The latter 31 | reports F statistics intended to evaluate the significance of these components 32 | individually. 
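A hedged follow-up to the example at the end of this page: requesting the covariance components listed in the Value section, using the same data generating process as that example:

library(quantreg)
n <- 200
x <- sort(rchisq(n, 4))
z <- x + rnorm(n)
y <- log(x) + 0.1 * (log(x))^2 + log(x) * rnorm(n)/4 + z
f  <- rqss(y ~ qss(x) + z)
sf <- summary(f, cov = TRUE)
str(sf$Vcov)    # covariance of the parametric component
str(sf$Vqss)    # list of covariances for the qss terms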
In addition the fidelity (value of the QR objective function 33 | evaluated at the fitted model), the effective degrees of freedom, and the 34 | sample size are reported. 35 | } 36 | \value{ 37 | \item{coef}{Table of estimated coefficients and their standard errors, 38 | t-statistics, and p-values for the parametric components of the model} 39 | \item{qsstab}{Table of approximate F statistics, effective degrees of freedom 40 | and values of the penalty terms for each of the additive nonparametric 41 | components of the model, and the lambda values assigned to each.} 42 | \item{fidelity}{Value of the quantile regression objective function.} 43 | \item{tau}{Quantile of the estimated model} 44 | \item{formula}{formula of the estimated model} 45 | \item{edf}{Effective degrees of freedom of the fitted model, defined 46 | as the number of zero residuals of the fitted model, see Koenker 47 | Mizera (2003) for details.} 48 | \item{n}{The sample size used to fit the model.} 49 | \item{Vcov}{Estimated covariance matrix of the fitted parametric component} 50 | \item{Vqss}{List of estimated covariance matrices of the fitted 51 | nonparametric component} 52 | 53 | } 54 | \references{ 55 | [1] Koenker, R., P. Ng and S. Portnoy, (1994) 56 | Quantile Smoothing Splines; 57 | \emph{Biometrika} \bold{81}, 673--680. 58 | 59 | [2] Koenker, R. and I. Mizera, (2003) 60 | Penalized Triograms: Total Variation Regularization for Bivariate Smoothing; 61 | \emph{JRSS(B)} \bold{66}, 145--163. 62 | 63 | [3] Wood, S. (2006) \emph{Generalized Additive Models}, Chapman-Hall. 64 | } 65 | \author{ Roger Koenker } 66 | \seealso{ \code{\link{plot.rqss}} } 67 | \examples{ 68 | n <- 200 69 | x <- sort(rchisq(n,4)) 70 | z <- x + rnorm(n) 71 | y <- log(x)+ .1*(log(x))^2 + log(x)*rnorm(n)/4 + z 72 | f <- rqss(y ~ qss(x) + z) 73 | summary(f) 74 | } 75 | \keyword{regression} 76 | \keyword{smooth} 77 | \keyword{robust} 78 | -------------------------------------------------------------------------------- /man/table.rq.Rd: -------------------------------------------------------------------------------- 1 | \name{table.rq} 2 | \alias{table.rq} 3 | \alias{latex.table.rq} 4 | \alias{plot.table.rq} 5 | \title{ 6 | Table of Quantile Regression Results 7 | } 8 | \description{ 9 | Defunct Function to produce a table of quantile regression results for a group 10 | of specified quantiles. See \code{rq} which now permits multiple taus. 11 | } 12 | \usage{ 13 | table.rq(x, \dots) 14 | } 15 | \arguments{ 16 | \item{x}{ 17 | input 18 | } 19 | \item{...}{ 20 | other optional arguments 21 | } 22 | } 23 | \value{ 24 | None. 25 | } 26 | \seealso{ \code{\link{rq}}, } 27 | \keyword{regression} 28 | -------------------------------------------------------------------------------- /man/uis.Rd: -------------------------------------------------------------------------------- 1 | \name{uis} 2 | \alias{uis} 3 | \title{UIS Drug Treatment study data} 4 | \description{There are 628 data points in the original data, 575 of which have no missing values. 
5 | 6 | Variable descriptions: 7 | 8 | \tabular{lll}{ 9 | Variable \tab Description \tab Codes/Values \cr 10 | ID \tab Identification Code \tab 1 - 628 \cr 11 | AGE \tab Age at Enrollment \tab Years \cr 12 | BECK \tab Beck DepressionScore \tab 0.000 - 54.000 \cr 13 | HC \tab Heroin/Cocaine Use During \tab 1 = Heroin & Cocaine \cr 14 | \tab 3 Months Prior to Admission \tab 2 = Heroin Only \cr 15 | \tab \tab 3 = Cocaine Only \cr 16 | \tab \tab 4 = Neither Heroin nor Cocaine \cr 17 | IV \tab History of IV Drug Use \tab 1 = Never \cr 18 | \tab \tab 2 = Previous \cr 19 | \tab \tab 3 = Recent \cr 20 | NDT \tab Number of Prior Drug Treatments \tab 0 - 40 \cr 21 | RACE \tab Subject's Race \tab 0 = White \cr 22 | \tab \tab 1 = Non-White \cr 23 | TREAT \tab Treatment Randomization \tab 0 = Short \cr 24 | \tab Assignment \tab 1 = Long \cr 25 | SITE \tab Treatment Site \tab 0 = A \cr 26 | \tab \tab 1 = B \cr 27 | LEN.T \tab Length of Stay in Treatment \tab Days \cr 28 | \tab (Admission Date to Exit Date) \tab \cr 29 | TIME \tab Time to Drug Relapse \tab Days \cr 30 | \tab (Measured from Admission Date) \tab \cr 31 | CENSOR \tab Event for Treating Lost to \tab 1 = Returned to Drugs \cr 32 | \tab Follow-Up as Returned to Drugs \tab or Lost to Follow-Up \cr 33 | \tab \tab 0 = Otherwise \cr 34 | Y \tab log of TIME \tab \cr 35 | ND1 \tab Component of NDT \tab \cr 36 | ND2 \tab Component of NDT \tab \cr 37 | LNDT \tab \tab \cr 38 | FRAC \tab Compliance fraction \tab LEN.T/90 for short trt \cr 39 | \tab \tab LEN.T/180 for long trt \cr 40 | IV3 \tab Recent IV use \tab 1 = Yes \cr 41 | \tab \tab 0 = No 42 | 43 | } 44 | } 45 | \usage{data(uis)} 46 | \format{A data frame with dimension 575 by 18.} 47 | \source{Table 1.3 of Hosmer,D.W. and Lemeshow, S. (1998) } 48 | \references{Hosmer,D.W. and Lemeshow, S. (1998) Applied Survival 49 | Analysis: Regression Modeling of Time to Event Data, John Wiley and Sons Inc., 50 | New York, NY} 51 | 52 | \keyword{datasets} 53 | -------------------------------------------------------------------------------- /src/Makevars: -------------------------------------------------------------------------------- 1 | ## we use the BLAS and now also the LAPACK library: 2 | PKG_LIBS= $(LAPACK_LIBS) $(BLAS_LIBS) $(FLIBS) 3 | -------------------------------------------------------------------------------- /src/akj.f: -------------------------------------------------------------------------------- 1 | C Output from Public domain Ratfor, version 1.05 2 | subroutine akj(x,z,p,iker,dens,psi,score,nx,nz,h,alpha,kappa,xlam 3 | *) 4 | double precision dens(nz),score(nz),psi(nz),h,kappa 5 | double precision z(nz),x(nx),xlam(nx),p(nx),qrange,pi 6 | double precision con1,sum,sqsum,xsd,a,fifth,hinv,half 7 | double precision xn,xker,dxker,ddxker,fact,xponen,alpha,glog,zero, 8 | *one,two 9 | parameter( zero = 0.d0) 10 | parameter( one = 1.d0) 11 | parameter( two = 2.d0) 12 | parameter( four = 4.d0) 13 | parameter( half = 0.5d0) 14 | parameter( fifth = 0.2d0) 15 | parameter( pi = 3.141593d0) 16 | xn=nx 17 | if(iker.eq.0)then 18 | con1=one/sqrt(2.0*pi) 19 | else 20 | if(iker.eq.1)then 21 | con1=one/pi 22 | endif 23 | endif 24 | if(h.le.0.)then 25 | sum=0. 26 | sqsum=0. 
27 | do23006 i=1,nx 28 | sqsum=sqsum+x(i)*x(i)*p(i) 29 | sum=sum+x(i)*p(i) 30 | 23006 continue 31 | 23007 continue 32 | xsd=dsqrt(sqsum-sum*sum) 33 | sum=zero 34 | i=1 35 | 23008 if(.not.(i.lt.nx))goto 23010 36 | sum=sum+p(i) 37 | if(sum.lt..25)then 38 | goto 23009 39 | else 40 | qrange=x(i) 41 | goto 23010 42 | endif 43 | 23009 i=i+1 44 | goto 23008 45 | 23010 continue 46 | sum=one 47 | i=nx 48 | 23013 if(.not.(i.gt.0))goto 23015 49 | sum=sum-p(i) 50 | if(sum.gt..75)then 51 | goto 23014 52 | else 53 | qrange=x(i)-qrange 54 | goto 23015 55 | endif 56 | 23014 i=i-1 57 | goto 23013 58 | 23015 continue 59 | a=min(xsd,qrange/1.34) 60 | h=kappa*a/(xn**fifth) 61 | endif 62 | hinv=one/h 63 | do23018 j=1,nx 64 | xker=0. 65 | if(iker.eq.0)then 66 | do23022 i=1,nx 67 | xponen=(x(j)-x(i))*hinv 68 | xponen=half*xponen**2 69 | xker=xker+p(i)*exp(-xponen)*hinv 70 | 23022 continue 71 | 23023 continue 72 | else 73 | if(iker.eq.1)then 74 | do23026 i=1,nx 75 | xponen=(x(j)-x(i))*hinv 76 | xker=xker+p(i)*hinv/(1+xponen**2) 77 | 23026 continue 78 | 23027 continue 79 | endif 80 | endif 81 | xlam(j)=con1*xker 82 | 23018 continue 83 | 23019 continue 84 | glog=zero 85 | do23028 i=1,nx 86 | glog=glog+p(i)*log(xlam(i)) 87 | 23028 continue 88 | 23029 continue 89 | g=exp(glog) 90 | ginv=one/g 91 | do23030 i=1,nx 92 | xlam(i)=hinv/((xlam(i)*ginv)**(-alpha)) 93 | 23030 continue 94 | 23031 continue 95 | do23032 j=1,nz 96 | xker=zero 97 | dxker=zero 98 | ddxker=zero 99 | if(iker.eq.0)then 100 | do23036 i=1,nx 101 | xponen=(z(j)-x(i))*xlam(i) 102 | fact=exp(-half*xponen*xponen)*xlam(i) 103 | xker=xker+p(i)*fact 104 | dxker=dxker-p(i)*fact*xponen*xlam(i) 105 | ddxker=ddxker- p(i)*fact*(one - xponen**2)*xlam(i)**2 106 | 23036 continue 107 | 23037 continue 108 | else 109 | if(iker.eq.1)then 110 | do23040 i=1,nx 111 | xponen=(z(j)-x(i))*xlam(i) 112 | fact=xlam(i)/(one+xponen**2) 113 | xker=xker+p(i)*fact 114 | dxker=dxker-p(i)*two*xponen*fact**2 115 | ddxker=ddxker- p(i)*two*(fact**2)*(xlam(i)- four*(xponen**2)*fact) 116 | 23040 continue 117 | 23041 continue 118 | endif 119 | endif 120 | dens(j)=con1*xker 121 | psi(j)=-(dxker/xker) 122 | score(j)=(dxker/xker)**2-ddxker/xker 123 | 23032 continue 124 | 23033 continue 125 | return 126 | end 127 | -------------------------------------------------------------------------------- /src/boot.f: -------------------------------------------------------------------------------- 1 | C Output from Public domain Ratfor, version 1.05 2 | subroutine pwy(m,n,k,m5,n2,a,c,b,t,toler,ift,x,e,s, wa,wb) 3 | double precision b(m),a(k,n),x(n,k) 4 | double precision wa(m5,n2),wb(m),e(m),c(m,n) 5 | double precision t,toler 6 | integer m,n,k,m5,n2,ift 7 | integer s(m) 8 | do23000 i=1,k 9 | call dcopy(n,a(i,1),k,c(m,1),m) 10 | call rq0(m,n,m5,n2,c,b,t,toler,ift,x(1,i),e,s,wa,wb) 11 | 23000 continue 12 | 23001 continue 13 | return 14 | end 15 | subroutine xys(mofn,m,n,k,mofn5,n2,a,b,tau,toler,ift,x,e,s, wa,wb, 16 | *aa,bb,ss) 17 | double precision b(m),a(m,n),x(n,k) 18 | double precision wa(mofn5,n2),wb(mofn) 19 | double precision aa(mofn,n),bb(mofn),e(mofn) 20 | double precision tau,toler 21 | integer ss(mofn,k),s(mofn),mofn,m,n,k,mofn5,n2,ift(k) 22 | do23002 i=1,k 23 | do23004 ii=1,mofn 24 | bb(ii)=b(ss(ii,i)) 25 | do23006 jj=1,n 26 | aa(ii,jj)=a(ss(ii,i),jj) 27 | 23006 continue 28 | 23007 continue 29 | 23004 continue 30 | 23005 continue 31 | call rq0(mofn,n,mofn5,n2,aa,bb,tau,toler,ift(i),x(1,i),e,s,wa,wb) 32 | 23002 continue 33 | 23003 continue 34 | return 35 | end 36 | subroutine 
wxy(m,n,k,m5,n2,a,b,tau,toler,ift,x,e,s,wa,wb,aa,bb,w) 37 | double precision b(m),a(m,n),x(n,k) 38 | double precision w(m,k),wa(m5,n2),wb(m) 39 | double precision aa(m,n),bb(m),e(m) 40 | double precision tau,toler 41 | integer s(m),m,n,k,m5,n2,ift(k) 42 | do23008 i=1,k 43 | do23010 ii=1,m 44 | bb(ii)=b(ii)*w(ii,i) 45 | do23012 jj=1,n 46 | aa(ii,jj)=a(ii,jj)*w(ii,i) 47 | 23012 continue 48 | 23013 continue 49 | 23010 continue 50 | 23011 continue 51 | call rq0(m,n,m5,n2,aa,bb,tau,toler,ift(i),x(1,i),e,s,wa,wb) 52 | 23008 continue 53 | 23009 continue 54 | return 55 | end 56 | subroutine heqfy(n,p,r,x,b,y) 57 | integer n,p,r 58 | double precision x(n,p),b(p,n,r),y(n,r),ddot 59 | do23014 i=1,r 60 | do23016 j=1,n 61 | y(j,i)=ddot(p,x(j,1),n,b(1,j,i),1) 62 | 23016 continue 63 | 23017 continue 64 | 23014 continue 65 | 23015 continue 66 | return 67 | end 68 | -------------------------------------------------------------------------------- /src/bound.f: -------------------------------------------------------------------------------- 1 | c 1 2 3 4 5 6 7 2 | c23456789012345678901234567890123456789012345678901234567890123456789012 3 | c 4 | c Function to obtain the step length 5 | c 6 | c 1 2 3 4 5 6 7 7 | c23456789012345678901234567890123456789012345678901234567890123456789012 8 | c 9 | subroutine bound(x,dx,s,ds,z,dz,w,dw,n,beta,deltap,deltad) 10 | c 11 | integer n 12 | double precision x(n),dx(n),s(n),ds(n),z(n),dz(n),w(n),dw(n) 13 | double precision deltap,deltad,dmin1,big,one,beta 14 | parameter (big = 1.0d20, one = 1.0d0) 15 | deltap = big 16 | deltad = big 17 | do i=1,n 18 | if(dx(i) .lt. 0) deltap = dmin1(deltap, -x(i)/dx(i)) 19 | if(ds(i) .lt. 0) deltap = dmin1(deltap, -s(i)/ds(i)) 20 | if(dz(i) .lt. 0) deltad = dmin1(deltad, -z(i)/dz(i)) 21 | if(dw(i) .lt. 0) deltad = dmin1(deltad, -w(i)/dw(i)) 22 | enddo 23 | deltap = dmin1(beta*deltap,one) 24 | deltad = dmin1(beta*deltad,one) 25 | return 26 | end 27 | -------------------------------------------------------------------------------- /src/boundc.f: -------------------------------------------------------------------------------- 1 | c 1 2 3 4 5 6 7 2 | c23456789012345678901234567890123456789012345678901234567890123456789012 3 | c 4 | c Function to obtain the step length 5 | c 6 | c 1 2 3 4 5 6 7 7 | c23456789012345678901234567890123456789012345678901234567890123456789012 8 | c 9 | subroutine boundc(x1,dx1,x2,dx2,s,ds,z1,dz1,z2,dz2,w,dw,n1,n2, 10 | & beta,deltap,deltad) 11 | c 12 | integer n1,n2 13 | double precision x1(n1),dx1(n1),x2(n2),dx2(n2),s(n1),ds(n1), 14 | & z1(n1),dz1(n1),z2(n2),dz2(n2),w(n1),dw(n1) 15 | double precision deltap,deltad,dmin1,big,one,beta 16 | parameter (big = 1.0d20, one = 1.0d0) 17 | deltap = big 18 | deltad = big 19 | do i = 1,n1 20 | if(dx1(i) .lt. 0) deltap = dmin1(deltap, -x1(i)/dx1(i)) 21 | if(ds(i) .lt. 0) deltap = dmin1(deltap, -s(i)/ds(i)) 22 | if(dz1(i) .lt. 0) deltad = dmin1(deltad, -z1(i)/dz1(i)) 23 | if(dw(i) .lt. 0) deltad = dmin1(deltad, -w(i)/dw(i)) 24 | enddo 25 | do i = 1,n2 26 | if(dx2(i) .lt. 0) deltap = dmin1(deltap, -x2(i)/dx2(i)) 27 | if(dz2(i) .lt. 
0) deltad = dmin1(deltad, -z2(i)/dz2(i)) 28 | enddo 29 | deltap = dmin1(beta*deltap,one) 30 | deltad = dmin1(beta*deltad,one) 31 | return 32 | end 33 | -------------------------------------------------------------------------------- /src/brute.f: -------------------------------------------------------------------------------- 1 | C Output from Public domain Ratfor, version 1.05 2 | subroutine brutpow(n,p,m,h,a,b,c,x,tau,u,xh,d,jminz,nflag) 3 | integer n,p,m 4 | double precision x(p),a(n,p),b(n),c(n) 5 | double precision u(p,p),d(p),xh(p) 6 | double precision zero, one,tau,pow,minz,z 7 | integer h(p,m),k,findk,jminz,nflag 8 | parameter(zero = 0.0d0, one = 1.d0) 9 | jminz = 1 10 | minz = pow(n,p,x,a,b,c,tau) 11 | do23000 j = 2,m 12 | k = findk(p,h(1,j),h(1,j-1)) 13 | if(k .eq. 0)then 14 | nflag = 4 15 | return 16 | endif 17 | call pivot(n,p,h(1,j-1),h(k,j),h(k,j-1),a,u,d,xh,nflag) 18 | if(nflag .gt. 0)then 19 | return 20 | endif 21 | do23006 i = 1,p 22 | xh(i) = b(h(i,j)) 23 | 23006 continue 24 | 23007 continue 25 | call dgemv('N',p,p,one,u,p,xh,1,zero,x,1) 26 | z = pow(n,p,x,a,b,c,tau) 27 | if(z .lt. minz)then 28 | minz = z 29 | jminz = j 30 | endif 31 | 23000 continue 32 | 23001 continue 33 | return 34 | end 35 | integer function findk(p,h,g) 36 | integer p,k,h(p),g(p) 37 | findk = 0 38 | do23010 k = 1,p 39 | if(h(k) .ne. g(k))then 40 | findk = k 41 | goto 23011 42 | endif 43 | 23010 continue 44 | 23011 continue 45 | return 46 | end 47 | -------------------------------------------------------------------------------- /src/combos.f: -------------------------------------------------------------------------------- 1 | C Output from Public domain Ratfor, version 1.05 2 | subroutine combin(r,n,m,a,c,e,last) 3 | integer r,n,m,t,k,j,m0,mj,s 4 | integer a(n,m),c(r),e(r),last(r) 5 | logical odd 6 | m0 = r-n 7 | t = n+1 8 | k = 1 9 | j = 0 10 | c(1) = 0 11 | 23000 continue 12 | j = j + 1 13 | c(j) = j 14 | e(j) = j - 1 15 | if(odd(j))then 16 | last(j) = m0 + j 17 | else 18 | last(j) = j + 1 19 | endif 20 | if(j .eq. n)then 21 | goto 23002 22 | endif 23 | 23001 goto 23000 24 | 23002 continue 25 | do23007 i = 1,n 26 | a(i,1) = c(i) 27 | 23007 continue 28 | 23008 continue 29 | if(n .lt. r)then 30 | 23011 continue 31 | k = k + 1 32 | s = c(j) 33 | mj = m0 + j 34 | e(n+1) = n 35 | if(odd(j))then 36 | if(c(j) .eq. mj)then 37 | c(j) = c(j-1) + 1 38 | last(j+1) = c(j) + 1 39 | else 40 | c(j) = c(j) + 1 41 | endif 42 | else 43 | if(c(j) .eq. c(j-1) + 1)then 44 | c(j) = mj 45 | else 46 | last(j+1) = c(j) 47 | c(j) = c(j) - 1 48 | endif 49 | endif 50 | if(c(j) .eq. last(j))then 51 | last(j) = s 52 | e(j+1) = e(j) 53 | e(j) = j-1 54 | endif 55 | if( (j .lt. n) .and. (c(j) .eq. mj))then 56 | t = j 57 | j = e(t+1) 58 | e(t+1) = t 59 | else 60 | if(t .eq. j)then 61 | t = t + 1 62 | endif 63 | if(t .lt. e(n+1))then 64 | j = t 65 | else 66 | j = e(n+1) 67 | endif 68 | endif 69 | do23028 i = 1,n 70 | a(i,k) = c(i) 71 | 23028 continue 72 | 23029 continue 73 | if(j .eq. 0)then 74 | goto 23013 75 | endif 76 | 23012 goto 23011 77 | 23013 continue 78 | endif 79 | return 80 | end 81 | logical function odd(j) 82 | integer j 83 | odd = (mod(j,2) .eq. 
1) 84 | return 85 | end 86 | -------------------------------------------------------------------------------- /src/crqfnb.f: -------------------------------------------------------------------------------- 1 | C Output from Public domain Ratfor, version 1.05 2 | subroutine crqfnb(n,p,a1,c1,n1,x,y,c,b,g,m,r,s,d,u,wn,wp,info) 3 | integer n,p,n1,m,info,nit(3) 4 | double precision a1(p,n1),c1(n),x(n,p),y(n),c(n),b(p,m),g(m) 5 | double precision wn(n1,9),wp(p,p+3),r(p),s(n),d(n),u(n) 6 | double precision zero,half,one,beta,eps,dh 7 | parameter( zero = 0.0d0) 8 | parameter( half = 0.5d0) 9 | parameter( one = 1.0d0) 10 | parameter( beta = 0.99995d0) 11 | parameter( eps = 1.0d-8) 12 | do23000 k = 2,m 13 | dh = -log(one - g(k)) + log(one - g(k-1)) 14 | do23002 i = 1,n 15 | u(i) = one 16 | wn(i,1) = half 17 | if(d(i) .ge. zero)then 18 | s(i) = s(i) + dh 19 | endif 20 | d(i) = c(i) - s(i) 21 | 23002 continue 22 | 23003 continue 23 | call dgemv('T',n,p,one,x,n,d,1,zero,r,1) 24 | call rqfnb(n1,p,a1,c1,r,d,u,beta,eps,wn,wp,nit,info) 25 | if(info .ne. 0)then 26 | goto 23001 27 | endif 28 | call dcopy(p,wp,1,b(1,k-1),1) 29 | call dcopy(n,y,1,d,1) 30 | call dgemv('N',n,p,one,x,n,b(1,k-1),1,one,d,1) 31 | 23000 continue 32 | 23001 continue 33 | m = k-1 34 | return 35 | end 36 | -------------------------------------------------------------------------------- /src/extract.f: -------------------------------------------------------------------------------- 1 | c 2 | c Extract: Subroutine to extract the non-diagonal structure and 3 | c entries from A stored in CSR format 4 | c 5 | subroutine extract(d,jd,id,dsub,jdsub,m,nnzd,nnzds,ierr) 6 | integer jd(nnzd),jdsub(nnzds),id(*),m,mp1,ierr,nnzd,nnzds 7 | double precision d(nnzd),dsub(nnzds) 8 | c 9 | c Call csrmsr in SPARSKIT2 to transform the storage format in d 10 | c from csr to msr 11 | c 12 | call csrmsr(m,d,jd,id,dsub,jdsub,dsub,jdsub,nnzds,ierr) 13 | mp1 = m + 1 14 | do i=1,mp1 15 | jdsub(i) = jdsub(i) - mp1 16 | enddo 17 | return 18 | end 19 | -------------------------------------------------------------------------------- /src/frand.c: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | void F77_SUB(fseedi)(void) 5 | { 6 | GetRNGstate(); 7 | } 8 | void F77_SUB(fseedo)(void) 9 | { 10 | PutRNGstate(); 11 | } 12 | void F77_SUB(frexp)(double* px, double* pa) 13 | { 14 | *px = rexp(*pa); 15 | } 16 | -------------------------------------------------------------------------------- /src/grexp.f: -------------------------------------------------------------------------------- 1 | C Output from Public domain Ratfor, version 1.05 2 | subroutine grexp(n, x, a) 3 | integer i,n 4 | double precision x(n),a 5 | call fseedi() 6 | do23000 i = 1,n 7 | call frexp(x(i), a) 8 | 23000 continue 9 | 23001 continue 10 | call fseedo() 11 | return 12 | end 13 | -------------------------------------------------------------------------------- /src/idmin.f: -------------------------------------------------------------------------------- 1 | 2 | INTEGER FUNCTION IDMIN(N,X,INCX) 3 | INTEGER I,N,INCX 4 | DOUBLE PRECISION X(INCX,*),SMIN 5 | C 6 | C THIS FUNCTION RETURNS THE INDEX OF THE SMALLEST (ALGEBRAIC) 7 | C COMPONENT OF X. ADAPTED FROM PORT3 LIBRARY -- FEB 2008. 8 | C ONLY EVERY INCXTH COMPONENT OF X IS CONSIDERED. 
9 | C 10 | IDMIN=0 11 | IF(N.EQ.0) RETURN 12 | SMIN=X(1,1) 13 | IDMIN=1 14 | DO 10 I=1,N 15 | IF(SMIN.LE.X(1,I)) GO TO 10 16 | SMIN=X(1,I) 17 | IDMIN=I 18 | 10 CONTINUE 19 | RETURN 20 | END 21 | 22 | -------------------------------------------------------------------------------- /src/iswap.f: -------------------------------------------------------------------------------- 1 | SUBROUTINE ISWAP (N, IX, INCX, IY, INCY) 2 | C***AUTHOR Vandevender, W. H., (SNLA) 3 | C***DESCRIPTION 4 | C 5 | C Extended B L A S Subprogram 6 | C Description of Parameters 7 | C 8 | C --Input-- 9 | C N number of elements in input vector(s) 10 | C IX integer vector with N elements 11 | C INCX storage spacing between elements of IX 12 | C IY integer vector with N elements 13 | C INCY storage spacing between elements of IY 14 | C 15 | C --Output-- 16 | C IX input vector IY (unchanged if N .LE. 0) 17 | C IY input vector IX (unchanged if N .LE. 0) 18 | C 19 | C Interchange integer IX and integer IY. 20 | C For I = 0 to N-1, interchange IX(LX+I*INCX) and IY(LY+I*INCY), 21 | C where LX = 1 if INCX .GE. 0, else LX = 1+(1-N)*INCX, and LY is 22 | C defined in a similar way using INCY. 23 | C 24 | C***REFERENCES C. L. Lawson, R. J. Hanson, D. R. Kincaid and F. T. 25 | C Krogh, Basic linear algebra subprograms for Fortran 26 | C usage, Algorithm No. 539, Transactions on Mathematical 27 | C Software 5, 3 (September 1979), pp. 308-323. 28 | C***ROUTINES CALLED (NONE) 29 | C***REVISION HISTORY (YYMMDD) 30 | C 850601 DATE WRITTEN 31 | C 861211 REVISION DATE from Version 3.2 32 | C 891214 Prologue converted to Version 4.0 format. (BAB) 33 | C 920310 Corrected definition of LX in DESCRIPTION. (WRB) 34 | C 920501 Reformatted the REFERENCES section. (WRB) 35 | C***END PROLOGUE ISWAP 36 | INTEGER IX(*), IY(*), ITEMP1, ITEMP2, ITEMP3 37 | C***FIRST EXECUTABLE STATEMENT ISWAP 38 | IF (N .LE. 0) RETURN 39 | IF (INCX .NE. INCY) GO TO 5 40 | C IF (INCX-1) 5,20,60 41 | IF (INCX-1 .LT. 0) THEN 42 | GO TO 5 43 | ELSE IF (INCX-1 .GT.0) THEN 44 | GO TO 60 45 | ELSE 46 | GO TO 20 47 | ENDIF 48 | C 49 | C Code for unequal or nonpositive increments. 50 | C 51 | 5 IIX = 1 52 | IIY = 1 53 | IF (INCX .LT. 0) IIX = (1-N)*INCX + 1 54 | IF (INCY .LT. 0) IIY = (1-N)*INCY + 1 55 | DO 10 I = 1,N 56 | ITEMP1 = IX(IIX) 57 | IX(IIX) = IY(IIY) 58 | IY(IIY) = ITEMP1 59 | IIX = IIX + INCX 60 | IIY = IIY + INCY 61 | 10 CONTINUE 62 | RETURN 63 | C 64 | C Code for both increments equal to 1. 65 | C 66 | C Clean-up loop so remaining vector length is a multiple of 3. 67 | C 68 | 20 M = MOD(N,3) 69 | IF (M .EQ. 0) GO TO 40 70 | DO 30 I = 1,M 71 | ITEMP1 = IX(I) 72 | IX(I) = IY(I) 73 | IY(I) = ITEMP1 74 | 30 CONTINUE 75 | IF (N .LT. 3) RETURN 76 | 40 MP1 = M + 1 77 | DO 50 I = MP1,N,3 78 | ITEMP1 = IX(I) 79 | ITEMP2 = IX(I+1) 80 | ITEMP3 = IX(I+2) 81 | IX(I) = IY(I) 82 | IX(I+1) = IY(I+1) 83 | IX(I+2) = IY(I+2) 84 | IY(I) = ITEMP1 85 | IY(I+1) = ITEMP2 86 | IY(I+2) = ITEMP3 87 | 50 CONTINUE 88 | RETURN 89 | C 90 | C Code for equal, positive, non-unit increments. 
91 | C 92 | 60 NS = N*INCX 93 | DO 70 I = 1,NS,INCX 94 | ITEMP1 = IX(I) 95 | IX(I) = IY(I) 96 | IY(I) = ITEMP1 97 | 70 CONTINUE 98 | RETURN 99 | END 100 | 101 | -------------------------------------------------------------------------------- /src/kuantile.f: -------------------------------------------------------------------------------- 1 | C Output from Public domain Ratfor, version 1.05 2 | subroutine kuantile(k,m,n,x) 3 | integer i,j,k(m),m,n 4 | double precision x(n) 5 | j = 0 6 | do23000 i = 1,m 7 | call dsel05(k(i)-j,n-j,x(j+1)) 8 | j = k(i) 9 | 23000 continue 10 | 23001 continue 11 | return 12 | end 13 | -------------------------------------------------------------------------------- /src/kuantiles.f: -------------------------------------------------------------------------------- 1 | C Output from Public domain Ratfor, version 1.05 2 | subroutine kuantiles(k,m,n,x) 3 | integer i,j,k(m),m,n 4 | double precision x(n) 5 | j = 0 6 | do23000 i = 1,m 7 | call dsel05(k(i)-j,n-j,x(j+1)) 8 | j = k(i) 9 | 23000 continue 10 | 23001 continue 11 | return 12 | end 13 | -------------------------------------------------------------------------------- /src/penalty.f: -------------------------------------------------------------------------------- 1 | C Output from Public domain Ratfor, version 1.05 2 | subroutine penalty(n,m,q,x,y,bnd,tlist,tlptr,tlend,rax,jax,ned,eps 3 | *,ierr) 4 | integer n,m,q,lp,lpl,ned,ierr 5 | integer bnd(n),tlist(q),tlptr(q),tlend(n),n4(4),p4(4),jax(m) 6 | double precision x(n),y(n),rax(m),eps 7 | double precision x4(4),y4(4),g4(4) 8 | logical orient 9 | ned = 0 10 | do23000 i=1,n 11 | lpl = tlend(i) 12 | lp = lpl 13 | 23002 continue 14 | lp = tlptr(lp) 15 | j = iabs(tlist(lp)) 16 | if(j .gt. i)then 17 | n4(1) = i 18 | n4(2) = j 19 | call fadjs(n4,n,q,tlist,tlptr,tlend) 20 | if(bnd(i)*bnd(j) .eq. 0)then 21 | ned = ned + 1 22 | do23009 k = 1,4 23 | x4(k) = x(n4(k)) 24 | y4(k) = y(n4(k)) 25 | 23009 continue 26 | 23010 continue 27 | if(orient(x4,y4))then 28 | call iswap(1,n4(3),1,n4(4),1) 29 | call dswap(1,x4(3),1,x4(4),1) 30 | call dswap(1,y4(3),1,y4(4),1) 31 | endif 32 | call ggap(x4,y4,g4,eps,ierr) 33 | if(ierr .eq. 1)then 34 | return 35 | endif 36 | call srtpai(n4,1,p4,1,4) 37 | do23015 k = 1,4 38 | rax((ned - 1)*4 + k) = g4(p4(k)) 39 | jax((ned - 1)*4 + k) = n4(p4(k)) 40 | 23015 continue 41 | 23016 continue 42 | if(ned*4 .gt. m)then 43 | return 44 | endif 45 | endif 46 | endif 47 | if(lp .eq. lpl)then 48 | goto 23004 49 | endif 50 | 23003 goto 23002 51 | 23004 continue 52 | 23000 continue 53 | 23001 continue 54 | return 55 | end 56 | logical function orient(x,y) 57 | double precision x(4), y(4) 58 | orient = (y(2) -y(1))*(x(3)-x(4))+(x(1)-x(2))*(y(3)-y(4)) .gt. 0 59 | return 60 | end 61 | subroutine fadjs(n4,n,q,tlist,tlptr,tlend) 62 | integer n,q,vp,vpl,v,v0,match 63 | integer n4(4),tlist(q),tlptr(q),tlend(n) 64 | match = 0 65 | vpl = tlend(n4(1)) 66 | vp = vpl 67 | k = 0 68 | 23021 continue 69 | k = k+1 70 | vp = tlptr(vp) 71 | v = tlist(vp) 72 | if(k.gt.1 .and. iabs(v) .eq. n4(2))then 73 | n4(3) = iabs(v0) 74 | match = 1 75 | goto 23022 76 | endif 77 | if(match .gt. 
0)then 78 | n4(4) = iabs(v) 79 | goto 23023 80 | endif 81 | v0 = v 82 | 23022 goto 23021 83 | 23023 continue 84 | return 85 | end 86 | subroutine ggap(x,y,g,eps,ierr) 87 | double precision x(4),y(4),g(4),w(2,4),h(2),d1,d2,eps 88 | d1 = -x(2) * y(1) + x(3) * y(1) + x(1) * y(2) - x(3) * y(2) - x(1) 89 | * * y(3) + x(2) * y(3) 90 | d2 = -x(2) * y(1) + x(4) * y(1) + x(1) * y(2) - x(4) * y(2) - x(1) 91 | * * y(4) + x(2) * y(4) 92 | if(dabs(d1) .lt. eps .or. dabs(d2) .lt. eps)then 93 | ierr = 1 94 | return 95 | endif 96 | h(1) = -(y(1) - y(2)) 97 | h(2) = (x(1) - x(2)) 98 | w(1, 1) = (y(2) - y(3))/d1 - (y(2) - y(4))/d2 99 | w(2, 1) = (x(3) - x(2))/d1 - (x(4) - x(2))/d2 100 | w(1, 2) = (y(3) - y(1))/d1 - (y(4) - y(1))/d2 101 | w(2, 2) = (x(1) - x(3))/d1 - (x(1) - x(4))/d2 102 | w(1, 3) = (y(1) - y(2))/d1 103 | w(2, 3) = (x(2) - x(1))/d1 104 | w(1, 4) = (y(2) - y(1))/d2 105 | w(2, 4) = (x(1) - x(2))/d2 106 | do23030 i = 1,4 107 | g(i) = h(1)*w(1,i)+h(2)*w(2,i) 108 | 23030 continue 109 | 23031 continue 110 | ierr = 0 111 | return 112 | end 113 | -------------------------------------------------------------------------------- /src/profnb.f: -------------------------------------------------------------------------------- 1 | C Output from Public domain Ratfor, version 1.05 2 | subroutine profnb(n,p,m,a,y,t,r,d,u,wn,wp,b,nit, info) 3 | integer n,p,m,nit(3),info 4 | double precision a(p,n), y(n), t(m), b(p,m), r(p) 5 | double precision d(n), u(n), wn(n,9), wp(p,p+3) 6 | double precision zero, one, eps, beta 7 | parameter( zero = 0.0d0) 8 | parameter( one = 1.0d0) 9 | parameter( beta = 0.99995d0) 10 | parameter( eps = 1.0d-6) 11 | do23000 i = 1,m 12 | call dgemv('N',p,n,one-t(i),a,p,d,1,zero,r,1) 13 | call dscal(n,zero,wn,1) 14 | call daxpy(n,one-t(i),u,1,wn,1) 15 | call rqfnb(n,p,a,y,r,d,u,beta,eps,wn,wp,nit,info) 16 | if(info .ne. 0)then 17 | goto 23001 18 | endif 19 | do23004 j = 1,n 20 | u(j) = one 21 | d(j) = one 22 | 23004 continue 23 | 23005 continue 24 | call dcopy(p,wp,1,b(1,i),1) 25 | 23000 continue 26 | 23001 continue 27 | return 28 | end 29 | -------------------------------------------------------------------------------- /src/pwxy.f: -------------------------------------------------------------------------------- 1 | C Output from Public domain Ratfor, version 1.05 2 | subroutine pwxy(n,p,m,a,y,tau,qk,r,b,w,band,n0,d,u,wn,wp, aa,yy,sl 3 | *o,shi,rhs,glob,ghib,nit,info) 4 | integer n,p,m,kk(2),nn,n0,nit(5,m),info(m),sumbad,i,j,k,ir 5 | integer loq,hiq, slo(n),shi(n),ifix,ibad 6 | logical notopt 7 | double precision a(p,n),y(n),tau,qk(2),r(n),d(n),u(n),b(p,m),w(n) 8 | double precision wn(n,9), wp(p,(p+3)),band(n) 9 | double precision glob(p),ghib(p),aa(p,n),yy(n),rhs(p) 10 | double precision zero,one,beta,eps,big 11 | parameter(zero = 0.0d0) 12 | parameter(one = 1.0d0) 13 | parameter(beta = 0.99995d0) 14 | parameter(big = 1.0d+10) 15 | parameter(eps = 1.0d-06) 16 | do23000 ir = 1,m 17 | notopt = .true. 18 | call grexp(n,w,one) 19 | nn = n0 20 | ifix = 0 21 | ibad = 0 22 | 23002 if(notopt)then 23 | ibad = ibad + 1 24 | loq = max0(1, int(n*tau - nn/2.)) 25 | hiq = min0(int(n*tau + nn/2.), n) 26 | qk(1) = r(loq) 27 | qk(2) = r(hiq) 28 | call iphil(n,0,slo) 29 | call iphil(n,0,shi) 30 | do23004 i = 1,n 31 | if(r(i) .lt. qk(1))then 32 | slo(i) = 1 33 | else 34 | if(r(i) .gt. 
qk(2))then 35 | shi(i) = 1 36 | endif 37 | endif 38 | 23004 continue 39 | 23005 continue 40 | 23010 if(notopt)then 41 | ifix = ifix + 1 42 | call dphil(p,zero,glob) 43 | call dphil(p,zero,ghib) 44 | call dphil(n,one,d) 45 | call dphil(n,one,u) 46 | k = 0 47 | do23012 i = 1,n 48 | if(slo(i) .eq. 0 .and. shi(i) .eq. 0)then 49 | k = k + 1 50 | call dphil(p,zero,aa(1,k)) 51 | call daxpy(p,w(i),a(1,i),1,aa(1,k),1) 52 | yy(k) = -y(i)*w(i) 53 | else 54 | if(slo(i) .eq. 1)then 55 | do23018 j = 1,p 56 | glob(j) = glob(j) + a(j,i) * w(i) 57 | 23018 continue 58 | 23019 continue 59 | else 60 | if(shi(i) .eq. 1)then 61 | do23022 j = 1,p 62 | ghib(j) = ghib(j) + a(j,i) * w(i) 63 | 23022 continue 64 | 23023 continue 65 | endif 66 | endif 67 | endif 68 | 23012 continue 69 | 23013 continue 70 | call dcopy(p,glob,1,aa(1,k+1),1) 71 | call dcopy(p,ghib,1,aa(1,k+2),1) 72 | yy(k+1) = big 73 | yy(k+2) = -big 74 | call dgemv('N',p,k+2,one-tau,aa,p,d,1,zero,rhs,1) 75 | call dscal(k+2,zero,wn,1) 76 | call daxpy(k+2,one-tau,u,1,wn,1) 77 | call rqfnb(k+2,p,aa,yy,rhs,d,u,beta,eps,wn,wp,nit(1,ir),info(ir)) 78 | call dcopy(p,wp,1,b(1,ir),1) 79 | call dcopy(n,y,1,u,1) 80 | call dgemv('T',p,n,one,a,p,b(1,ir),1,one,u,1) 81 | sumbad = 0 82 | do23024 i = 1,n 83 | if((u(i) .gt. 0) .and. slo(i) .eq. 1)then 84 | slo(i) = 0 85 | sumbad = sumbad + 1 86 | endif 87 | if((u(i) .lt. 0) .and. shi(i) .eq. 1)then 88 | shi(i) = 0 89 | sumbad = sumbad + 1 90 | endif 91 | 23024 continue 92 | 23025 continue 93 | if(sumbad .gt. 0)then 94 | if(sumbad .gt. 0.1 * nn)then 95 | nn = min(2 * nn, n) 96 | goto 23011 97 | endif 98 | else 99 | notopt = .false. 100 | endif 101 | goto 23010 102 | endif 103 | 23011 continue 104 | nit(4,ir) = ifix 105 | nit(5,ir) = ibad 106 | goto 23002 107 | endif 108 | 23003 continue 109 | 23000 continue 110 | 23001 continue 111 | return 112 | end 113 | -------------------------------------------------------------------------------- /src/qfnb.f: -------------------------------------------------------------------------------- 1 | C Output from Public domain Ratfor, version 1.05 2 | subroutine qfnb(n,p,m,a,y,t,r,d,u,wn,wp,b,nit, info) 3 | integer n,p,m,nit(3),info 4 | double precision a(p,n), y(n), t(m), b(p,m), r(p) 5 | double precision d(n), u(n), wn(n,9), wp(p,p+3) 6 | double precision zero, one, eps, beta 7 | parameter( zero = 0.0d0) 8 | parameter( one = 1.0d0) 9 | parameter( beta = 0.99995d0) 10 | parameter( eps = 1.0d-6) 11 | do23000 i = 1,m 12 | call dgemv('N',p,n,one-t(i),a,p,d,1,zero,r,1) 13 | call dscal(n,zero,wn,1) 14 | call daxpy(n,one-t(i),u,1,wn,1) 15 | call rqfnb(n,p,a,y,r,d,u,beta,eps,wn,wp,nit,info) 16 | if(info .ne. 
0)then 17 | goto 23001 18 | endif 19 | do23004 j = 1,n 20 | u(j) = one 21 | d(j) = one 22 | 23004 continue 23 | 23005 continue 24 | call dcopy(p,wp,1,b(1,i),1) 25 | 23000 continue 26 | 23001 continue 27 | return 28 | end 29 | -------------------------------------------------------------------------------- /src/qselect.f: -------------------------------------------------------------------------------- 1 | C Output from Public domain Ratfor, version 1.05 2 | subroutine qselect(n,x,q) 3 | integer n,k,l,r 4 | double precision x(n),q 5 | k=nint(q*n) 6 | l=1 7 | r=n 8 | call select(n,x,l,r,k) 9 | q=x(k) 10 | return 11 | end 12 | recursive subroutine select(n,x,l,r,k) 13 | integer n,m,l,r,k,ll,rr,i,j,mmax 14 | double precision x(n),z,s,d,t,fm,cs,cd 15 | parameter(cs = 0.5d0) 16 | parameter(cd = 0.5d0) 17 | parameter(mmax = 600) 18 | 23000 if(r.gt.l)then 19 | if(r-l.gt.mmax)then 20 | m=r-l+1 21 | i=k-l+1 22 | fm = dble(m) 23 | z=log(fm) 24 | s=cs*exp(2*z/3) 25 | d=cd*sqrt(z*s*(m-s)/fm)*sign(1,i-m/2) 26 | ll=max(l,nint(k-i*s/fm + d)) 27 | rr=min(r,nint(k+(m-i)*s/fm + d)) 28 | call select(n,x,ll,rr,k) 29 | endif 30 | t=x(k) 31 | i=l 32 | j=r 33 | call dswap(1,x(l),1,x(k),1) 34 | if(x(r).gt.t)then 35 | call dswap(1,x(r),1,x(l),1) 36 | endif 37 | 23006 if(i.lt.j)then 38 | call dswap(1,x(i),1,x(j),1) 39 | i=i+1 40 | j=j-1 41 | 23008 if(x(i).lt.t)then 42 | i=i+1 43 | goto 23008 44 | endif 45 | 23009 continue 46 | 23010 if(x(j).gt.t)then 47 | j=j-1 48 | goto 23010 49 | endif 50 | 23011 continue 51 | goto 23006 52 | endif 53 | 23007 continue 54 | if(x(l).eq.t)then 55 | call dswap(1,x(l),1,x(j),1) 56 | else 57 | j=j+1 58 | call dswap(1,x(j),1,x(r),1) 59 | endif 60 | if(j.le.k)then 61 | l=j+1 62 | endif 63 | if(k.le.j)then 64 | r=j-1 65 | endif 66 | goto 23000 67 | endif 68 | 23001 continue 69 | return 70 | end 71 | -------------------------------------------------------------------------------- /src/ratfor/boot.r: -------------------------------------------------------------------------------- 1 | #parzen, wei and ying's bootstrap 2 | subroutine pwy(m,n,k,m5,n2,a,c,b,t,toler,ift,x,e,s, wa,wb) 3 | double precision b(m),a(k,n),x(n,k) 4 | double precision wa(m5,n2),wb(m),e(m),c(m,n) 5 | double precision t,toler 6 | integer m,n,k,m5,n2,ift 7 | integer s(m) 8 | do i=1,k{ 9 | call dcopy(n,a(i,1),k,c(m,1),m) 10 | call rq0(m,n,m5,n2,c,b,t,toler,ift,x(1,i),e,s,wa,wb) 11 | } 12 | return 13 | end 14 | #ratfor outer loop for xy-pairs rq bootstrap 15 | #notation is horrendous 16 | # ratfor R-function 17 | #______________________ 18 | # m -> n number of original obs 19 | # n -> p number of parameters 20 | # k -> R number of BS replications 21 | # mofn -> m number of BS observations 22 | # 23 | subroutine xys(mofn,m,n,k,mofn5,n2,a,b,tau,toler,ift,x,e,s, wa,wb,aa,bb,ss) 24 | double precision b(m),a(m,n),x(n,k) 25 | double precision wa(mofn5,n2),wb(mofn) 26 | double precision aa(mofn,n),bb(mofn),e(mofn) 27 | double precision tau,toler 28 | integer ss(mofn,k),s(mofn),mofn,m,n,k,mofn5,n2,ift(k) 29 | do i=1,k { 30 | do ii=1,mofn{ 31 | bb(ii)=b(ss(ii,i)) 32 | do jj=1,n{ 33 | aa(ii,jj)=a(ss(ii,i),jj) 34 | } 35 | } 36 | call rq0(mofn,n,mofn5,n2,aa,bb,tau,toler,ift(i),x(1,i),e,s,wa,wb) 37 | } 38 | return 39 | end 40 | # Weighted (Bose) Bootstrap version 41 | subroutine wxy(m,n,k,m5,n2,a,b,tau,toler,ift,x,e,s,wa,wb,aa,bb,w) 42 | double precision b(m),a(m,n),x(n,k) 43 | double precision w(m,k),wa(m5,n2),wb(m) 44 | double precision aa(m,n),bb(m),e(m) 45 | double precision tau,toler 46 | integer s(m),m,n,k,m5,n2,ift(k) 47 | do i=1,k 
{ 48 | do ii=1,m{ 49 | bb(ii)=b(ii)*w(ii,i) 50 | do jj=1,n{ 51 | aa(ii,jj)=a(ii,jj)*w(ii,i) 52 | } 53 | } 54 | call rq0(m,n,m5,n2,aa,bb,tau,toler,ift(i),x(1,i),e,s,wa,wb) 55 | } 56 | return 57 | end 58 | 59 | #does a matrix multiply to make Y matrix for heqf bootstrap 60 | subroutine heqfy(n,p,r,x,b,y) 61 | integer n,p,r 62 | double precision x(n,p),b(p,n,r),y(n,r),ddot 63 | do i=1,r{ 64 | do j=1,n{ 65 | y(j,i)=ddot(p,x(j,1),n,b(1,j,i),1) 66 | } 67 | } 68 | return 69 | end 70 | -------------------------------------------------------------------------------- /src/ratfor/brute.r: -------------------------------------------------------------------------------- 1 | # New Version of Brute Force Algorithm for the Powell Estimator 2 | # 3 | # Roger Koenker: last revision: 12 February 2008 4 | # 5 | # Problem: || min{Ax,c} - b ||_tau = min! 6 | # 7 | # The matrix H is p by m matrix of the n choose p basis indices h on input. 8 | # When called it is assumed (!!) that U = A[H[,1],]^{-1} and xh = U b[H[,1]]. 9 | subroutine brutpow(n,p,m,H,A,b,c,x,tau,U,xh,d,jminz,nflag) 10 | 11 | integer n,p,m 12 | double precision x(p),A(n,p),b(n),c(n) 13 | double precision U(p,p),d(p),xh(p) 14 | double precision zero, one,tau,pow,minz,z 15 | integer H(p,m),k,findk,jminz,nflag 16 | 17 | PARAMETER(zero = 0.0d0, one = 1.d0) 18 | 19 | jminz = 1 20 | minz = pow(n,p,x,A,b,c,tau) 21 | do j = 2,m { 22 | k = findk(p,H(1,j),H(1,j-1)) 23 | if(k == 0) 24 | {nflag = 4; return} 25 | call pivot(n,p,H(1,j-1),H(k,j),H(k,j-1),A,U,d,xh,nflag) 26 | if(nflag > 0) 27 | return 28 | do i = 1,p{ 29 | xh(i) = b(H(i,j)) 30 | } 31 | call dgemv('N',p,p,one,U,p,xh,1,zero,x,1) 32 | z = pow(n,p,x,A,b,c,tau) 33 | if(z < minz) { 34 | minz = z 35 | jminz = j 36 | } 37 | } 38 | return 39 | end 40 | 41 | ##################################################### 42 | 43 | integer function findk(p,h,g) 44 | integer p,k,h(p),g(p) 45 | findk = 0 46 | do k = 1,p{ 47 | if(h(k) != g(k)) 48 | {findk = k; break} 49 | } 50 | return 51 | end 52 | 53 | -------------------------------------------------------------------------------- /src/ratfor/combos.r: -------------------------------------------------------------------------------- 1 | # Subroutine to list the r choose n subsets of {1,2,...,r} in an order such that 2 | # adjacent subsets have only one element swapped. Algorithm modeled on the pascal 3 | # algorithm of Limin Xiang and Kazuo Ushijima (2001) "On O(1) Time Algorithms for 4 | # Combinatorial Generation," Computer Journal, 44(4), 292-302. 5 | # http://comjnl.oxfordjournals.org/cgi/reprint/44/4/292 6 | 7 | # Translated into ratfor: 12 February, 2008 R. Koenker. 
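# Argument roles, as declared below: r is the size of the ground set {1,...,r},
# n is the subset size, and m must equal choose(r,n); on exit the n by m integer
# array A holds one subset per column (adjacent columns differing in a single
# element), while c, e and Last are integer work vectors of length r.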
8 | 9 | subroutine combin(r,n,m,a,c,e,Last) 10 | 11 | integer r,n,m,t,k,j,M0,Mj,s 12 | integer A(n,m),c(r),e(r),Last(r) 13 | logical odd 14 | 15 | M0 = r-n 16 | t = n+1 17 | k = 1 18 | j = 0 19 | c(1) = 0 20 | repeat{ 21 | j = j + 1 22 | c(j) = j 23 | e(j) = j - 1 24 | if(odd(j)) 25 | Last(j) = M0 + j 26 | else 27 | Last(j) = j + 1 28 | if(j == n) break 29 | } 30 | do i = 1,n 31 | A(i,1) = c(i) 32 | if(n < r) { 33 | repeat { 34 | k = k + 1 35 | S = c(j) 36 | Mj = M0 + j 37 | e(n+1) = n 38 | if(odd(j)){ 39 | if(c(j) == Mj) { 40 | c(j) = c(j-1) + 1 41 | Last(j+1) = c(j) + 1 42 | } 43 | else 44 | c(j) = c(j) + 1 45 | } 46 | else { 47 | if(c(j) == c(j-1) + 1) 48 | c(j) = Mj 49 | else { 50 | Last(j+1) = c(j) 51 | c(j) = c(j) - 1 52 | } 53 | } 54 | if(c(j) == Last(j)) { 55 | Last(j) = S 56 | e(j+1) = e(j) 57 | e(j) = j-1 58 | } 59 | if( (j < n) & (c(j) == Mj)) { 60 | t = j 61 | j = e(t+1) 62 | e(t+1) = t 63 | } 64 | else { 65 | if(t == j) t = t + 1 66 | if(t < e(n+1)) j = t 67 | else j = e(n+1) 68 | } 69 | do i = 1,n 70 | A(i,k) = c(i) 71 | if(j == 0) break 72 | } 73 | } 74 | return 75 | end 76 | 77 | 78 | logical function odd(j) 79 | integer j 80 | odd = (mod(j,2) == 1) 81 | return 82 | end 83 | -------------------------------------------------------------------------------- /src/ratfor/crqfnb.r: -------------------------------------------------------------------------------- 1 | # Peng-Huang Censored Quantile Regression 2 | subroutine crqfnb(n,p,a1,c1,n1,X,y,c,B,g,m,r,s,d,u,wn,wp,info) 3 | 4 | # Input 5 | # a1 = p by n1 design matrix (X[c,] transposed) 6 | # c1 = n1 response vector -y[c] 7 | # X = n by p design matrix 8 | # y = n response vector 9 | # c = n censoring indicator 10 | # g = m grid vector of taus 11 | # d = residual n-vector, initialized == 1 12 | # s = initialized cumhaz vector (rep(0,n)) 13 | # u = upper bound n-vector, initalized == 1 14 | # Workspace 15 | # r = rhs p-vector 16 | # wn = work array n1 by 9 17 | # wp = work array p by p+3 18 | # Output 19 | # B = p by m matrix of estimated coefficients 20 | # m = final column dimension of of B 21 | # Note 22 | # d is used both for residuals and as a work vector for rqfnb. 
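# The grid g is assumed to be increasing in (0,1): the increment dH computed
#   below is then the nonnegative jump of the cumulative hazard -log(1 - tau)
#   between successive grid points, as used in the Peng-Huang estimating equation.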
23 | # u needs to be reinitialized to ones at each iteration 24 | 25 | integer n,p,n1,m,info,nit(3) 26 | double precision a1(p,n1),c1(n),X(n,p),y(n),c(n),B(p,m),g(m) 27 | double precision wn(n1,9),wp(p,p+3),r(p),s(n),d(n),u(n) 28 | double precision zero,half,one,beta,eps,dH 29 | 30 | parameter( zero = 0.0d0) 31 | parameter( half = 0.5d0) 32 | parameter( one = 1.0d0) 33 | parameter( beta = 0.99995d0) 34 | parameter( eps = 1.0d-8) 35 | 36 | do k = 2,m { 37 | dH = -log(one - g(k)) + log(one - g(k-1)) 38 | do i = 1,n { 39 | u(i) = one 40 | wn(i,1) = half # initialize dual vector 41 | if(d(i) >= zero) s(i) = s(i) + dH 42 | d(i) = c(i) - s(i) 43 | } 44 | call dgemv('T',n,p,one,X,n,d,1,zero,r,1) 45 | call rqfnb(n1,p,a1,c1,r,d,u,beta,eps,wn,wp,nit,info) 46 | if(info != 0) break 47 | call dcopy(p,wp,1,B(1,k-1),1) 48 | call dcopy(n,y,1,d,1) 49 | call dgemv('N',n,p,one,X,n,B(1,k-1),1,one,d,1) 50 | } 51 | m = k-1 52 | return 53 | end 54 | -------------------------------------------------------------------------------- /src/ratfor/grexp.r: -------------------------------------------------------------------------------- 1 | 2 | subroutine grexp(n, x, a) 3 | integer i,n 4 | double precision x(n),a 5 | call fseedi() 6 | do i = 1,n 7 | call frexp(x(i), a) 8 | call fseedo() 9 | return 10 | end 11 | 12 | 13 | -------------------------------------------------------------------------------- /src/ratfor/kuantiles.r: -------------------------------------------------------------------------------- 1 | # Wrapper to compute several quantiles of a sample of n observations 2 | # Calls K.C. Kiwiel's version of Floyd and Rivest's select algorithm 3 | # Caveat Emptor!! The ks need to be sorted. 4 | subroutine kuantiles(k,m,n,x) 5 | integer i,j,k(m),m,n 6 | double precision x(n) 7 | 8 | j = 0 9 | do i = 1,m{ 10 | call dsel05(k(i)-j,n-j,x(j+1)) 11 | j = k(i) 12 | } 13 | return 14 | end 15 | -------------------------------------------------------------------------------- /src/ratfor/powell.r: -------------------------------------------------------------------------------- 1 | # Revised Version of Fitzenberger's Steepest Descent Algorithm for the Powell Estimator 2 | # 3 | # Roger Koenker: last revision: 6 February 2008 4 | # 5 | # Problem: || min{Ax,c} - b ||_tau = min! 
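# Here ||.||_tau is the check-function loss: as implemented in pow() and rho()
# at the end of this file, the objective is sum_i rho_tau( b_i - min(a_i'x, c_i) ).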
6 | # 7 | subroutine powell(n,p,p2,A,b,c,x,tau,h,f,U,s,g,d,xh,maxit,nflag) 8 | 9 | integer n,p,p2 10 | double precision x(p),A(n,p),b(n),c(n) 11 | double precision f(n),U(p,p),s(n),g(p2),d(p),xh(p) 12 | double precision zero, one, mone, step, tau,pow 13 | integer h(p),hin,hout,k,it,inset,maxit,nflag 14 | 15 | PARAMETER(zero = 0.0d0, one = 1.d0, mone = -1.d0) 16 | 17 | it = 0 18 | 19 | repeat { 20 | it = it + 1 21 | if(it > 1) # Otherwise, assume U is A(h)^{-1} 22 | call pivot(n,p,h,hin,hout,A,U,d,xh,nflag) 23 | if(nflag > 0) 24 | {nflag = nflag + 2; return} 25 | do i = 1,p{ 26 | xh(i) = b(h(i)) 27 | } 28 | 29 | call dgemv('N',p,p,one,U,p,xh,1,zero,x,1) 30 | call dgemv('N',n,p,one,A,n,x,1,zero,f,1) 31 | do i = 1,n{ 32 | if(inset(p,i,h) > 0 | f(i) > c(i)) 33 | s(i) = zero 34 | else if(b(i) < f(i)) 35 | s(i) = one - tau 36 | else 37 | s(i) = - tau 38 | } 39 | call dgemv('T',n,p,one,A,n,s,1,zero,xh,1) 40 | call dgemv('T',p,p,one,U,p,xh,1,zero,g,1) 41 | do i = 1,p { 42 | if(f(h(i)) < c(h(i))) 43 | if(b(h(i)) < c(h(i))) 44 | g(i + p) = - g(i) + one - tau 45 | else 46 | g(i + p) = - g(i) - tau 47 | else 48 | g(i + p) = - g(i) + tau 49 | g(i) = g(i) + one - tau 50 | } 51 | k = idmin(p2,g,1) 52 | if(g(k) >= 0 | it > maxit) 53 | break 54 | call dscal(p,zero,d,1) 55 | if(k <= p) 56 | call daxpy(p,one,U(1,k),1,d,1) 57 | else{ 58 | k = k - p 59 | call daxpy(p,mone,U(1,k),1,d,1) 60 | } 61 | call dgemv('N',n,p,one,A,n,d,1,zero,s,1) 62 | do i = 1,n { 63 | call dcopy(p,x,1,xh,1) 64 | step = (b(i) - f(i))/s(i) 65 | call daxpy(p,step,d,1,xh,1) 66 | s(i) = pow(n,p,xh,A,b,c,tau) 67 | } 68 | hin = idmin(n,s,1) 69 | if(inset(p,hin,h) > 0) 70 | {nflag = 2; break} 71 | hout = h(k) 72 | } 73 | if(it > maxit) nflag = 1 74 | return 75 | end 76 | 77 | ##################################################### 78 | subroutine pivot(n,p,h,hin,hout,A,B,u,v,eflag) 79 | 80 | integer n,p,h(p),hin,hout,inset,k,eflag 81 | double precision A(n,p),B(p,p),u(p),v(p) 82 | double precision zero,one 83 | 84 | PARAMETER(zero = 0.d0, one = 1.d0) 85 | 86 | eflag = 0 87 | k = inset(p,hout,h) 88 | if(k == 0) 89 | {eflag = 1; return} 90 | if(inset(p,hin,h) > 0) 91 | {eflag = 2; return} 92 | if(hin < 1 | hin > n) 93 | {eflag = 3; return} 94 | 95 | call dcopy(p,A(hin,1),n,v,1) 96 | call dgemv('T',p,p,one,B,p,v,1,zero,u,1) 97 | call dcopy(p,B(1,k),1,v,1) 98 | do j = 1,p{ 99 | do i = 1,p{ 100 | if(j == k) 101 | B(i,j) = B(i,j)/u(k) 102 | else 103 | B(i,j) = B(i,j) - (u(j)/u(k)) * v(i) 104 | } 105 | } 106 | h(k) = hin 107 | return 108 | end 109 | 110 | 111 | ##################################################### 112 | integer function inset(p,k,h) 113 | integer p,k,h(p) 114 | 115 | do inset = 1,p{ 116 | if(h(inset) == k) return 117 | } 118 | inset = 0 119 | return 120 | end 121 | 122 | ##################################################### 123 | double precision function pow(n,p,x,A,b,c,tau) 124 | integer n,p 125 | double precision x(p),A(n,p),b(n),c(n) 126 | double precision tau,u,zero,rho,fit,ddot 127 | 128 | PARAMETER(zero= 0.d0) 129 | 130 | pow = zero 131 | do i = 1,n{ 132 | fit = ddot(p,A(i,1),n,x,1) 133 | u = b(i) - min(fit,c(i)) 134 | pow = pow + rho(u, tau) 135 | } 136 | return 137 | end 138 | ##################################################### 139 | double precision function rho(u,tau) 140 | double precision u,tau,one 141 | 142 | PARAMETER(one = 1.d0) 143 | 144 | if(u < 0) 145 | rho = u * (tau - one) 146 | else 147 | rho = u * tau 148 | return 149 | end 150 | -------------------------------------------------------------------------------- 
/src/ratfor/qfnb.r: -------------------------------------------------------------------------------- 1 | # Toy fnb routine for multiple taus 2 | subroutine qfnb(n,p,m,a,y,t,r,d,u,wn,wp,B,nit, info) 3 | 4 | # Input: 5 | # n = sample size 6 | # p = parametric dimension of model 7 | # m = dimension of tau vector 8 | # a = p by n design matrix (transposed) 9 | # y = n dimensional response vector 10 | # t = m dimensional tau vector 11 | # r = p dimensional rhs vector 12 | # d = n dimensional vector of ones 13 | # u = n dimensional vector of ones 14 | # wn = n by 9 work array 15 | # wp = p by p+3 work array 16 | # 17 | # Output: 18 | # B = p by m matrix of coefficients 19 | 20 | integer n,p,m,nit(3),info 21 | double precision a(p,n), y(n), t(m), B(p,m), r(p) 22 | double precision d(n), u(n), wn(n,9), wp(p,p+3) 23 | double precision zero, one, eps, beta 24 | 25 | parameter( zero = 0.0d0) 26 | parameter( one = 1.0d0) 27 | parameter( beta = 0.99995d0) 28 | parameter( eps = 1.0d-6) 29 | 30 | do i = 1,m{ 31 | call dgemv('N',p,n,one-t(i),a,p,d,1,zero,r,1) 32 | call dscal(n,zero,wn,1) 33 | call daxpy(n,one-t(i),u,1,wn,1) 34 | call rqfnb(n,p,a,y,r,d,u,beta,eps,wn,wp,nit,info) 35 | if(info != 0) break 36 | do j = 1,n{ 37 | u(j) = one 38 | d(j) = one 39 | } 40 | call dcopy(p,wp,1,B(1,i),1) 41 | } 42 | return 43 | end 44 | -------------------------------------------------------------------------------- /src/ratfor/qselect.r: -------------------------------------------------------------------------------- 1 | #function to compute qth quantile of a sample of n observations 2 | subroutine qselect(n,x,q) 3 | integer n,k,l,r 4 | double precision x(n),q 5 | k=nint(q*n) 6 | l=1 7 | r=n 8 | call select(n,x,l,r,k) 9 | q=x(k) 10 | return 11 | end 12 | #This is a ratfor implementation of the floyd-rivest algorithm--SELECT 13 | #Reference: CACM 1975, alg #489, p173, algol-68 version 14 | #Translation by Roger Koenker August, 1996. 15 | #As originally proposed mmax=600, and cs=cd=.5 16 | #Calls blas routine dswap 17 | recursive subroutine select(n,x,l,r,k) 18 | integer n,m,l,r,k,ll,rr,i,j,mmax 19 | double precision x(n),z,s,d,t,fm,cs,cd 20 | parameter(cs = 0.5d0) 21 | parameter(cd = 0.5d0) 22 | parameter(mmax = 600) 23 | while(r>l){ 24 | if(r-l>mmax){ 25 | m=r-l+1 26 | i=k-l+1 27 | fm = dble(m) 28 | z=log(fm) 29 | s=cs*exp(2*z/3) 30 | d=cd*sqrt(z*s*(m-s)/fm)*sign(1,i-m/2) 31 | ll=max(l,nint(k-i*s/fm + d)) 32 | rr=min(r,nint(k+(m-i)*s/fm + d)) 33 | call select(n,x,ll,rr,k) 34 | } 35 | t=x(k) 36 | i=l 37 | j=r 38 | call dswap(1,x(l),1,x(k),1) 39 | if(x(r)>t)call dswap(1,x(r),1,x(l),1) 40 | while(i<j){ 41 | call dswap(1,x(i),1,x(j),1) 42 | i=i+1 43 | j=j-1 44 | while(x(i)<t)i=i+1 45 | while(x(j)>t)j=j-1 46 | } 47 | if(x(l)==t) 48 | call dswap(1,x(l),1,x(j),1) 49 | else{ 50 | j=j+1 51 | call dswap(1,x(j),1,x(r),1) 52 | } 53 | if(j<=k)l=j+1 54 | if(k<=j)r=j-1 55 | } 56 | return 57 | end 58 | -------------------------------------------------------------------------------- /src/ratfor/rls.r: -------------------------------------------------------------------------------- 1 | #This is a simple recursive least squares routine Reference: Harvey TSM p.
100 2 | # 3 | subroutine rls(n,p,x,y,b,A,Ax) 4 | integer n,p 5 | double precision x(p,n),y(n),b(p,n),A(p,p),Ax(p) 6 | double precision zero,one,mone,f,r,ddot 7 | data one/1.d0/ 8 | data mone/-1.d0/ 9 | data zero/0.d0/ 10 | # 11 | #On input: 12 | # 13 | # A = crossprod(x[1:p,1:p))^{-1} 14 | # b(,p) = A crossprod(x[1:p,1:p],y[1:p]) 15 | # 16 | do i = (p+1),n { 17 | call dgemv('N',p,p,one,A,p,x(1,i),1,zero,Ax,1) 18 | f = one + ddot(p,x(1,i),1,Ax,1) 19 | r = (y(i)-ddot(p,x(1,i),1,b(1,i-1),1))/f 20 | call daxpy(p,one,b(1,i-1),1,b(1,i),1) 21 | call daxpy(p,r,Ax,1,b(1,i),1) 22 | call dger(p,p,mone/f,Ax,1,Ax,1,A,p) 23 | } 24 | return 25 | end 26 | 27 | 28 | -------------------------------------------------------------------------------- /src/ratfor/rqs.r: -------------------------------------------------------------------------------- 1 | # Wrapper for rq solutions with multiple y's 2 | subroutine rqs(m,n,k,m5,n2,a,b,tau,toler,ift,x,e,s,wa,wb) 3 | double precision b(m,k),a(m,n),x(n,k),e(m),wa(m5,n2),wb(m) 4 | double precision tau,toler 5 | integer s(m),m,n,k,m5,n2,ift(k) 6 | do i=1,k 7 | call rq0(m,n,m5,n2,a,b(1,i),tau,toler,ift(i),x(1,i),e,s,wa,wb) 8 | return 9 | end 10 | -------------------------------------------------------------------------------- /src/rls.f: -------------------------------------------------------------------------------- 1 | C Output from Public domain Ratfor, version 1.05 2 | subroutine rls(n,p,x,y,b,a,ax) 3 | integer n,p 4 | double precision x(p,n),y(n),b(p,n),a(p,p),ax(p) 5 | double precision zero,one,mone,f,r,ddot 6 | data one/1.d0/ 7 | data mone/-1.d0/ 8 | data zero/0.d0/ 9 | do23000 i = (p+1),n 10 | call dgemv('N',p,p,one,a,p,x(1,i),1,zero,ax,1) 11 | f = one + ddot(p,x(1,i),1,ax,1) 12 | r = (y(i)-ddot(p,x(1,i),1,b(1,i-1),1))/f 13 | call daxpy(p,one,b(1,i-1),1,b(1,i),1) 14 | call daxpy(p,r,ax,1,b(1,i),1) 15 | call dger(p,p,mone/f,ax,1,ax,1,a,p) 16 | 23000 continue 17 | 23001 continue 18 | return 19 | end 20 | -------------------------------------------------------------------------------- /src/rqs.f: -------------------------------------------------------------------------------- 1 | C Output from Public domain Ratfor, version 1.05 2 | subroutine rqs(m,n,k,m5,n2,a,b,tau,toler,ift,x,e,s,wa,wb) 3 | double precision b(m,k),a(m,n),x(n,k),e(m),wa(m5,n2),wb(m) 4 | double precision tau,toler 5 | integer s(m),m,n,k,m5,n2,ift(k) 6 | do23000 i=1,k 7 | call rq0(m,n,m5,n2,a,b(1,i),tau,toler,ift(i),x(1,i),e,s,wa,wb) 8 | 23000 continue 9 | 23001 continue 10 | return 11 | end 12 | -------------------------------------------------------------------------------- /src/sakj.f: -------------------------------------------------------------------------------- 1 | C Output from Public domain Ratfor, version 1.05 2 | subroutine sakj(x,z,p,iker,dens,psi,score,nx,nz,h,alpha,kappa,xlam 3 | *) 4 | double precision dens(nz),score(nz),psi(nz),h,kappa 5 | double precision z(nz),x(nx),xlam(nx),p(nx),qrange,pi 6 | double precision con1,sum,sqsum,xsd,a,fifth,hinv,half 7 | double precision xn,xker,dxker,ddxker,fact,xponen,alpha,glog,zero, 8 | *one,two 9 | parameter( zero = 0.d0) 10 | parameter( one = 1.d0) 11 | parameter( two = 2.d0) 12 | parameter( four = 4.d0) 13 | parameter( half = 0.5d0) 14 | parameter( fifth = 0.2d0) 15 | parameter( pi = 3.141593d0) 16 | xn=nx 17 | if(iker.eq.0)then 18 | con1=one/sqrt(2.0*pi) 19 | else 20 | if(iker.eq.1)then 21 | con1=one/pi 22 | endif 23 | endif 24 | if(h.le.0.)then 25 | sum=0. 26 | sqsum=0. 
27 | do23006 i=1,nx 28 | sqsum=sqsum+x(i)*x(i)*p(i) 29 | sum=sum+x(i)*p(i) 30 | 23006 continue 31 | 23007 continue 32 | xsd=dsqrt(sqsum-sum*sum) 33 | sum=zero 34 | i=1 35 | 23008 if(.not.(i.lt.nx))goto 23010 36 | sum=sum+p(i) 37 | if(sum.lt..25)then 38 | goto 23009 39 | else 40 | qrange=x(i) 41 | goto 23010 42 | endif 43 | 23009 i=i+1 44 | goto 23008 45 | 23010 continue 46 | sum=one 47 | i=nx 48 | 23013 if(.not.(i.gt.0))goto 23015 49 | sum=sum-p(i) 50 | if(sum.gt..75)then 51 | goto 23014 52 | else 53 | qrange=x(i)-qrange 54 | goto 23015 55 | endif 56 | 23014 i=i-1 57 | goto 23013 58 | 23015 continue 59 | a=min(xsd,qrange/1.34) 60 | h=kappa*a/(xn**fifth) 61 | endif 62 | hinv=one/h 63 | do23018 j=1,nx 64 | xker=0. 65 | if(iker.eq.0)then 66 | do23022 i=1,nx 67 | xponen=(x(j)-x(i))*hinv 68 | xponen=half*xponen**2 69 | xker=xker+p(i)*exp(-xponen)*hinv 70 | 23022 continue 71 | 23023 continue 72 | else 73 | if(iker.eq.1)then 74 | do23026 i=1,nx 75 | xponen=(x(j)-x(i))*hinv 76 | xker=xker+p(i)*hinv/(1+xponen**2) 77 | 23026 continue 78 | 23027 continue 79 | endif 80 | endif 81 | xlam(j)=con1*xker 82 | 23018 continue 83 | 23019 continue 84 | glog=zero 85 | do23028 i=1,nx 86 | glog=glog+p(i)*log(xlam(i)) 87 | 23028 continue 88 | 23029 continue 89 | g=exp(glog) 90 | ginv=one/g 91 | do23030 i=1,nx 92 | xlam(i)=hinv/((xlam(i)*ginv)**(-alpha)) 93 | 23030 continue 94 | 23031 continue 95 | do23032 j=1,nz 96 | xker=zero 97 | dxker=zero 98 | ddxker=zero 99 | if(iker.eq.0)then 100 | do23036 i=1,nx 101 | xponen=(z(j)-x(i))*xlam(i) 102 | fact=exp(-half*xponen*xponen)*xlam(i) 103 | xker=xker+p(i)*fact 104 | dxker=dxker-p(i)*fact*xponen*xlam(i) 105 | ddxker=ddxker- p(i)*fact*(one - xponen**2)*xlam(i)**2 106 | 23036 continue 107 | 23037 continue 108 | else 109 | if(iker.eq.1)then 110 | do23040 i=1,nx 111 | xponen=(z(j)-x(i))*xlam(i) 112 | fact=xlam(i)/(one+xponen**2) 113 | xker=xker+p(i)*fact 114 | dxker=dxker-p(i)*two*xponen*fact**2 115 | ddxker=ddxker- p(i)*two*(fact**2)*(xlam(i)- four*(xponen**2)*fact) 116 | 23040 continue 117 | 23041 continue 118 | endif 119 | endif 120 | dens(j)=con1*xker 121 | psi(j)=-(dxker/xker) 122 | score(j)=(dxker/xker)**2-ddxker/xker 123 | 23032 continue 124 | 23033 continue 125 | return 126 | end 127 | -------------------------------------------------------------------------------- /src/srtpai.f: -------------------------------------------------------------------------------- 1 | SUBROUTINE SRTPAI ( A, SA, P, SP, N ) 2 | C 3 | C SRTPAI SETS P(1) = 1, P(SP+1) = 2, ..., P((N-1)*SP+1) = N 4 | C AND THEN REARRANGES P(1), P(SP+1), ..., P((N-1)*SP+1) SO THAT 5 | C A( (P(I)-1)*SA+1 ) .LE. A( (P(J)-1)*SA+1 ) IF AND ONLY IF 6 | C I .LT. J, WHERE I AND J SUBSCRIPT PROPER ELEMENTS OF P 7 | C 8 | INTEGER SP, P(SP, *), SA, H, PIH, PI 9 | INTEGER A(SA, *) 10 | C 11 | C CHECK INPUT PARAMETERS AND INITIALIZE H 12 | C 13 | CALL I1SRT( SA, SP, N ) 14 | IF ( I0SRT( 1, N, H ) .LT. 1 ) RETURN 15 | C 16 | C INITIALIZE P 17 | C 18 | DO 10 I = 1, N 19 | P(1, I) = I 20 | 10 CONTINUE 21 | C 22 | C CHECK IF DONE WITH SORT 23 | C 24 | 20 IF ( H .LT. 1 ) RETURN 25 | K = N - H 26 | C 27 | C COMPARE 28 | C 29 | DO 40 J = 1, K 30 | I = J 31 | 30 IH = I + H 32 | PI = P(1, I) 33 | PIH = P(1, IH) 34 | IF ( A(1, PI) .LE. A(1, PIH) ) GOTO 40 35 | C 36 | C EXCHANGE 37 | C 38 | P(1, I) = PIH 39 | P(1, IH) = PI 40 | C 41 | C PERCOLATE EXCHANGED LIST ELEMENT UP TO PROPER PLACE 42 | C 43 | I = I - H 44 | IF ( I .GE. 
1 ) GOTO 30 45 | 40 CONTINUE 46 | C 47 | H = ( H - 1 ) / 3 48 | GOTO 20 49 | C 50 | END 51 | SUBROUTINE I1SRT ( SA, SP, N ) 52 | C 53 | C I1SRT CHECKS LEGALITY OF VALUES OF SA, SP, N 54 | C 55 | INTEGER SA, SP 56 | C 57 | C/6S 58 | C IF ( N .LT. 0 ) 59 | C 1 CALL SETERR( 27HSRTXXX - ILLEGAL VALUE OF N, 27, 1, 2 ) 60 | C IF ( SA .LE. 0 ) 61 | C 1 CALL SETERR( 28HSRTXXX - ILLEGAL VALUE OF SA, 28, 2, 2 ) 62 | C IF ( SP .LE. 0 ) 63 | C 1 CALL SETERR( 28HSRTXXX - ILLEGAL VALUE OF SP, 28, 3, 2 ) 64 | C/7S 65 | C IF ( N .LT. 0 ) 66 | C 1 CALL SETERR( 'SRTXXX - ILLEGAL VALUE OF N', 27, 1, 2 ) 67 | C IF ( SA .LE. 0 ) 68 | C 1 CALL SETERR( 'SRTXXX - ILLEGAL VALUE OF SA', 28, 2, 2 ) 69 | C IF ( SP .LE. 0 ) 70 | C 1 CALL SETERR( 'SRTXXX - ILLEGAL VALUE OF SP', 28, 3, 2 ) 71 | C/ 72 | C 73 | RETURN 74 | C 75 | END 76 | 77 | 78 | INTEGER FUNCTION I0SRT ( SA, N, H ) 79 | C 80 | C I0SRT CHECKS INPUT PARAMETERS N, SA AND CALCULATES H 81 | C RETURNS H = 0 IF NO SORTING NECESSARY, ELSE 82 | C RETURNS SPACING, H, FOR FIRST INSERTION SORT. 83 | C I0SRT RETURNS TOTAL NUMBER OF ELEMENTS IN ARRAY = N * SA 84 | C 85 | INTEGER SA, H 86 | C 87 | C/6S 88 | C IF ( N .LT. 0 ) 89 | C 1 CALL SETERR( 27HSRTXXX - ILLEGAL VALUE OF N, 27, 1, 2 ) 90 | C IF ( SA .LE. 0 ) 91 | C 1 CALL SETERR( 28HSRTXXX - ILLEGAL VALUE OF SA, 28, 2, 2 ) 92 | C/7S 93 | C IF ( N .LT. 0 ) 94 | C 1 CALL SETERR( 'SRTXXX - ILLEGAL VALUE OF N', 27, 1, 2 ) 95 | C IF ( SA .LE. 0 ) 96 | C 1 CALL SETERR( 'SRTXXX - ILLEGAL VALUE OF SA', 28, 2, 2 ) 97 | C/ 98 | C 99 | C CHECK IF SORTING IS NECESSARY 100 | C 101 | H = 0 102 | I0SRT = N * SA 103 | IF ( N .LE. 1 ) RETURN 104 | C 105 | C CALCULATE H USING H NEW = 3 * H OLD + SA 106 | C 107 | H = 4 * SA 108 | C 109 | 10 H = 3 * H + SA 110 | IF ( H .LT. I0SRT ) GOTO 10 111 | C 112 | H = ( H - 4 * SA ) / 9 113 | C 114 | RETURN 115 | C 116 | END 117 | 118 | -------------------------------------------------------------------------------- /tests/panel.R: -------------------------------------------------------------------------------- 1 | # Example rqss vs rq.fit.panel vs rq.fit.lasso 2 | # Slightly modified version of a test problem of Stefan Bache and co. 3 | require(quantreg) 4 | source("rq.fit.panel.R") 5 | set.seed(1917) 6 | 7 | tt <- 3 # time periods 8 | nn <- 3 # individuals 9 | 10 | # Generate some data: 11 | some.data <- data.frame( 12 | id=rep(1:nn, each=tt) #ids 13 | ,ic=1 #intercept 14 | ,x1=rnorm(nn*tt) #regressors 15 | ,x2=runif(nn*tt) 16 | ,alpha=rep(runif(nn)*2 ,each=tt) #fixed effects 17 | ) 18 | 19 | # response: 20 | some.data$y <- some.data$x1 + some.data$x2 + some.data$alpha + rnorm(nn*tt) 21 | 22 | lambda <- .2 23 | tau <- 0.25 24 | 25 | 26 | # Fit with rq.fit.panel 27 | fit1 <- rq.fit.panel(cbind(1, some.data$x1, some.data$x2), some.data$y 28 | ,rep(1:nn, each=tt) 29 | ,tau=tau 30 | ,w=1 31 | ,lambda=lambda) 32 | 33 | # fit with rqss (using the global debug variable from the panel function 34 | # so remember to run that too! :-) : 35 | fit2 <- rqss(y ~ ic + x1 + x2 + as.factor(id) - 1 36 | ,data=some.data 37 | ,method="lasso" 38 | ,tau=tau 39 | ,lambda=c(0, 0, 0, rep(lambda, nn))) 40 | 41 | # fit with rq.lasso 42 | fit3 <- rq(y ~ ic + x1 + x2 + as.factor(id) - 1 43 | ,data=some.data 44 | ,tau=tau 45 | ,method="lasso" 46 | ,lambda=c(0, 0, 0, rep(lambda, nn))) 47 | 48 | # Print coefficients for comaparison. 
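# comparefit() below just collects the three coefficient vectors side by side;
# it is not called by the automated test, but can be run interactively after
# sourcing this file to inspect how closely the three fits agree.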
49 | comparefit <- function() 50 | { 51 | compmat <- cbind(fit1$coef,fit2$coef, fit3$coef) 52 | colnames(compmat) <- c("rq.fit.panel", "rqss", "rq.fit.lasso") 53 | noquote(formatC(compmat, format="f", digits=8, width=12)) 54 | } 55 | 56 | cat("all(rq.fit.panel$coef==rq$coef): ", all.equal(fit1$coef,fit3$coef), "\n") 57 | cat("all(rq.fit.$coef==rqss$coef): ", all.equal(fit2$coef,fit3$coef), "\n") 58 | cat("all(rqss$coef==rq.fit.panel$coef): ", all.equal(fit2$coef,fit1$coef), "\n") 59 | 60 | -------------------------------------------------------------------------------- /tests/rq.R: -------------------------------------------------------------------------------- 1 | ## This is just from ?anova.rq (extended) 2 | library(quantreg) 3 | data(barro) 4 | fit0 <- rq(y.net ~ lgdp2 + fse2 + gedy2 , data = barro) 5 | fit1 <- rq(y.net ~ lgdp2 + fse2 + gedy2 + Iy2 + gcony2, data = barro) 6 | 7 | a01 <- anova(fit1,fit0) 8 | a01 9 | 10 | fit2 <- rq(y.net ~ lgdp2 + fse2 + gedy2 + Iy2 + gcony2, data = barro, 11 | tau = 0.75) 12 | fit3 <- rq(y.net ~ lgdp2 + fse2 + gedy2 + Iy2 + gcony2, data = barro, 13 | tau = 0.25) 14 | 15 | a123 <- anova(fit1,fit2, fit3) 16 | a.123 <- anova(fit1,fit2, fit3, joint=FALSE) 17 | a.123 18 | 19 | AE <- function(x,y) all.equal(x, y, tol = 1e-5) 20 | ## ---------- {giving a bit more digits below} 21 | stopifnot( 22 | AE(100 * unname(coef(fit0)), 23 | c(-0.74679759, 0.46539963, 0.15902838, -36.619915)) 24 | , 25 | AE(unlist(a01$table), 26 | c(ndf=2, ddf=155, Tn = 18.878717, pvalue= 4.6e-08)) 27 | , 28 | AE(100* unname(coef( fit2 )), 29 | c(13.103018, -1.4885239, -0.026452369, 30 | 0.3999839, 14.526663, -13.504643)) 31 | , 32 | AE(100* unname(coef( fit3 )), 33 | c(6.0860719, -0.88350554, 0.24596781, 34 | -14.962498, 15.592489, -15.861804)) 35 | , 36 | AE(unlist(a123$table), 37 | c(ndf = 10, ddf = 473, Tn = 1.80385526, pvalue=0.0575117558)) 38 | , 39 | AE(a.123$table[,"Tn"], 40 | c(1.0655561, 2.6398508, 0.78623238, 0.04467014, 0.065344348)) 41 | ) 42 | -------------------------------------------------------------------------------- /tests/rq.fit.panel.R: -------------------------------------------------------------------------------- 1 | rq.fit.panel <- function(X,y,s,w=c(.25,.5,.25),taus=(1:3)/4,lambda = 1){ 2 | # prototype function for panel data fitting of QR models 3 | # the matrix X is assumed to contain an intercept 4 | # the vector s is a strata indicator assumed (so far) to be a one-way layout 5 | # NB: 6 | # 0. This is an altered version from that originally posted -- the definition 7 | # of the rhs vector now incorporates a factor of 1/2 for the penalty. 8 | # 1. The value of the shrinkage parameter lambda is an open research problem in 9 | # the simplest homogneous settings it should be the ratio of the scale parameters 10 | # of the fixed effects and the idiocyncratic errors 11 | # 2. On return the coefficient vector has m*p + n elements where m is the number 12 | # quantiles being estimated, p is the number of colums of X, and n is the 13 | # number of distinct values of s. The first m*p coefficients are the 14 | # slope estimates, and the last n are the "fixed effects" 15 | # 3. Like all shrinkage (regularization) estimators, asymptotic inference is somewhat 16 | # problematic... so the bootstrap is the natural first resort. 
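# A minimal usage sketch (hypothetical objects x1, x2, y, id; X must include an
# intercept column):
#   fit <- rq.fit.panel(cbind(1, x1, x2), y, s = id,
#                       w = c(.25,.5,.25), taus = (1:3)/4, lambda = 1)
#   fit$coef   # m*p slope estimates followed by the n "fixed effects"
# See tests/panel.R for a worked comparison against rqss and rq lasso fits.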
17 | require(SparseM) 18 | require(quantreg) 19 | K <- length(w) 20 | if(K != length(taus)) 21 | stop("length of w and taus must match") 22 | X <- as.matrix(X) 23 | p <- ncol(X) 24 | n <- length(levels(as.factor(s))) 25 | N <- length(y) 26 | if(N != length(s) || N != nrow(X)) 27 | stop("dimensions of y,X,s must match") 28 | Z <- as.matrix.csr(model.matrix(~as.factor(s)-1)) 29 | Fidelity <- cbind(as(w,"matrix.diag.csr") %x% X,w %x% Z) 30 | Penalty <- cbind(as.matrix.csr(0,n,K*p),lambda*as(n,"matrix.diag.csr")) 31 | D <- rbind(Fidelity,Penalty) 32 | y <- c(w %x% y,rep(0,n)) 33 | a <- c((w*(1-taus)) %x% (t(X)%*%rep(1,N)), 34 | sum(w*(1-taus)) * (t(Z) %*% rep(1,N)) + lambda * rep(1/2,n)) 35 | rq.fit.sfn(D,y,rhs=a) 36 | } 37 | -------------------------------------------------------------------------------- /tests/run-demos.R: -------------------------------------------------------------------------------- 1 | library(quantreg) 2 | 3 | (dDIR <- system.file("demo", package = "quantreg")) 4 | set.seed(1) # since some demos randomly generate 5 | 6 | cat("Running demos from package 'quantreg' : \n\n") 7 | for(ff in list.files(dDIR, pattern="\\.R$", full.names = TRUE)) { 8 | f <- basename(ff) 9 | cat("\n", f," :\n", paste(rep.int("-", nchar(f)), collapse=''), 10 | "\n", sep='') 11 | 12 | source(ff, echo = TRUE) 13 | } 14 | 15 | -------------------------------------------------------------------------------- /vignettes/crq.pdf.asis: -------------------------------------------------------------------------------- 1 | %\VignetteIndexEntry{quantreg: crq} 2 | %\VignetteEngine{R.rsp::asis} 3 | 4 | -------------------------------------------------------------------------------- /vignettes/rq.pdf.asis: -------------------------------------------------------------------------------- 1 | %\VignetteIndexEntry{quantreg: rq} 2 | %\VignetteEngine{R.rsp::asis} 3 | 4 | --------------------------------------------------------------------------------