├── codecov.yml ├── tests ├── testthat.R └── testthat │ ├── z.ash.test │ ├── error_dat.Rds │ ├── test_pois_data.Rds │ ├── test_one_obs.R │ ├── test_prune.R │ ├── test_unitprior.R │ ├── test_moments.R │ ├── test_missing.R │ ├── test_df.R │ ├── helper_functions.R │ ├── test_mode.R │ ├── test_optmethod.R │ ├── test_output.R │ ├── test_+uniform.R │ ├── testCI.R │ ├── test_w_mixEM.R │ ├── test_old_vs_new.R │ ├── test_postsample.R │ ├── test_truncgen.R │ ├── test_lik.R │ ├── test_logF.R │ ├── test_log_comp_dens.R │ ├── test_prior.R │ ├── test_myetruncnorm.R │ ├── test_loglik.R │ ├── test_trunct.R │ ├── test_tnormal.R │ ├── test_binom.R │ └── test_pois.R ├── .gitignore ├── .Rbuildignore ├── man ├── ncomp.Rd ├── postmean.Rd ├── postsd.Rd ├── prune.Rd ├── ncomp.default.Rd ├── postmean2.Rd ├── comp_sd.normalmix.Rd ├── loglik_conv.Rd ├── comp_mean.normalmix.Rd ├── dens.Rd ├── comp_postmean2.Rd ├── comp_postmean.Rd ├── dens_conv.Rd ├── calc_loglik.Rd ├── lik_normal.Rd ├── calc_null_vloglik.Rd ├── loglik_conv.default.Rd ├── my_etrunclogf.Rd ├── calc_logLR.Rd ├── cdf_conv.Rd ├── comp_mean.tnormalmix.Rd ├── comp_sd.tnormalmix.Rd ├── get_density.Rd ├── calc_null_loglik.Rd ├── my_etrunct.Rd ├── my_e2trunct.Rd ├── my_etruncbeta.Rd ├── comp_cdf_conv.Rd ├── lik_t.Rd ├── print.ash.Rd ├── comp_cdf_conv.unimix.Rd ├── mixprop.Rd ├── my_e2truncbeta.Rd ├── mixmean2.Rd ├── mixcdf.default.Rd ├── calc_mixmean.Rd ├── calc_mixsd.Rd ├── gen_etruncFUN.Rd ├── my_etruncgamma.Rd ├── comp_mean.Rd ├── cdf.ash.Rd ├── comp_sd.Rd ├── my_e2truncgamma.Rd ├── pm_on_zero.Rd ├── comp_mean2.Rd ├── comp_dens_conv.Rd ├── log_comp_dens_conv.unimix.Rd ├── compute_lfsr.Rd ├── calc_vloglik.Rd ├── comp_dens_conv.unimix.Rd ├── cxxMixSquarem.Rd ├── comp_postprob.Rd ├── summary.ash.Rd ├── igmix.Rd ├── calc_vlogLR.Rd ├── unimix.Rd ├── lik_logF.Rd ├── comp_dens.Rd ├── normalmix.Rd ├── log_comp_dens_conv.Rd ├── mixcdf.Rd ├── comp_cdf.Rd ├── plot.ash.Rd ├── dlogf.Rd ├── post_sample.unimix.Rd ├── comp_cdf_conv.normalmix.Rd 
├── post_sample.normalmix.Rd ├── vcdf_post.Rd ├── comp_postsd.Rd ├── log_comp_dens_conv.normalmix.Rd ├── qval.from.lfdr.Rd ├── comp_dens_conv.normalmix.Rd ├── get_post_sample.Rd ├── post_sample.Rd ├── pcdf_post.Rd ├── plogf.Rd ├── comp_cdf_post.Rd ├── tnormalmix.Rd ├── cdf_post.Rd ├── set_data.Rd ├── posterior_dist.Rd ├── lik_normalmix.Rd ├── plot_diagnostic.Rd ├── ashr.Rd ├── mixSQP.Rd ├── lik_binom.Rd ├── lik_pois.Rd ├── my_vtruncnorm.Rd ├── ash_pois.Rd ├── mixIP.Rd ├── my_etruncnorm.Rd ├── mixEM.Rd ├── my_e2truncnorm.Rd ├── ashci.Rd ├── mixVBEM.Rd ├── estimate_mixprop.Rd ├── w_mixEM.Rd └── get_lfdr.Rd ├── ashr.Rproj ├── R ├── ashr-package.R ├── ash_estmode.R ├── RcppExports.R ├── trunct.R ├── truncgen.R ├── ash_pois.R ├── truncbeta.R ├── set_data.R ├── truncgamma.R ├── get_functions.R ├── process_args.R ├── igmix.R ├── logF.R ├── ashCI.R ├── output.R ├── normalmix.R ├── tnormalmix.R ├── unimix.R └── lik.R ├── .travis.yml ├── appveyor.yml ├── src ├── RcppExports.cpp └── MixSquarem.cpp ├── NEWS.md ├── DESCRIPTION ├── README.md ├── NAMESPACE └── vignettes └── adaptive_shrinkage.Rmd /codecov.yml: -------------------------------------------------------------------------------- 1 | comment: false 2 | -------------------------------------------------------------------------------- /tests/testthat.R: -------------------------------------------------------------------------------- 1 | library(testthat) 2 | library(ashr) 3 | context("ashr") 4 | test_check("ashr") 5 | -------------------------------------------------------------------------------- /tests/testthat/z.ash.test: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stephens999/ashr/HEAD/tests/testthat/z.ash.test -------------------------------------------------------------------------------- /tests/testthat/error_dat.Rds: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/stephens999/ashr/HEAD/tests/testthat/error_dat.Rds -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .Rproj.user 2 | .Rhistory 3 | .RData 4 | src/*.o 5 | src/*.so 6 | src/*.dll 7 | vignettes/*.html 8 | -------------------------------------------------------------------------------- /tests/testthat/test_pois_data.Rds: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/stephens999/ashr/HEAD/tests/testthat/test_pois_data.Rds -------------------------------------------------------------------------------- /.Rbuildignore: -------------------------------------------------------------------------------- 1 | ^\.travis\.yml$ 2 | ^.*\.Rproj$ 3 | ^\.Rproj\.user$ 4 | ^appveyor\.yml$ 5 | ^codecov\.yml$ 6 | ^README\.md$ 7 | ^LICENSE$ 8 | -------------------------------------------------------------------------------- /tests/testthat/test_one_obs.R: -------------------------------------------------------------------------------- 1 | context("ashr with 1 data sample") 2 | 3 | test_that("ash works with one observation", { 4 | expect_error(ash(1,1),NA) #tests for no error 5 | } 6 | ) 7 | -------------------------------------------------------------------------------- /man/ncomp.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{ncomp} 4 | \alias{ncomp} 5 | \title{ncomp} 6 | \usage{ 7 | ncomp(m) 8 | } 9 | \arguments{ 10 | \item{m}{a mixture of k components generated by normalmix() or 11 | unimix() or igmix()} 12 | } 13 | \description{ 14 | ncomp 15 | } 16 | -------------------------------------------------------------------------------- /tests/testthat/test_prune.R: 
-------------------------------------------------------------------------------- 1 | context("pruning mixture components in ashr") 2 | 3 | test_that("test pruning", { 4 | g= normalmix(c(0.5,0.4,0.1),c(0,0,0),c(1,2,3)) 5 | g.pruned = prune(g,0.2) 6 | expect_equal(g.pruned$mean,c(0,0)) 7 | g = unimix(c(0.5,0.4,0.1),c(0,0,0),c(1,2,3)) 8 | g.pruned = prune(g,0.2) 9 | expect_equal(g.pruned$a,c(0,0)) 10 | }) -------------------------------------------------------------------------------- /tests/testthat/test_unitprior.R: -------------------------------------------------------------------------------- 1 | context("ashr with \"unit\" prior") 2 | 3 | test_that("ash emits error if try to use prior=unit without VB", { 4 | expect_error(ash(rnorm(10),1,optmethod="mixEM",prior="unit")) 5 | }) 6 | 7 | test_that("ash emits error if try to use prior<1 without VB", { 8 | expect_error(ash(rnorm(10),1,optmethod="mixEM",nullweight=0.5)) 9 | }) 10 | -------------------------------------------------------------------------------- /man/postmean.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{postmean} 4 | \alias{postmean} 5 | \title{postmean} 6 | \usage{ 7 | postmean(m, data) 8 | } 9 | \arguments{ 10 | \item{m}{mixture distribution with k components} 11 | 12 | \item{data}{details depend on the model} 13 | } 14 | \description{ 15 | postmean 16 | } 17 | -------------------------------------------------------------------------------- /man/postsd.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{postsd} 4 | \alias{postsd} 5 | \title{postsd} 6 | \usage{ 7 | postsd(m, data) 8 | } 9 | \arguments{ 10 | \item{m}{mixture distribution with k components} 11 | 12 | \item{data}{details depend on the model} 13 | } 14 | \description{ 
15 | output posterior sd given prior mixture m and data 16 | } 17 | -------------------------------------------------------------------------------- /tests/testthat/test_moments.R: -------------------------------------------------------------------------------- 1 | context("ashr moments") 2 | 3 | test_that("postsd.default and postmean2.default don't return NaN's or negative values", { 4 | temp <- readRDS("error_dat.Rds") 5 | data = set_data(temp$betahat,temp$sebetahat,lik_normal(),alpha=1) 6 | expect_false(any(is.nan(postsd.default(m = temp$m, data)))) 7 | expect_false(any(postmean2(m = temp$m, data) < 0)) 8 | } 9 | ) 10 | -------------------------------------------------------------------------------- /man/prune.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{prune} 4 | \alias{prune} 5 | \title{prune} 6 | \usage{ 7 | prune(m, thresh = 1e-10) 8 | } 9 | \arguments{ 10 | \item{m}{What is this argument?} 11 | 12 | \item{thresh}{the threshold below which components are removed} 13 | } 14 | \description{ 15 | prunes out mixture components with low weight 16 | } 17 | -------------------------------------------------------------------------------- /tests/testthat/test_missing.R: -------------------------------------------------------------------------------- 1 | context("ashr with missing data") 2 | 3 | test_that("missing data don't change results", { 4 | set.seed(11); z = rnorm(1000,0,2); s = rgamma(1000,10,10) 5 | z2 = c(z,rnorm(1000)); s2 = c(s, rep(Inf,1000)) 6 | a = ash(z,s); a2 = ash(z2,s2) 7 | expect_equal(get_psd(a), get_psd(a2)[1:1000],tolerance=0.001) 8 | expect_equal(get_lfsr(a), get_lfsr(a2)[1:1000],tolerance=0.001) 9 | }) 10 | -------------------------------------------------------------------------------- /ashr.Rproj: -------------------------------------------------------------------------------- 1 | Version: 1.0 2 | 3 | 
RestoreWorkspace: Default 4 | SaveWorkspace: Default 5 | AlwaysSaveHistory: Default 6 | 7 | EnableCodeIndexing: Yes 8 | UseSpacesForTab: Yes 9 | NumSpacesForTab: 2 10 | Encoding: UTF-8 11 | 12 | RnwWeave: knitr 13 | LaTeX: pdfLaTeX 14 | 15 | BuildType: Package 16 | PackageUseDevtools: Yes 17 | PackageInstallArgs: --no-multiarch --with-keep.source 18 | PackageRoxygenize: rd,collate,namespace 19 | -------------------------------------------------------------------------------- /man/ncomp.default.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{ncomp.default} 4 | \alias{ncomp.default} 5 | \title{ncomp.default} 6 | \usage{ 7 | \method{ncomp}{default}(m) 8 | } 9 | \arguments{ 10 | \item{m}{a mixture of k components generated by normalmix() or 11 | unimix() or igmix()} 12 | } 13 | \description{ 14 | The default version of \code{\link{ncomp}}. 15 | } 16 | -------------------------------------------------------------------------------- /man/postmean2.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{postmean2} 4 | \alias{postmean2} 5 | \title{postmean2} 6 | \usage{ 7 | postmean2(m, data) 8 | } 9 | \arguments{ 10 | \item{m}{mixture distribution with k components} 11 | 12 | \item{data}{details depend on the model} 13 | } 14 | \description{ 15 | output posterior mean-squared value given prior 16 | mixture m and data 17 | } 18 | -------------------------------------------------------------------------------- /R/ashr-package.R: -------------------------------------------------------------------------------- 1 | #' @title ashr 2 | #' @description The main function in the ashr package is \code{\link{ash}}, which should be examined for more details. 
For simplicity only the most commonly-used options are documented under \code{\link{ash}}. For expert or interested users the documentation for function \code{\link{ash.workhorse}} provides documentation on all implemented options. 3 | #' @name ashr 4 | #' @docType package 5 | #' @aliases ashr-package 6 | NULL 7 | -------------------------------------------------------------------------------- /man/comp_sd.normalmix.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/normalmix.R 3 | \name{comp_sd.normalmix} 4 | \alias{comp_sd.normalmix} 5 | \title{comp_sd.normalmix} 6 | \usage{ 7 | \method{comp_sd}{normalmix}(m) 8 | } 9 | \arguments{ 10 | \item{m}{a normal mixture distribution with k components} 11 | } 12 | \value{ 13 | a vector of length k 14 | } 15 | \description{ 16 | returns sds of the normal mixture 17 | } 18 | -------------------------------------------------------------------------------- /man/loglik_conv.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{loglik_conv} 4 | \alias{loglik_conv} 5 | \title{loglik_conv} 6 | \usage{ 7 | loglik_conv(m, data) 8 | } 9 | \arguments{ 10 | \item{m}{mixture distribution with k components} 11 | 12 | \item{data}{details depend on the model} 13 | } 14 | \description{ 15 | find log likelihood of data using convolution of mixture with error distribution 16 | } 17 | -------------------------------------------------------------------------------- /man/comp_mean.normalmix.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/normalmix.R 3 | \name{comp_mean.normalmix} 4 | \alias{comp_mean.normalmix} 5 | \title{comp_mean.normalmix} 6 | \usage{ 7 | 
\method{comp_mean}{normalmix}(m) 8 | } 9 | \arguments{ 10 | \item{m}{a normal mixture distribution with k components} 11 | } 12 | \value{ 13 | a vector of length k 14 | } 15 | \description{ 16 | returns mean of the normal mixture 17 | } 18 | -------------------------------------------------------------------------------- /man/dens.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{dens} 4 | \alias{dens} 5 | \title{Find density at y, a generic function} 6 | \usage{ 7 | dens(x, y) 8 | } 9 | \arguments{ 10 | \item{x}{A mixture of k components generated by 11 | \code{\link{normalmix}} or \code{\link{unimix}}.} 12 | 13 | \item{y}{An n-vector of the location.} 14 | } 15 | \description{ 16 | Find density at y, a generic function 17 | } 18 | -------------------------------------------------------------------------------- /man/comp_postmean2.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{comp_postmean2} 4 | \alias{comp_postmean2} 5 | \title{comp_postmean2} 6 | \usage{ 7 | comp_postmean2(m, data) 8 | } 9 | \arguments{ 10 | \item{m}{mixture distribution with k components} 11 | 12 | \item{data}{details depend on the model} 13 | } 14 | \description{ 15 | output posterior mean-squared value given prior 16 | mixture m and data 17 | } 18 | -------------------------------------------------------------------------------- /man/comp_postmean.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{comp_postmean} 4 | \alias{comp_postmean} 5 | \title{comp_postmean} 6 | \usage{ 7 | comp_postmean(m, data) 8 | } 9 | \arguments{ 10 | \item{m}{mixture distribution with k components} 11 | 12 | 
\item{data}{details depend on the model} 13 | } 14 | \description{ 15 | output posterior mean for beta for each component of 16 | prior mixture m,given data 17 | } 18 | -------------------------------------------------------------------------------- /man/dens_conv.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{dens_conv} 4 | \alias{dens_conv} 5 | \title{dens_conv} 6 | \usage{ 7 | dens_conv(m, data) 8 | } 9 | \arguments{ 10 | \item{m}{mixture distribution with k components} 11 | 12 | \item{data}{details depend on the model} 13 | } 14 | \description{ 15 | compute density of mixture m convoluted with normal of 16 | sd (s) or student t with df v at locations x 17 | } 18 | -------------------------------------------------------------------------------- /man/calc_loglik.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ashutility.R 3 | \name{calc_loglik} 4 | \alias{calc_loglik} 5 | \title{Compute loglikelihood for data from ash fit} 6 | \usage{ 7 | calc_loglik(g, data) 8 | } 9 | \arguments{ 10 | \item{g}{the fitted g, or an ash object containing g} 11 | 12 | \item{data}{a data object, see set_data} 13 | } 14 | \description{ 15 | Return the log-likelihood of the data for a given g() prior 16 | } 17 | -------------------------------------------------------------------------------- /man/lik_normal.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/lik.R 3 | \name{lik_normal} 4 | \alias{lik_normal} 5 | \title{Likelihood object for normal error distribution} 6 | \usage{ 7 | lik_normal() 8 | } 9 | \description{ 10 | Creates a likelihood object for ash for use with normal error distribution 11 | } 12 | 
\examples{ 13 | z = rnorm(100) + rnorm(100) # simulate some data with normal error 14 | ash(z,1,lik=lik_normal()) 15 | } 16 | -------------------------------------------------------------------------------- /man/calc_null_vloglik.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ashutility.R 3 | \name{calc_null_vloglik} 4 | \alias{calc_null_vloglik} 5 | \title{Compute vector of loglikelihood for data under null that all 6 | beta are 0} 7 | \usage{ 8 | calc_null_vloglik(data) 9 | } 10 | \arguments{ 11 | \item{data}{a data object; see set_data} 12 | } 13 | \description{ 14 | Return the vector of log-likelihoods of the data points under the null 15 | } 16 | -------------------------------------------------------------------------------- /man/loglik_conv.default.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{loglik_conv.default} 4 | \alias{loglik_conv.default} 5 | \title{loglik_conv.default} 6 | \usage{ 7 | \method{loglik_conv}{default}(m, data) 8 | } 9 | \arguments{ 10 | \item{m}{mixture distribution with k components} 11 | 12 | \item{data}{data whose details depend on model} 13 | } 14 | \description{ 15 | The default version of \code{\link{loglik_conv}}. 
16 | } 17 | -------------------------------------------------------------------------------- /man/my_etrunclogf.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/logF.R 3 | \name{my_etrunclogf} 4 | \alias{my_etrunclogf} 5 | \title{my_etrunclogf} 6 | \usage{ 7 | my_etrunclogf(a, b, df1, df2) 8 | } 9 | \arguments{ 10 | \item{a}{Left limit of distribution.} 11 | 12 | \item{b}{Right limit of distribution.} 13 | 14 | \item{df1, df2}{degrees of freedom} 15 | } 16 | \description{ 17 | Compute expectation of truncated log-F distribution. 18 | } 19 | -------------------------------------------------------------------------------- /man/calc_logLR.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ashutility.R 3 | \name{calc_logLR} 4 | \alias{calc_logLR} 5 | \title{Compute loglikelihood ratio for data from ash fit} 6 | \usage{ 7 | calc_logLR(g, data) 8 | } 9 | \arguments{ 10 | \item{g}{the fitted g, or an ash object containing g} 11 | 12 | \item{data}{a data object, see set_data} 13 | } 14 | \description{ 15 | Return the log-likelihood ratio of the data for a given g() prior 16 | } 17 | -------------------------------------------------------------------------------- /man/cdf_conv.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{cdf_conv} 4 | \alias{cdf_conv} 5 | \title{cdf_conv} 6 | \usage{ 7 | cdf_conv(m, data) 8 | } 9 | \arguments{ 10 | \item{m}{mixture distribution with k components} 11 | 12 | \item{data}{details depend on the model} 13 | } 14 | \description{ 15 | compute cdf of mixture m convoluted with error distribution 16 | either normal of sd (s) or student t with df v at locations x 17 | } 18 | 
-------------------------------------------------------------------------------- /man/comp_mean.tnormalmix.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/tnormalmix.R 3 | \name{comp_mean.tnormalmix} 4 | \alias{comp_mean.tnormalmix} 5 | \title{comp_mean.tnormalmix} 6 | \usage{ 7 | \method{comp_mean}{tnormalmix}(m) 8 | } 9 | \arguments{ 10 | \item{m}{A truncated normal mixture distribution with k components.} 11 | } 12 | \value{ 13 | A vector of length k. 14 | } 15 | \description{ 16 | Returns mean of the truncated-normal mixture. 17 | } 18 | -------------------------------------------------------------------------------- /man/comp_sd.tnormalmix.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/tnormalmix.R 3 | \name{comp_sd.tnormalmix} 4 | \alias{comp_sd.tnormalmix} 5 | \title{comp_sd.normalmix} 6 | \usage{ 7 | \method{comp_sd}{tnormalmix}(m) 8 | } 9 | \arguments{ 10 | \item{m}{A truncated normal mixture distribution with k components.} 11 | } 12 | \value{ 13 | A vector of length k. 14 | } 15 | \description{ 16 | Returns standard deviations of the truncated normal mixture. 
17 | } 18 | -------------------------------------------------------------------------------- /man/get_density.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ashutility.R 3 | \name{get_density} 4 | \alias{get_density} 5 | \title{Density method for ash object} 6 | \usage{ 7 | get_density(a, x) 8 | } 9 | \arguments{ 10 | \item{a}{the fitted ash object} 11 | 12 | \item{x}{the vector of locations at which density is to be computed} 13 | } 14 | \description{ 15 | Return the density of the underlying fitted distribution 16 | } 17 | \details{ 18 | None 19 | } 20 | -------------------------------------------------------------------------------- /R/ash_estmode.R: -------------------------------------------------------------------------------- 1 | # a wrapper function that estimates the mode, using optimize 2 | # called by ash if mode="estimate" 3 | ash.estmode = function(betahat, modemin, modemax, ...){ 4 | opt.fn = function(c) { 5 | return(-ash(betahat = betahat, mode = c, outputlevel = "loglik", ...)$loglik) 6 | } 7 | opt = stats::optimize(opt.fn, 8 | interval = c(modemin, modemax), 9 | tol = abs(modemax - modemin) * .Machine$double.eps^0.25) 10 | return(opt$minimum) 11 | } 12 | -------------------------------------------------------------------------------- /man/calc_null_loglik.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ashutility.R 3 | \name{calc_null_loglik} 4 | \alias{calc_null_loglik} 5 | \title{Compute loglikelihood for data under null that all beta are 0} 6 | \usage{ 7 | calc_null_loglik(data) 8 | } 9 | \arguments{ 10 | \item{data}{a data object; see set_data} 11 | } 12 | \description{ 13 | Return the log-likelihood of the data betahat, with 14 | standard errors betahatsd, under the null that beta==0 15 | } 16 | 
-------------------------------------------------------------------------------- /man/my_etrunct.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/trunct.R 3 | \name{my_etrunct} 4 | \alias{my_etrunct} 5 | \title{my_etrunct} 6 | \usage{ 7 | my_etrunct(a, b, df) 8 | } 9 | \arguments{ 10 | \item{a}{left limit of distribution} 11 | 12 | \item{b}{right limit of distribution} 13 | 14 | \item{df}{degree of freedom of error distribution} 15 | } 16 | \description{ 17 | Compute second moment of the truncated t. Uses results from O'Hagan, Biometrika, 1973 18 | } 19 | -------------------------------------------------------------------------------- /man/my_e2trunct.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/trunct.R 3 | \name{my_e2trunct} 4 | \alias{my_e2trunct} 5 | \title{my_e2trunct} 6 | \usage{ 7 | my_e2trunct(a, b, df) 8 | } 9 | \arguments{ 10 | \item{a}{left limit of distribution} 11 | 12 | \item{b}{right limit of distribution} 13 | 14 | \item{df}{degree of freedom of error distribution} 15 | } 16 | \description{ 17 | Compute second moment of the truncated t. 
Uses results from O'Hagan, Biometrika, 1973 18 | } 19 | -------------------------------------------------------------------------------- /man/my_etruncbeta.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/truncbeta.R 3 | \name{my_etruncbeta} 4 | \alias{my_etruncbeta} 5 | \title{mean of truncated Beta distribution} 6 | \usage{ 7 | my_etruncbeta(a, b, alpha, beta) 8 | } 9 | \arguments{ 10 | \item{a}{left limit of distribution} 11 | 12 | \item{b}{right limit of distribution} 13 | 14 | \item{alpha, beta}{shape parameters of Beta distribution} 15 | } 16 | \description{ 17 | Compute mean of the truncated Beta. 18 | } 19 | -------------------------------------------------------------------------------- /man/comp_cdf_conv.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{comp_cdf_conv} 4 | \alias{comp_cdf_conv} 5 | \title{comp_cdf_conv} 6 | \usage{ 7 | comp_cdf_conv(m, data) 8 | } 9 | \arguments{ 10 | \item{m}{mixture distribution with k components} 11 | 12 | \item{data}{details depend on the model} 13 | } 14 | \value{ 15 | a k by n matrix of cdfs 16 | } 17 | \description{ 18 | compute the cdf of data for each component of mixture when convolved with error distribution 19 | } 20 | -------------------------------------------------------------------------------- /man/lik_t.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/lik.R 3 | \name{lik_t} 4 | \alias{lik_t} 5 | \title{Likelihood object for t error distribution} 6 | \usage{ 7 | lik_t(df) 8 | } 9 | \arguments{ 10 | \item{df}{degree of freedom parameter of t distribution} 11 | } 12 | \description{ 13 | Creates a likelihood object for ash for use with 
t error distribution 14 | } 15 | \examples{ 16 | z = rnorm(100) + rt(100,df=4) # simulate some data with t error 17 | ash(z,1,lik=lik_t(df=4)) 18 | } 19 | -------------------------------------------------------------------------------- /man/print.ash.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ashutility.R 3 | \name{print.ash} 4 | \alias{print.ash} 5 | \title{Print method for ash object} 6 | \usage{ 7 | \method{print}{ash}(x, ...) 8 | } 9 | \arguments{ 10 | \item{x}{the fitted ash object} 11 | 12 | \item{...}{not used, included for consistency as an S3 13 | generic/method.} 14 | } 15 | \description{ 16 | Print the fitted distribution of beta values in the EB 17 | hierarchical model 18 | } 19 | \details{ 20 | None 21 | } 22 | -------------------------------------------------------------------------------- /man/comp_cdf_conv.unimix.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/unimix.R 3 | \name{comp_cdf_conv.unimix} 4 | \alias{comp_cdf_conv.unimix} 5 | \title{cdf of convolution of each component of a unif mixture} 6 | \usage{ 7 | \method{comp_cdf_conv}{unimix}(m, data) 8 | } 9 | \arguments{ 10 | \item{m}{a mixture of class unimix} 11 | 12 | \item{data, }{see set_data()} 13 | } 14 | \value{ 15 | a k by n matrix 16 | } 17 | \description{ 18 | cdf of convolution of each component of a unif mixture 19 | } 20 | -------------------------------------------------------------------------------- /man/mixprop.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{mixprop} 4 | \alias{mixprop} 5 | \title{Generic function of extracting the mixture proportions} 6 | \usage{ 7 | mixprop(m) 8 | } 9 | 
\arguments{ 10 | \item{m}{a mixture of k components generated by normalmix() or 11 | unimix() or igmix()} 12 | } 13 | \value{ 14 | it returns a vector of component probabilities, summing up 15 | to 1. 16 | } 17 | \description{ 18 | Generic function of extracting the mixture proportions 19 | } 20 | -------------------------------------------------------------------------------- /man/my_e2truncbeta.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/truncbeta.R 3 | \name{my_e2truncbeta} 4 | \alias{my_e2truncbeta} 5 | \title{second moment of truncated Beta distribution} 6 | \usage{ 7 | my_e2truncbeta(a, b, alpha, beta) 8 | } 9 | \arguments{ 10 | \item{a}{left limit of distribution} 11 | 12 | \item{b}{right limit of distribution} 13 | 14 | \item{alpha, beta}{shape parameters of Beta distribution} 15 | } 16 | \description{ 17 | Compute second moment of the truncated Beta. 18 | } 19 | -------------------------------------------------------------------------------- /man/mixmean2.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{mixmean2} 4 | \alias{mixmean2} 5 | \title{Generic function of calculating the overall second moment of the 6 | mixture} 7 | \usage{ 8 | mixmean2(m) 9 | } 10 | \arguments{ 11 | \item{m}{a mixture of k components generated by normalmix() or 12 | unimix() or igmix()} 13 | } 14 | \value{ 15 | it returns scalar 16 | } 17 | \description{ 18 | Generic function of calculating the overall second moment of the 19 | mixture 20 | } 21 | -------------------------------------------------------------------------------- /tests/testthat/test_df.R: -------------------------------------------------------------------------------- 1 | context("ashr \"df\" argument") 2 | 3 | test_that("df switches", { 4 | betahat <- 
c(1.01636974224394, -2.05686254738995, -0.7135781676358, 5 | -1.16906745227838, -0.917039991627176) 6 | 7 | sebetahat <- c(1.02572223086898, 0.499285201440522, 0.476520330150983, 8 | 0.624576594477857, 0.198152636610839) 9 | 10 | aout <- ash.workhorse(betahat = betahat[1:5], sebetahat = sebetahat[1:5], df = Inf) 11 | expect_true(all(!is.nan(aout$result$PosteriorMean))) 12 | } 13 | ) 14 | -------------------------------------------------------------------------------- /tests/testthat/helper_functions.R: -------------------------------------------------------------------------------- 1 | # The Rmosek package on CRAN will not work with REBayes. This function 2 | # is used for some of the tests to check whether the correct Rmosek 3 | # package (the one downloaded from mosek.com) is installed. 4 | # 5 | #' @importFrom testthat skip_if_not_installed 6 | #' @importFrom testthat skip_if 7 | #' 8 | skip_if_mixkwdual_doesnt_work <- function() { 9 | skip_if_not_installed("REBayes") 10 | skip_if_not_installed("Rmosek") 11 | skip_if(!is.element("mosek_lptoprob",getNamespaceExports("Rmosek"))) 12 | } 13 | -------------------------------------------------------------------------------- /man/mixcdf.default.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{mixcdf.default} 4 | \alias{mixcdf.default} 5 | \title{mixcdf.default} 6 | \usage{ 7 | \method{mixcdf}{default}(x, y, lower.tail = TRUE) 8 | } 9 | \arguments{ 10 | \item{x}{a mixture (eg of type normalmix or unimix)} 11 | 12 | \item{y}{locations at which cdf to be computed} 13 | 14 | \item{lower.tail}{boolean indicating whether to report lower tail} 15 | } 16 | \description{ 17 | The default version of \code{\link{mixcdf}}. 
18 | } 19 | -------------------------------------------------------------------------------- /man/calc_mixmean.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{calc_mixmean} 4 | \alias{calc_mixmean} 5 | \title{Generic function of calculating the overall mean of the mixture} 6 | \usage{ 7 | calc_mixmean(m) 8 | } 9 | \arguments{ 10 | \item{m}{a mixture of k components generated by normalmix() or 11 | unimix() or igmix()} 12 | } 13 | \value{ 14 | it returns scalar, the mean of the mixture distribution. 15 | } 16 | \description{ 17 | Generic function of calculating the overall mean of the mixture 18 | } 19 | -------------------------------------------------------------------------------- /man/calc_mixsd.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{calc_mixsd} 4 | \alias{calc_mixsd} 5 | \title{Generic function of calculating the overall standard deviation of 6 | the mixture} 7 | \usage{ 8 | calc_mixsd(m) 9 | } 10 | \arguments{ 11 | \item{m}{a mixture of k components generated by normalmix() or 12 | unimix() or igmix()} 13 | } 14 | \value{ 15 | it returns scalar 16 | } 17 | \description{ 18 | Generic function of calculating the overall standard deviation of 19 | the mixture 20 | } 21 | -------------------------------------------------------------------------------- /man/gen_etruncFUN.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/truncgen.R 3 | \name{gen_etruncFUN} 4 | \alias{gen_etruncFUN} 5 | \title{gen_etruncFUN} 6 | \usage{ 7 | gen_etruncFUN(lcdfFUN, lpdfFUN) 8 | } 9 | \arguments{ 10 | \item{lcdfFUN}{the log cdfFUN of the error distribution} 11 | 12 | \item{lpdfFUN}{the 
log pdfFUN of the error distribution} 13 | } 14 | \description{ 15 | Produce function to compute expectation of truncated 16 | error distribution from log cdf and log pdf (using numerical integration) 17 | } 18 | -------------------------------------------------------------------------------- /man/my_etruncgamma.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/truncgamma.R 3 | \name{my_etruncgamma} 4 | \alias{my_etruncgamma} 5 | \title{mean of truncated gamma distribution} 6 | \usage{ 7 | my_etruncgamma(a, b, shape, rate) 8 | } 9 | \arguments{ 10 | \item{a}{left limit of distribution} 11 | 12 | \item{b}{right limit of distribution} 13 | 14 | \item{shape}{shape of gamma distribution} 15 | 16 | \item{rate}{rate of gamma distribution} 17 | } 18 | \description{ 19 | Compute mean of the truncated gamma. 20 | } 21 | -------------------------------------------------------------------------------- /man/comp_mean.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{comp_mean} 4 | \alias{comp_mean} 5 | \title{Generic function of calculating the first moment of components of 6 | the mixture} 7 | \usage{ 8 | comp_mean(m) 9 | } 10 | \arguments{ 11 | \item{m}{a mixture of k components generated by normalmix() or 12 | unimix() or igmix()} 13 | } 14 | \value{ 15 | it returns a vector of means. 
16 | } 17 | \description{ 18 | Generic function of calculating the first moment of components of 19 | the mixture 20 | } 21 | -------------------------------------------------------------------------------- /tests/testthat/test_mode.R: -------------------------------------------------------------------------------- 1 | context("ashr mode") 2 | 3 | test_that("ash emits error if try to use mode and g", { 4 | expect_error(ash(rnorm(10),1,g=unimix(1,0,0),mode=0)) 5 | }) 6 | 7 | test_that("pm_on_zero gives expected results", { 8 | expect_equal(pm_on_zero(unimix(c(0.5,0.5),c(0,1),c(0,1))),c(TRUE,FALSE)) 9 | }) 10 | 11 | 12 | test_that("ash mode 1 gives same results as mode 0 but shifted by 1", { 13 | set.seed(1) 14 | z = rnorm(100,0,2) 15 | z.ash = ash(z,1) 16 | z.ash1 = ash(z+1,1,mode=1) 17 | expect_equal(get_pm(z.ash),get_pm(z.ash1)-1) 18 | }) 19 | -------------------------------------------------------------------------------- /man/cdf.ash.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ashutility.R 3 | \name{cdf.ash} 4 | \alias{cdf.ash} 5 | \title{cdf method for ash object} 6 | \usage{ 7 | cdf.ash(a, x, lower.tail = TRUE) 8 | } 9 | \arguments{ 10 | \item{a}{the fitted ash object} 11 | 12 | \item{x}{the vector of locations at which cdf is to be computed} 13 | 14 | \item{lower.tail}{(default=TRUE) whether to compute the lower or upper tail} 15 | } 16 | \description{ 17 | Computed the cdf of the underlying fitted distribution 18 | } 19 | \details{ 20 | None 21 | } 22 | -------------------------------------------------------------------------------- /man/comp_sd.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{comp_sd} 4 | \alias{comp_sd} 5 | \title{Generic function to extract the standard deviations of 6 | 
components of the mixture} 7 | \usage{ 8 | comp_sd(m) 9 | } 10 | \arguments{ 11 | \item{m}{a mixture of k components generated by normalmix() or 12 | unimix() or igmix()} 13 | } 14 | \value{ 15 | it returns a vector of standard deviations 16 | } 17 | \description{ 18 | Generic function to extract the standard deviations of 19 | components of the mixture 20 | } 21 | -------------------------------------------------------------------------------- /man/my_e2truncgamma.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/truncgamma.R 3 | \name{my_e2truncgamma} 4 | \alias{my_e2truncgamma} 5 | \title{second moment of truncated gamma distribution} 6 | \usage{ 7 | my_e2truncgamma(a, b, shape, rate) 8 | } 9 | \arguments{ 10 | \item{a}{left limit of distribution} 11 | 12 | \item{b}{right limit of distribution} 13 | 14 | \item{shape}{shape of gamma distribution} 15 | 16 | \item{rate}{rate of gamma distribution} 17 | } 18 | \description{ 19 | Compute second moment of the truncated gamma. 
20 | } 21 | -------------------------------------------------------------------------------- /man/pm_on_zero.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{pm_on_zero} 4 | \alias{pm_on_zero} 5 | \title{Generic function to extract which components of mixture are point mass on 0} 6 | \usage{ 7 | pm_on_zero(m) 8 | } 9 | \arguments{ 10 | \item{m}{a mixture of k components generated by normalmix() or unimix() or igmix()} 11 | } 12 | \value{ 13 | a boolean vector indicating which components are point mass on 0 14 | } 15 | \description{ 16 | Generic function to extract which components of mixture are point mass on 0 17 | } 18 | -------------------------------------------------------------------------------- /man/comp_mean2.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{comp_mean2} 4 | \alias{comp_mean2} 5 | \title{Generic function of calculating the second moment of components of 6 | the mixture} 7 | \usage{ 8 | comp_mean2(m) 9 | } 10 | \arguments{ 11 | \item{m}{a mixture of k components generated by normalmix() or 12 | unimix() or igmix()} 13 | } 14 | \value{ 15 | it returns a vector of second moments. 16 | } 17 | \description{ 18 | Generic function of calculating the second moment of components of 19 | the mixture 20 | } 21 | -------------------------------------------------------------------------------- /man/comp_dens_conv.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{comp_dens_conv} 4 | \alias{comp_dens_conv} 5 | \title{comp_dens_conv} 6 | \usage{ 7 | comp_dens_conv(m, data, ...) 
8 | } 9 | \arguments{ 10 | \item{m}{mixture distribution with k components} 11 | 12 | \item{data}{details depend on the model} 13 | 14 | \item{\dots}{other arguments} 15 | } 16 | \value{ 17 | a k by n matrix of densities 18 | } 19 | \description{ 20 | compute the density of data for each component of mixture when convolved with error distribution 21 | } 22 | -------------------------------------------------------------------------------- /man/log_comp_dens_conv.unimix.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/unimix.R 3 | \name{log_comp_dens_conv.unimix} 4 | \alias{log_comp_dens_conv.unimix} 5 | \title{log density of convolution of each component of a unif mixture} 6 | \usage{ 7 | \method{log_comp_dens_conv}{unimix}(m, data) 8 | } 9 | \arguments{ 10 | \item{m}{a mixture of class unimix} 11 | 12 | \item{data}{see set_data()} 13 | } 14 | \value{ 15 | a k by n matrix of densities 16 | } 17 | \description{ 18 | log density of convolution of each component of a unif mixture 19 | } 20 | -------------------------------------------------------------------------------- /tests/testthat/test_optmethod.R: -------------------------------------------------------------------------------- 1 | context("ashr optimization algorithms") 2 | 3 | test_that("control is passed to optmethod correctly when method is mixIP", { 4 | skip_on_cran() 5 | skip_if_mixkwdual_doesnt_work() 6 | set.seed(1) 7 | z <- rnorm(10,0,2) 8 | z.ash <- ash(z,1,optmethod = "mixIP",control = list(rtol=1e-1), 9 | outputlevel = 3) 10 | expect_true(z.ash$fit_details$optreturn$control$rtol == 1e-1) 11 | 12 | z.ash <- ash(z,1,optmethod = "mixIP",outputlevel = 3) 13 | expect_true(z.ash$fit_details$optreturn$control$rtol == 1e-6) 14 | }) 15 | -------------------------------------------------------------------------------- /man/compute_lfsr.Rd: 
-------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ash.R 3 | \name{compute_lfsr} 4 | \alias{compute_lfsr} 5 | \title{Function to compute the local false sign rate} 6 | \usage{ 7 | compute_lfsr(NegativeProb, ZeroProb) 8 | } 9 | \arguments{ 10 | \item{NegativeProb}{A vector of posterior probability that beta is 11 | negative.} 12 | 13 | \item{ZeroProb}{A vector of posterior probability that beta is 14 | zero.} 15 | } 16 | \value{ 17 | The local false sign rate. 18 | } 19 | \description{ 20 | Function to compute the local false sign rate 21 | } 22 | -------------------------------------------------------------------------------- /man/calc_vloglik.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ashutility.R 3 | \name{calc_vloglik} 4 | \alias{calc_vloglik} 5 | \title{Compute vector of loglikelihood for data from ash fit} 6 | \usage{ 7 | calc_vloglik(g, data) 8 | } 9 | \arguments{ 10 | \item{g}{the fitted g, or an ash object containing g} 11 | 12 | \item{data}{a data object, see set_data} 13 | } 14 | \description{ 15 | Return the vector of log-likelihoods of the data 16 | betahat, with standard errors betahatsd, for a given g() prior 17 | on beta, or an ash object containing that 18 | } 19 | -------------------------------------------------------------------------------- /man/comp_dens_conv.unimix.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/unimix.R 3 | \name{comp_dens_conv.unimix} 4 | \alias{comp_dens_conv.unimix} 5 | \title{density of convolution of each component of a unif mixture} 6 | \usage{ 7 | \method{comp_dens_conv}{unimix}(m, data, ...) 
8 | } 9 | \arguments{ 10 | \item{m}{a mixture of class unimix} 11 | 12 | \item{data}{see set_data()} 13 | 14 | \item{\dots}{other arguments (unused)} 15 | } 16 | \value{ 17 | a k by n matrix 18 | } 19 | \description{ 20 | density of convolution of each component of a unif mixture 21 | } 22 | -------------------------------------------------------------------------------- /man/cxxMixSquarem.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/RcppExports.R 3 | \name{cxxMixSquarem} 4 | \alias{cxxMixSquarem} 5 | \title{Brief description of function.} 6 | \usage{ 7 | cxxMixSquarem(matrix_lik, prior, pi_init, control) 8 | } 9 | \arguments{ 10 | \item{matrix_lik}{Description of argument goes here.} 11 | 12 | \item{prior}{Description of argument goes here.} 13 | 14 | \item{pi_init}{Description of argument goes here.} 15 | 16 | \item{control}{Description of argument goes here.} 17 | } 18 | \description{ 19 | Explain here what this function does.
20 | } 21 | -------------------------------------------------------------------------------- /tests/testthat/test_output.R: -------------------------------------------------------------------------------- 1 | context("check ashr outputs") 2 | 3 | test_that("outputlevel works as expected", { 4 | set.seed(1); z=rnorm(10); z.ash=ash(z,1, outputlevel = c("fitted_g","logLR")) 5 | expect_null(z.ash$result) 6 | expect_type(z.ash$logLR,"double") 7 | }) 8 | 9 | test_that("penloglik produced correctly when fixg=TRUE and outputlevel=5", { 10 | set.seed(1); z=rnorm(10); z.ash=ash(z,1) 11 | g= get_fitted_g(z.ash) 12 | z.ash = ash(z,1,outputlevel = 5) 13 | z.ash2 = ash(z,1,fixg=TRUE,g=g,outputlevel = 5) 14 | expect_equal(z.ash$flash_data$penloglik,z.ash2$flash_data$penloglik) 15 | }) 16 | -------------------------------------------------------------------------------- /man/comp_postprob.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{comp_postprob} 4 | \alias{comp_postprob} 5 | \title{comp_postprob} 6 | \usage{ 7 | comp_postprob(m, data) 8 | } 9 | \arguments{ 10 | \item{m}{mixture distribution with k components} 11 | 12 | \item{data}{details depend on the model} 13 | } 14 | \description{ 15 | compute the posterior prob that each observation came 16 | from each component of the mixture m,output a k by n vector of 17 | probabilities computed by weighting the component densities by 18 | pi and then normalizing 19 | } 20 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: r 2 | sudo: false 3 | cache: packages 4 | latex: false 5 | 6 | branches: 7 | only: 8 | - master 9 | - cran 10 | 11 | # This is the minimal set of R packages needed to run "R CMD check" on 12 | # the package. 
13 | install: 14 | - R -e 'install.packages(c("devtools","etrunct","truncnorm","Rcpp","invgamma","SQUAREM","testthat","rmarkdown","knitr","ggplot2","mixsqp"))' 15 | 16 | env: 17 | global: 18 | - R_REMOTES_NO_ERRORS_FROM_WARNINGS: true 19 | - _R_CHECK_FORCE_SUGGESTS_: false 20 | 21 | after_success: 22 | - Rscript -e 'covr::coveralls()' 23 | - Rscript -e 'covr::codecov()' 24 | -------------------------------------------------------------------------------- /man/summary.ash.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ashutility.R 3 | \name{summary.ash} 4 | \alias{summary.ash} 5 | \title{Summary method for ash object} 6 | \usage{ 7 | \method{summary}{ash}(object, ...) 8 | } 9 | \arguments{ 10 | \item{object}{the fitted ash object} 11 | 12 | \item{...}{not used, included for consistency as an S3 13 | generic/method.} 14 | } 15 | \description{ 16 | Print summary of fitted ash object 17 | } 18 | \details{ 19 | \code{\link{summary}} prints the fitted mixture, the 20 | fitted log likelihood with 10 digits and a flag to indicate 21 | convergence 22 | } 23 | -------------------------------------------------------------------------------- /man/igmix.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/igmix.R 3 | \name{igmix} 4 | \alias{igmix} 5 | \title{Constructor for igmix class} 6 | \usage{ 7 | igmix(pi, alpha, beta) 8 | } 9 | \arguments{ 10 | \item{pi}{vector of mixture proportions} 11 | 12 | \item{alpha}{vector of shape parameters} 13 | 14 | \item{beta}{vector of rate parameters} 15 | } 16 | \value{ 17 | an object of class igmix 18 | } 19 | \description{ 20 | Creates an object of class igmix (finite mixture of 21 | univariate inverse-gammas) 22 | } 23 | \details{ 24 | None 25 | } 26 | \examples{ 27 | 
igmix(c(0.5,0.5),c(1,1),c(1,2)) 28 | 29 | } 30 | -------------------------------------------------------------------------------- /man/calc_vlogLR.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ashutility.R 3 | \name{calc_vlogLR} 4 | \alias{calc_vlogLR} 5 | \title{Compute vector of loglikelihood ratio for data from ash fit} 6 | \usage{ 7 | calc_vlogLR(g, data) 8 | } 9 | \arguments{ 10 | \item{g}{the fitted g, or an ash object containing g} 11 | 12 | \item{data}{a data object, see set_data} 13 | } 14 | \description{ 15 | Return the vector of log-likelihood ratios of the data 16 | betahat, with standard errors betahatsd, for a given g() prior 17 | on beta, or an ash object containing that, vs the null that g() 18 | is point mass on 0 19 | } 20 | -------------------------------------------------------------------------------- /man/unimix.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/unimix.R 3 | \name{unimix} 4 | \alias{unimix} 5 | \title{Constructor for unimix class} 6 | \usage{ 7 | unimix(pi, a, b) 8 | } 9 | \arguments{ 10 | \item{pi}{vector of mixture proportions} 11 | 12 | \item{a}{vector of left hand ends of uniforms} 13 | 14 | \item{b}{vector of right hand ends of uniforms} 15 | } 16 | \value{ 17 | an object of class unimix 18 | } 19 | \description{ 20 | Creates an object of class unimix (finite mixture of 21 | univariate uniforms) 22 | } 23 | \details{ 24 | None 25 | } 26 | \examples{ 27 | unimix(c(0.5,0.5),c(0,0),c(1,2)) 28 | } 29 | -------------------------------------------------------------------------------- /man/lik_logF.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/lik.R 3 | 
\name{lik_logF} 4 | \alias{lik_logF} 5 | \title{Likelihood object for logF error distribution} 6 | \usage{ 7 | lik_logF(df1, df2) 8 | } 9 | \arguments{ 10 | \item{df1}{first degree of freedom parameter of F distribution} 11 | 12 | \item{df2}{second degree of freedom parameter of F distribution} 13 | } 14 | \description{ 15 | Creates a likelihood object for ash for use with logF error distribution 16 | } 17 | \examples{ 18 | e = rnorm(100) + log(rf(100,df1=10,df2=10)) # simulate some data with log(F) error 19 | ash(e,1,lik=lik_logF(df1=10,df2=10)) 20 | } 21 | -------------------------------------------------------------------------------- /R/RcppExports.R: -------------------------------------------------------------------------------- 1 | # Generated by using Rcpp::compileAttributes() -> do not edit by hand 2 | # Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 3 | 4 | #' @title Brief description of function. 5 | #' @description Explain here what this function does. 6 | #' @param matrix_lik Description of argument goes here. 7 | #' @param prior Description of argument goes here. 8 | #' @param pi_init Description of argument goes here. 9 | #' @param control Description of argument goes here.
10 | #' @export 11 | cxxMixSquarem <- function(matrix_lik, prior, pi_init, control) { 12 | .Call('_ashr_cxxMixSquarem', PACKAGE = 'ashr', matrix_lik, prior, pi_init, control) 13 | } 14 | 15 | -------------------------------------------------------------------------------- /man/comp_dens.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{comp_dens} 4 | \alias{comp_dens} 5 | \title{Generic function of calculating the component densities of the 6 | mixture} 7 | \usage{ 8 | comp_dens(m, y, log = FALSE) 9 | } 10 | \arguments{ 11 | \item{m}{mixture of k components generated by normalmix() or 12 | unimix() or igmix()} 13 | 14 | \item{y}{is an n-vector of location} 15 | 16 | \item{log}{whether to use log-scale on densities} 17 | } 18 | \value{ 19 | A k by n matrix of densities 20 | } 21 | \description{ 22 | Generic function of calculating the component densities of the 23 | mixture 24 | } 25 | -------------------------------------------------------------------------------- /man/normalmix.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/normalmix.R 3 | \name{normalmix} 4 | \alias{normalmix} 5 | \title{Constructor for normalmix class} 6 | \usage{ 7 | normalmix(pi, mean, sd) 8 | } 9 | \arguments{ 10 | \item{pi}{vector of mixture proportions} 11 | 12 | \item{mean}{vector of means} 13 | 14 | \item{sd}{vector of standard deviations} 15 | } 16 | \value{ 17 | an object of class normalmix 18 | } 19 | \description{ 20 | Creates an object of class normalmix (finite mixture 21 | of univariate normals) 22 | } 23 | \details{ 24 | None 25 | } 26 | \examples{ 27 | normalmix(c(0.5,0.5),c(0,0),c(1,2)) 28 | 29 | } 30 | -------------------------------------------------------------------------------- /man/log_comp_dens_conv.Rd: 
-------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{log_comp_dens_conv} 4 | \alias{log_comp_dens_conv} 5 | \title{log_comp_dens_conv} 6 | \usage{ 7 | log_comp_dens_conv(m, data) 8 | } 9 | \arguments{ 10 | \item{m}{mixture distribution with k components} 11 | 12 | \item{data}{details depend on the model} 13 | } 14 | \value{ 15 | a k by n matrix of log densities 16 | } 17 | \description{ 18 | compute the log density of the components of the 19 | mixture m when convoluted with a normal with standard deviation 20 | s or a scaled (se) student.t with df v, the density is 21 | evaluated at x 22 | } 23 | -------------------------------------------------------------------------------- /man/mixcdf.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{mixcdf} 4 | \alias{mixcdf} 5 | \title{mixcdf} 6 | \usage{ 7 | mixcdf(x, y, lower.tail = TRUE) 8 | } 9 | \arguments{ 10 | \item{x}{a mixture (eg of type normalmix or unimix)} 11 | 12 | \item{y}{locations at which cdf to be computed} 13 | 14 | \item{lower.tail}{boolean indicating whether to report lower tail} 15 | } 16 | \value{ 17 | an object of class normalmix 18 | } 19 | \description{ 20 | Returns cdf for a mixture (generic function) 21 | } 22 | \details{ 23 | None 24 | } 25 | \examples{ 26 | mixcdf(normalmix(c(0.5,0.5),c(0,0),c(1,2)),seq(-4,4,length=100)) 27 | 28 | } 29 | -------------------------------------------------------------------------------- /tests/testthat/test_+uniform.R: -------------------------------------------------------------------------------- 1 | context("ashr with half-uniform mixture priors") 2 | 3 | test_that("mixcompdist=+uniform gives all non-negative values for b and zero for a", { 4 | set.seed(1); z=rnorm(10); 
z.ash=ash(z,1,mixcompdist="+uniform") 5 | k = length(z.ash$fitted_g$pi) 6 | expect_true(all(z.ash$fitted_g$b >= rep(0,k))) 7 | expect_equal(z.ash$fitted_g$a,rep(0,k)) 8 | }) 9 | 10 | test_that("mixcompdist=-uniform gives all non-positive values for a and zero for b", { 11 | set.seed(1); z=rnorm(10); z.ash=ash(z,1,mixcompdist="-uniform") 12 | k = length(z.ash$fitted_g$pi) 13 | expect_equal(z.ash$fitted_g$b,rep(0,k)) 14 | expect_true(all(z.ash$fitted_g$a <= 0)) 15 | }) 16 | -------------------------------------------------------------------------------- /man/comp_cdf.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{comp_cdf} 4 | \alias{comp_cdf} 5 | \title{Generic function of computing the cdf for each component} 6 | \usage{ 7 | comp_cdf(m, y, lower.tail = TRUE) 8 | } 9 | \arguments{ 10 | \item{m}{a mixture (eg of type normalmix or unimix)} 11 | 12 | \item{y}{locations at which cdf to be computed} 13 | 14 | \item{lower.tail}{boolean indicating whether to report lower tail} 15 | } 16 | \value{ 17 | it returns a vector of probabilities, with length equals to 18 | number of components in m 19 | } 20 | \description{ 21 | Generic function of computing the cdf for each component 22 | } 23 | -------------------------------------------------------------------------------- /man/plot.ash.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ashutility.R 3 | \name{plot.ash} 4 | \alias{plot.ash} 5 | \title{Plot method for ash object} 6 | \usage{ 7 | \method{plot}{ash}(x, ..., xmin, xmax) 8 | } 9 | \arguments{ 10 | \item{x}{the fitted ash object} 11 | 12 | \item{...}{Arguments to be passed to methods,such as graphical parameters (see \code{\link[graphics]{plot}})} 13 | 14 | \item{xmin}{xlim lower range, default is the lowest 
value of betahat} 15 | 16 | \item{xmax}{xlim upper range, default is the highest value of betahat} 17 | } 18 | \description{ 19 | Plot the cdf of the underlying fitted distribution 20 | } 21 | \details{ 22 | None 23 | } 24 | -------------------------------------------------------------------------------- /tests/testthat/testCI.R: -------------------------------------------------------------------------------- 1 | context("ashr credible intervals") 2 | 3 | test_that("CI result works for uniform prior", { 4 | set.seed(17) 5 | s = rgamma(100,10,10) 6 | z = rnorm(100,0,s+1) 7 | g = unimix(c(1),-1000,1000) #make prior very flat 8 | a = ash(z,s,g=g,fixg=TRUE) 9 | a2 = ash(z,s,df=4,g=g,fixg=TRUE) 10 | a3 = ash(z,s,alpha=1,g=g,fixg=TRUE) 11 | 12 | ci1=ashci(a,betaindex = 1:100) 13 | expect_equal(ci1[,1],z-1.96*s,tol=0.01) 14 | expect_equal(ci1[,2],z+1.96*s,tol=0.01) 15 | 16 | ci2 = ashci(a2,betaindex = 1:100) 17 | expect_equal(ci2[,1],z-qt(0.975,df=4)*s,tol=0.01) 18 | 19 | ci3 = ashci(a3,betaindex = 1:100) 20 | expect_equal(ci3[,1],z-1.96*s,tol=0.01) 21 | }) 22 | -------------------------------------------------------------------------------- /man/dlogf.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/logF.R 3 | \name{dlogf} 4 | \alias{dlogf} 5 | \title{The log-F distribution} 6 | \usage{ 7 | dlogf(x, df1, df2, ncp, log = FALSE) 8 | } 9 | \arguments{ 10 | \item{x}{vector of quantiles} 11 | 12 | \item{df1, df2}{degrees of freedom} 13 | 14 | \item{ncp}{non-centrality parameter. If omitted the central F is assumed.} 15 | 16 | \item{log}{logical; if TRUE, probabilities p are given as log(p).} 17 | } 18 | \value{ 19 | The density function. 20 | } 21 | \description{ 22 | Density function for the log-F distribution with \code{df1} and \code{df2} 23 | degrees of freedom (and optional non-centrality parameter \code{ncp}). 
24 | } 25 | -------------------------------------------------------------------------------- /man/post_sample.unimix.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/unimix.R 3 | \name{post_sample.unimix} 4 | \alias{post_sample.unimix} 5 | \title{post_sample.unimix} 6 | \usage{ 7 | \method{post_sample}{unimix}(m, data, nsamp) 8 | } 9 | \arguments{ 10 | \item{m}{mixture distribution with k components} 11 | 12 | \item{data}{a list with components x and s to be interpreted as a 13 | normally-distributed observation and its standard error} 14 | 15 | \item{nsamp}{number of samples to return for each observation} 16 | } 17 | \value{ 18 | a nsamp by n matrix 19 | } 20 | \description{ 21 | returns random samples from the posterior, given a 22 | prior distribution m and n observed datapoints. 23 | } 24 | -------------------------------------------------------------------------------- /man/comp_cdf_conv.normalmix.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/normalmix.R 3 | \name{comp_cdf_conv.normalmix} 4 | \alias{comp_cdf_conv.normalmix} 5 | \title{comp_cdf_conv.normalmix} 6 | \usage{ 7 | \method{comp_cdf_conv}{normalmix}(m, data) 8 | } 9 | \arguments{ 10 | \item{m}{mixture distribution with k components} 11 | 12 | \item{data}{a list with components x and s to be interpreted as a normally-distributed observation and its standard error} 13 | } 14 | \value{ 15 | a k by n matrix 16 | } 17 | \description{ 18 | returns cdf of convolution of each component of a 19 | normal mixture with N(0,s^2) at x. 
Note that 20 | convolution of two normals is normal, so it works that way 21 | } 22 | -------------------------------------------------------------------------------- /man/post_sample.normalmix.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/normalmix.R 3 | \name{post_sample.normalmix} 4 | \alias{post_sample.normalmix} 5 | \title{post_sample.normalmix} 6 | \usage{ 7 | \method{post_sample}{normalmix}(m, data, nsamp) 8 | } 9 | \arguments{ 10 | \item{m}{mixture distribution with k components} 11 | 12 | \item{data}{a list with components x and s to be interpreted as a 13 | normally-distributed observation and its standard error} 14 | 15 | \item{nsamp}{number of samples to return for each observation} 16 | } 17 | \value{ 18 | a nsamp by n matrix 19 | } 20 | \description{ 21 | returns random samples from the posterior, given a 22 | prior distribution m and n observed datapoints. 23 | } 24 | -------------------------------------------------------------------------------- /R/trunct.R: -------------------------------------------------------------------------------- 1 | 2 | #' @title my_e2trunct 3 | #' @description Compute second moment of the truncated t. Uses results from O'Hagan, Biometrika, 1973 4 | #' @param a left limit of distribution 5 | #' @param b right limit of distribution 6 | #' @param df degree of freedom of error distribution 7 | #' @export 8 | my_e2trunct = function(a,b,df){ 9 | etrunct::e_trunct(a,b,df,r=2) 10 | } 11 | 12 | #' @title my_etrunct 13 | #' @description Compute mean of the truncated t.
Uses results from O'Hagan, Biometrika, 1973 14 | #' 15 | #' @param a left limit of distribution 16 | #' @param b right limit of distribution 17 | #' @param df degree of freedom of error distribution 18 | #' @export 19 | my_etrunct = function(a,b,df){ 20 | etrunct::e_trunct(a,b,df,r=1) 21 | } 22 | 23 | -------------------------------------------------------------------------------- /man/vcdf_post.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{vcdf_post} 4 | \alias{vcdf_post} 5 | \title{vcdf_post} 6 | \usage{ 7 | vcdf_post(m, c, data) 8 | } 9 | \arguments{ 10 | \item{m}{mixture distribution with k components} 11 | 12 | \item{c}{a numeric vector} 13 | 14 | \item{data}{depends on context} 15 | } 16 | \value{ 17 | an n vector containing the cdf for beta_i at c 18 | } 19 | \description{ 20 | vectorized version of \code{\link{cdf_post}} 21 | } 22 | \examples{ 23 | beta = rnorm(100,0,1) 24 | betahat= beta+rnorm(100,0,1) 25 | sebetahat=rep(1,100) 26 | ash.beta = ash(betahat,1,mixcompdist="normal") 27 | c = vcdf_post(get_fitted_g(ash.beta),seq(-5,5,length=1000),data = set_data(betahat,sebetahat)) 28 | } 29 | -------------------------------------------------------------------------------- /man/comp_postsd.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{comp_postsd} 4 | \alias{comp_postsd} 5 | \title{comp_postsd} 6 | \usage{ 7 | comp_postsd(m, data) 8 | } 9 | \arguments{ 10 | \item{m}{mixture distribution with k components} 11 | 12 | \item{data}{details depend on the model} 13 | } 14 | \description{ 15 | output posterior sd for beta for each component of 16 | prior mixture m,given data 17 | } 18 | \examples{ 19 | beta = rnorm(100,0,1) 20 | betahat= beta+rnorm(100,0,1) 21 | ash.beta = 
ash(betahat,1,mixcompdist="normal") 22 | data= set_data(betahat,rep(1,100)) 23 | comp_postmean(get_fitted_g(ash.beta),data) 24 | comp_postsd(get_fitted_g(ash.beta),data) 25 | comp_postprob(get_fitted_g(ash.beta),data) 26 | } 27 | -------------------------------------------------------------------------------- /man/log_comp_dens_conv.normalmix.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/normalmix.R 3 | \name{log_comp_dens_conv.normalmix} 4 | \alias{log_comp_dens_conv.normalmix} 5 | \title{log_comp_dens_conv.normalmix} 6 | \usage{ 7 | \method{log_comp_dens_conv}{normalmix}(m, data) 8 | } 9 | \arguments{ 10 | \item{m}{mixture distribution with k components} 11 | 12 | \item{data}{a list with components x and s to be interpreted as a normally-distributed observation and its standard error} 13 | } 14 | \value{ 15 | a k by n matrix 16 | } 17 | \description{ 18 | returns log-density of convolution of each component 19 | of a normal mixture with N(0,s^2) or s*t(v) at x. 
Note that 20 | convolution of two normals is normal, so it works that way 21 | } 22 | -------------------------------------------------------------------------------- /man/qval.from.lfdr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ash.R 3 | \name{qval.from.lfdr} 4 | \alias{qval.from.lfdr} 5 | \title{Function to compute q values from local false discovery rates} 6 | \usage{ 7 | qval.from.lfdr(lfdr) 8 | } 9 | \arguments{ 10 | \item{lfdr, }{a vector of local fdr estimates} 11 | } 12 | \value{ 13 | vector of q values 14 | } 15 | \description{ 16 | Computes q values from a vector of local fdr estimates 17 | } 18 | \details{ 19 | The q value for a given lfdr is an estimate of the (tail) 20 | False Discovery Rate for all findings with a smaller lfdr, and 21 | is found by the average of the lfdr for all more significant 22 | findings. See Storey (2003), Annals of Statistics, for 23 | definition of q value. 24 | } 25 | -------------------------------------------------------------------------------- /man/comp_dens_conv.normalmix.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/normalmix.R 3 | \name{comp_dens_conv.normalmix} 4 | \alias{comp_dens_conv.normalmix} 5 | \title{comp_dens_conv.normalmix} 6 | \usage{ 7 | \method{comp_dens_conv}{normalmix}(m, data, ...) 8 | } 9 | \arguments{ 10 | \item{m}{mixture distribution with k components} 11 | 12 | \item{data}{a list with components x and s to be interpreted as a normally-distributed observation and its standard error} 13 | 14 | \item{\dots}{other arguments (unused)} 15 | } 16 | \value{ 17 | a k by n matrix 18 | } 19 | \description{ 20 | returns density of convolution of each component of a 21 | normal mixture with N(0,s^2) at x. 
Note that 22 | convolution of two normals is normal, so it works that way 23 | } 24 | -------------------------------------------------------------------------------- /man/get_post_sample.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ashutility.R 3 | \name{get_post_sample} 4 | \alias{get_post_sample} 5 | \title{Sample from posterior} 6 | \usage{ 7 | get_post_sample(a, nsamp) 8 | } 9 | \arguments{ 10 | \item{a}{the fitted ash object} 11 | 12 | \item{nsamp}{number of samples to return (for each observation)} 13 | } 14 | \description{ 15 | Returns random samples from the posterior distribution for each 16 | observation in an ash object. A matrix is returned, with columns corresponding 17 | to observations and rows corresponding to samples. 18 | } 19 | \examples{ 20 | beta = rnorm(100,0,1) 21 | betahat= beta+rnorm(100,0,1) 22 | ash.beta = ash(betahat,1,mixcompdist="normal") 23 | post.beta = get_post_sample(ash.beta,1000) 24 | } 25 | -------------------------------------------------------------------------------- /man/post_sample.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{post_sample} 4 | \alias{post_sample} 5 | \title{post_sample} 6 | \usage{ 7 | post_sample(m, data, nsamp) 8 | } 9 | \arguments{ 10 | \item{m}{prior distribution (eg of type normalmix)} 11 | 12 | \item{data}{a list with components x and s, each vectors of length n, to be interpreted as a 13 | normally-distributed observations and corresponding standard errors} 14 | 15 | \item{nsamp}{number of random samples to return for each observation} 16 | } 17 | \value{ 18 | an nsamp by n matrix 19 | } 20 | \description{ 21 | returns random samples from the posterior, given a prior distribution 22 | m and n observed datapoints. 
23 | } 24 | \details{ 25 | exported, but mostly users will want to use `get_post_sample` 26 | } 27 | -------------------------------------------------------------------------------- /man/pcdf_post.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{pcdf_post} 4 | \alias{pcdf_post} 5 | \title{pcdf_post} 6 | \usage{ 7 | pcdf_post(m, c, data) 8 | } 9 | \arguments{ 10 | \item{m}{mixture distribution with k components} 11 | 12 | \item{c}{a numeric vector with n elements} 13 | 14 | \item{data}{depends on context} 15 | } 16 | \value{ 17 | an n vector, whose ith element is the cdf for beta_i at c_i 18 | } 19 | \description{ 20 | ``parallel" vector version of \code{\link{cdf_post}} where c is a vector, of same length as betahat and sebetahat 21 | } 22 | \examples{ 23 | beta = rnorm(100,0,1) 24 | betahat= beta+rnorm(100,0,1) 25 | sebetahat=rep(1,100) 26 | ash.beta = ash(betahat,1,mixcompdist="normal") 27 | c = pcdf_post(get_fitted_g(ash.beta),beta,set_data(betahat,sebetahat)) 28 | } 29 | -------------------------------------------------------------------------------- /man/plogf.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/logF.R 3 | \name{plogf} 4 | \alias{plogf} 5 | \title{The log-F distribution} 6 | \usage{ 7 | plogf(q, df1, df2, ncp, lower.tail = TRUE, log.p = FALSE) 8 | } 9 | \arguments{ 10 | \item{q}{vector of quantiles} 11 | 12 | \item{df1, df2}{degrees of freedom} 13 | 14 | \item{ncp}{non-centrality parameter. If omitted the central F is assumed.} 15 | 16 | \item{lower.tail}{logical; if TRUE (default), probabilities are P[X <= x], otherwise, P[X > x].} 17 | 18 | \item{log.p}{logical; if TRUE, probabilities p are given as log(p).} 19 | } 20 | \value{ 21 | The distribution function. 
22 | } 23 | \description{ 24 | Distribution function for the log-F distribution with \code{df1} and \code{df2} 25 | degrees of freedom (and optional non-centrality parameter \code{ncp}). 26 | } 27 | -------------------------------------------------------------------------------- /man/comp_cdf_post.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{comp_cdf_post} 4 | \alias{comp_cdf_post} 5 | \title{comp_cdf_post} 6 | \usage{ 7 | comp_cdf_post(m, c, data) 8 | } 9 | \arguments{ 10 | \item{m}{mixture distribution with k components} 11 | 12 | \item{c}{a scalar} 13 | 14 | \item{data}{details depend on model} 15 | } 16 | \value{ 17 | a k by n matrix 18 | } 19 | \description{ 20 | evaluate cdf of posterior distribution of beta at c. m 21 | is the prior on beta, a mixture; c is location of evaluation 22 | assumption is betahat | beta ~ t_v(beta,sebetahat) 23 | } 24 | \examples{ 25 | beta = rnorm(100,0,1) 26 | betahat= beta+rnorm(100,0,1) 27 | sebetahat=rep(1,100) 28 | ash.beta = ash(betahat,1,mixcompdist="normal") 29 | comp_cdf_post(get_fitted_g(ash.beta),0,data=set_data(beta,sebetahat)) 30 | } 31 | -------------------------------------------------------------------------------- /man/tnormalmix.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/tnormalmix.R 3 | \name{tnormalmix} 4 | \alias{tnormalmix} 5 | \title{Constructor for tnormalmix class} 6 | \usage{ 7 | tnormalmix(pi, mean, sd, a, b) 8 | } 9 | \arguments{ 10 | \item{pi}{Vector of mixture proportions (length k say).} 11 | 12 | \item{mean}{Vector of means (length k).} 13 | 14 | \item{sd}{Vector of standard deviations (length k).} 15 | 16 | \item{a}{Vector of left truncation points of each component (length k).} 17 | 18 | \item{b}{Vector of right truncation points of
each component (length k).} 19 | } 20 | \value{ 21 | An object of class \dQuote{tnormalmix}. 22 | } 23 | \description{ 24 | Creates an object of class tnormalmix (finite mixture 25 | of truncated univariate normals). 26 | } 27 | \examples{ 28 | tnormalmix(c(0.5,0.5),c(0,0),c(1,2),c(-10,0),c(0,10)) 29 | 30 | } 31 | -------------------------------------------------------------------------------- /man/cdf_post.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix.R 3 | \name{cdf_post} 4 | \alias{cdf_post} 5 | \title{cdf_post} 6 | \usage{ 7 | cdf_post(m, c, data) 8 | } 9 | \arguments{ 10 | \item{m}{mixture distribution with k components} 11 | 12 | \item{c}{a scalar} 13 | 14 | \item{data}{details depend on model} 15 | } 16 | \value{ 17 | an n vector containing the cdf for beta_i at c 18 | } 19 | \description{ 20 | evaluate cdf of posterior distribution of beta at c. m 21 | is the prior on beta, a mixture; c is location of evaluation 22 | assumption is betahat | beta ~ t_v(beta,sebetahat) 23 | } 24 | \examples{ 25 | beta = rnorm(100,0,1) 26 | betahat= beta+rnorm(100,0,1) 27 | sebetahat=rep(1,100) 28 | ash.beta = ash(betahat,1,mixcompdist="normal") 29 | cdf0 = cdf_post(ash.beta$fitted_g,0,set_data(betahat,sebetahat)) 30 | graphics::plot(cdf0,1-get_pp(ash.beta)) 31 | } 32 | -------------------------------------------------------------------------------- /man/set_data.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/set_data.R 3 | \name{set_data} 4 | \alias{set_data} 5 | \title{Takes raw data and sets up data object for use by ash} 6 | \usage{ 7 | set_data(betahat, sebetahat, lik = NULL, alpha = 0) 8 | } 9 | \arguments{ 10 | \item{betahat}{vector of betahats} 11 | 12 | \item{sebetahat}{vector of standard errors} 13 | 14 | \item{lik}{a 
likelihood (see e.g., lik_normal())} 15 | 16 | \item{alpha}{specifies value of alpha to use (model is for betahat/sebetahat^alpha | sebetahat)} 17 | } 18 | \value{ 19 | data object (list) 20 | } 21 | \description{ 22 | Takes raw data and sets up data object for use by ash 23 | } 24 | \details{ 25 | The data object stores both the data, and details of the model to be used for the data. 26 | For example, in the generalized version of ash the cdf and pdf of the likelihood are 27 | stored here. 28 | } 29 | -------------------------------------------------------------------------------- /man/posterior_dist.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ash.R 3 | \name{posterior_dist} 4 | \alias{posterior_dist} 5 | \title{Compute Posterior} 6 | \usage{ 7 | posterior_dist(g, betahat, sebetahat) 8 | } 9 | \arguments{ 10 | \item{g}{a normalmix with components indicating the prior; works 11 | only if g has means 0} 12 | 13 | \item{betahat}{(n vector of observations)} 14 | 15 | \item{sebetahat}{(n vector of standard errors/deviations of 16 | observations)} 17 | } 18 | \value{ 19 | A list, (pi1,mu1,sigma1) whose components are each k by n matrices 20 | where k is number of mixture components in g, n is number of observations in betahat 21 | } 22 | \description{ 23 | Return the posterior on beta given a prior (g) that is 24 | a mixture of normals (class normalmix) and observation 25 | \eqn{betahat ~ N(beta,sebetahat)} 26 | } 27 | \details{ 28 | This can be used to obtain the posterior distribution of beta given the observations. 29 | } 30 | -------------------------------------------------------------------------------- /tests/testthat/test_w_mixEM.R: -------------------------------------------------------------------------------- 1 | context("ashr with weighted samples") 2 | 3 | test_that("optimization with weights matches expectations", { 4 | set.seed(1) 5 | z=rnorm(100,0,2) 6 | z.ash=
ash(z[1:50],1,optmethod="mixEM") 7 | z.ash.w = ash(z,1,optmethod="w_mixEM",weights = c(rep(1,50),rep(0,50)), 8 | g=get_fitted_g(z.ash)) 9 | expect_equal(get_fitted_g(z.ash.w)$pi, get_fitted_g(z.ash)$pi, tol=0.001) 10 | 11 | skip_if_not_installed("mixsqp") 12 | z.ash.w2 = ash(z,1,optmethod="mixSQP",weights = c(rep(1,50),rep(0,50)), 13 | g = get_fitted_g(z.ash)) 14 | expect_equal(get_fitted_g(z.ash.w2)$pi, get_fitted_g(z.ash.w)$pi,tol = 1e-4) 15 | 16 | skip_on_cran() 17 | skip_if_mixkwdual_doesnt_work() 18 | z.ash.w3 = ash(z,1,optmethod="mixIP",weights = c(rep(1,50),rep(0,50)), 19 | g = get_fitted_g(z.ash)) 20 | expect_equal(get_fitted_g(z.ash.w3)$pi, get_fitted_g(z.ash.w)$pi,tol = 1e-4) 21 | }) 22 | -------------------------------------------------------------------------------- /tests/testthat/test_old_vs_new.R: -------------------------------------------------------------------------------- 1 | context("ashr comparisons with previous versions") 2 | 3 | test_that("new results match old ones ", { 4 | # these results were saved under v1.1.3 before introducing more stable 5 | # calculation of likelihoods via first computing log-likelihood and normalizing 6 | # Then also under v1.1.10 after introducing svalue 7 | # set.seed(1); z=rnorm(100,0,2); z.ash=ash(z,1); saveRDS(z.ash,file="tests/testthat/z.ash.test") 8 | set.seed(1); z=rnorm(100,0,2); z.ash=ash(z,1); 9 | oldres = readRDS("z.ash.test") 10 | expect_equal(get_pm(oldres),get_pm(z.ash),tolerance=0.001) 11 | expect_equal(get_psd(oldres),get_psd(z.ash),tolerance=0.001) 12 | expect_equal(get_lfsr(oldres),get_lfsr(z.ash),tolerance=0.001) 13 | expect_equal(get_fitted_g(oldres),get_fitted_g(z.ash),tolerance=0.001) 14 | expect_equal(get_logLR(oldres),get_logLR(z.ash),tolerance=0.001) 15 | expect_equal(get_loglik(oldres),get_loglik(z.ash),tolerance=0.001) 16 | }) 17 | -------------------------------------------------------------------------------- /man/lik_normalmix.Rd: 
-------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/lik.R 3 | \name{lik_normalmix} 4 | \alias{lik_normalmix} 5 | \title{Likelihood object for normal mixture error distribution} 6 | \usage{ 7 | lik_normalmix(pilik, sdlik) 8 | } 9 | \arguments{ 10 | \item{pilik}{a k vector of mixture proportions (k is the number of mixture components), 11 | or an n*k matrix that the j'th row the is mixture proportions for betahat_j} 12 | 13 | \item{sdlik}{a k vector of component-wise standard deviations, 14 | or an n*k matrix that the j'th row the is component-wise standard deviations for betahat_j} 15 | } 16 | \description{ 17 | Creates a likelihood object for ash for use with normal mixture error distribution 18 | } 19 | \examples{ 20 | e = rnorm(100,0,0.8) 21 | e[seq(1,100,by=2)] = rnorm(50,0,1.5) # generate e~0.5*N(0,0.8^2)+0.5*N(0,1.5^2) 22 | betahat = rnorm(100)+e 23 | ash(betahat, 1, lik=lik_normalmix(c(0.5,0.5),c(0.8,1.5))) 24 | } 25 | -------------------------------------------------------------------------------- /man/plot_diagnostic.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ashutility.R 3 | \name{plot_diagnostic} 4 | \alias{plot_diagnostic} 5 | \title{Diagnostic plots for ash object} 6 | \usage{ 7 | plot_diagnostic( 8 | x, 9 | plot.it = TRUE, 10 | sebetahat.tol = 0.001, 11 | plot.hist, 12 | xmin, 13 | xmax, 14 | breaks = "Sturges", 15 | alpha = 0.01, 16 | pch = 19, 17 | cex = 0.25 18 | ) 19 | } 20 | \arguments{ 21 | \item{x}{the fitted ash object} 22 | 23 | \item{plot.it}{logical. whether to plot the diagnostic result} 24 | 25 | \item{sebetahat.tol}{tolerance to test the equality of betahat} 26 | 27 | \item{plot.hist}{logical. 
whether to plot the histogram of betahat when sebetahat is not constant} 28 | 29 | \item{xmin, xmax}{range of the histogram of betahat to be plotted} 30 | 31 | \item{breaks}{histograms parameter (see \code{\link[graphics]{hist}})} 32 | 33 | \item{alpha}{error level for the de-trended diagnostic plot} 34 | 35 | \item{pch, cex}{plot parameters for dots} 36 | } 37 | \description{ 38 | Generate several plots to diagnose the fitness of ASH on the data 39 | } 40 | \details{ 41 | None. 42 | } 43 | -------------------------------------------------------------------------------- /tests/testthat/test_postsample.R: -------------------------------------------------------------------------------- 1 | context("ashr posterior sampling") 2 | 3 | test_that("get_post_sample works as expected",{ 4 | set.seed(1) 5 | n = 10 # number of observations 6 | se = 0.1 7 | nsamp = 1000 # number of samples per observation 8 | z = rnorm(n) 9 | z.ash = ash(z, se, "normal") 10 | samp = get_post_sample(z.ash, nsamp) 11 | # Check that the matrix of samples is of the correct dimensions: 12 | expect_equal(dim(samp), c(nsamp, n)) 13 | # Check that the sampled posterior means are close to the true posterior means: 14 | expect_equal(colMeans(samp), z.ash$result$PosteriorMean, tolerance = 0.01) 15 | samp_sds = sqrt(apply(samp, 2, var)) 16 | # Check that the sampled posterior SDs are close to the true posterior SDs: 17 | expect_equal(samp_sds, z.ash$result$PosteriorSD, tolerance = 0.01) 18 | 19 | u = runif(n) 20 | u.ash = ash(u, se, "uniform") 21 | u.samp = get_post_sample(u.ash, nsamp) 22 | expect_equal(dim(u.samp), c(nsamp, n)) 23 | expect_equal(colMeans(u.samp), u.ash$result$PosteriorMean, tolerance = 0.01) 24 | u.samp_sds = sqrt(apply(u.samp, 2, var)) 25 | expect_equal(u.samp_sds, u.ash$result$PosteriorSD, tolerance = 0.01) 26 | }) 27 | -------------------------------------------------------------------------------- /man/ashr.Rd: 
-------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ashr-package.R 3 | \docType{package} 4 | \name{ashr} 5 | \alias{ashr} 6 | \alias{ashr-package} 7 | \title{ashr} 8 | \description{ 9 | The main function in the ashr package is \code{\link{ash}}, which should be examined for more details. For simplicity only the most commonly-used options are documented under \code{\link{ash}}. For expert or interested users the documentation for function \code{\link{ash.workhorse}} provides documentation on all implemented options. 10 | } 11 | \seealso{ 12 | Useful links: 13 | \itemize{ 14 | \item \url{https://github.com/stephens999/ashr} 15 | \item Report bugs at \url{https://github.com/stephens999/ashr/issues} 16 | } 17 | 18 | } 19 | \author{ 20 | \strong{Maintainer}: Peter Carbonetto \email{pcarbo@uchicago.edu} 21 | 22 | Authors: 23 | \itemize{ 24 | \item Matthew Stephens \email{mstephens@uchicago.edu} 25 | \item David Gerard 26 | \item Mengyin Lu 27 | \item Lei Sun 28 | \item Jason Willwerscheid 29 | \item Nan Xiao 30 | } 31 | 32 | Other contributors: 33 | \itemize{ 34 | \item Chaoxing Dai [contributor] 35 | \item Mazon Zeng [contributor] 36 | } 37 | 38 | } 39 | -------------------------------------------------------------------------------- /tests/testthat/test_truncgen.R: -------------------------------------------------------------------------------- 1 | context("truncated-normal computations") 2 | 3 | test_that("gen_etruncFUN_single gives similar answers to etruncnorm and etrunct", { 4 | expect_equal( 5 | gen_etruncFUN_single(function(x){pnorm(x,log=TRUE)}, 6 | function(x){dnorm(x,log=TRUE)})(0,1), 7 | my_etruncnorm(0,1)) 8 | expect_equal( 9 | gen_etruncFUN_single(function(x){pt(x,df=4,log=TRUE)}, 10 | function(x){dt(x,df=4,log=TRUE)})(0,1), 11 | my_etrunct(0,1,df=4)) 12 | }) 13 | 14 | test_that("gen_etruncFUN gives similar answers to etruncnorm and etrunct", { 
15 | a=cbind(c(1,2),c(3,4)) 16 | b=cbind(c(5,6),c(7,8)) 17 | expect_equal(gen_etruncFUN(function(x){pt(x,df=4,log=TRUE)}, 18 | function(x){dt(x,df=4,log=TRUE)})(a,b), 19 | my_etrunct(a,b,df=4)) 20 | }) 21 | 22 | test_that("ash with automatic truncfun gives similar answers to default", { 23 | set.seed(10); z=rnorm(10,0,4); 24 | a1 = ash(z,1) 25 | 26 | testlik = list(name="norm",lcdfFUN = function(x){pnorm(x,log=TRUE)}, 27 | lpdfFUN = function(x){dnorm(x,log=TRUE)}) 28 | a2 = ash(z,1,lik=testlik) 29 | expect_equal(a1$PosteriorMean,a2$PosteriorMean) 30 | }) 31 | -------------------------------------------------------------------------------- /tests/testthat/test_lik.R: -------------------------------------------------------------------------------- 1 | context("ashr with other likelihoods") 2 | 3 | test_that("general likelihood with multiple df works", { 4 | df = c(rep(100,50),rep(2,50)) 5 | s = rgamma(100,1,1) 6 | betahat = s*rt(n=100,df=df) 7 | data = set_data(betahat,s,lik = lik_normal(),alpha=0) 8 | expect_equal(is_normal(data$lik),TRUE) 9 | expect_equal(is_const(data$lik),TRUE) 10 | 11 | data =set_data(betahat,s,lik = lik_t(df),alpha=0) 12 | expect_equal(is_normal(data$lik),FALSE) 13 | expect_equal(is_const(data$lik),FALSE) 14 | 15 | data =set_data(betahat,s,lik = lik_t(1),alpha=0) 16 | expect_equal(is_normal(data$lik),FALSE) 17 | expect_equal(is_const(data$lik),TRUE) 18 | }) 19 | 20 | test_that("general likelihood with multiple df works", { 21 | set.seed(10) 22 | df = c(rep(100,50),rep(2,50)) 23 | s = rgamma(100,1,1) 24 | betahat = s*rt(n=100,df=df) 25 | 26 | #calc_null_loglik(data) 27 | 28 | data =set_data(betahat,s,lik = lik_normal(),alpha=0) 29 | expect_equal(calc_null_loglik(data),sum(dnorm(betahat,sd=s,log=TRUE))) 30 | 31 | data =set_data(betahat,s,lik = lik_t(df),alpha=0) 32 | expect_equal(calc_null_loglik(data),sum(dt(betahat/s,df=df,log=TRUE)-log(s))) 33 | 34 | 35 | }) 36 | -------------------------------------------------------------------------------- 
/tests/testthat/test_logF.R: -------------------------------------------------------------------------------- 1 | context("ashr with logF likelihood") 2 | 3 | test_that("logF error with df=(10,10) gives similar answers to normal error",{ 4 | 5 | # Simulate a data set. 6 | set.seed(1) 7 | x <- rnorm(100) + log(rf(100,df1 = 10,df2 = 10)) 8 | 9 | # Fit the ash model with two different likelihood densities: (1) the 10 | # normal distribution, and (2) the log-F distribution with degrees 11 | # of freedom (10,10). In the first case, we set the standard errors 12 | # (s.e.) to be match the empirical standard deviation of random 13 | # draws from the log-F distribution. 14 | s <- sd(log(rf(10000,df1 = 10,df2 = 10))) 15 | ash.norm.out <- ash(x,s) 16 | ash.logF.out <- ash.workhorse(x,1,lik = lik_logF(df1 = 10,df2 = 10), 17 | optmethod = "mixEM",control = list(tol = 1e-4)) 18 | 19 | # Compare the posterior mean estimates from ash using the two 20 | # different likelihood densities. We expect that the difference 21 | # between the two estimates should always be small (relative error 22 | # at most 5%). 
23 | expect_equal(ash.norm.out$result$PosteriorMean, 24 | ash.logF.out$result$PosteriorMean, 25 | tolerance = 0.05) 26 | }) 27 | -------------------------------------------------------------------------------- /man/mixSQP.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix_opt.R 3 | \name{mixSQP} 4 | \alias{mixSQP} 5 | \title{Estimate mixture proportions of a mixture model using 6 | mix-SQP algorithm.} 7 | \usage{ 8 | mixSQP(matrix_lik, prior, pi_init = NULL, control = list(), weights = NULL) 9 | } 10 | \arguments{ 11 | \item{matrix_lik}{A matrix containing the conditional likelihood 12 | values, possibly normalized.} 13 | 14 | \item{prior}{A vector of the parameters of the Dirichlet prior on 15 | the mixture weights.} 16 | 17 | \item{pi_init}{The initial estimate of the mixture weights.} 18 | 19 | \item{control}{A list of settings for the mix-SQP optimization 20 | algorithm; see \code{\link[mixsqp]{mixsqp}} for details.} 21 | 22 | \item{weights}{The weights to be assigned to the observations. Must 23 | be a vector of length equal the number of rows of \code{matrix_lik}. 24 | If \code{weights = NULL}, all observations are assigned the same 25 | weight.} 26 | } 27 | \value{ 28 | A list object including the estimates (\code{pihat}) and a 29 | flag (\code{control}) indicating convergence success or failure. 30 | } 31 | \description{ 32 | Estimate mixture proportions of a mixture model using 33 | mix-SQP algorithm. 
34 | } 35 | -------------------------------------------------------------------------------- /R/truncgen.R: -------------------------------------------------------------------------------- 1 | # This file contains functions written to compute 2 | # truncated expectations when no etruncFUN is provided 3 | 4 | 5 | #' @title gen_etruncFUN 6 | #' @description Produce function to compute expectation of truncated 7 | #' error distribution from log cdf and log pdf (using numerical integration) 8 | #' 9 | #' @param lcdfFUN the log cdfFUN of the error distribution 10 | #' @param lpdfFUN the log pdfFUN of the error distribution 11 | gen_etruncFUN = function(lcdfFUN,lpdfFUN){ 12 | return(function(a,b){ 13 | tmp=mapply(gen_etruncFUN_single(lcdfFUN,lpdfFUN),a,b) 14 | dim(tmp) = dim(a) 15 | return(tmp) 16 | }) 17 | } 18 | 19 | 20 | # compute expectation of truncated error distribution 21 | # for scalars a and b 22 | # 23 | # @importFrom stats integrate 24 | gen_etruncFUN_single = function(lcdfFUN,lpdfFUN){ 25 | return(function(a,b){ 26 | if(a == b){ 27 | return(a) 28 | }else{ 29 | denom = exp(lcdfFUN(b))-exp(lcdfFUN(a)) 30 | if(denom!=0){ 31 | 32 | # numerical integration 33 | xpdf = function(x){ 34 | x*exp(lpdfFUN(x)) 35 | } 36 | tmp = try(stats::integrate(xpdf,a,b)$value,silent=TRUE) 37 | if (!inherits(tmp,"try-error")) 38 | return(tmp/denom) 39 | } 40 | } 41 | return(NA) 42 | }) 43 | } 44 | 45 | -------------------------------------------------------------------------------- /man/lik_binom.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/lik.R 3 | \name{lik_binom} 4 | \alias{lik_binom} 5 | \title{Likelihood object for Binomial error distribution} 6 | \usage{ 7 | lik_binom(y, n, link = c("identity", "logit")) 8 | } 9 | \arguments{ 10 | \item{y}{Binomial observations} 11 | 12 | \item{n}{Binomial number of trials} 13 | 14 | \item{link}{Link function. 
The "identity" link directly puts unimodal prior on Binomial success 15 | probabilities p, and "logit" link puts unimodal prior on logit(p).} 16 | } 17 | \description{ 18 | Creates a likelihood object for ash for use with Binomial error distribution 19 | } 20 | \details{ 21 | Suppose we have Binomial observations \code{y} where \eqn{y_i\sim Bin(n_i,p_i)}. 22 | We either put an unimodal prior g on the success probabilities \eqn{p_i\sim g} (by specifying 23 | \code{link="identity"}) or on the logit success probabilities \eqn{logit(p_i)\sim g} 24 | (by specifying \code{link="logit"}). Either way, ASH with this Binomial likelihood function 25 | will compute the posterior mean of the success probabilities \eqn{p_i}. 26 | } 27 | \examples{ 28 | p = rbeta(100,2,2) # prior mode: 0.5 29 | n = rpois(100,10) 30 | y = rbinom(100,n,p) # simulate Binomial observations 31 | ash(rep(0,length(y)),1,lik=lik_binom(y,n)) 32 | } 33 | -------------------------------------------------------------------------------- /man/lik_pois.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/lik.R 3 | \name{lik_pois} 4 | \alias{lik_pois} 5 | \title{Likelihood object for Poisson error distribution} 6 | \usage{ 7 | lik_pois(y, scale = 1, link = c("identity", "log")) 8 | } 9 | \arguments{ 10 | \item{y}{Poisson observations.} 11 | 12 | \item{scale}{Scale factor for Poisson observations: y~Pois(scale*lambda).} 13 | 14 | \item{link}{Link function. The "identity" link directly puts unimodal prior on Poisson 15 | intensities lambda, and "log" link puts unimodal prior on log(lambda).} 16 | } 17 | \description{ 18 | Creates a likelihood object for ash for use with Poisson error distribution 19 | } 20 | \details{ 21 | Suppose we have Poisson observations \code{y} where \eqn{y_i\sim Poisson(c_i\lambda_i)}. 
22 | We either put an unimodal prior g on the (scaled) intensities \eqn{\lambda_i\sim g} 23 | (by specifying \code{link="identity"}) or on the log intensities 24 | \eqn{log(\lambda_i)\sim g} (by specifying \code{link="log"}). Either way, 25 | ASH with this Poisson likelihood function will compute the posterior mean of the 26 | intensities \eqn{\lambda_i}. 27 | } 28 | \examples{ 29 | beta = c(rnorm(100,50,5)) # prior mode: 50 30 | y = rpois(100,beta) # simulate Poisson observations 31 | ash(rep(0,length(y)),1,lik=lik_pois(y)) 32 | 33 | } 34 | -------------------------------------------------------------------------------- /man/my_vtruncnorm.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/truncnorm.R 3 | \name{my_vtruncnorm} 4 | \alias{my_vtruncnorm} 5 | \title{Variance of Truncated Normal} 6 | \usage{ 7 | my_vtruncnorm(a, b, mean = 0, sd = 1) 8 | } 9 | \arguments{ 10 | \item{a}{The lower limit for the support of the truncated normal. Can be 11 | \code{-Inf}.} 12 | 13 | \item{b}{The upper limit for the support. Can be \code{Inf}. \code{a} and 14 | \code{b} must have the same length, and each element of \code{a} should 15 | be less than or equal to the corresponding element of \code{b}.} 16 | 17 | \item{mean}{The mean of the untruncated normal.} 18 | 19 | \item{sd}{The standard deviation of the untruncated normal.} 20 | } 21 | \value{ 22 | The variance of truncated normal distributions with parameters 23 | \code{a}, \code{b}, \code{mean}, and \code{sd}. If any of the arguments 24 | is a matrix, then a matrix will be returned. 25 | } 26 | \description{ 27 | Computes the variance of truncated normal distributions with 28 | parameters \code{a}, \code{b}, \code{mean}, and \code{sd}. Arguments can 29 | be scalars, vectors, or matrices. 
Arguments of shorter length will be 30 | recycled according to the usual recycling rules, but \code{a} and \code{b} 31 | must have the same length. Missing values are accepted for all arguments. 32 | } 33 | \seealso{ 34 | \code{\link{my_etruncnorm}}, \code{\link{my_e2truncnorm}} 35 | } 36 | -------------------------------------------------------------------------------- /appveyor.yml: -------------------------------------------------------------------------------- 1 | # DO NOT CHANGE the "init" and "install" sections below 2 | 3 | # Download script file from GitHub 4 | init: 5 | ps: | 6 | $ErrorActionPreference = "Stop" 7 | Invoke-WebRequest http://raw.github.com/krlmlr/r-appveyor/master/scripts/appveyor-tool.ps1 -OutFile "..\appveyor-tool.ps1" 8 | Import-Module '..\appveyor-tool.ps1' 9 | 10 | install: 11 | ps: Bootstrap 12 | 13 | # Adapt as necessary starting from here 14 | environment: 15 | global: 16 | R_REMOTES_NO_ERRORS_FROM_WARNINGS: true 17 | USE_RTOOLS: yes 18 | R_VERSION: release 19 | R_ARCH: x64 20 | _R_CHECK_FORCE_SUGGESTS_: false 21 | 22 | # This is the minimal set of R packages needed to run "R CMD check" on 23 | # the package. 
24 | build_script: 25 | - R -e install.packages(c('devtools','etrunct','truncnorm','Rcpp','SQUAREM','testthat','rmarkdown','knitr','ggplot2','invgamma','mixsqp'),head(.libPaths(),1),'http://cran.wustl.edu') 26 | 27 | test_script: 28 | - travis-tool.sh run_tests 29 | 30 | on_failure: 31 | - 7z a failure.zip *.Rcheck\* 32 | - appveyor PushArtifact failure.zip 33 | 34 | artifacts: 35 | - path: '*.Rcheck\**\*.log' 36 | name: Logs 37 | 38 | - path: '*.Rcheck\**\*.out' 39 | name: Logs 40 | 41 | - path: '*.Rcheck\**\*.fail' 42 | name: Logs 43 | 44 | - path: '*.Rcheck\**\*.Rout' 45 | name: Logs 46 | 47 | - path: '\*_*.tar.gz' 48 | name: Bits 49 | 50 | - path: '\*_*.zip' 51 | name: Bits 52 | -------------------------------------------------------------------------------- /R/ash_pois.R: -------------------------------------------------------------------------------- 1 | #' @title Performs adaptive shrinkage on Poisson data 2 | #' @description Uses Empirical Bayes to fit the model \deqn{y_j | \lambda_j ~ Poi(c_j \lambda_j)} with \deqn{h(lambda_j) ~ g()} 3 | #' where \eqn{h} is a specified link function (either "identity" or "log" are permitted). 4 | 5 | #' @details The model is fit in two stages: i) estimate \eqn{g} by maximum likelihood (over the set of symmetric 6 | #' unimodal distributions) to give estimate \eqn{\hat{g}}; 7 | #' ii) Compute posterior distributions for \eqn{\lambda_j} given \eqn{y_j,\hat{g}}. 8 | #' Note that the link function \eqn{h} affects the prior assumptions (because, e.g., assuming a unimodal prior on \eqn{\lambda} is 9 | #' different from assuming unimodal on \eqn{\log\lambda}), but posterior quantities are always computed for the 10 | #' for \eqn{\lambda} and *not* \eqn{h(\lambda)}. 11 | #' @param y vector of Poisson observations. 12 | #' @param scale vector of scale factors for Poisson observations: the model is \eqn{y[j]~Pois(scale[j]*lambda[j])}. 13 | #' @param link string, either "identity" or "log", indicating the link function. 
14 | #' @param ... other parameters to be passed to ash 15 | #' 16 | #' @examples 17 | #' beta = c(rep(0,50),rexp(50)) 18 | #' y = rpois(100,beta) # simulate Poisson observations 19 | #' y.ash = ash_pois(y,scale=1) 20 | #' @export 21 | ash_pois = function(y, scale=1, link=c("identity","log"), ...){ 22 | link = match.arg(link) 23 | ash(rep(0,length(y)), 1, lik=lik_pois(y,scale,link), ...) 24 | } -------------------------------------------------------------------------------- /src/RcppExports.cpp: -------------------------------------------------------------------------------- 1 | // Generated by using Rcpp::compileAttributes() -> do not edit by hand 2 | // Generator token: 10BE3573-1514-4C36-9D1C-5A225CD40393 3 | 4 | #include <Rcpp.h> 5 | 6 | using namespace Rcpp; 7 | 8 | #ifdef RCPP_USE_GLOBAL_ROSTREAM 9 | Rcpp::Rostream<true>& Rcpp::Rcout = Rcpp::Rcpp_cout_get(); 10 | Rcpp::Rostream<false>& Rcpp::Rcerr = Rcpp::Rcpp_cerr_get(); 11 | #endif 12 | 13 | // cxxMixSquarem 14 | List cxxMixSquarem(NumericMatrix matrix_lik, NumericVector prior, NumericVector pi_init, List control); 15 | RcppExport SEXP _ashr_cxxMixSquarem(SEXP matrix_likSEXP, SEXP priorSEXP, SEXP pi_initSEXP, SEXP controlSEXP) { 16 | BEGIN_RCPP 17 | Rcpp::RObject rcpp_result_gen; 18 | Rcpp::RNGScope rcpp_rngScope_gen; 19 | Rcpp::traits::input_parameter< NumericMatrix >::type matrix_lik(matrix_likSEXP); 20 | Rcpp::traits::input_parameter< NumericVector >::type prior(priorSEXP); 21 | Rcpp::traits::input_parameter< NumericVector >::type pi_init(pi_initSEXP); 22 | Rcpp::traits::input_parameter< List >::type control(controlSEXP); 23 | rcpp_result_gen = Rcpp::wrap(cxxMixSquarem(matrix_lik, prior, pi_init, control)); 24 | return rcpp_result_gen; 25 | END_RCPP 26 | } 27 | 28 | static const R_CallMethodDef CallEntries[] = { 29 | {"_ashr_cxxMixSquarem", (DL_FUNC) &_ashr_cxxMixSquarem, 4}, 30 | {NULL, NULL, 0} 31 | }; 32 | 33 | RcppExport void R_init_ashr(DllInfo *dll) { 34 | R_registerRoutines(dll, NULL, CallEntries, NULL, NULL); 35 | 
R_useDynamicSymbols(dll, FALSE); 36 | } 37 | -------------------------------------------------------------------------------- /NEWS.md: -------------------------------------------------------------------------------- 1 | # ashr v2.0 2 | 3 | ## Major Changes 4 | 5 | There are a few major changes in output and input that will likely 6 | break existing dependencies. Here are the highlights 7 | 8 | - the main output (lfsr, lfdr, etc) is rearranged into a dataframe, 9 | called `result`. 10 | 11 | - so, for example, the lfsr is now `a$result$lfsr` instead of 12 | `a$lfsr` 13 | 14 | - or, better, use the accessor function `get_lfsr(a)` to extract 15 | the lfsr etc 16 | 17 | - I added accessor functions `get_lfsr`, `get_lfdr`, `get_pm`, 18 | `get_psd` etc to access the lfsr, lfdr, posterior mean and posterior 19 | standard deviation. Using these functions to access results will 20 | help ensure your code remains valid if I happen to change the 21 | internal structure of the results again (although not 22 | anticipated...) 23 | 24 | - output `fitted.g` is renamed `fitted_g`, and `flash.data` becomes 25 | `flash_data` to make the whole package convention more 26 | consistent. Also `fit` becomes `fit_details`. 27 | 28 | - function prefixes `comppost` and `compdens` replaced with 29 | `comp_post` and `comp_dens`, again for consistency. 30 | 31 | - `nonzeromode` option is replaced with the option `mode` to specify 32 | mode. Or use `mode="estimate"` to estimate the mode. 33 | 34 | - more flexible control of output. For example, you can say you want 35 | only the logLR output by specifying `outputlevel = 36 | c("lfsr","logLR")`, or only posterior mean by `outputlevel = 37 | c("PosteriorMean")`. 
38 | 39 | -------------------------------------------------------------------------------- /tests/testthat/test_log_comp_dens.R: -------------------------------------------------------------------------------- 1 | context("ashr convolved density computations") 2 | 3 | test_that("normalmix functions behave as expected", { 4 | gn = normalmix(c(0.5,0.5),c(0,0),c(0.1,1)) 5 | data = set_data(c(1,2,3),c(1,10,100)) 6 | expect_equal(log(comp_dens_conv(gn,data)),log_comp_dens_conv(gn,data)) 7 | }) 8 | 9 | 10 | test_that("exp(log_comp_dens_conv) gives same results as comp_dens_conv", { 11 | g = unimix(c(0.1,0.45,0.45),c(0,0,0),c(0,1,2)) 12 | gn = normalmix(c(0.1,0.45,0.45),c(0,0,0),c(0,0.1,1)) 13 | # gig = igmix(c(0.5,0.5),c(1,2),c(3,4)) 14 | x=c(-10,2) 15 | s = c(1,2) 16 | data = set_data(x,s) 17 | data2 = set_data(x,s,lik_t(df=2)) 18 | expect_equal(comp_dens_conv(g, data), exp(log_comp_dens_conv(g,data))) 19 | expect_equal(comp_dens_conv(g, data2), exp(log_comp_dens_conv(g,data2))) 20 | expect_equal(comp_dens_conv(gn, data), exp(log_comp_dens_conv(gn,data))) 21 | 22 | data = set_data(x,s,alpha = 1) 23 | expect_equal(comp_dens_conv(g, data), exp(log_comp_dens_conv(g,data))) 24 | expect_equal(comp_dens_conv(gn, data), exp(log_comp_dens_conv(gn,data))) 25 | }) 26 | 27 | test_that("comp_postprob is numerically stable", { 28 | g = unimix(c(0.5,0.5),c(1,2),c(0,0)) 29 | gn = normalmix(c(0.5,0.5),c(0,0),c(0.1,1)) 30 | gig = igmix(c(0.5,0.5),c(1,2),c(3,4)) 31 | x=c(-10,2) 32 | s = c(1,2) 33 | data = set_data(x,s) 34 | data2 = set_data(x,s,lik_t(2)) 35 | expect_equal(comp_postprob(g,set_data(-10,0.5)),cbind(c(2/3,1/3))) 36 | expect_equal(comp_postprob(g,set_data(-20,0.5)),cbind(c(2/3,1/3))) 37 | }) 38 | -------------------------------------------------------------------------------- /man/ash_pois.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ash_pois.R 3 | 
\name{ash_pois} 4 | \alias{ash_pois} 5 | \title{Performs adaptive shrinkage on Poisson data} 6 | \usage{ 7 | ash_pois(y, scale = 1, link = c("identity", "log"), ...) 8 | } 9 | \arguments{ 10 | \item{y}{vector of Poisson observations.} 11 | 12 | \item{scale}{vector of scale factors for Poisson observations: the model is \eqn{y[j]~Pois(scale[j]*lambda[j])}.} 13 | 14 | \item{link}{string, either "identity" or "log", indicating the link function.} 15 | 16 | \item{...}{other parameters to be passed to ash} 17 | } 18 | \description{ 19 | Uses Empirical Bayes to fit the model \deqn{y_j | \lambda_j ~ Poi(c_j \lambda_j)} with \deqn{h(lambda_j) ~ g()} 20 | where \eqn{h} is a specified link function (either "identity" or "log" are permitted). 21 | } 22 | \details{ 23 | The model is fit in two stages: i) estimate \eqn{g} by maximum likelihood (over the set of symmetric 24 | unimodal distributions) to give estimate \eqn{\hat{g}}; 25 | ii) Compute posterior distributions for \eqn{\lambda_j} given \eqn{y_j,\hat{g}}. 26 | Note that the link function \eqn{h} affects the prior assumptions (because, e.g., assuming a unimodal prior on \eqn{\lambda} is 27 | different from assuming unimodal on \eqn{\log\lambda}), but posterior quantities are always computed for the 28 | for \eqn{\lambda} and *not* \eqn{h(\lambda)}. 
29 | } 30 | \examples{ 31 | beta = c(rep(0,50),rexp(50)) 32 | y = rpois(100,beta) # simulate Poisson observations 33 | y.ash = ash_pois(y,scale=1) 34 | } 35 | -------------------------------------------------------------------------------- /man/mixIP.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix_opt.R 3 | \name{mixIP} 4 | \alias{mixIP} 5 | \title{Estimate mixture proportions of a mixture model by Interior Point method} 6 | \usage{ 7 | mixIP(matrix_lik, prior, pi_init = NULL, control = list(), weights = NULL) 8 | } 9 | \arguments{ 10 | \item{matrix_lik, }{a n by k matrix with (j,k)th element equal to \eqn{f_k(x_j)}.} 11 | 12 | \item{prior, }{a k vector of the parameters of the Dirichlet prior on \eqn{\pi}. Recommended to be rep(1,k)} 13 | 14 | \item{pi_init, }{the initial value of \eqn{\pi} to use. If not specified defaults to (1/k,...,1/k).} 15 | 16 | \item{control}{A list of control parameters to be passed to REBayes::KWDual} 17 | 18 | \item{weights}{weights to be assigned to the observations (an n vector)} 19 | } 20 | \value{ 21 | A list, including the estimates (pihat), the log likelihood for each interation (B) 22 | and a flag to indicate convergence 23 | } 24 | \description{ 25 | Given the individual component likelihoods for a mixture model, estimates the mixture proportions. 26 | } 27 | \details{ 28 | Optimizes \deqn{L(pi)= sum_j w_j log(sum_k pi_k f_{jk}) + h(pi)} 29 | subject to pi_k non-negative and sum_k pi_k = 1. Here \deqn{h(pi)} is 30 | a penalty function h(pi) = sum_k (prior_k-1) log pi_k. 31 | Calls REBayes::KWDual in the REBayes package, which is in turn a wrapper to the mosek 32 | convex optimization software. So REBayes must be installed to use this. 33 | Used by the ash main function; there is no need for a user to call this 34 | function separately, but it is exported for convenience. 
35 | } 36 | -------------------------------------------------------------------------------- /man/my_etruncnorm.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/truncnorm.R 3 | \name{my_etruncnorm} 4 | \alias{my_etruncnorm} 5 | \title{Expected Value of Truncated Normal} 6 | \usage{ 7 | my_etruncnorm(a, b, mean = 0, sd = 1) 8 | } 9 | \arguments{ 10 | \item{a}{The lower limit for the support of the truncated normal. Can be 11 | \code{-Inf}.} 12 | 13 | \item{b}{The upper limit for the support. Can be \code{Inf}. \code{a} and 14 | \code{b} must have the same length, and each element of \code{a} should 15 | be less than or equal to the corresponding element of \code{b}.} 16 | 17 | \item{mean}{The mean of the untruncated normal.} 18 | 19 | \item{sd}{The standard deviation of the untruncated normal. Standard 20 | deviations of zero are interpreted as numerically (rather than exactly) 21 | zero, so that the untruncated mean is returned if it lies within 22 | \code{[a, b]} and the nearer of \code{a} and \code{b} is returned 23 | otherwise.} 24 | } 25 | \value{ 26 | The expected values of truncated normal distributions with 27 | parameters \code{a}, \code{b}, \code{mean}, and \code{sd}. If any of the 28 | arguments is a matrix, then a matrix will be returned. 29 | } 30 | \description{ 31 | Computes the means of truncated normal distributions with 32 | parameters \code{a}, \code{b}, \code{mean}, and \code{sd}. Arguments 33 | can be scalars, vectors, or matrices. Arguments of shorter length will 34 | be recycled according to the usual recycling rules, but \code{a} and 35 | \code{b} must have the same length. Missing values are accepted for all 36 | arguments. 
37 | } 38 | \seealso{ 39 | \code{\link{my_e2truncnorm}}, \code{\link{my_vtruncnorm}} 40 | } 41 | -------------------------------------------------------------------------------- /man/mixEM.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix_opt.R 3 | \name{mixEM} 4 | \alias{mixEM} 5 | \title{Estimate mixture proportions of a mixture model by EM algorithm} 6 | \usage{ 7 | mixEM(matrix_lik, prior, pi_init = NULL, control = list()) 8 | } 9 | \arguments{ 10 | \item{matrix_lik, }{a n by k matrix with (j,k)th element equal to \eqn{f_k(x_j)}.} 11 | 12 | \item{prior, }{a k vector of the parameters of the Dirichlet prior on \eqn{\pi}. Recommended to be rep(1,k)} 13 | 14 | \item{pi_init, }{the initial value of \eqn{\pi} to use. If not specified defaults to (1/k,...,1/k).} 15 | 16 | \item{control}{A list of control parameters for the SQUAREM algorithm, default value is set to be control.default=list(K = 1, method=3, square=TRUE, step.min0=1, step.max0=1, mstep=4, kr=1, objfn.inc=1,tol=1.e-07, maxiter=5000, trace=FALSE).} 17 | } 18 | \value{ 19 | A list, including the estimates (pihat), the log likelihood for each interation (B) 20 | and a flag to indicate convergence 21 | } 22 | \description{ 23 | Given the individual component likelihoods for a mixture model, estimates the mixture proportions by an EM algorithm. 24 | } 25 | \details{ 26 | Fits a k component mixture model \deqn{f(x|\pi)= \sum_k \pi_k f_k(x)} to independent 27 | and identically distributed data \eqn{x_1,\dots,x_n}. 28 | Estimates mixture proportions \eqn{\pi} by maximum likelihood, or by maximum a posteriori (MAP) estimation for a Dirichlet prior on \eqn{\pi} 29 | (if a prior is specified). Uses the SQUAREM package to accelerate convergence of EM. Used by the ash main function; there is no need for a user to call this 30 | function separately, but it is exported for convenience. 
31 | } 32 | -------------------------------------------------------------------------------- /man/my_e2truncnorm.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/truncnorm.R 3 | \name{my_e2truncnorm} 4 | \alias{my_e2truncnorm} 5 | \title{Expected Squared Value of Truncated Normal} 6 | \usage{ 7 | my_e2truncnorm(a, b, mean = 0, sd = 1) 8 | } 9 | \arguments{ 10 | \item{a}{The lower limit for the support of the truncated normal. Can be 11 | \code{-Inf}.} 12 | 13 | \item{b}{The upper limit for the support. Can be \code{Inf}. \code{a} and 14 | \code{b} must have the same length, and each element of \code{a} should 15 | be less than or equal to the corresponding element of \code{b}.} 16 | 17 | \item{mean}{The mean of the untruncated normal.} 18 | 19 | \item{sd}{The standard deviation of the untruncated normal. Standard 20 | deviations of zero are interpreted as numerically (rather than exactly) 21 | zero, so that the square of the untruncated mean is returned if it lies 22 | within \code{[a, b]} and the square of the nearer of \code{a} and 23 | \code{b} is returned otherwise.} 24 | } 25 | \value{ 26 | The expected squared values of truncated normal 27 | distributions with parameters \code{a}, \code{b}, \code{mean}, and 28 | \code{sd}. If any of the arguments is a matrix, then a matrix will 29 | be returned. 30 | } 31 | \description{ 32 | Computes the expected squared values of truncated normal 33 | distributions with parameters \code{a}, \code{b}, \code{mean}, and 34 | \code{sd}. Arguments can be scalars, vectors, or matrices. Arguments of 35 | shorter length will be recycled according to the usual recycling rules, 36 | but \code{a} and \code{b} must have the same length. Missing values are 37 | accepted for all arguments. 
38 | } 39 | \seealso{ 40 | \code{\link{my_etruncnorm}}, \code{\link{my_vtruncnorm}} 41 | } 42 | -------------------------------------------------------------------------------- /man/ashci.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ashCI.R 3 | \name{ashci} 4 | \alias{ashci} 5 | \title{Credible Interval Computation for the ash object} 6 | \usage{ 7 | ashci( 8 | a, 9 | level = 0.95, 10 | betaindex, 11 | lfsr_threshold = 1, 12 | tol = 0.001, 13 | trace = FALSE 14 | ) 15 | } 16 | \arguments{ 17 | \item{a}{the fitted ash object} 18 | 19 | \item{level}{the level for the credible interval, (default=0.95)} 20 | 21 | \item{betaindex}{a vector consisting of locations of betahat where 22 | you would like to compute the credible interval} 23 | 24 | \item{lfsr_threshold}{a scalar, if specified then computes CIs only for observations 25 | more significant than that threshold.} 26 | 27 | \item{tol}{passed to uniroot; indicates desired accuracy.} 28 | 29 | \item{trace}{a logical variable denoting whether some of the 30 | intermediate results of iterations should be displayed to the 31 | user. Default is FALSE.} 32 | } 33 | \value{ 34 | A matrix, with 2 columns, ith row giving CI for ith observation 35 | } 36 | \description{ 37 | Given the ash object returned by the main function ash, 38 | this function computes a posterior credible interval (CI) for each observation. The ash object 39 | must include a data component to use this function (which it does by default). 40 | } 41 | \details{ 42 | Uses uniroot to find credible interval, one at a time for each observation. 43 | The computation cost is linear in number of observations. 
44 | } 45 | \examples{ 46 | beta = c(rep(0,20),rnorm(20)) 47 | sebetahat = abs(rnorm(40,0,1)) 48 | betahat = rnorm(40,beta,sebetahat) 49 | beta.ash = ash(betahat, sebetahat) 50 | 51 | CImatrix=ashci(beta.ash,level=0.95) 52 | 53 | CImatrix1=ashci(beta.ash,level=0.95,betaindex=c(1,2,5)) 54 | CImatrix2=ashci(beta.ash,level=0.95,lfsr_threshold=0.1) 55 | } 56 | -------------------------------------------------------------------------------- /R/truncbeta.R: --------------------------------------------------------------------------------
1 | #' @title mean of truncated Beta distribution
2 | #' @description Compute mean of the truncated Beta.
3 | #' @param a left limit of distribution
4 | #' @param b right limit of distribution
5 | #' @param alpha,beta shape parameters of Beta distribution
#' @return elementwise mean of a Beta(alpha,beta) distribution truncated to
#'   [a,b]; when a == b the point a itself is returned. Vector/matrix
#'   arguments are combined elementwise (usual recycling rules assumed --
#'   TODO confirm against callers).
6 | #' @export
7 | my_etruncbeta = function(a, b, alpha, beta){
# Start from a: this is already the correct result for the degenerate
# intervals a == b (point mass at a).
8 | tmp = a
# Proper intervals use the closed form
#   E[X | a<=X<=b] = alpha/(alpha+beta) *
#     (P(b;alpha+1,beta)-P(a;alpha+1,beta)) / (P(b;alpha,beta)-P(a;alpha,beta))
# where P is the Beta cdf (pbeta).
9 | tmp[a!=b] = (alpha/(alpha+beta)*(stats::pbeta(b,alpha+1,beta)-stats::pbeta(a,alpha+1,beta))/
10 | (stats::pbeta(b,alpha,beta)-stats::pbeta(a,alpha,beta)))[a!=b]
11 | # zero denominator case: stats::pbeta(b,alpha,beta) and stats::pbeta(a,alpha,beta) are both 0 or 1
# When the cdf difference underflows to exactly 0, replace the 0/0 result
# with the endpoint whose log-density is larger (comparison on the log
# scale avoids underflow of dbeta itself).
12 | tmp[(stats::pbeta(b,alpha,beta)-stats::pbeta(a,alpha,beta))==0] =
13 | ifelse(stats::dbeta(a,alpha,beta,log=TRUE)>stats::dbeta(b,alpha,beta,log=TRUE),
14 | a, b)[(stats::pbeta(b,alpha,beta)-stats::pbeta(a,alpha,beta))==0]
15 | return(tmp)
16 | }
17 |
18 | #' @title second moment of truncated Beta distribution
19 | #' @description Compute second moment of the truncated Beta.
20 | #' @param a left limit of distribution
21 | #' @param b right limit of distribution
22 | #' @param alpha,beta shape parameters of Beta distribution
#' @return elementwise second moment E[X^2] of a Beta(alpha,beta)
#'   distribution truncated to [a,b]; when a == b the point value a^2 is
#'   returned.
23 | #' @export
24 | my_e2truncbeta = function(a, b, alpha, beta){
# a^2 is already the correct result for degenerate intervals a == b.
25 | tmp = a^2
# Closed form for proper intervals:
#   E[X^2 | a<=X<=b] = alpha(alpha+1)/((alpha+beta)(alpha+beta+1)) *
#     (P(b;alpha+2,beta)-P(a;alpha+2,beta)) / (P(b;alpha,beta)-P(a;alpha,beta))
# where P is the Beta cdf (pbeta).
26 | tmp[a!=b] = (alpha*(alpha+1)/((alpha+beta)*(alpha+beta+1))*
27 | (stats::pbeta(b,alpha+2,beta)-stats::pbeta(a,alpha+2,beta))/
28 | (stats::pbeta(b,alpha,beta)-stats::pbeta(a,alpha,beta)))[a!=b]
29 | # zero denominator case: stats::pbeta(b,alpha,beta) and stats::pbeta(a,alpha,beta) are both 0 or 1
# When the cdf difference underflows to exactly 0, replace the 0/0 result
# with the squared endpoint whose log-density is larger.
30 | tmp[(stats::pbeta(b,alpha,beta)-stats::pbeta(a,alpha,beta))==0] =
31 | ifelse(stats::dbeta(a,alpha,beta,log=TRUE)>stats::dbeta(b,alpha,beta,log=TRUE),
32 | a^2, b^2)[(stats::pbeta(b,alpha,beta)-stats::pbeta(a,alpha,beta))==0]
33 | return(tmp)
34 | }
35 | -------------------------------------------------------------------------------- /tests/testthat/test_prior.R: --------------------------------------------------------------------------------
1 | context("ashr \"prior\" argument")
2 |
3 | test_that("numeric and (partial) string arguments both work", {
4 | betahat <- c(1.01636974224394, -2.05686254738995, -0.7135781676358,
5 | -1.16906745227838, -0.917039991627176)
6 |
7 | sebetahat <- c(1.02572223086898, 0.499285201440522, 0.476520330150983,
8 | 0.624576594477857, 0.198152636610839)
9 |
10 | aout1 <- ash.workhorse(betahat = betahat[1:5], sebetahat = sebetahat[1:5],
11 | g = normalmix(rep(0, 5), rep(0, 5), 0:4), fixg = FALSE,
12 | prior = "uniform")
13 | aout2 <- ash.workhorse(betahat = betahat[1:5], sebetahat = sebetahat[1:5],
14 | g = normalmix(rep(0, 5), rep(0, 5), 0:4), fixg = FALSE,
15 | prior = rep(1, 5))
16 | expect_identical(aout1$result$PosteriorMean, aout2$result$PosteriorMean)
17 |
18 | aout3 <- ash.workhorse(betahat = betahat[1:5], sebetahat = sebetahat[1:5],
19 | g = normalmix(rep(0, 5), rep(0, 5), 0:4), fixg = FALSE,
20 | prior = "null")
21 | expect_false(identical(aout1$result$PosteriorMean,
aout3$result$PosteriorMean)) 22 | }) 23 | 24 | test_that("pi is nonzero for mixture components where prior > 1", { 25 | x <- 10:20 26 | s <- rep(1, 11) 27 | g <- unimix(rep(0, 3), c(0, -1, -20), c(0, 1, 20)) 28 | aout1 <- ash(x, s, g = g, fixg = FALSE, prior = c(1, 1, 1), mixcompdist = "uniform") 29 | expect_false(all(aout1$fitted_g$pi > 0)) 30 | 31 | aout2 <- ash(x, s, g = g, fixg = FALSE, prior = c(10, 10, 10), mixcompdist = "uniform") 32 | expect_true(all(aout2$fitted_g$pi > 0)) 33 | 34 | aout3 <- ash(x, s, g = g, fixg = FALSE, prior = c(10, 1, 10), mixcompdist = "uniform") 35 | expect_true(aout3$fitted_g$pi[1] > 0) 36 | expect_false(aout3$fitted_g$pi[2] > 0) 37 | }) -------------------------------------------------------------------------------- /man/mixVBEM.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix_opt.R 3 | \name{mixVBEM} 4 | \alias{mixVBEM} 5 | \title{Estimate posterior distribution on mixture proportions of a mixture model by a Variational Bayes EM algorithm} 6 | \usage{ 7 | mixVBEM(matrix_lik, prior, pi_init = NULL, control = list()) 8 | } 9 | \arguments{ 10 | \item{matrix_lik}{a n by k matrix with (j,k)th element equal to \eqn{f_k(x_j)}.} 11 | 12 | \item{prior}{a k vector of the parameters of the Dirichlet prior on \eqn{\pi}. Recommended to be rep(1,k)} 13 | 14 | \item{pi_init}{the initial value of the posterior parameters. 
If not specified defaults to the prior parameters.} 15 | 16 | \item{control}{A list of control parameters for the SQUAREM algorithm, default value is set to be control.default=list(K = 1, method=3, square=TRUE, step.min0=1, step.max0=1, mstep=4, kr=1, objfn.inc=1,tol=1.e-07, maxiter=5000, trace=FALSE).} 17 | } 18 | \value{ 19 | A list, whose components include point estimates (pihat), 20 | the parameters of the fitted posterior on \eqn{\pi} (pipost), 21 | the bound on the log likelihood for each iteration (B) 22 | and a flag to indicate convergence (converged). 23 | } 24 | \description{ 25 | Given the individual component likelihoods for a mixture model, estimates the posterior on 26 | the mixture proportions by an VBEM algorithm. Used by the ash main function; there is no need for a user to call this 27 | function separately, but it is exported for convenience. 28 | } 29 | \details{ 30 | Fits a k component mixture model \deqn{f(x|\pi) = \sum_k \pi_k f_k(x)} to independent 31 | and identically distributed data \eqn{x_1,\dots,x_n}. 32 | Estimates posterior on mixture proportions \eqn{\pi} by Variational Bayes, 33 | with a Dirichlet prior on \eqn{\pi}. 34 | Algorithm adapted from Bishop (2009), Pattern Recognition and Machine Learning, Chapter 10. 
35 | } 36 | -------------------------------------------------------------------------------- /man/estimate_mixprop.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/ash.R 3 | \name{estimate_mixprop} 4 | \alias{estimate_mixprop} 5 | \title{Estimate mixture proportions of a mixture g given noisy (error-prone) data from that mixture.} 6 | \usage{ 7 | estimate_mixprop( 8 | data, 9 | g, 10 | prior, 11 | optmethod = c("mixSQP", "mixEM", "mixVBEM", "cxxMixSquarem", "mixIP", "w_mixEM"), 12 | control, 13 | weights = NULL 14 | ) 15 | } 16 | \arguments{ 17 | \item{data}{list to be passed to log_comp_dens_conv; details depend on model} 18 | 19 | \item{g}{an object representing a mixture distribution (eg normalmix for mixture of normals; 20 | unimix for mixture of uniforms). The component parameters of g (eg the means and variances) specify the 21 | components whose mixture proportions are to be estimated. The mixture proportions of g are the parameters to be estimated; 22 | the values passed in may be used to initialize the optimization (depending on the optmethod used)} 23 | 24 | \item{prior}{numeric vector indicating parameters of "Dirichlet prior" 25 | on mixture proportions} 26 | 27 | \item{optmethod}{name of function to use to do optimization} 28 | 29 | \item{control}{list of control parameters to be passed to optmethod, 30 | typically affecting things like convergence tolerance} 31 | 32 | \item{weights}{vector of weights (for use with w_mixEM; in beta)} 33 | } 34 | \value{ 35 | list, including the final loglikelihood, the null loglikelihood, 36 | an n by k likelihood matrix with (j,k)th element equal to \eqn{f_k(x_j)}, 37 | the fit 38 | and results of optmethod 39 | } 40 | \description{ 41 | Estimate mixture proportions of a mixture g given noisy (error-prone) data from that mixture. 42 | } 43 | \details{ 44 | This is used by the ash function. 
Most users won't need to call this directly, but is
45 | exported for use by some other related packages.
46 | }
47 | -------------------------------------------------------------------------------- /R/set_data.R: --------------------------------------------------------------------------------
1 | # Issues:
2 | # - the cdfFUN has to take log parameter currently (used in log_comp_dens_conv)
3 | # - the FUNargs have to be the same to cdf, pdf and etruncFUN; this means I added mean and sd to my_etrunct, but that function
4 | # actually doesn't use them. Need to think about what this means.
5 | # -
6 |
7 | #' Takes raw data and sets up data object for use by ash
8 | #'
9 | #' @details The data object stores both the data, and details of the model to be used for the data.
10 | #' For example, in the generalized version of ash the cdf and pdf of the likelihood are
11 | #' stored here.
12 | #'
13 | #' @param betahat vector of betahats
14 | #' @param sebetahat vector of standard errors
15 | #' @param lik a likelihood (see e.g., lik_normal())
16 | #' @param alpha specifies value of alpha to use (model is for betahat/sebetahat^alpha | sebetahat)
17 | #'
18 | #' @return data object (list) with elements x (the alpha-rescaled
#'   observations betahat/sebetahat^alpha), s (their rescaled standard errors
#'   sebetahat^(1-alpha)), alpha, s_orig (the untransformed standard errors)
#'   and lik (the likelihood object; defaults to lik_normal())
19 | #' @export
20 | set_data = function(betahat, sebetahat, lik=NULL, alpha=0){
21 |
# A scalar standard error is broadcast to every observation.
22 | if(length(sebetahat)==1L){sebetahat = rep(sebetahat, length(betahat))}
23 |
# Under the alpha model the working observation is x = betahat/s^alpha with
# working standard error s^(1-alpha); s_orig keeps the original scale so
# results can be mapped back later.
24 | data=list(x = betahat/(sebetahat^alpha),
25 | s = sebetahat^(1-alpha),
26 | alpha=alpha,
27 | s_orig = sebetahat)
28 |
# Default likelihood is the normal likelihood.
29 | if(is.null(lik)){lik = lik_normal()}
30 | data$lik = lik
31 |
32 | return(data)
33 | }
34 |
35 | #extract data corresponding to ith data point
# (only supported when the likelihood is constant across observations, as
# checked via is_const below)
36 | extract_data=function(data,i){
37 | if(!is_const(data$lik)){stop("extracting data not supported for non-constant likelihoods")}
38 | data_i = list(x=data$x[i],
39 | s=data$s[i],
40 | s_orig = data$s_orig[i],
41 | alpha = data$alpha,
42 | lik = data$lik)
43 | return(data_i)
44 | }
45 |
# number of observations stored in a data object
46 | n_obs = function(data){return(length(data$x))}
47 |
48 | 
get_exclusions=function(data){ 49 | return((data$s==0 | data$s == Inf | is.na(data$x) | is.na(data$s))) 50 | } 51 | -------------------------------------------------------------------------------- /man/w_mixEM.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/mix_opt.R 3 | \name{w_mixEM} 4 | \alias{w_mixEM} 5 | \title{Estimate mixture proportions of a mixture model by EM algorithm (weighted version)} 6 | \usage{ 7 | w_mixEM(matrix_lik, prior, pi_init = NULL, weights = NULL, control = list()) 8 | } 9 | \arguments{ 10 | \item{matrix_lik, }{a n by k matrix with (j,k)th element equal to \eqn{f_k(x_j)}.} 11 | 12 | \item{prior, }{a k vector of the parameters of the Dirichlet prior on \eqn{\pi}. Recommended to be rep(1,k)} 13 | 14 | \item{pi_init, }{the initial value of \eqn{\pi} to use. If not specified defaults to (1/k,...,1/k).} 15 | 16 | \item{weights, }{an n vector of weights} 17 | 18 | \item{control}{A list of control parameters for the SQUAREM algorithm, default value is set to be control.default=list(K = 1, method=3, square=TRUE, step.min0=1, step.max0=1, mstep=4, kr=1, objfn.inc=1,tol=1.e-07, maxiter=5000, trace=FALSE).} 19 | } 20 | \value{ 21 | A list, including the estimates (pihat), the log likelihood for each interation (B) 22 | and a flag to indicate convergence 23 | } 24 | \description{ 25 | Given the individual component likelihoods for a mixture model, and a set of weights, estimates the mixture proportions by an EM algorithm. 26 | } 27 | \details{ 28 | Fits a k component mixture model \deqn{f(x|\pi)= \sum_k \pi_k f_k(x)} to independent 29 | and identically distributed data \eqn{x_1,\dots,x_n} with weights \eqn{w_1,\dots,w_n}. 30 | Estimates mixture proportions \eqn{\pi} by maximum likelihood, or by maximum a posteriori (MAP) estimation for a Dirichlet prior on \eqn{\pi} 31 | (if a prior is specified). 
Here the log-likelihood for the weighted data is defined as \eqn{l(\pi) = \sum_j w_j log f(x_j | \pi)}. Uses the SQUAREM package to accelerate convergence of EM. Used by the ash main function; there is no need for a user to call this 32 | function separately, but it is exported for convenience. 33 | } 34 | -------------------------------------------------------------------------------- /man/get_lfdr.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/get_functions.R 3 | \name{get_lfsr} 4 | \alias{get_lfsr} 5 | \alias{get_lfdr} 6 | \alias{get_svalue} 7 | \alias{get_qvalue} 8 | \alias{get_pm} 9 | \alias{get_psd} 10 | \alias{get_pp} 11 | \alias{get_np} 12 | \alias{get_loglik} 13 | \alias{get_logLR} 14 | \alias{get_fitted_g} 15 | \alias{get_pi0} 16 | \title{Return lfsr from an ash object} 17 | \usage{ 18 | get_lfsr(x) 19 | 20 | get_lfdr(a) 21 | 22 | get_svalue(a) 23 | 24 | get_qvalue(a) 25 | 26 | get_pm(a) 27 | 28 | get_psd(a) 29 | 30 | get_pp(a) 31 | 32 | get_np(a) 33 | 34 | get_loglik(a) 35 | 36 | get_logLR(a) 37 | 38 | get_fitted_g(a) 39 | 40 | get_pi0(a) 41 | } 42 | \arguments{ 43 | \item{x}{an ash fit (e.g. from running ash)} 44 | 45 | \item{a}{an ash fit (e.g. from running ash)} 46 | } 47 | \value{ 48 | a vector (ash) of local false sign rates 49 | } 50 | \description{ 51 | These functions simply return elements of an ash object, generally without doing any calculations. 52 | (So if the value was not computed during the original call to ash, eg because of how outputlevel was set in the call, 53 | then NULL will be returned.) 54 | Accessing elements in this way 55 | rather than directly from the ash object will help ensure compatability moving forward 56 | (e.g. if the internal structure of the ash object changes during software development.) 
57 | } 58 | \section{Functions}{ 59 | \itemize{ 60 | \item \code{get_lfsr()}: local false sign rate 61 | 62 | \item \code{get_lfdr()}: local false discovery rate 63 | 64 | \item \code{get_svalue()}: svalue 65 | 66 | \item \code{get_qvalue()}: qvalue 67 | 68 | \item \code{get_pm()}: posterior mean 69 | 70 | \item \code{get_psd()}: posterior standard deviation 71 | 72 | \item \code{get_pp()}: positive probability 73 | 74 | \item \code{get_np()}: negative probability 75 | 76 | \item \code{get_loglik()}: log-likelihood 77 | 78 | \item \code{get_logLR()}: log-likelihood ratio 79 | 80 | \item \code{get_fitted_g()}: fitted g mixture 81 | 82 | \item \code{get_pi0()}: pi0, the proportion of nulls 83 | 84 | }} 85 | -------------------------------------------------------------------------------- /R/truncgamma.R: -------------------------------------------------------------------------------- 1 | #' @title mean of truncated gamma distribution 2 | #' @description Compute mean of the truncated gamma. 
3 | #' @param a left limit of distribution 4 | #' @param b right limit of distribution 5 | #' @param shape shape of gamma distribution 6 | #' @param rate rate of gamma distribution 7 | #' @export 8 | my_etruncgamma = function(a, b, shape, rate){ 9 | tmp = a 10 | tmp[a!=b] = (shape/rate*(stats::pgamma(b,shape=shape+1,rate=rate)-stats::pgamma(a,shape=shape+1,rate=rate))/ 11 | (stats::pgamma(b,shape=shape,rate=rate)-stats::pgamma(a,shape=shape,rate=rate)))[a!=b] 12 | # zero denominator case: stats::pgamma(b,shape,rate) and stats::pgamma(a,shape,rate) are both 0 or 1 13 | tmp[(stats::pgamma(b,shape=shape,rate=rate)-stats::pgamma(a,shape=shape,rate=rate))==0] = 14 | ifelse(stats::dgamma(a,shape=shape,rate=rate,log=TRUE)>stats::dgamma(b,shape=shape,rate=rate,log=TRUE), 15 | a, b)[(stats::pgamma(b,shape=shape,rate=rate)-stats::pgamma(a,shape=shape,rate=rate))==0] 16 | return(tmp) 17 | } 18 | 19 | #' @title second moment of truncated gamma distribution 20 | #' @description Compute second moment of the truncated gamma. 
21 | #' @param a left limit of distribution 22 | #' @param b right limit of distribution 23 | #' @param shape shape of gamma distribution 24 | #' @param rate rate of gamma distribution 25 | #' @export 26 | my_e2truncgamma = function(a, b, shape, rate){ 27 | tmp = a^2 28 | tmp[a!=b] = (shape*(shape+1)/rate^2*(stats::pgamma(b,shape=shape+2,rate=rate)-stats::pgamma(a,shape=shape+2,rate=rate))/ 29 | (stats::pgamma(b,shape=shape,rate=rate)-stats::pgamma(a,shape=shape,rate=rate)))[a!=b] 30 | # zero denominator case: stats::pgamma(b,shape,rate) and stats::pgamma(a,shape,rate) are both 0 or 1 31 | tmp[(stats::pgamma(b,shape=shape,rate=rate)-stats::pgamma(a,shape=shape,rate=rate))==0] = 32 | ifelse(stats::dgamma(a,shape=shape,rate=rate,log=TRUE)>stats::dgamma(b,shape=shape,rate=rate,log=TRUE), 33 | a^2, b^2)[(stats::pgamma(b,shape=shape,rate=rate)-stats::pgamma(a,shape=shape,rate=rate))==0] 34 | return(tmp) 35 | } 36 | -------------------------------------------------------------------------------- /tests/testthat/test_myetruncnorm.R: -------------------------------------------------------------------------------- 1 | context("my_etruncnorm") 2 | 3 | test_that("my_etruncnorm returns expected results", { 4 | expect_equal(-100,my_etruncnorm(-Inf,-100,0,1),tolerance=0.01) 5 | expect_equal(-100,my_etruncnorm(-Inf,-100,0,0)) 6 | expect_equal(30,my_etruncnorm(30,100,0,0)) 7 | real = c(-100,-100,30) 8 | a=c(-Inf,-Inf,30) 9 | b=c(-100,-100,100) 10 | m=c(0,0,0) 11 | sd=c(1,0,0) 12 | expect_equal(real,my_etruncnorm(a,b,m,sd),tolerance=0.01) 13 | real = matrix(real,3,4) 14 | m = matrix(m,3,4) 15 | sd = matrix(sd,3,4) 16 | expect_equal(real,my_etruncnorm(a,b,m,sd),tolerance=0.01) 17 | a=c(0,0) 18 | b=c(1,2) 19 | m = rbind(c(0,2,4),c(0,0,0)) 20 | sd = 0 21 | real = rbind(c(0,1,1),c(0,0,0)) 22 | expect_equal(real,my_etruncnorm(a,b,m,sd)) 23 | 24 | expect_equal(my_etruncnorm(0,99,-2,3),truncnorm::etruncnorm(0,99,-2,3)) 25 | 
expect_equal(my_etruncnorm(0,9999,-2,3),my_etruncnorm(0,Inf,-2,3),tol=1e-3) 26 | expect_error(my_etruncnorm(0, 1:2, mean = 0, sd = 1)) 27 | expect_error(my_etruncnorm(1, 0, mean = 0, sd = 1)) 28 | }) 29 | 30 | context("my_vtruncnorm") 31 | 32 | test_that("my_vtruncnorm returns expected results", { 33 | expect_equal(0, my_vtruncnorm(-Inf, -100), tolerance = 0.01) 34 | expect_equal(0, my_vtruncnorm(-Inf, -100, sd = 0)) 35 | expect_equal(0, my_vtruncnorm(30, 100, sd = 0)) 36 | real = c(0, 0, 0) 37 | a = c(-Inf, -Inf, 30) 38 | b = c(-100, -100, 100) 39 | m = c(0, 0, 0) 40 | sd = c(1, 0, 0) 41 | expect_equal(real, my_vtruncnorm(a, b, m, sd), tolerance = 0.01) 42 | real = matrix(real, 3, 4) 43 | m = matrix(m, 3, 4) 44 | sd = matrix(sd, 3, 4) 45 | expect_equal(real, my_vtruncnorm(a, b, m, sd), tolerance = 0.01) 46 | a = c(0, 0) 47 | b = c(1, 2) 48 | m = rbind(c(0, 2, 4), c(0, 0, 0)) 49 | sd = 0 50 | real = rbind(c(0, 0, 0), c(0, 0, 0)) 51 | expect_equal(real, my_vtruncnorm(a,b,m,sd)) 52 | 53 | expect_equal(my_vtruncnorm(-2, 3), truncnorm::vtruncnorm(-2, 3)) 54 | expect_equal(my_vtruncnorm(6, 7, sd = 9), truncnorm::vtruncnorm(6, 7, sd = 9)) 55 | expect_equal(my_vtruncnorm(0, 9999, -2, 3), 56 | my_vtruncnorm(0, Inf, -2, 3), tol = 1e-3) 57 | }) 58 | -------------------------------------------------------------------------------- /R/get_functions.R: -------------------------------------------------------------------------------- 1 | #' @title Return lfsr from an ash object 2 | #' @describeIn get_lfdr local false sign rate 3 | #' @param x an ash fit (e.g. from running ash) 4 | #' @return a vector (ash) of local false sign rates 5 | #' @export 6 | get_lfsr=function(x){x$result$lfsr} 7 | 8 | #' @title Return lfdr, etc from ash object 9 | #' 10 | #' @description These functions simply return elements of an ash object, generally without doing any calculations. 
11 | #' (So if the value was not computed during the original call to ash, eg because of how outputlevel was set in the call, 12 | #' then NULL will be returned.) 13 | #' Accessing elements in this way 14 | #' rather than directly from the ash object will help ensure compatibility moving forward 15 | #' (e.g. if the internal structure of the ash object changes during software development.) 16 | #' 17 | #' @param a an ash fit (e.g. from running ash) 18 | #' @describeIn get_lfdr local false discovery rate 19 | #' @export 20 | get_lfdr=function(a){a$result$lfdr} 21 | 22 | #' @describeIn get_lfdr svalue 23 | #' @export 24 | get_svalue=function(a){a$result$svalue} 25 | 26 | #' @describeIn get_lfdr qvalue 27 | #' @export 28 | get_qvalue=function(a){a$result$qvalue} 29 | 30 | #' @describeIn get_lfdr posterior mean 31 | #' @export 32 | get_pm=function(a){a$result$PosteriorMean} 33 | 34 | #' @describeIn get_lfdr posterior standard deviation 35 | #' @export 36 | get_psd=function(a){a$result$PosteriorSD} 37 | 38 | #' @describeIn get_lfdr positive probability 39 | #' @export 40 | get_pp=function(a){a$result$PositiveProb} 41 | 42 | #' @describeIn get_lfdr negative probability 43 | #' @export 44 | get_np=function(a){a$result$NegativeProb} 45 | 46 | #' @describeIn get_lfdr log-likelihood 47 | #' @export 48 | get_loglik=function(a){a$loglik} 49 | 50 | #' @describeIn get_lfdr log-likelihood ratio 51 | #' @export 52 | get_logLR=function(a){a$logLR} 53 | 54 | #' @describeIn get_lfdr fitted g mixture 55 | #' @export 56 | get_fitted_g=function(a){a$fitted_g} 57 | 58 | 59 | #' @describeIn get_lfdr pi0, the proportion of nulls 60 | #' @export 61 | get_pi0 = function(a){ 62 | null.comp = comp_sd(a$fitted_g)==0 63 | return(sum(a$fitted_g$pi[null.comp])) 64 | } 65 | -------------------------------------------------------------------------------- /DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: ashr 2 | Encoding: UTF-8 3 | Type:
Package 4 | Maintainer: Peter Carbonetto <pcarbo@uchicago.edu> 5 | Authors@R: c(person("Matthew","Stephens",role="aut", 6 | email="mstephens@uchicago.edu"), 7 | person("Peter","Carbonetto",role=c("aut","cre"), 8 | email="pcarbo@uchicago.edu"), 9 | person("Chaoxing","Dai",role="ctb"), 10 | person("David","Gerard",role="aut"), 11 | person("Mengyin","Lu",role="aut"), 12 | person("Lei","Sun",role="aut"), 13 | person("Jason","Willwerscheid",role="aut"), 14 | person("Nan","Xiao",role="aut"), 15 | person("Mazon","Zeng",role="ctb")) 16 | Version: 2.2-67 17 | Date: 2025-07-24 18 | Title: Methods for Adaptive Shrinkage, using Empirical Bayes 19 | Description: The R package 'ashr' implements an Empirical Bayes 20 | approach for large-scale hypothesis testing and false discovery 21 | rate (FDR) estimation based on the methods proposed in 22 | M. Stephens, 2016, "False discovery rates: a new deal", 23 | <DOI:10.1093/biostatistics/kxw041>. These methods can be applied 24 | whenever two sets of summary statistics---estimated effects and 25 | standard errors---are available, just as 'qvalue' can be applied 26 | to previously computed p-values. Two main interfaces are 27 | provided: ash(), which is more user-friendly; and ash.workhorse(), 28 | which has more options and is geared toward advanced users. The 29 | ash() and ash.workhorse() also provide a flexible modeling 30 | interface that can accommodate a variety of likelihoods (e.g., 31 | normal, Poisson) and mixture priors (e.g., uniform, normal).
32 | Depends: 33 | R (>= 3.1.0) 34 | Imports: 35 | Matrix, 36 | stats, 37 | graphics, 38 | Rcpp (>= 0.10.5), 39 | truncnorm, 40 | mixsqp, 41 | SQUAREM, 42 | etrunct, 43 | invgamma 44 | Suggests: 45 | testthat, 46 | knitr, 47 | rmarkdown, 48 | ggplot2, 49 | REBayes 50 | LinkingTo: Rcpp 51 | License: GPL (>=3) 52 | NeedsCompilation: yes 53 | URL: https://github.com/stephens999/ashr 54 | BugReports: https://github.com/stephens999/ashr/issues 55 | VignetteBuilder: knitr 56 | RoxygenNote: 7.3.1 57 | -------------------------------------------------------------------------------- /tests/testthat/test_loglik.R: -------------------------------------------------------------------------------- 1 | context("ashr likelihood computations") 2 | 3 | test_that("calc_null_loglik gives expected answer under normal", { 4 | set.seed(1); z=rnorm(100); s = rgamma(100,10,10); 5 | data1 = set_data(z,s,alpha=0.5) 6 | data2 = set_data(z,s,alpha=1) 7 | data3 = set_data(z,s,alpha=0.1) 8 | 9 | # likelihooods for different alpha should be same under null 10 | expect_equal(ashr::calc_null_loglik(data1),ashr::calc_null_loglik(data2)) 11 | expect_equal(ashr::calc_null_loglik(data1),ashr::calc_null_loglik(data3)) 12 | 13 | alpha=0.5 14 | expect_equal(sum(-alpha*log(s)+dnorm(z/(s^alpha),log=TRUE, sd=s^(1-alpha))),ashr::calc_null_loglik(data1)); 15 | expect_equal(sum(-log(s)+dnorm(z/s,log=TRUE, sd=1)),ashr::calc_null_loglik(data1)); 16 | 17 | alpha=0.1 18 | expect_equal(sum(-alpha*log(s)+dnorm(z/(s^alpha),log=TRUE, sd=s^(1-alpha))),ashr::calc_null_loglik(data3)) 19 | }) 20 | 21 | 22 | test_that("calc_null_loglik gives expected answer under t", { 23 | set.seed(1); z=rnorm(100); s = rgamma(100,10,10); 24 | alpha=0.5 25 | data1 = set_data(z,s,lik_t(2),alpha=0.5) 26 | data2 = set_data(z,s,lik_t(2),alpha=1) 27 | expect_equal(ashr::calc_null_loglik(data1),ashr::calc_null_loglik(data2)) 28 | expect_equal(sum(-log(s) + dt(z/s,df=2,log=TRUE)),ashr::calc_null_loglik(data1)) 29 | }) 30 | 31 | 
test_that("calc_logLR is 0 for when g is null", { 32 | set.seed(1); z=rnorm(100); 33 | data1 = set_data(z,1) 34 | data2 = set_data(z,1,lik_t(2)) 35 | expect_equal(ashr::calc_logLR(g=unimix(1,0,0),data1),0) 36 | expect_equal(ashr::calc_logLR(g=unimix(1,0,0),data2),0) 37 | }) 38 | 39 | test_that("calc_loglik returns warning when called with wrong model", { 40 | set.seed(1); z=rnorm(100); z.ash = ashr::ash(z,1) 41 | data = set_data(z,1,alpha=1) 42 | expect_warning(calc_loglik(z.ash,data)) 43 | }) 44 | 45 | test_that("logLR in ash object matches calc_logLR", { 46 | set.seed(1); z=rnorm(100); z.ash = ashr::ash(z,1) 47 | data1 = set_data(z,1) 48 | expect_equal(z.ash$logLR, calc_logLR(z.ash$fitted_g,data1)) 49 | z.ash = ashr::ash(z,1,alpha=1,df=3) 50 | data = set_data(z,1,lik_t(3),alpha=1) 51 | expect_equal(z.ash$logLR, calc_logLR(z.ash$fitted_g,data)) 52 | }) 53 | 54 | test_that("sum of calc_vlogLR is same as calc_logLR", { 55 | set.seed(2); z=rnorm(100,0,2); z.ash = ashr::ash(z,1,df=4) 56 | data = set_data(z,1,lik_t(4)) 57 | expect_equal(sum(calc_vlogLR(z.ash$fitted_g,data)),z.ash$logLR) 58 | }) 59 | -------------------------------------------------------------------------------- /tests/testthat/test_trunct.R: -------------------------------------------------------------------------------- 1 | context("ashr, context unclear") 2 | 3 | test_that("my_e2truncnorm matches simulations", { 4 | set.seed(1); x = rnorm(1000000) 5 | expect_equal(mean(x[abs(x)<1]^2),my_e2truncnorm(-1,1),tolerance=0.01) 6 | }) 7 | 8 | test_that("comp_postmean2 is 0 for null", { 9 | set.seed(1) 10 | z=rnorm(10) 11 | expect_equal(comp_postmean2(unimix(1,0,0),set_data(z,rep(1,10))),matrix(0,10,nrow=1)) 12 | expect_equal(comp_postmean2(unimix(1,0,0),set_data(z,rep(1,10),lik_t(4))), matrix(0,10,nrow=1)) 13 | expect_equal(comp_postmean2(normalmix(1,0,0),set_data(z,rep(1,10))),matrix(0,10,nrow=1)) 14 | z.ash = ash(z,1,df=4,g=unimix(1,0,0),fixg=TRUE,outputlevel=3) 15 | 
expect_equal(z.ash$res$PosteriorSD,rep(0,10)) 16 | expect_equal(z.ash$res$PosteriorMean,rep(0,10)) 17 | }) 18 | 19 | test_that("comp_postmean2.unimix matches simulations", { 20 | bhat = 3 21 | s = 2 22 | x = bhat+s*rt(100000,df=3) 23 | m = c(mean(x[x<2 & x>0]),mean(x[x<2 & x>1]),mean(x[x<0 & x>(-2)])) 24 | m2 = cbind(mean(x[x<2 & x>0]^2),mean(x[x<2 & x>1]^2),mean(x[x<0 & x>(-2)]^2)) 25 | g= unimix(c(0.5,0.2,0.3),c(0,1,-2),c(2,2,0)) 26 | temp2=as.vector(comp_postmean2(g,set_data(3,2,lik_t(3)))) 27 | temp = as.vector(comp_postmean(g,set_data(3,2,lik_t(3)))) 28 | expect_equal(mean((temp2-m2)^2), 0,tolerance=0.01) 29 | expect_equal(mean((temp-m)^2), 0,tolerance=0.01) 30 | }) 31 | 32 | test_that("posterior means and sds computed for unimix from very flat prior are correct", { 33 | set.seed(1); z = rnorm(10,0,2); s=rgamma(10,10,10) 34 | #fit under t likelihood 35 | z.ash=ash(z,s,df=5,g=unimix(c(0.5,0.5),c(-100,-20),c(100,20)),fixg=TRUE,outputlevel=3) 36 | expect_equal(z.ash$res$PosteriorSD,s*sd(rt(1000000,df=5)),tolerance=0.01) 37 | #now do normal version 38 | z.ash=ash(z,s,df=NULL,g=unimix(c(0.5,0.5),c(-100,-20),c(100,20)),fixg=TRUE) 39 | expect_equal(z.ash$res$PosteriorSD,s,tolerance=0.01) 40 | }) 41 | 42 | test_that("posterior means and sds computed for unimix from NAs match prior mean and sd", { 43 | set.seed(1); z = c(NA,rnorm(10,0,2)); s=c(rgamma(10,10,10),NA) 44 | z.ash=ash(z,s,df=5,g=unimix(c(0.5,0.5),c(-100,-20),c(100,20)),fixg=TRUE,outputlevel=3) 45 | priorsd = sd(c(runif(1000000,-100,100),runif(1000000,-20,20))) 46 | expect_equal(z.ash$res$PosteriorMean[1],0) 47 | expect_equal(z.ash$res$PosteriorMean[11],0) 48 | expect_equal(z.ash$res$PosteriorSD[1],priorsd,tolerance=0.01) 49 | expect_equal(z.ash$res$PosteriorSD[11],priorsd,tolerance=0.01) 50 | }) 51 | -------------------------------------------------------------------------------- /tests/testthat/test_tnormal.R: -------------------------------------------------------------------------------- 1 | context("ashr 
with mixture-of-truncated-normal priors") 2 | 3 | test_that("compdens_conv for tnormal gives same results as compdens_conv for normal when a=-Inf, b=Inf", { 4 | gn = normalmix(c(0.5,0.5),c(0,0),c(0.1,1)) 5 | gtn = tnormalmix(c(0.5,0.5),c(0,0),c(0.1,1),c(-Inf,-Inf),c(Inf,Inf)) 6 | x=c(-10,2) 7 | s = c(1,2) 8 | data = list(x=x,s=s,lik=lik_normal(),alpha=0) 9 | expect_equal(comp_dens_conv(gn, data),comp_dens_conv(gtn,data)) 10 | }) 11 | 12 | test_that("log_compdens_conv for tnormal gives reasonable results", { 13 | gn = normalmix(c(0.5,0.5),c(0,0),c(0.1,1)) 14 | gtn = tnormalmix(c(0.5,0.5),c(0,0),c(0.1,1),c(-Inf,-Inf),c(Inf,Inf)) 15 | x=c(-10,2) 16 | s = c(1,2) 17 | data = list(x=x,s=s,lik=lik_normal(),alpha=0) 18 | expect_equal(exp(log_comp_dens_conv(gtn,data)),comp_dens_conv(gtn,data)) 19 | }) 20 | 21 | test_that("comp_cdf for tnormal gives same results as comp_cdf for normal when a=-Inf, b=Inf", { 22 | gn = normalmix(c(0.5,0.5),c(0,0),c(0.1,1)) 23 | gtn = tnormalmix(c(0.5,0.5),c(0,0),c(0.1,1),c(-Inf,-Inf),c(Inf,Inf)) 24 | y=c(-1,-5,0.5,1) 25 | expect_equal(comp_cdf(gn, y),comp_cdf(gtn,y)) 26 | }) 27 | 28 | test_that("compcdf_post for tnormal gives same results as compcdf_post for normal when a=-Inf, b=Inf", { 29 | gn = normalmix(c(0.5,0.5),c(0,0),c(0.1,1)) 30 | gtn = tnormalmix(c(0.5,0.5),c(0,0),c(0.1,1),c(-Inf,-Inf),c(Inf,Inf)) 31 | betahat = c(-1,-2,1,2) 32 | sebetahat = 1:4 33 | data = list(x=betahat,s=sebetahat,lik=lik_normal(),alpha=0) 34 | c = 0.5 35 | expect_equal(comp_cdf_post(gn,c,data),comp_cdf_post(gtn,c,data)) 36 | }) 37 | 38 | test_that("comp_postmean for tnormal gives same results as comp_postmean for normal when a=-Inf, b=Inf", { 39 | gn = normalmix(c(0.5,0.5),c(0,1),c(0.1,1)) 40 | gtn = tnormalmix(c(0.5,0.5),c(0,1),c(0.1,1),c(-Inf,-Inf),c(Inf,Inf)) 41 | betahat = c(-1,0.4,1.3,5) 42 | sebetahat = 1:4 43 | data = list(x=betahat,s=sebetahat,lik=lik_normal(),alpha=0) 44 | expect_equal(comp_postmean(gn,data),comp_postmean(gtn,data)) 45 | }) 46 | 47 | 
test_that("comp_postsd for tnormal gives same results as comp_postsd for normal when a=-Inf, b=Inf", { 48 | gn = normalmix(c(0.5,0.5),c(0,1),c(0.1,1)) 49 | gtn = tnormalmix(c(0.5,0.5),c(0,1),c(0.1,1),c(-Inf,-Inf),c(Inf,Inf)) 50 | betahat = c(-1,0.4,1.3,5) 51 | sebetahat = 1:4 52 | data = list(x=betahat,s=sebetahat,lik=lik_normal(),alpha=0) 53 | expect_equal(comp_postsd(gn,data),comp_postsd(gtn,data)) 54 | expect_equal(comp_postmean2(gn,data),comp_postmean2(gtn,data)) 55 | }) 56 | -------------------------------------------------------------------------------- /R/process_args.R: -------------------------------------------------------------------------------- 1 | #sets optimization method 2 | #also checks if necessary tools installed for optmethod specified 3 | set_optmethod = function(optmethod){ 4 | # Fallbacks for optmethod - checks required packages are installed 5 | if(optmethod == "mixIP"){ 6 | if (!requireNamespace("REBayes", quietly = TRUE)) { 7 | 8 | # Check whether REBayes package is present. 9 | # If REBayes package missing. 
10 | message("Due to absence of package REBayes, switching to EM algorithm") 11 | optmethod = "cxxMixSquarem" 12 | } 13 | } 14 | 15 | if (optmethod == "mixIP") { 16 | if (!requireNamespace("REBayes",quietly = TRUE)) 17 | stop("optmethod = \"mixIP\" requires package REBayes") 18 | } 19 | return(optmethod) 20 | } 21 | 22 | check_lik = function(lik, betahat, sebetahat, df, mixcompdist){ 23 | if(is.null(lik$lcdfFUN)){stop("Likelihood must have lcdfFUN")} 24 | if(is.null(lik$lpdfFUN)){stop("Likelihood must have lpdfFUN")} 25 | 26 | if(!(lik$name %in% c("normal","t")) & !is.null(df)){ 27 | warning("Input df is ignored for this likelihood function") 28 | } 29 | if(!(lik$name %in% c("normal","normalmix")) & mixcompdist == "normal"){ 30 | stop("Error: Normal mixture for non-normal likelihood is not yet implemented") 31 | } 32 | if(lik$name %in% c("pois","binom")){ 33 | if (!sum(betahat==0) | !sum(sebetahat==1)){ 34 | stop("Error: betahat must be rep(0,n) and sebetaht must be 1 for 35 | Poisson/Binomial likelihood") 36 | } 37 | } 38 | 39 | } 40 | 41 | 42 | check_args = function(mixcompdist,df,prior,optmethod,gridmult,sebetahat,betahat){ 43 | if(!is.numeric(betahat)){ 44 | stop("Error: betahat must be numeric") 45 | } 46 | if(!is.numeric(sebetahat)){ 47 | stop("Error: sebetahat must be numeric") 48 | } 49 | 50 | if (mixcompdist == "normal" & !is.null(df)) 51 | stop("Error: Normal mixture for student-t likelihood is not yet implemented") 52 | 53 | if (identical(prior, "unit") & optmethod != "mixVBEM") 54 | stop("Error: unit prior only valid for mixVBEM") 55 | 56 | if (mixcompdist == "halfuniform" & !identical(prior, "nullbiased")) 57 | warning("Use of halfuniform without nullbiased prior can lead to misleading local false sign rates, and so is not recommended") 58 | 59 | if (gridmult <= 1) stop("gridmult must be > 1") 60 | 61 | if ((length(sebetahat) != length(betahat)) & (length(sebetahat) != 1)) 62 | stop("Error: sebetahat must have length 1, or same length as betahat") 63 | 
64 | completeobs = (!is.na(betahat) & !is.na(sebetahat)) 65 | if (sum(completeobs) == 0) stop("Error: all input values are missing") 66 | } 67 | 68 | 69 | 70 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![CRAN_Status_Badge](http://www.r-pkg.org/badges/version/ashr)](https://cran.r-project.org/package=ashr) 2 | [![Build Status](https://travis-ci.org/stephens999/ashr.svg)](https://travis-ci.org/stephens999/ashr) 3 | [![AppVeyor Build Status](https://ci.appveyor.com/api/projects/status/github/stephens999/ashr?branch=master&svg=true)](https://ci.appveyor.com/project/stephens999/ashr) 4 | [![Coverage Status](https://coveralls.io/repos/github/stephens999/ashr/badge.svg?branch=master)](https://coveralls.io/github/stephens999/ashr?branch=master) 5 | [![Coverage Status](https://img.shields.io/codecov/c/github/stephens999/ashr/master.svg)](https://codecov.io/github/stephens999/ashr?branch=master) 6 | 7 | This repository contains an R package for performing "Adaptive Shrinkage." 8 | 9 | To install the ashr package first you need to install devtools: 10 | 11 | ```R 12 | install.packages("devtools") 13 | library(devtools) 14 | install_github("stephens999/ashr") 15 | ``` 16 | 17 | 18 | ## Running Adaptive Shrinkage 19 | 20 | The main function in the ashr package is `ash`. To get minimal help: 21 | 22 | ```R 23 | library(ashr) 24 | ?ash 25 | ``` 26 | 27 | ## More background 28 | 29 | The ashr ("Adaptive SHrinkage") package aims to provide simple, 30 | generic, and flexible methods to derive "shrinkage-based" estimates 31 | and credible intervals for unknown quantities 32 | $\beta=(\beta_1,\dots,\beta_J)$, given only estimates of those 33 | quantities ($\hat\beta=(\hat\beta_1,\dots, \hat\beta_J)$) and their 34 | corresponding estimated standard errors ($s=(s_1,\dots,s_J)$). 35 | 36 | The "adaptive" nature of the shrinkage is two-fold. 
First, the 37 | appropriate amount of shrinkage is determined from the data, rather 38 | than being pre-specified. Second, the amount of shrinkage undergone by 39 | each $\hat\beta_j$ will depend on the standard error $s_j$: 40 | measurements with high standard error will undergo more shrinkage than 41 | measurements with low standard error. 42 | 43 | ### Methods Outline 44 | 45 | The methods are based on treating the vectors $\hat\beta$ and $s$ as 46 | "observed data", and then performing inference for $\beta$ from these 47 | observed data, using a standard hierarchical modelling framework to 48 | combine information across $j=1,\dots,J$. 49 | 50 | Specifically, we assume that the true $\beta_j$ values are independent 51 | and identically distributed from some unimodal distribution $g$. By 52 | default we assume $g$ is unimodal about zero and symmetric. You can 53 | specify or estimate a different mode using the `mode` parameter. You 54 | can allow for asymmetric $g$ by specifying 55 | `mixcompdist="halfuniform"`. 56 | 57 | Then, we assume that the observations $\hat\beta_j \sim 58 | N(\beta_j,s_j)$, or alternatively the normal assumption can be 59 | replaced by a $t$ distribution by specifying `df`, the number of 60 | degrees of freedom used to estimate $s_j$. Actually this is 61 | important: do be sure to specify `df` if you can. 
62 | -------------------------------------------------------------------------------- /R/igmix.R: -------------------------------------------------------------------------------- 1 | 2 | ############################### METHODS FOR igmix class ########################### 3 | 4 | #' @title Constructor for igmix class 5 | #' 6 | #' @description Creates an object of class igmix (finite mixture of 7 | #' univariate inverse-gammas) 8 | #' 9 | #' @details None 10 | #' 11 | #' @param pi vector of mixture proportions 12 | #' @param alpha vector of shape parameters 13 | #' @param beta vector of rate parameters 14 | #' 15 | #' @return an object of class igmix 16 | #' 17 | #' @export 18 | #' 19 | #' @examples igmix(c(0.5,0.5),c(1,1),c(1,2)) 20 | #' 21 | igmix = function(pi,alpha,beta){ 22 | structure(data.frame(pi,alpha,beta),class="igmix") 23 | } 24 | 25 | #' @export 26 | comp_sd.igmix = function(m){ 27 | m$beta/(m$alpha-1)/sqrt(m$alpha-2) 28 | } 29 | 30 | #' @export 31 | comp_mean.igmix = function(m){ 32 | m$beta/(m$alpha-1) 33 | } 34 | 35 | comp_dens.igmix = function(m,y,log=FALSE){ 36 | k=ncomp(m) 37 | n=length(y) 38 | d = matrix(rep(y,rep(k,n)),nrow=k) 39 | return(matrix(stats::dgamma(1/d, shape=m$alpha, rate=outer(m$beta,1/y^2),log),nrow=k)) 40 | } 41 | 42 | #density of product of each component of a inverse-gamma mixture with Gamma(v/2,v/2) at s 43 | # s an n-vector at which density is to be evaluated 44 | #return a k by n matrix 45 | comp_dens_conv.igmix = function(m,data,FUN="+",...){ 46 | k=ncomp(m) 47 | x = data$x 48 | s = data$s 49 | v = data$v 50 | n=length(s) 51 | dens = t(exp(v/2*log(v/2)-lgamma(v/2) 52 | +(v/2-1)*outer(log(s^2),rep(1,k)) 53 | +outer(rep(1,n),m$alpha*log(m$beta)-lgamma(m$alpha)+lgamma(m$alpha+v/2)) 54 | -outer(rep(1,n),m$alpha+v/2)*log(outer(v/2*s^2,m$beta,FUN="+")))) 55 | return(dens) 56 | 57 | } 58 | 59 | #' @importFrom invgamma pinvgamma 60 | #' 61 | #' @export 62 | #' 63 | comp_cdf.igmix = function(m,y,lower.tail=TRUE){ 64 | 
vapply(y,pinvgamma,m$alpha,m$alpha,m$beta,lower.tail) 65 | } 66 | 67 | #' @importFrom invgamma pinvgamma 68 | #' 69 | #' @export 70 | #' 71 | comp_cdf_post.igmix=function(m,c,data){ 72 | #compute posterior shape (alpha1) and rate (beta1) 73 | alpha1 = m$alpha+data$v/2 74 | beta1 = outer(m$beta,data$v/2*data$s^2,FUN="+") 75 | ismissing = is.na(data$s) 76 | beta1[,ismissing]=m$beta 77 | return(t(pinvgamma(c,alpha1,beta1))) 78 | } 79 | 80 | 81 | #' @export 82 | comp_postmean.igmix = function(m,data){ 83 | k = length(m$pi) 84 | n=length(data$s) 85 | tmp=outer(data$v/2*data$s^2,m$beta,FUN="+")/outer(rep(1,n),m$alpha+data$v/2-1) 86 | ismissing = is.na(data$s) 87 | tmp[ismissing,]=m$beta/(m$alpha-1) #return prior mean when missing data 88 | t(tmp) 89 | } 90 | 91 | 92 | #' @export 93 | comp_postsd.igmix = function(m,data){ 94 | k = length(m$pi) 95 | n=length(data$s) 96 | alpha1 = outer(rep(1,n),m$alpha+data$v/2-1) 97 | beta1 = outer(data$v/2*data$s^2,m$beta,FUN="+") 98 | return(t(beta1/(alpha1-1)/sqrt(alpha1-2))) 99 | } 100 | 101 | #' @importFrom invgamma dinvgamma 102 | #' 103 | #' @export 104 | #' 105 | comp_dens.igmix = function(m,y,log=FALSE){ 106 | k=ncomp(m) 107 | n=length(y) 108 | d = matrix(rep(y,rep(k,n)),nrow=k) 109 | ig_dens=matrix(dinvgamma(d, m$alpha, m$beta),nrow=k) 110 | if(log==TRUE){ig_dens=log(ig_dens)} 111 | return(ig_dens) 112 | } 113 | -------------------------------------------------------------------------------- /R/logF.R: -------------------------------------------------------------------------------- 1 | # File contains functions related to logF distribution (cdf,pdf,moments of truncated logF) 2 | 3 | #' @title The log-F distribution 4 | #' @description Distribution function for the log-F distribution with \code{df1} and \code{df2} 5 | #' degrees of freedom (and optional non-centrality parameter \code{ncp}). 6 | #' @param q vector of quantiles 7 | #' @param df1,df2 degrees of freedom 8 | #' @param ncp non-centrality parameter. 
If omitted the central F is assumed. 9 | #' @param log.p logical; if TRUE, probabilities p are given as log(p). 10 | #' @param lower.tail logical; if TRUE (default), probabilities are P[X <= x], otherwise, P[X > x]. 11 | #' @return The distribution function. 12 | #' @export 13 | plogf = function(q, df1, df2, ncp, lower.tail=TRUE, log.p=FALSE){ 14 | return(stats::pf(exp(q), df1=df1, df2=df2, ncp=ncp, lower.tail=lower.tail, 15 | log.p=log.p)) 16 | } 17 | 18 | #' @title The log-F distribution 19 | #' @description Density function for the log-F distribution with \code{df1} and \code{df2} 20 | #' degrees of freedom (and optional non-centrality parameter \code{ncp}). 21 | #' @param x vector of quantiles 22 | #' @param log logical; if TRUE, probabilities p are given as log(p). 23 | #' @inheritParams plogf 24 | #' @return The density function. 25 | #' @export 26 | dlogf = function(x, df1, df2, ncp, log=FALSE){ 27 | if (log==FALSE){ 28 | stats::df(exp(x), df1=df1, df2=df2, ncp=ncp)*exp(x) 29 | }else{ 30 | stats::df(exp(x), df1=df1, df2=df2, ncp=ncp, log=TRUE)+x 31 | } 32 | } 33 | 34 | 35 | #' @title my_etrunclogf 36 | #' @description Compute expectation of truncated log-F distribution. 37 | #' 38 | #' @param a Left limit of distribution. 39 | #' @param b Right limit of distribution. 
40 | #' @param df1,df2 degrees of freedom 41 | #' @export 42 | my_etrunclogf= function(a,b,df1,df2){ 43 | if (a==b){ 44 | tmp = a 45 | }else{ 46 | tmp = try(etrunclogf(df1=df1, df2=df2, a=a, b=b, adj=FALSE),silent=TRUE) 47 | if (inherits(tmp,"try-error")) 48 | tmp <- try(etrunclogf(df1=df1, df2=df2, a=a, b=b, adj=TRUE),silent=TRUE) 49 | 50 | if (inherits(tmp,"try-error")) 51 | tmp = (a+b)/2 52 | } 53 | return(tmp) #deal with extreme case a=b 54 | } 55 | 56 | # x*dlogf 57 | etrunclogf_num = function(x,df1,df2,a,b){ 58 | #multiply c to avoid numerical issues 59 | c = 10^(-round(min(log10(stats::df(exp(a),df1,df2)*exp(a)), 60 | log10(stats::df(exp(b),df1,df2)*exp(b))))) 61 | c*x*stats::df(exp(x),df1=df1,df2=df2)*exp(x) 62 | } 63 | 64 | # dlogf 65 | etrunclogf_denom = function(x,df1,df2,a,b){ 66 | #multiply c to avoid numerical issues 67 | c = 10^(-round(min(log10(stats::df(exp(a),df1,df2)*exp(a)), 68 | log10(stats::df(exp(b),df1,df2)*exp(b))))) 69 | c*stats::df(exp(x),df1=df1,df2=df2)*exp(x) 70 | } 71 | 72 | # x multiply by the density of truncated log-F distribution on (a,b) at x 73 | xdtrunclogf = function(x,df1,df2,a,b){ 74 | x*stats::df(exp(x),df1=df1,df2=df2)*exp(x)/(stats::pf(exp(b),df1,df2)-stats::pf(exp(a),df1,df2)) 75 | } 76 | 77 | # compute expectation of truncated log-F distribution. 
78 | etrunclogf = function(df1,df2,a,b,adj=FALSE){ 79 | if (adj==TRUE){ 80 | # numerator and denominator both multiply a constant to avoid numerical issues 81 | n = stats::integrate(etrunclogf_num, lower=a, upper=b, df1=df1,df2=df2,a=a,b=b)$value 82 | d = stats::integrate(etrunclogf_denom, lower=a, upper=b, df1=df1,df2=df2,a=a,b=b)$value 83 | return(n/d) 84 | }else{ 85 | return(stats::integrate(xdtrunclogf, lower=a, upper=b, df1=df1,df2=df2,a=a,b=b)$value) 86 | } 87 | } 88 | -------------------------------------------------------------------------------- /tests/testthat/test_binom.R: -------------------------------------------------------------------------------- 1 | context("ashr with Binomial likelihoods") 2 | 3 | test_that("lik_binom (identity link) fitted g is close to true g",{ 4 | set.seed(1) 5 | 6 | # true prior g: 0.4*U(0.1,0.9)+0.6*delta(0.5) 7 | trueg = unimix(c(0.4,0.6),c(0.5,0.1),c(0.5,0.9)) 8 | p = c(rep(0.5,400), runif(600,0.1,0.9)) # generate p from g 9 | n = rep(100,1000) 10 | x = rbinom(1000,n,p) # Binomial observations 11 | out <- capture.output( 12 | ash.binom.out <- ash(rep(0,length(x)),1,lik = lik_binom(x,n),g = trueg, 13 | control = list(maxiter.sqp = 40,verbose = TRUE))) 14 | 15 | # Check if the estimated mixture proportion for components delta(0.5) and U(0.1,0.9) 16 | # is close to the true mixture proportion (0.4,0.6) 17 | expect_equal(ash.binom.out$fitted_g$pi, c(0.4,0.6), tolerance = 0.025) 18 | }) 19 | 20 | test_that("lik_binom (identity link) fitted g is close to true g",{ 21 | # Simulate a Binomial dataset 22 | set.seed(1) 23 | truemode = 0.3 24 | trueg = unimix(c(0.5,0.5),c(0.3,0.1),c(0.3,0.5)) 25 | p = c(rep(0.3,500), runif(500,0.1,0.5)) # generate p from g 26 | n = rep(100,1000) 27 | x = rbinom(1000,n,p) # Binomial observations 28 | ash.binom.out = ash(rep(0,length(x)),1,lik=lik_binom(x,n),mode="estimate") 29 | 30 | # Check if the estimated mode is close to the true mode 0.3 31 | expect_equal(ash.binom.out$fitted_g$a[1], truemode, 
tolerance = 0.05, scale=truemode) 32 | }) 33 | 34 | test_that("lik_binom (identity link) with big n gives similar answers to normal likelihood",{ 35 | # Simulate a Binomial data set with n=200 36 | set.seed(1) 37 | p = c(rep(0.3,500), runif(500,0.1,0.5)) # generate p 38 | n = rep(200,1000) 39 | x = rbinom(1000,n,p) # Binomial observations 40 | 41 | # Fit the ash model with two different likelihood densities: (1) the 42 | # normal distribution with (s.e.) to be match the standard deviations of 43 | # Binomial distribution, and (2) the Binomial distribution 44 | ash.binom.out = ash(rep(0,length(x)),1,lik=lik_binom(x,n)) 45 | ash.norm.out = ash(x/n, sqrt(p*(1-p)/n), mode="estimate", prior="uniform") 46 | 47 | # Compare the posterior mean estimates from ash using the two 48 | # different likelihood densities. We expect that the difference 49 | # between the two estimates should always be small (relative error 50 | # at most 5%). 51 | expect_equal(ash.norm.out$result$PosteriorMean, 52 | ash.binom.out$result$PosteriorMean, 53 | tolerance = 0.05) 54 | }) 55 | 56 | test_that("lik_binom (logit link) fitted g is close to true g",{ 57 | 58 | # Simulate a Binomial dataset 59 | set.seed(1) 60 | trueg = unimix(c(0.5,0.5),c(0,-3),c(0,3)) 61 | logitp = c(rep(0,500), runif(500,-3,3)) 62 | p = 1/(1+exp(-logitp)) 63 | n = rep(1000,1000) 64 | x = rbinom(1000,n,p) # Binomial observations 65 | out <- capture.output( 66 | ash.binom.out <- ash(rep(0,length(x)),1, 67 | lik = lik_binom(x,n,link = "logit"), 68 | g = trueg,prior = "uniform", 69 | control = list(verbose = TRUE))) 70 | 71 | # Check if the estimated mixture proportion for components 72 | # delta(0.5) and U(-3,3) is close to the true mixture proportion 73 | # (0.5,0.5). 
74 | expect_equal(ash.binom.out$fitted_g$pi, c(0.5,0.5), tolerance = 0.05) 75 | }) 76 | 77 | test_that("lik_binom (logit link) fitted g is close to true g",{ 78 | 79 | # Simulate a Binomial dataset 80 | set.seed(1) 81 | truemode = 0 82 | logitp = c(rep(0,800), runif(200,-3,3)) 83 | p = 1/(1 + exp(-logitp)) 84 | n = rep(100,1000) 85 | x = rbinom(1000,n,p) # Binomial observations 86 | ash.binom.out = ash(rep(0,length(x)),1,lik=lik_binom(x,n,link="logit"), 87 | mode = "estimate") 88 | 89 | # Check if the estimated mode is close to the true mode 90 | expect_equal(ash.binom.out$fitted_g$a[1], truemode, tolerance = 0.05) 91 | }) 92 | -------------------------------------------------------------------------------- /tests/testthat/test_pois.R: -------------------------------------------------------------------------------- 1 | context("ashr with Poisson likelihoods") 2 | 3 | test_that("lik_pois (identity link) fitted g is close to true g",{ 4 | # Simulate a Poisson dataset 5 | set.seed(1) 6 | trueg = unimix(c(0.5,0.5),c(1,1),c(1,5)) # true prior g: 0.5*U(1,5)+0.5*delta(1) 7 | lambda = c(rep(1,500), runif(500,1,5)) # generate lambda from g 8 | x = rpois(1000,lambda) # Poisson observations 9 | out <- capture.output( 10 | ash.pois.out <- ash(rep(0,length(x)),1,lik=lik_pois(x),g=trueg, 11 | control = list(verbose = TRUE))) 12 | 13 | # Check if the estimated mixture proportion for components delta(0.5) and U(0.1,0.9) 14 | # is close to the true mixture proportion (0.5,0.5) 15 | expect_equal(ash.pois.out$fitted_g$pi, c(0.5,0.5), tolerance = 0.05) 16 | }) 17 | 18 | test_that("lik_pois (identity link) fitted mode is close to true mode",{ 19 | # Simulate a Poisson dataset 20 | set.seed(1) 21 | truemode = 50 # set mode of prior g 22 | lambda = c(rnorm(1000,truemode,5)) # generate lambda from g 23 | x = rpois(1000,lambda) # Poisson observations 24 | ash.pois.out = ash(rep(0,length(x)),1,lik=lik_pois(x),mode="estimate") 25 | 26 | # Check if the estimated mode is close to the true mode 
50 27 | expect_equal(ash.pois.out$fitted_g$a[1], truemode, tolerance = 0.05, scale=truemode) 28 | }) 29 | 30 | test_that("lik_pois (identity link) with high intensities gives similar answers to normal likelihood",{ 31 | # Simulate a Poisson data set with relatively high intensities 32 | set.seed(1) 33 | lambda = c(rnorm(1000,200,5)) # simulate intensities around 200 34 | x = rpois(1000,lambda) 35 | 36 | # Fit the ash model with two different likelihood densities: (1) the 37 | # normal distribution with (s.e.) to be match the standard deviations of 38 | # Poisson distribution sqrt(lambda), and (2) the Poisson distribution 39 | ash.pois.out = ash(rep(0,length(x)),1,lik=lik_pois(x)) 40 | ash.norm.out = ash(x, sqrt(lambda), mode="estimate", prior="uniform") 41 | 42 | # Compare the posterior mean estimates from ash using the two 43 | # different likelihood densities. We expect that the difference 44 | # between the two estimates should always be small (relative error 45 | # at most 5%). 46 | expect_equal(ash.norm.out$result$PosteriorMean, 47 | ash.pois.out$result$PosteriorMean, 48 | tolerance = 0.05) 49 | }) 50 | 51 | test_that("lik_pois (log link) fitted g is close to true g",{ 52 | # Simulate a Poisson dataset 53 | set.seed(1) 54 | trueg = unimix(c(0.8,0.2),c(0,-3),c(0,3)) 55 | loglambda = c(rep(0,800), runif(200,-3,3)) 56 | lambda = exp(loglambda) 57 | x = rpois(1000,lambda) # Poisson observations 58 | out <- capture.output( 59 | ash.pois.out <- ash(rep(0,length(x)),1,lik = lik_pois(x,link="log"), 60 | g = trueg,control = list(verbose = TRUE))) 61 | 62 | # Check if the estimated mixture proportion for components delta(0) 63 | # and U(-3,3) is close to the true mixture proportion (0.8,0.2) 64 | expect_equal(ash.pois.out$fitted_g$pi, c(0.8,0.2), tolerance = 0.05) 65 | }) 66 | 67 | test_that("lik_pois (log link) fitted mode is close to true mode",{ 68 | # Simulate a Poisson dataset 69 | set.seed(1) 70 | truemode = 4 71 | loglambda = c(rep(4,500), rnorm(500,4,1)) # 
simulate log(lambda) from distn w/ mode at 4 72 | lambda = exp(loglambda) 73 | x = rpois(1000,lambda) # Poisson observations 74 | ash.pois.out = ash(rep(0,length(x)),1,lik=lik_pois(x,link="log"),mode="estimate") 75 | 76 | # Check if the estimated mode is close to the true mode 50 77 | expect_equal(ash.pois.out$fitted_g$a[1], truemode, tolerance = 0.05, scale=truemode) 78 | }) 79 | 80 | test_that("Mode estimation for pois_lik finds an acceptable solution", { 81 | set.seed(1) 82 | # Load example 10X Genomics data 83 | dat = readRDS("test_pois_data.Rds") 84 | m0 = ashr::ash(rep(0, nrow(dat)), 1, lik=ashr::lik_pois(dat$x, scale=dat$scale, link="identity"), mode="estimate") 85 | lam = dat$x / dat$scale 86 | m1 = ashr::ash(rep(0, nrow(dat)), 1, lik=ashr::lik_pois(dat$x, scale=dat$scale, link="identity"), mode=c(min(lam), max(lam))) 87 | expect_equal(m0$loglik, m1$loglik, tolerance=1, scale=1) 88 | }) 89 | -------------------------------------------------------------------------------- /NAMESPACE: -------------------------------------------------------------------------------- 1 | # Generated by roxygen2: do not edit by hand 2 | 3 | S3method(calc_mixsd,default) 4 | S3method(cdf_post,default) 5 | S3method(comp_cdf,default) 6 | S3method(comp_cdf,igmix) 7 | S3method(comp_cdf,normalmix) 8 | S3method(comp_cdf,tnormalmix) 9 | S3method(comp_cdf,unimix) 10 | S3method(comp_cdf_conv,unimix) 11 | S3method(comp_cdf_post,default) 12 | S3method(comp_cdf_post,igmix) 13 | S3method(comp_cdf_post,normalmix) 14 | S3method(comp_cdf_post,tnormalmix) 15 | S3method(comp_cdf_post,unimix) 16 | S3method(comp_dens,default) 17 | S3method(comp_dens,igmix) 18 | S3method(comp_dens,normalmix) 19 | S3method(comp_dens,tnormalmix) 20 | S3method(comp_dens,unimix) 21 | S3method(comp_dens_conv,tnormalmix) 22 | S3method(comp_dens_conv,unimix) 23 | S3method(comp_mean,default) 24 | S3method(comp_mean,igmix) 25 | S3method(comp_mean,normalmix) 26 | S3method(comp_mean,tnormalmix) 27 | S3method(comp_mean,unimix) 28 
| S3method(comp_postmean,default) 29 | S3method(comp_postmean,igmix) 30 | S3method(comp_postmean,normalmix) 31 | S3method(comp_postmean,tnormalmix) 32 | S3method(comp_postmean,unimix) 33 | S3method(comp_postmean2,default) 34 | S3method(comp_postmean2,normalmix) 35 | S3method(comp_postmean2,tnormalmix) 36 | S3method(comp_postmean2,unimix) 37 | S3method(comp_postprob,default) 38 | S3method(comp_postsd,default) 39 | S3method(comp_postsd,igmix) 40 | S3method(comp_postsd,normalmix) 41 | S3method(comp_postsd,tnormalmix) 42 | S3method(comp_sd,default) 43 | S3method(comp_sd,igmix) 44 | S3method(comp_sd,normalmix) 45 | S3method(comp_sd,tnormalmix) 46 | S3method(comp_sd,unimix) 47 | S3method(dens,default) 48 | S3method(log_comp_dens_conv,tnormalmix) 49 | S3method(loglik_conv,default) 50 | S3method(mixcdf,default) 51 | S3method(mixprop,default) 52 | S3method(ncomp,default) 53 | S3method(plot,ash) 54 | S3method(pm_on_zero,default) 55 | S3method(post_sample,default) 56 | S3method(post_sample,normalmix) 57 | S3method(post_sample,unimix) 58 | S3method(postmean,default) 59 | S3method(postmean2,default) 60 | S3method(postsd,default) 61 | S3method(print,ash) 62 | S3method(prune,default) 63 | S3method(summary,ash) 64 | export(ash) 65 | export(ash.workhorse) 66 | export(ash_pois) 67 | export(ashci) 68 | export(calc_logLR) 69 | export(calc_loglik) 70 | export(calc_mixsd) 71 | export(calc_null_loglik) 72 | export(calc_null_vloglik) 73 | export(calc_vlogLR) 74 | export(calc_vloglik) 75 | export(cdf.ash) 76 | export(cdf_post) 77 | export(comp_cdf) 78 | export(comp_cdf_post) 79 | export(comp_dens) 80 | export(comp_mean) 81 | export(comp_postmean) 82 | export(comp_postmean2) 83 | export(comp_postprob) 84 | export(comp_postsd) 85 | export(comp_sd) 86 | export(compute_lfsr) 87 | export(cxxMixSquarem) 88 | export(dens) 89 | export(dlogf) 90 | export(estimate_mixprop) 91 | export(get_density) 92 | export(get_fitted_g) 93 | export(get_lfdr) 94 | export(get_lfsr) 95 | export(get_logLR) 96 | 
export(get_loglik) 97 | export(get_np) 98 | export(get_pi0) 99 | export(get_pm) 100 | export(get_post_sample) 101 | export(get_pp) 102 | export(get_psd) 103 | export(get_qvalue) 104 | export(get_svalue) 105 | export(igmix) 106 | export(lik_binom) 107 | export(lik_logF) 108 | export(lik_normal) 109 | export(lik_normalmix) 110 | export(lik_pois) 111 | export(lik_t) 112 | export(loglik_conv) 113 | export(mixEM) 114 | export(mixIP) 115 | export(mixSQP) 116 | export(mixVBEM) 117 | export(mixcdf) 118 | export(mixprop) 119 | export(my_e2truncbeta) 120 | export(my_e2truncgamma) 121 | export(my_e2truncnorm) 122 | export(my_e2trunct) 123 | export(my_etruncbeta) 124 | export(my_etruncgamma) 125 | export(my_etrunclogf) 126 | export(my_etruncnorm) 127 | export(my_etrunct) 128 | export(my_vtruncnorm) 129 | export(ncomp) 130 | export(normalmix) 131 | export(pcdf_post) 132 | export(plogf) 133 | export(plot_diagnostic) 134 | export(pm_on_zero) 135 | export(post_sample) 136 | export(posterior_dist) 137 | export(postmean) 138 | export(postmean2) 139 | export(postsd) 140 | export(prune) 141 | export(qval.from.lfdr) 142 | export(set_data) 143 | export(tnormalmix) 144 | export(unimix) 145 | export(vcdf_post) 146 | export(w_mixEM) 147 | import(Matrix) 148 | import(Rcpp) 149 | import(SQUAREM) 150 | import(truncnorm) 151 | importFrom(graphics,abline) 152 | importFrom(graphics,legend) 153 | importFrom(graphics,lines) 154 | importFrom(invgamma,dinvgamma) 155 | importFrom(invgamma,pinvgamma) 156 | importFrom(mixsqp,mixsqp) 157 | importFrom(stats,dgamma) 158 | importFrom(stats,dnorm) 159 | importFrom(stats,pgamma) 160 | importFrom(stats,pnorm) 161 | importFrom(stats,punif) 162 | importFrom(stats,qbeta) 163 | importFrom(stats,rnorm) 164 | importFrom(truncnorm,rtruncnorm) 165 | importFrom(utils,modifyList) 166 | useDynLib(ashr) 167 | -------------------------------------------------------------------------------- /R/ashCI.R: 
-------------------------------------------------------------------------------- 1 | #' @title Credible Interval Computation for the ash object 2 | #' 3 | #' @description Given the ash object returned by the main function ash, 4 | #' this function computes a posterior credible interval (CI) for each observation. The ash object 5 | #' must include a data component to use this function (which it does by default). 6 | #' 7 | #' @details Uses uniroot to find credible interval, one at a time for each observation. 8 | #' The computation cost is linear in number of observations. 9 | #' 10 | #' @param a the fitted ash object 11 | #' @param level the level for the credible interval, (default=0.95) 12 | #' @param betaindex a vector consisting of locations of betahat where 13 | #' you would like to compute the credible interval 14 | #' @param lfsr_threshold a scalar, if specified then computes CIs only for observations 15 | #' more significant than that threshold. 16 | #' @param tol passed to uniroot; indicates desired accuracy. 17 | #' @param trace a logical variable denoting whether some of the 18 | #' intermediate results of iterations should be displayed to the 19 | #' user. Default is FALSE. 
20 | 21 | #' @return A matrix, with 2 columns, ith row giving CI for ith observation 22 | #' 23 | #' 24 | #' @export 25 | #' @examples 26 | #' beta = c(rep(0,20),rnorm(20)) 27 | #' sebetahat = abs(rnorm(40,0,1)) 28 | #' betahat = rnorm(40,beta,sebetahat) 29 | #' beta.ash = ash(betahat, sebetahat) 30 | #' 31 | #' CImatrix=ashci(beta.ash,level=0.95) 32 | #' 33 | #' CImatrix1=ashci(beta.ash,level=0.95,betaindex=c(1,2,5)) 34 | #' CImatrix2=ashci(beta.ash,level=0.95,lfsr_threshold=0.1) 35 | ashci = function (a,level=0.95,betaindex,lfsr_threshold=1,tol=1e-3,trace=FALSE){ 36 | data = a$data 37 | if(is.null(data)){stop("ash object has to have data returned to compute CIs; use outputlevel 2 or more when running ash")} 38 | 39 | # options(warn=-1) 40 | if(missing(betaindex)){ 41 | betaindex = which(get_lfsr(a)<=lfsr_threshold) 42 | #betaindex[is.na(betaindex)]=FALSE #Some lfsrs would have NA 43 | } 44 | 45 | PosteriorMean = get_pm(a) 46 | PosteriorSD = get_psd(a) 47 | ZeroProb = get_lfdr(a) 48 | NegativeProb = get_np(a) 49 | PositiveProb = get_pp(a) 50 | 51 | m=get_fitted_g(a) 52 | percentage=1 53 | 54 | if (!inherits(m,"normalmix") && 55 | !inherits(m,"unimix") && 56 | !inherits(m,"tnormalmix")) 57 | stop(paste("Invalid class",class(m))) 58 | 59 | CImatrix=matrix(NA,nrow=length(PosteriorMean),ncol=2) 60 | colnames(CImatrix)=c((1-level)/2,(1+level)/2) 61 | #c("Fitted CDF(lower) ","Fitted CDF(upper)") 62 | 63 | if(missing(trace)){ 64 | if(length(betaindex)>=1000){ 65 | trace=TRUE #component-wise computation takes more time 66 | }else {trace=FALSE} 67 | } 68 | 69 | if(trace==TRUE){ 70 | cat("Computation time will be linear w.r.t sample size, progress will be printed to the screen \n") 71 | tic <- proc.time()["elapsed"] 72 | } 73 | for(i in betaindex){ 74 | data_i = extract_data(data,i) 75 | 76 | if(is.nan(PosteriorSD[i])){ 77 | CImatrix[i,]=c(NA,NA) 78 | } else if(PosteriorSD[i]==0){ #special case where posterior is (approximately) point mass 79 | CImatrix[i,]=PosteriorMean[i] 
80 | } else { 81 | #find lower point (first checking if 0 is it) 82 | if(NegativeProb[i]<(1-level)/2 & (ZeroProb[i]+NegativeProb[i])> (1-level)/2){ 83 | CImatrix[i,1]=0; 84 | } else { 85 | CImatrix[i,1]=stats::uniroot(f=taildiff,interval=c(PosteriorMean[i]-2*PosteriorSD[i],PosteriorMean[i]),extendInt="upX",m=m,data=data_i,target=(1-level)/2,tol=tol)$root 86 | } 87 | 88 | #find upper point (first checking if 0 is it) 89 | if(PositiveProb[i] < ((1-level)/2) & (ZeroProb[i]+PositiveProb[i])> (1-level)/2){ 90 | CImatrix[i,2]=0; 91 | } else { 92 | CImatrix[i,2]=stats::uniroot(f=taildiff,interval=c(PosteriorMean[i],PosteriorMean[i]+2*PosteriorSD[i]),extendInt="upX",m=m,data=data_i,target=(1+level)/2,tol=tol)$root 93 | } 94 | } 95 | if(trace==TRUE & percentage <=100){ 96 | currentpercentage=round(i*100/length(betaindex)) 97 | if(currentpercentage == percentage){ 98 | cat("Current computation progress", percentage,"%, seconds ") 99 | print(proc.time()["elapsed"] - tic) 100 | percentage = percentage + 1} 101 | } 102 | } 103 | CImatrix = CImatrix * data$s_orig^data$alpha #correct CIs for the fact they are CIs for beta/s^alpha 104 | 105 | CImatrix=signif(CImatrix,digits=round(1-log(tol)/log(10))) 106 | return(CImatrix) 107 | } 108 | 109 | 110 | #difference of tailprob from target 111 | taildiff=function(z,m,data,target){ 112 | cdf_post(m,z,data)-target 113 | } 114 | 115 | 116 | 117 | 118 | -------------------------------------------------------------------------------- /R/output.R: -------------------------------------------------------------------------------- 1 | # this file contains functions to compute the columns of 2 | # the result data.frame from ash 3 | # all such functions must take parameters (g,data) 4 | 5 | # Posterior Mean 6 | calc_pm = function(g,data){ 7 | exclude = get_exclusions(data) 8 | PosteriorMean = rep(0,length = n_obs(data)) 9 | PosteriorMean[!exclude] = postmean(g,data)[!exclude] 10 | PosteriorMean[exclude] = calc_mixmean(g) 11 | PosteriorMean = 
PosteriorMean * (data$s_orig^data$alpha) 12 | return(PosteriorMean) 13 | } 14 | 15 | # Posterior Standard Deviation 16 | calc_psd = function(g,data){ 17 | exclude = get_exclusions(data) 18 | PosteriorSD = rep(0,length = n_obs(data)) 19 | PosteriorSD[!exclude] = postsd(g,data)[!exclude] 20 | PosteriorSD[exclude] = calc_mixsd(g) 21 | PosteriorSD= PosteriorSD * (data$s_orig^data$alpha) 22 | return(PosteriorSD) 23 | } 24 | 25 | # Local FDR 26 | calc_lfdr = function(g,data){ 27 | exclude = get_exclusions(data) 28 | ZeroProb = rep(0,length = n_obs(data)) 29 | ZeroProb[!exclude] = colSums(comp_postprob(g,data)[pm_on_zero(g),,drop = FALSE])[!exclude] 30 | ZeroProb[exclude] = sum(mixprop(g)[pm_on_zero(g)]) 31 | ifelse(ZeroProb<0,0,ZeroProb) #deal with numerical issues that lead to numbers <0 32 | } 33 | 34 | #negative probability 35 | calc_np = function(g,data){ 36 | exclude = get_exclusions(data) 37 | NegativeProb = rep(0,length = n_obs(data)) 38 | NegativeProb[!exclude] = cdf_post(g, 0, data)[!exclude] - calc_lfdr(g,data)[!exclude] 39 | NegativeProb[exclude] = mixcdf(g,0) 40 | ifelse(NegativeProb<0,0,NegativeProb) #deal with numerical issues that lead to numbers <0 41 | } 42 | 43 | #positive probability 44 | calc_pp = function(g,data){ 45 | pp=(1-calc_np(g,data)-calc_lfdr(g,data)) 46 | ifelse(pp<0,0,pp) #deal with numerical issues that lead to numbers <0 47 | } 48 | 49 | # local False Sign Rate 50 | calc_lfsr = function(g,data){ 51 | compute_lfsr(calc_np(g,data),calc_lfdr(g,data)) 52 | } 53 | 54 | calc_svalue = function(g,data){ 55 | return(qval.from.lfdr(calc_lfsr(g,data))) 56 | } 57 | 58 | calc_qvalue = function(g,data){ 59 | return(qval.from.lfdr(calc_lfdr(g,data))) 60 | } 61 | 62 | # Data for flashr package 63 | calc_flash_data = function(g, data, penloglik) { 64 | kk = ncomp(g) 65 | n = n_obs(data) 66 | exclude = get_exclusions(data) 67 | comp_postprob = matrix(0, nrow = kk, ncol = n) 68 | comp_postmean = matrix(0, nrow = kk, ncol = n) 69 | comp_postmean2 = matrix(0, 
nrow = kk, ncol = n) 70 | 71 | comp_postprob[, !exclude] = comp_postprob(g, data)[, !exclude] 72 | comp_postmean[, !exclude] = comp_postmean(g, data)[, !exclude] 73 | comp_postmean2[, !exclude] = comp_postmean2(g, data)[, !exclude] 74 | 75 | # For missing observations, use the prior instead of the posterior. 76 | comp_postprob[, exclude] = mixprop(g) 77 | comp_postmean[, exclude] = comp_mean(g) 78 | comp_postmean2[, exclude] = comp_mean2(g) 79 | 80 | postmean = colSums(comp_postprob * comp_postmean) 81 | postmean2 = colSums(comp_postprob * comp_postmean2) 82 | # Avoid potential negatives due to numeric rounding errors. 83 | postmean2[postmean2 < 0] = 0 84 | 85 | return(list(fitted_g = g, 86 | postmean = postmean, 87 | postmean2 = postmean2, 88 | penloglik = penloglik)) 89 | } 90 | 91 | # if outputlevel an integer, produce a vector of character strings naming output to be produced 92 | # (otherwise return outputlevel) 93 | set_output=function(outputlevel){ 94 | if(!is.numeric(outputlevel)){return(outputlevel)} #allows that user might specify directly 95 | else{ 96 | output = c("fitted_g") 97 | if(outputlevel>0){ 98 | output = c(output,"loglik","logLR","PosteriorMean","PosteriorSD") 99 | } 100 | if(outputlevel>1){output=c("data","NegativeProb","PositiveProb","lfsr","svalue","lfdr","qvalue",output)} 101 | if(outputlevel>2){output=c(output,"fit_details")} 102 | 103 | # These are special flags for output used by flashr. 
104 | if(outputlevel==4){output=c("fitted_g","PosteriorMean", "PosteriorSD","flash_data")} 105 | if(outputlevel==5){output=c("flash_data")} 106 | 107 | return(output) 108 | } 109 | } 110 | 111 | # returns a named list of output functions, whose names are given in outputnames 112 | # eg set_resfns(c("lfsr","lfdr")) would extract results functions for lfsr and lfdr 113 | set_resfns = function(outputnames){ 114 | result_fns = list(NegativeProb= calc_np, PositiveProb= calc_pp, lfsr = calc_lfsr, 115 | svalue = calc_svalue, lfdr = calc_lfdr, qvalue = calc_qvalue,PosteriorMean = calc_pm, PosteriorSD = calc_psd) 116 | 117 | return(result_fns[intersect(names(result_fns), outputnames)]) #extract the results functions specified in output 118 | } 119 | -------------------------------------------------------------------------------- /R/normalmix.R: -------------------------------------------------------------------------------- 1 | #' @title Constructor for normalmix class 2 | #' 3 | #' @description Creates an object of class normalmix (finite mixture 4 | #' of univariate normals) 5 | #' 6 | #' @details None 7 | #' 8 | #' @param pi vector of mixture proportions 9 | #' @param mean vector of means 10 | #' @param sd vector of standard deviations 11 | #' 12 | #' @return an object of class normalmix 13 | #' 14 | #' @export 15 | #' 16 | #' @examples normalmix(c(0.5,0.5),c(0,0),c(1,2)) 17 | #' 18 | normalmix = function(pi,mean,sd){ 19 | structure(data.frame(pi,mean,sd),class="normalmix") 20 | } 21 | 22 | #' @title comp_sd.normalmix 23 | #' @description returns sds of the normal mixture 24 | #' @param m a normal mixture distribution with k components 25 | #' @return a vector of length k 26 | #' @export 27 | comp_sd.normalmix = function(m){ 28 | m$sd 29 | } 30 | 31 | #' @title comp_mean.normalmix 32 | #' @description returns mean of the normal mixture 33 | #' @param m a normal mixture distribution with k components 34 | #' @return a vector of length k 35 | #' @export 36 | 
comp_mean.normalmix = function(m)
  m$mean

#' @importFrom stats dnorm
#'
#' @export
#'
comp_dens.normalmix = function (m, y, log = FALSE) {
  k = ncomp(m)
  n = length(y)
  # Repeat y so each of the k components is evaluated at every observation;
  # the result is a k by n matrix (one row per mixture component).
  d = matrix(rep(y,rep(k,n)),nrow=k)
  return(matrix(dnorm(d,m$mean,m$sd,log),nrow = k))
}

#' @importFrom stats pnorm
#'
#' @export
#'
# Componentwise cdf at each y: vapply calls pnorm once per observation with
# the k component means and sds (FUN.VALUE template is the length-k m$mean),
# so the result is a k by n matrix.
comp_cdf.normalmix = function (m, y, lower.tail = TRUE)
  vapply(y,pnorm,m$mean,m$mean,m$sd,lower.tail)

#' @title comp_dens_conv.normalmix
#' @description returns density of convolution of each component of a
#'     normal mixture with N(0,s^2) at x. Note that
#'     convolution of two normals is normal, so it works that way
#' @param m mixture distribution with k components
#' @param data a list with components x and s to be interpreted as a normally-distributed observation and its standard error
#' @param \dots other arguments (unused)
#' @return a k by n matrix
comp_dens_conv.normalmix = function(m,data,...){
  if(!is_normal(data$lik)){
    stop("Error: normal mixture for non-normal likelihood is not yet implemented")
  }
  sdmat = sqrt(outer(data$s^2,m$sd^2,FUN="+")) #n by k matrix of standard deviations of convolutions
  return(t(stats::dnorm(outer(data$x,m$mean,FUN="-")/sdmat)/sdmat))
}

#' @title log_comp_dens_conv.normalmix
#' @description returns log-density of convolution of each component
#'     of a normal mixture with N(0,s^2) or s*t(v) at x. Note that
#'     convolution of two normals is normal, so it works that way
#' @inheritParams comp_dens_conv.normalmix
#' @return a k by n matrix
log_comp_dens_conv.normalmix = function(m,data){
  if(!is_normal(data$lik)){
    stop("Error: normal mixture for non-normal likelihood is not yet implemented")
  }
  sdmat = sqrt(outer(data$s^2,m$sd^2,"+")) #n by k matrix of standard deviations of convolutions
  # log of the convolved density, computed directly on the log scale for
  # numerical stability.
  return(t(stats::dnorm(outer(data$x,m$mean,FUN="-")/sdmat,log=TRUE) - log(sdmat)))
}

#' @title comp_cdf_conv.normalmix
#' @description returns cdf of convolution of each component of a
#'     normal mixture with N(0,s^2) at x. Note that
#'     convolution of two normals is normal, so it works that way
#' @param m mixture distribution with k components
#' @param data a list with components x and s to be interpreted as a normally-distributed observation and its standard error
#' @return a k by n matrix
comp_cdf_conv.normalmix = function (m, data) {
  if(!is_normal(data$lik)){
    stop("Error: normal mixture for non-normal likelihood is not yet implemented")
  }
  sdmat = sqrt(outer(data$s^2, m$sd^2, FUN="+")) #n by k matrix of standard deviations of convolutions
  return(t(stats::pnorm(outer(data$x, m$mean, FUN="-") / sdmat)))
}

# Posterior probability P(beta <= c | data) under each mixture component;
# c is the scalar threshold at which the posterior normal cdf is evaluated.
#' @export
comp_cdf_post.normalmix=function(m,c,data){
  if(!is_normal(data$lik)){
    stop("Error: normal mixture for non-normal likelihood is not yet implemented")
  }
  k = length(m$pi)

  #compute posterior standard deviation (s1) and posterior mean (m1)
  s1 = sqrt(outer(data$s^2,m$sd^2,FUN="*")/outer(data$s^2,m$sd^2,FUN="+"))
  ismissing = (is.na(data$x) | is.na(data$s))
  # Missing observations carry no information, so the posterior sd under each
  # component is just the prior (component) sd.
  s1[ismissing,]=m$sd

  m1 = t(comp_postmean(m,data))
  t(stats::pnorm(c,mean=m1,sd=s1))
}

# Posterior mean of beta under each component: the usual precision-weighted
# combination of the component mean and the observation.
#' @export
comp_postmean.normalmix = function(m,data){
  if(!is_normal(data$lik)){
    stop("Error: normal mixture for non-normal likelihood is not yet implemented")
  }
  tmp=(outer(data$s^2,m$mean, FUN="*") + outer(data$x,m$sd^2, FUN="*"))/outer(data$s^2,m$sd^2,FUN="+")
  ismissing = (is.na(data$x) | is.na(data$s))
  tmp[ismissing,]=m$mean #return prior mean when missing data
  t(tmp)
}

# Posterior standard deviation of beta under each component (k by n matrix).
#' @export
comp_postsd.normalmix = function(m,data){
  if(!is_normal(data$lik)){
    stop("Error: normal mixture for non-normal likelihood is not yet implemented")
  }
  t(sqrt(outer(data$s^2,m$sd^2,FUN="*")/outer(data$s^2,m$sd^2,FUN="+")))
}

# Posterior second moment: E(beta^2) = Var(beta) + E(beta)^2 per component.
#' @export
comp_postmean2.normalmix = function(m,data){
  comp_postsd(m,data)^2 + comp_postmean(m,data)^2
}

#' @title post_sample.normalmix
#'
#' @description returns random samples from the posterior, given a
#'     prior distribution m and n observed datapoints.
#'
#' @param m mixture distribution with k components
#' @param data a list with components x and s to be interpreted as a
#'     normally-distributed observation and its standard error
#' @param nsamp number of samples to return for each observation
#' @return a nsamp by n matrix
#' @importFrom stats rnorm
#' @export
post_sample.normalmix = function(m,data,nsamp){
  k = length(m$pi)
  n = length(data$x)

  postprob = comp_postprob(m,data)
  postmean = comp_postmean(m,data)
  postsd = comp_postsd(m,data)

  # Sample mixture components
  mixcomp = apply(postprob, 2, function(prob) {
    sample(1:k, nsamp, replace=TRUE, prob=prob)
  })
  # Use samples to index into postmean and postsd matrices: mixcomp holds the
  # row (component) index for each draw, and adding k*(col-1) converts each
  # (row, column) pair into a linear index into the k by n matrices.
  idx = as.vector(mixcomp + rep(k*(0:(n-1)), each=nsamp))
  samp = rnorm(nsamp*n, postmean[idx], postsd[idx])
  matrix(samp, nrow=nsamp, ncol=n)
}
--------------------------------------------------------------------------------
/R/tnormalmix.R:
--------------------------------------------------------------------------------
#' @title Constructor for tnormalmix class
#'
#' @description Creates an object of class tnormalmix (finite mixture
#'     of truncated univariate normals).
#'
#' @param pi Vector of mixture proportions (length k say).
#'
#' @param mean Vector of means (length k).
#'
#' @param sd Vector of standard deviations (length k).
#'
#' @param a Vector of left truncation points of each component (length k).
#'
#' @param b Vector of right truncation points of each component (length k).
#'
#' @return An object of class \dQuote{tnormalmix}.
#'
#' @export
#'
#' @examples tnormalmix(c(0.5,0.5),c(0,0),c(1,2),c(-10,0),c(0,10))
#'
tnormalmix = function (pi, mean, sd, a, b)
  structure(data.frame(pi,mean,sd,a,b),class = "tnormalmix")

#' @title comp_sd.tnormalmix
#'
#' @description Returns standard deviations of the truncated normal mixture.
#'
#' @param m A truncated normal mixture distribution with k components.
#'
#' @return A vector of length k.
#'
#' @export
#'
comp_sd.tnormalmix = function(m)
  sqrt(my_vtruncnorm(m$a,m$b,m$mean,m$sd))

#' @title comp_mean.tnormalmix
#'
#' @description Returns mean of the truncated-normal mixture.
#'
#' @param m A truncated normal mixture distribution with k components.
#'
#' @return A vector of length k.
#'
#' @export
#'
comp_mean.tnormalmix = function (m)
  my_etruncnorm(m$a,m$b,m$mean,m$sd)

#' @importFrom stats dnorm
#' @importFrom stats pnorm
#'
#' @export
#'
comp_dens.tnormalmix = function (m, y, log = FALSE) {
  k = ncomp(m)
  n = length(y)
  d = matrix(rep(y,rep(k,n)),nrow = k)
  # No cases of b = a.
61 | return(matrix(dnorm(d,m$mean,m$sd))/(pnorm(m$b) - pnorm(m$a))) 62 | } 63 | 64 | #' @importFrom stats pnorm 65 | #' @importFrom stats dnorm 66 | #' 67 | #' @export 68 | #' 69 | comp_dens_conv.tnormalmix = function (m, data, ...) { 70 | if (!is_normal(data$lik)) 71 | stop("Error: truncated normal mixture for non-normal likelihood is not ", 72 | "yet implemented") 73 | if (length(data$s) == 1) 74 | data$s = rep(data$s,length(data$x)) 75 | A = sqrt(outer(1/m$sd^2,1/data$s^2,FUN = "+")) 76 | B = 1/sqrt(outer(m$sd^2,data$s^2,FUN = "+")) 77 | C = outer(m$sd,data$s,"/") 78 | D = pnorm(m$b/m$sd) - pnorm(m$a/m$sd) 79 | varmat = outer(m$sd^2,data$s^2,FUN = "+") 80 | left = pnorm(A*m$b - t(t(B*C)*data$x)) 81 | right = pnorm(A*m$a - t(t(B*C)*data$x)) 82 | denx = dnorm(matrix(data$x,length(m$sd),length(data$x), 83 | byrow = TRUE)/sqrt(varmat))/sqrt(varmat) 84 | result = ((left - right)*denx)/D 85 | DD = dnorm(m$b/m$sd) 86 | lleft = dnorm(A*m$b - t(t(B*C)*data$x)) 87 | result[m$a == m$b,] = ((lleft*denx/varmat)/DD)[m$a == m$b,] 88 | result[m$sd == 0,] = denx[m$sd == 0,] 89 | return(result) 90 | } 91 | 92 | #' @importFrom stats pnorm 93 | #' @importFrom stats dnorm 94 | #' 95 | #' @export 96 | #' 97 | log_comp_dens_conv.tnormalmix = function (m, data) { 98 | if (!is_normal(data$lik)) 99 | stop("Error: truncated normal mixture for non-normal likelihood is not ", 100 | "yet implemented") 101 | 102 | # Use previous function directly. 
if (length(data$s) == 1)
    data$s = rep(data$s,length(data$x))
  # (Continuation of the tnormalmix log-density-of-convolution function,
  # whose head lies above this chunk; kept behaviorally unchanged.)
  A = sqrt(outer(1/m$sd^2,1/data$s^2,FUN = "+"))
  B = 1/sqrt(outer(m$sd^2,data$s^2,FUN = "+"))
  C = outer(m$sd,data$s,"/")
  D = pnorm(m$b/m$sd) - pnorm(m$a/m$sd)
  varmat = outer(m$sd^2,data$s^2,FUN = "+")
  left = pnorm(A*m$b - t(t(B*C)*data$x))
  right = pnorm(A*m$a - t(t(B*C)*data$x))
  denx = dnorm(t(matrix(data$x,length(data$x),length(m$sd))),0,sqrt(varmat),
               log = TRUE)
  result = log(left - right) + denx -
    log(matrix(D,length(m$sd),length(data$x)))
  DD = dnorm(m$b/m$sd)
  lleft = dnorm(A*m$b - t(t(B*C)*data$x))
  # Degenerate (point-mass) components and zero-sd components are handled
  # separately to avoid log(0) in the generic formula.
  result[m$a == m$b,] = (log(lleft/DD) + denx - log(varmat))[m$a == m$b,]
  result[m$sd == 0,] = denx[m$sd == 0,]
  return(result)
}

#' @title cdf of components of a truncated-normal mixture
#'
#' @description Returns a k by n matrix whose (j,i) entry is the cdf of
#'   the jth truncated-normal component evaluated at y[i] (or the upper
#'   tail probability when \code{lower.tail = FALSE}).
#'
#' @param m a mixture of class tnormalmix
#' @param y vector of points at which to evaluate the cdf
#' @param lower.tail logical; if TRUE (default) probabilities are
#'   P(X <= y), otherwise P(X > y), as in \code{pnorm}.
#'
#' @importFrom stats pnorm
#'
#' @export
#'
comp_cdf.tnormalmix = function (m, y, lower.tail = TRUE) {
  k = length(m$pi)
  n = length(y)
  tmp = matrix(1,nrow = k,ncol = n)
  # Points below a component's lower truncation point have cdf 0; points
  # above its upper truncation point keep the initialized value 1.
  tmp[outer(m$a,y,">")] = 0
  subset = outer(m$a,y,"<=") & outer(m$b,y,">=")
  # FIX: the per-point cdf computation is now entirely inside the guard
  # (previously pnc/pna/pnb were referenced outside it, erroring when no
  # point fell inside any component's support), and all three pnorm calls
  # use the lower tail consistently (previously pnc omitted lower.tail,
  # so lower.tail = FALSE mixed tails and returned nonsense).
  if (sum(subset) > 0) {
    YY = matrix(y,k,n,byrow = TRUE)
    MM = matrix(m$mean,k,n)
    SD = matrix(m$sd,k,n)
    A = matrix(m$a,k,ncol = n)
    B = matrix(m$b,k,ncol = n)
    pnc = pnorm(YY[subset],MM[subset],SD[subset])
    pna = pnorm(A[subset],MM[subset],SD[subset])
    pnb = pnorm(B[subset],MM[subset],SD[subset])
    tmp[subset] = (pnc - pna)/(pnb - pna)
  }
  # Upper-tail probabilities are obtained from the lower-tail result;
  # this also makes the a > y and b < y cases come out right.
  if (!lower.tail)
    tmp = 1 - tmp
  return(tmp)
}

#' @title posterior cdf for truncated-normal mixture components
#'
#' @description Returns a k by n matrix whose (j,i) entry is the posterior
#'   probability P(beta <= c | component j, observation i) under a normal
#'   likelihood.
#'
#' @param m a mixture of class tnormalmix
#' @param c scalar at which to evaluate the posterior cdf
#' @param data a data object as produced by set_data (normal likelihood only)
#'
#' @importFrom stats pnorm
#'
#' @export
#'
comp_cdf_post.tnormalmix = function (m, c, data) {
  if (!is_normal(data$lik))
    stop("Error: truncated normal mixture for non-normal likelihood is ",
         "not yet implemented")
  k = length(m$pi)
  n = length(data$x)
  tmp = matrix(1,nrow = k,ncol = n)
  tmp[m$a > c,] = 0
  subset = m$a <= c & m$b >= c
  # FIX: the truncated-posterior computation (and its B == C patch below)
  # is only meaningful when some component straddles c; previously
  # tmp[subset,] and B/C were referenced even when the guard was skipped,
  # raising "object not found" errors.
  if (sum(subset) > 0) {
    # X = shrinkage weight on the observation; Y = posterior variance.
    X = 1/(outer(data$s^2,m$sd[subset]^2,FUN = "/") + 1)
    Y = 1/outer(1/data$s^2,1/m$sd[subset]^2,FUN = "+")
    A = matrix(m$a[subset],nrow = sum(subset),ncol = n)
    pna = pnorm(t(A),X*data$x + t(t(1-X) * m$mean[subset]),sqrt(Y))
    C = matrix(c,nrow = sum(subset),ncol = n)
    pnc = pnorm(t(C),X*data$x + t(t(1-X) * m$mean[subset]),sqrt(Y))
    B = matrix(m$b[subset],nrow = sum(subset),ncol = n)
    pnb = pnorm(t(B),X*data$x + t(t(1-X) * m$mean[subset]),sqrt(Y))
    tmp[subset,] = t((pnc - pna)/(pnb - pna))
  }
  # Point-mass components (a == b): cdf is the indicator a <= c; the
  # length-sum(point) RHS recycles down columns, giving each column the
  # full indicator vector.
  point = (m$a == m$b)
  tmp[point,] = rep(m$a[point] <= c,n)
  if (sum(subset) > 0) {
    ### ZMZ: in case of pnc = pnb (c at the upper truncation point, where
    ### 0/0 can occur above), the cdf is 1.
    ### FIX: previously tmp[B == C] = 1 indexed the k x n matrix tmp with
    ### a sum(subset) x n logical matrix, hitting the wrong entries; the
    ### patch is now mapped back onto the straddling rows of tmp.
    atb = matrix(FALSE,k,n)
    atb[subset,] = (B == C)
    tmp[atb] = 1
  }
  ### Remaining NaNs come from 0/0 in components that have essentially
  ### zero posterior probability; use the naive value 0.
  tmp[is.nan(tmp)] = 0
  return(tmp)
}

#' @export
comp_postmean.tnormalmix = function (m, data) {
  if (!is_normal(data$lik))
    stop("Error: truncated normal mixture for non-normal likelihood is not ",
         "yet implemented")
  k = length(m$pi)
  n = length(data$x)
  # A = shrinkage weight on the prior mean; B = posterior variance.
  A = 1/(outer(m$sd^2,data$s^2,FUN = "/") + 1)
  B = 1/outer(1/m$sd^2,1/data$s^2,FUN = "+")
  result = my_etruncnorm(m$a,m$b,A*m$mean + t(t(1-A)*data$x),sqrt(B))
  # Missing observations carry no information: posterior mean = prior mean.
  ismissing = which(is.na(data$x) | is.na(data$s))
  if (length(ismissing) > 0)
    result[,ismissing] = m$mean
  return(result)
}

#' @export
comp_postsd.tnormalmix = function (m, data) {
  if (!is_normal(data$lik))
    stop("Error: truncated normal mixture for non-normal likelihood is not ",
         "yet implemented")
  k = length(m$pi)
  n = length(data$x)
  A = 1/(outer(m$sd^2,data$s^2,FUN = "/") + 1)
  B = 1/outer(1/m$sd^2,1/data$s^2,FUN = "+")
  result = sqrt(my_vtruncnorm(m$a,m$b,t(A*m$mean + t(t(1 - A)*data$x)),
                              t(sqrt(B))))
  return(t(result))
}

#' @export
comp_postmean2.tnormalmix = function (m, data)
  comp_postsd.tnormalmix(m,data)^2 + comp_postmean.tnormalmix(m,data)^2

-------------------------------------------------------------------------------- /vignettes/adaptive_shrinkage.Rmd: --------------------------------------------------------------------------------
---
title: "Illustration of Adaptive Shrinkage"
author: "Matthew Stephens"
date: "2017-01-19"
vignette: >
  %\VignetteIndexEntry{Illustration of Adaptive Shrinkage}
  %\VignetteEngine{knitr::rmarkdown}
  %\VignetteEncoding{UTF-8}
---

The goal here is to illustrate the "adaptive" nature of the adaptive
shrinkage. The shrinkage is adaptive in two senses. First, the amount
of shrinkage depends on the distribution $g$ of the true effects,
which is learned from the data: when $g$ is very peaked about zero
then ash learns this and deduces that signals should be more strongly
shrunk towards zero than when $g$ is less peaked about zero. Second,
the amount of shrinkage of each observation depends on its standard
error: the smaller the standard error, the more informative the data,
and so the less shrinkage that occurs. From an Empirical Bayesian
perspective both of these points are entirely natural: the posterior
depends on both the prior and the likelihood; the prior, $g$, is
learned from the data, and the likelihood incorporates the standard
error of each observation.

First, we load the necessary libraries.

```{r load_packages}
library(ashr)
library(ggplot2)
```

We simulate from two scenarios: in the first scenario, the effects are more
peaked about zero (**sim.spiky**); in the second scenario, the effects are
less peaked at zero (**sim.bignormal**). A summary of the two data sets is
printed at the end of this chunk.
36 | 37 | ```{r initialize, collapse=TRUE} 38 | set.seed(100) 39 | 40 | # Simulates data sets for experiments below. 41 | rnormmix_datamaker = function (args) { 42 | 43 | # generate the proportion of true nulls randomly. 44 | pi0 = runif(1,args$min_pi0,args$max_pi0) 45 | k = ncomp(args$g) 46 | 47 | #randomly draw a component 48 | comp = sample(1:k,args$nsamp,mixprop(args$g),replace = TRUE) 49 | isnull = (runif(args$nsamp,0,1) < pi0) 50 | beta = ifelse(isnull,0,rnorm(args$nsamp,comp_mean(args$g)[comp], 51 | comp_sd(args$g)[comp])) 52 | sebetahat = args$betahatsd 53 | betahat = beta + rnorm(args$nsamp,0,sebetahat) 54 | meta = list(g1 = args$g,beta = beta,pi0 = pi0) 55 | input = list(betahat = betahat,sebetahat = sebetahat,df = NULL) 56 | return(list(meta = meta,input = input)) 57 | } 58 | 59 | NSAMP = 1000 60 | s = 1/rgamma(NSAMP,5,5) 61 | 62 | sim.spiky = 63 | rnormmix_datamaker(args = list(g = normalmix(c(0.4,0.2,0.2,0.2), 64 | c(0,0,0,0), 65 | c(0.25,0.5,1,2)), 66 | min_pi0 = 0, 67 | max_pi0 = 0, 68 | nsamp = NSAMP, 69 | betahatsd = s)) 70 | 71 | sim.bignormal = 72 | rnormmix_datamaker(args = list(g = normalmix(1,0,4), 73 | min_pi0 = 0, 74 | max_pi0 = 0, 75 | nsamp = NSAMP, 76 | betahatsd = s)) 77 | 78 | cat("Summary of observed beta-hats:\n") 79 | print(rbind(spiky = quantile(sim.spiky$input$betahat,seq(0,1,0.1)), 80 | bignormal = quantile(sim.bignormal$input$betahat,seq(0,1,0.1))), 81 | digits = 3) 82 | ``` 83 | 84 | Now we run ash on both data sets. 85 | 86 | ```{r run_ash} 87 | beta.spiky.ash = ash(sim.spiky$input$betahat,s) 88 | beta.bignormal.ash = ash(sim.bignormal$input$betahat,s) 89 | ``` 90 | 91 | Next we plot the shrunken estimates against the observed values, colored 92 | according to the (square root of) precision: precise estimates being colored 93 | red, and less precise estimates being blue. 
Two key features of the plots 94 | illustrate the ideas of adaptive shrinkage: i) the estimates under the spiky 95 | scenario are shrunk more strongly, illustrating that shrinkage adapts to the 96 | underlying distribution of beta; ii) in both cases, estimates with large 97 | standard error (blue) are shrunk more than estimates with small standard 98 | error (red) illustrating that shrinkage adapts to measurement precision. 99 | 100 | ```{r plot_shrunk_vs_obs, fig.align="center"} 101 | make_df_for_ashplot = 102 | function (sim1, sim2, ash1, ash2, name1 = "spiky", name2 = "big-normal") { 103 | n = length(sim1$input$betahat) 104 | x = c(get_lfsr(ash1),get_lfsr(ash2)) 105 | return(data.frame(betahat = c(sim1$input$betahat,sim2$input$betahat), 106 | beta_est = c(get_pm(ash1),get_pm(ash2)), 107 | lfsr = x, 108 | s = c(sim1$input$sebetahat,sim2$input$sebetahat), 109 | scenario = c(rep(name1,n),rep(name2,n)), 110 | signif = x < 0.05)) 111 | } 112 | 113 | ashplot = function(df,xlab="Observed beta-hat",ylab="Shrunken beta estimate") 114 | ggplot(df,aes(x = betahat,y = beta_est,color = 1/s)) + 115 | xlab(xlab) + ylab(ylab) + geom_point() + 116 | facet_grid(.~scenario) + 117 | geom_abline(intercept = 0,slope = 1,linetype = "dotted") + 118 | scale_colour_gradient2(midpoint = median(1/s),low = "blue", 119 | mid = "white",high = "red",space = "Lab") + 120 | coord_fixed(ratio = 1) 121 | 122 | df = make_df_for_ashplot(sim.spiky,sim.bignormal,beta.spiky.ash, 123 | beta.bignormal.ash) 124 | print(ashplot(df)) 125 | ``` 126 | 127 | A related consequence is that significance of each observation is no longer 128 | monotonic with $p$ value. 
129 | 130 | ```{r plot_pvalues, fig.align="center", warning=FALSE} 131 | pval_plot = function (df) 132 | ggplot(df,aes(x = pnorm(-abs(betahat/s)),y = lfsr,color = log(s))) + 133 | geom_point() + facet_grid(.~scenario) + xlim(c(0,0.025)) + 134 | xlab("p value") + ylab("lfsr") + 135 | scale_colour_gradient2(midpoint = 0,low = "red", 136 | mid = "white",high = "blue") 137 | 138 | print(pval_plot(df)) 139 | ``` 140 | 141 | Let's see how these are affected by changing the modelling assumptions so that 142 | the *standardized* beta are exchangeable (rather than the beta being 143 | exchangeable). 144 | 145 | ```{r run_ash_ET, fig.align="center", warning=FALSE} 146 | beta.bignormal.ash.ET = 147 | ash(sim.bignormal$input$betahat,s,alpha = 1,mixcompdist = "normal") 148 | beta.spiky.ash.ET = 149 | ash(sim.spiky$input$betahat,s,alpha = 1,mixcompdist = "normal") 150 | df.ET = make_df_for_ashplot(sim.spiky,sim.bignormal,beta.spiky.ash.ET, 151 | beta.bignormal.ash.ET) 152 | ashplot(df.ET,ylab = "Shrunken beta estimate (ET model)") 153 | pval_plot(df.ET) 154 | ``` 155 | 156 | This is a "volcano plot" showing effect size against p value. The blue points 157 | are "significant" in that they have lfsr < 0.05. 158 | 159 | ```{r volcano, fig.align="center", warning=FALSE} 160 | print(ggplot(df,aes(x = betahat,y = -log10(2*pnorm(-abs(betahat/s))), 161 | col = signif)) + 162 | geom_point(alpha = 1,size = 1.75) + facet_grid(.~scenario) + 163 | theme(legend.position = "none") + xlim(c(-10,10)) + ylim(c(0,15)) + 164 | xlab("Effect (beta)") + ylab("-log10 p-value")) 165 | ``` 166 | 167 | In this case the significance by lfsr is not quite the same as cutting off 168 | at a given p value (you can see that the decision boundary is not quite the 169 | same as drawing a horizontal line), but also not that different, presumably 170 | because the standard errors, although varying across observations, do not 171 | vary greatly. 172 | 173 | ## Session information. 

```{r info}
print(sessionInfo())
```
-------------------------------------------------------------------------------- /R/unimix.R: --------------------------------------------------------------------------------


############################### METHODS FOR unimix class ###########################

#' @title Constructor for unimix class
#'
#' @description Creates an object of class unimix (finite mixture of
#' univariate uniforms)
#'
#' @details None
#'
#' @param pi vector of mixture proportions
#' @param a vector of left hand ends of uniforms
#' @param b vector of right hand ends of uniforms
#'
#' @return an object of class unimix
#'
#' @export
#'
#' @examples unimix(c(0.5,0.5),c(0,0),c(1,2))
unimix = function(pi,a,b){
  # A unimix object is a data frame of (weight, left end, right end)
  # triples carrying the "unimix" S3 class.
  out = data.frame(pi,a,b)
  class(out) = "unimix"
  out
}

#' @export
comp_cdf.unimix = function(m,y,lower.tail=TRUE){
  # Evaluate every component's uniform cdf at each point of y; the result
  # is a k by n matrix (components in rows, evaluation points in columns).
  punif_at = function(q)
    stats::punif(q,min = m$a,max = m$b,lower.tail = lower.tail)
  vapply(y,punif_at,m$a)
}

#' @export
comp_sd.unimix = function(m){
  # Standard deviation of Uniform(a,b) is (b - a)/sqrt(12).
  width = m$b - m$a
  width/sqrt(12)
}

#' @export
comp_mean.unimix = function(m){
  # Mean of Uniform(a,b) is the interval midpoint.
  0.5*(m$a + m$b)
}

#' @export
comp_dens.unimix = function(m,y,log=FALSE){
  k = ncomp(m)
  n = length(y)
  # Column j holds y[j] repeated k times, so each component density is
  # evaluated at every point; recycling of a and b runs down the rows.
  ymat = matrix(y,nrow = k,ncol = n,byrow = TRUE)
  matrix(stats::dunif(ymat, m$a, m$b, log),nrow = k)
}

#' density of convolution of each component of a unif mixture
#' @param m a mixture of class unimix
#' @param data, see set_data()
#' @param \dots other arguments (unused)
#'
#' @return a k by n matrix
#'
#' @export
comp_dens_conv.unimix = function(m,data,...){
  # Computed on the log scale (for numerical stability) and exponentiated.
  exp(log_comp_dens_conv(m,data))
}

#' log density of convolution of each component of a unif mixture
#' @inheritParams comp_dens_conv.unimix
#' @return a k by n matrix of densities
log_comp_dens_conv.unimix =
function(m,data){ 64 | b = pmax(m$b,m$a) #ensure a c,] = 0 116 | subset = m$a<=c & m$b>c # subset of components (1..k) with nontrivial cdf 117 | if(sum(subset)>0){ 118 | lpna = do.call(lik$lcdfFUN, list(outer(data$x,m$a[subset],FUN="-")/data$s)) 119 | lpnc = do.call(lik$lcdfFUN, list(outer(data$x,rep(c,sum(subset)),FUN="-")/data$s)) 120 | lpnb = do.call(lik$lcdfFUN, list(outer(data$x,m$b[subset],FUN="-")/data$s)) 121 | tmp[subset,] = t((exp(lpnc-lpna)-1)/(exp(lpnb-lpna)-1)) 122 | #tmp[subset,] = t((pnc-pna)/(pnb-pna)) ; doing on different log scale reduces numerical issues 123 | } 124 | subset = (m$a == m$b) #subset of components with trivial cdf 125 | tmp[subset,]= rep(m$a[subset] <= c,n) 126 | #Occasionally we would encounter issue such that in some entries pna[i,j]=pnb[i,j]=pnc[i,j]=0 or pna=pnb=pnc=1 127 | #Those are the observations with significant betahat(small sebetahat), resulting in pnorm() return 1 or 0 128 | #due to the thin tail property of normal distribution.(or t-distribution, although less likely to occur) 129 | #Then R would be dividing 0 by 0, resulting in NA values 130 | #In practice, those observations would have 0 probability of belonging to those "problematic" components 131 | #Thus any sensible value in [0,1] would not matter much, as they are highly unlikely to come from those 132 | #components in posterior distribution. 133 | #Here we simply assign the "naive" value as as (c-a)/(b-a) 134 | #As the component pdf is rather smaller over the region. 
tmpnaive=matrix(rep((c-m$a)/(m$b-m$a),length(data$x)),nrow=k,ncol=n)
  tmp[is.nan(tmp)]= tmpnaive[is.nan(tmp)]
  tmp
}

#note that with uniform prior, posterior is truncated normal, so
#this is computed using formula for mean of truncated normal
#' @export
comp_postmean.unimix = function(m,data){
  x=data$x
  s=data$s

  lik = data$lik

  alpha = outer(x, -m$b,FUN="+")/s
  beta = outer(x, -m$a, FUN="+")/s

  # tmp is n x k: observations in rows, components in columns.
  tmp = x-s*do.call(lik$etruncFUN, list(alpha,beta))

  # alpha = outer(-x, m$a,FUN="+")/s
  # beta = outer(-x, m$b, FUN="+")/s
  #
  # tmp = x + s*do.call(lik$etruncFUN, list(alpha,beta))

  ismissing = is.na(x) | is.na(s)
  # A missing observation carries no information, so its posterior mean is
  # the prior mean (a_j + b_j)/2 of each component j.
  # FIX: the old assignment tmp[ismissing,] = (m$a+m$b)/2 recycled the
  # length-k vector down the columns of the sum(ismissing) x k submatrix,
  # which is only correct when exactly one observation is missing; with
  # two or more missing, components received each other's means.
  # rep(..., each = ...) fills the submatrix column by column so column j
  # uniformly gets the jth prior mean.
  if (any(ismissing))
    tmp[ismissing,] = rep((m$a+m$b)/2, each = sum(ismissing))
  t(tmp)
}

# as for posterior mean, but compute posterior mean squared value
#' @export
comp_postmean2.unimix = function(m,data){
  x=data$x
  s=data$s

  lik = data$lik
  alpha = outer(-x, m$a,FUN="+")/s
  beta = outer(-x, m$b, FUN="+")/s
  tmp = x^2 +
    2*x*s* do.call(lik$etruncFUN, list(alpha,beta)) +
    s^2* do.call(lik$e2truncFUN, list(alpha,beta))

  ismissing = is.na(x) | is.na(s)
  # Prior second moment of Uniform(a,b) is (a^2 + ab + b^2)/3; as in
  # comp_postmean.unimix above, fill column by column so that several
  # missing observations are handled correctly (the previous plain
  # recycling scrambled values across components).
  if (any(ismissing))
    tmp[ismissing,] = rep((m$b^2+m$a*m$b+m$a^2)/3, each = sum(ismissing))
  t(tmp)
}

# #not yet implemented!
# #just returns 0s for now
# comp_postsd.unimix = function(m,data){
#   k= ncomp(m)
#   n=length(data$x)
#   return(matrix(NA,nrow=k,ncol=n))
#   # return(sqrt(comp_postmean2(m,betahat,sebetahat,v)-comp_postmean(m,betahat,sebetahat,v)^2))
# }


#' @title post_sample.unimix
#'
#' @description returns random samples from the posterior, given a
#' prior distribution m and n observed datapoints.
196 | #' 197 | #' @param m mixture distribution with k components 198 | #' @param data a list with components x and s to be interpreted as a 199 | #' normally-distributed observation and its standard error 200 | #' @param nsamp number of samples to return for each observation 201 | #' @return a nsamp by n matrix 202 | #' @importFrom truncnorm rtruncnorm 203 | #' @export 204 | post_sample.unimix = function(m,data,nsamp){ 205 | k = length(m$pi) 206 | n = length(data$x) 207 | 208 | postprob = comp_postprob(m,data) 209 | # Sample mixture components 210 | mixcomp = apply(postprob, 2, function(prob) { 211 | sample(1:k, nsamp, replace=TRUE, prob=prob) 212 | }) 213 | 214 | a = m$a[mixcomp] 215 | b = m$b[mixcomp] 216 | 217 | samp = rtruncnorm(nsamp*n, a = a, b = b, 218 | mean = rep(data$x, each=nsamp), 219 | sd = rep(data$s, each=nsamp)) 220 | # rtruncnorm gives NA when a = b, so these need to be set separately: 221 | idx = (a == b) 222 | samp[idx] = a[idx] 223 | 224 | matrix(samp, nrow=nsamp, ncol=n) 225 | } 226 | -------------------------------------------------------------------------------- /src/MixSquarem.cpp: -------------------------------------------------------------------------------- 1 | #include 2 | #include 3 | 4 | using namespace Rcpp; 5 | 6 | List fixptfn(NumericVector pi_est,NumericMatrix matrix_lik, NumericVector prior){ 7 | int n=matrix_lik.nrow(), k=matrix_lik.ncol(); 8 | NumericVector pi_new(k); 9 | for (int i=0;i0.01){ 142 | try{p3list=fixptfn(pnew,matrix_lik,prior);feval++;} 143 | catch(...){ 144 | pnew=p2cpp; 145 | lnew=p2list["objfn"]; 146 | if(alpha==stepmax){ 147 | stepmax=std::max(stepmax0,stepmax/mstep); 148 | } 149 | alpha=1; 150 | extrap=false; 151 | if(alpha==stepmax){stepmax=mstep*stepmax;} 152 | if(stepmin<0.0 && alpha==stepmin){stepmin=mstep*stepmin;} 153 | p=pnew; 154 | lnewcpp=lnew; 155 | if(!std::isnan(lnewcpp[0])){lold=lnew;} 156 | if(trace){Rcout<<"Objective fn: "<loldcpp[0]+objfninc) { 164 | pnew=p2list["fixedpointvector"]; 165 | 
lnew=p2list["objfn"]; 166 | if(alpha==stepmax){ 167 | stepmax=std::max(stepmax0,stepmax/mstep); 168 | } 169 | alpha=1; 170 | extrap=false; 171 | } 172 | }else{//same as above, when stablization is not performed. 173 | lnew=lold; 174 | lnewcpp=lnew; 175 | if (lnewcpp[0]>loldcpp[0]+objfninc) { 176 | pnew=p2list["fixedpointvector"]; 177 | lnew=p2list["objfn"]; 178 | if(alpha==stepmax){ 179 | stepmax=std::max(stepmax0,stepmax/mstep); 180 | } 181 | alpha=1; 182 | extrap=false; 183 | } 184 | } 185 | 186 | if(alpha==stepmax){stepmax=mstep*stepmax;} 187 | if(stepmin<0 && alpha==stepmin){stepmin=mstep*stepmin;} 188 | 189 | p=pnew; 190 | lnewcpp=lnew; 191 | if(!std::isnan(lnewcpp[0])){lold=lnew;} 192 | loldcpp=lold; 193 | if(trace){Rcout<<"Objective fn: "<= maxiter){conv=false;} 199 | 200 | return(List::create(Named("par")=p, 201 | Named("value.objfn")=lold, 202 | Named("iter")=iter, 203 | Named("fpevals")=feval, 204 | Named("objfevals")=feval, 205 | Named("convergence")=conv)); 206 | } 207 | 208 | //' @title Brief description of function. 209 | //' @description Explain here what this function does. 210 | //' @param matrix_lik Description of argument goes here. 211 | //' @param prior Description of argument goes here. 212 | //' @param pi_init Description of argument goes shere. 213 | //' @param control Description of argument goes here. 214 | //' @export 215 | // [[Rcpp::export]] 216 | List cxxMixSquarem (NumericMatrix matrix_lik, 217 | NumericVector prior, NumericVector pi_init, 218 | List control){ //note: no default pi_init=NULL 219 | int k=matrix_lik.ncol(),niter; 220 | bool converged=NA_LOGICAL; 221 | double loglik; 222 | List res; 223 | NumericVector pi(k); 224 | 225 | if(Rf_isNull(pi_init)) 226 | std::fill(pi.begin(), pi.end(), 1./(double)k); 227 | else{ 228 | pi=clone(pi_init); 229 | for (int i=0;i