├── .Rbuildignore ├── .github └── workflows │ ├── R-CMD-check.yml │ └── calc-coverage.yml ├── .gitignore ├── DESCRIPTION ├── LICENSE ├── NAMESPACE ├── NEWS.md ├── R ├── adnuts.R ├── pairs_admb.R ├── parallel.R ├── sample_admb.R ├── sample_tmb_deprecated.R ├── samplers.R └── utils.R ├── README.md ├── adnuts.Rproj ├── cran-comments.md ├── inst ├── CITATION ├── demo.R └── examples │ ├── fit.RDS │ └── simple │ ├── simple.dat │ └── simple.tpl ├── man ├── adfit.Rd ├── adnuts.Rd ├── as.data.frame.adfit.Rd ├── check_identifiable.Rd ├── dot-check_ADMB_version.Rd ├── dot-check_console_printing.Rd ├── dot-check_model_path.Rd ├── dot-getADMBHessian.Rd ├── dot-sample_admb.Rd ├── dot-update_model.Rd ├── extract_sampler_params.Rd ├── extract_samples.Rd ├── is.adfit.Rd ├── launch_shinyadmb.Rd ├── launch_shinytmb.Rd ├── pairs_admb.Rd ├── plot.adfit.Rd ├── plot_marginals.Rd ├── plot_sampler_params.Rd ├── plot_uncertainties.Rd ├── print.adfit.Rd ├── sample_admb.Rd ├── sample_inits.Rd ├── sample_tmb.Rd ├── sample_tmb_hmc.Rd ├── sample_tmb_nuts.Rd ├── sample_tmb_rwm.Rd ├── summary.adfit.Rd └── wrappers.Rd ├── tests ├── simple │ ├── simple.dat │ └── simple.tpl ├── testthat.R └── testthat │ ├── _expect_monitor │ ├── _expect_nuts │ ├── _expect_nuts_mle │ ├── _expect_simple_rwm │ ├── _expect_sp │ ├── fit.RDS │ ├── setup.R │ ├── test-class-methods.R │ ├── test-consistency.R │ ├── test-diagnostics.R │ ├── test-reproducibility.R │ └── test-sample_admb.R └── vignettes ├── adnuts.Rmd ├── auto └── refs.el ├── fit.RDS ├── metric_adaptation.png ├── refs.bib ├── tree_trajectories.png └── validity_tests.png /.Rbuildignore: -------------------------------------------------------------------------------- 1 | ^.*\.Rproj$ 2 | ^\.Rproj\.user$ 3 | ^cran-comments\.md$ 4 | ^inst/examples/simple/simple.exe 5 | ^inst/examples/simple/simple.obj 6 | ^\.github/ 7 | tests/testthat/Rplots.pdf 8 | 9 | ^CRAN-RELEASE$ 10 | -------------------------------------------------------------------------------- /.github/workflows/R-CMD-check.yml: -------------------------------------------------------------------------------- 1 | # For help debugging build failures open an issue on the RStudio community with the 'github-actions' tag. 2 | # https://community.rstudio.com/new-topic?category=Package%20development&tags=github-actions 3 | 4 | # Taken from r4ss as example. 
2021-01-27 -Cole 5 | on: [push, pull_request] 6 | 7 | name: R-CMD-check 8 | 9 | jobs: 10 | 11 | R-CMD-check: 12 | runs-on: ${{ matrix.config.os }} 13 | 14 | name: ${{ matrix.config.os }} (${{ matrix.config.r }}) 15 | 16 | strategy: 17 | fail-fast: false 18 | matrix: 19 | config: 20 | - {os: windows-latest, r: 'release'} 21 | - {os: macOS-latest, r: 'release'} 22 | - {os: ubuntu-18.04, r: 'release', rspm: "https://packagemanager.rstudio.com/cran/__linux__/bionic/latest"} 23 | # - {os: ubuntu-20.04, r: 'devel', rspm: "https://packagemanager.rstudio.com/cran/__linux__/focal/latest"} 24 | 25 | env: 26 | R_REMOTES_NO_ERRORS_FROM_WARNINGS: true 27 | RSPM: ${{ matrix.config.rspm }} 28 | 29 | steps: 30 | # Checks-out your repository under $GITHUB_WORKSPACE, so your job can access it 31 | 32 | - uses: actions/checkout@v2 33 | 34 | - uses: r-lib/actions/setup-r@master 35 | with: 36 | r-version: ${{ matrix.config.r }} 37 | 38 | - uses: r-lib/actions/setup-pandoc@master 39 | 40 | - name: Get admb and put in path (Windows) 41 | if: runner.os == 'Windows' 42 | run: | 43 | Invoke-WebRequest -Uri https://github.com/admb-project/admb/releases/download/admb-12.2/admb-12.2-windows.zip -OutFile "D:\a\adnuts\admb-12.2.zip" 44 | Expand-Archive -LiteralPath "D:\a\adnuts\admb-12.2.zip" -DestinationPath "D:\a\adnuts\admb\" 45 | echo "D:\a\adnuts\admb\admb-12.2\bin" | Out-File -FilePath $env:GITHUB_PATH -Encoding utf8 -Append 46 | #dummy 47 | - name: Get admb and put in path (MacOS) 48 | if: runner.os == 'MacOS' 49 | run: | 50 | curl https://github.com/admb-project/admb/releases/download/admb-12.2/admb-12.2-macos-xcode12.zip -L -o admb-12.2.zip 51 | unzip admb-12.2.zip -d /usr/local/bin 52 | rm admb-12.2.zip 53 | ls /usr/local/bin 54 | echo "/usr/local/bin/admb-12.2/bin" >> $GITHUB_PATH 55 | #dummy 56 | - name: Get admb and put in path (Linux) 57 | if: runner.os == 'Linux' 58 | run: | 59 | wget https://github.com/admb-project/admb/releases/download/admb-12.2/admb-12.2-linux.zip 60 | sudo unzip admb-12.2-linux.zip -d /usr/local/bin 61 | sudo chmod 755 /usr/local/bin/admb-12.2/bin/admb 62 | rm admb-12.2-linux.zip 63 | echo "/usr/local/bin/admb-12.2/bin" >> $GITHUB_PATH 64 | # dummy 65 | # - name: Compile model and run 66 | # run: | 67 | # cd tests/simple 68 | # admb simple.tpl 69 | # shell: bash 70 | 71 | - name: Query dependencies 72 | run: | 73 | install.packages('remotes') 74 | saveRDS(remotes::dev_package_deps(dependencies = TRUE), ".github/depends.Rds", version = 2) 75 | writeLines(sprintf("R-%i.%i", getRversion()$major, getRversion()$minor), ".github/R-version") 76 | shell: Rscript {0} 77 | 78 | - name: Cache R packages 79 | if: runner.os != 'Windows' 80 | uses: actions/cache@v2 81 | with: 82 | path: ${{ env.R_LIBS_USER }} 83 | key: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1-${{ hashFiles('.github/depends.Rds') }} 84 | restore-keys: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1- 85 | 86 | - name: Install system dependencies 87 | if: runner.os == 'Linux' 88 | run: | 89 | while read -r cmd 90 | do 91 | eval sudo $cmd 92 | done < <(Rscript -e 'writeLines(remotes::system_requirements("ubuntu", "18.04"))') 93 | shell: bash 94 | #dummy 95 | - name: Install dependencies 96 | run: | 97 | remotes::install_deps(dependencies = TRUE) 98 | remotes::install_cran("rcmdcheck") 99 | shell: Rscript {0} 100 | 101 | - name: Check 102 | env: 103 | _R_CHECK_CRAN_INCOMING_REMOTE_: false 104 | run: rcmdcheck::rcmdcheck(args = c("--no-manual", "--as-cran"), error_on = "error", check_dir = "check") 105 | shell: 
Rscript {0} 106 | 107 | - name: Show testthat output 108 | if: always() 109 | run: find check -name 'testthat.Rout*' -exec cat '{}' \; || true 110 | shell: bash 111 | 112 | - name: Upload check results 113 | if: failure() 114 | uses: actions/upload-artifact@main 115 | with: 116 | name: ${{ runner.os }}-r${{ matrix.config.r }}-results 117 | path: check 118 | -------------------------------------------------------------------------------- /.github/workflows/calc-coverage.yml: -------------------------------------------------------------------------------- 1 | on: [push, pull_request] 2 | 3 | name: calc-coverage 4 | 5 | jobs: 6 | calc-coverage: 7 | runs-on: ${{ matrix.config.os }} 8 | 9 | name: ${{ matrix.config.os }} (${{ matrix.config.r }}) 10 | 11 | strategy: 12 | fail-fast: false 13 | matrix: 14 | config: 15 | # - {os: windows-latest, r: 'release'} 16 | # - {os: macOS-latest, r: 'release'} 17 | - {os: ubuntu-18.04, r: 'release', rspm: "https://packagemanager.rstudio.com/cran/__linux__/bionic/latest"} 18 | # - {os: ubuntu-20.04, r: 'devel', rspm: "https://packagemanager.rstudio.com/cran/__linux__/focal/latest"} 19 | 20 | env: 21 | R_REMOTES_NO_ERRORS_FROM_WARNINGS: true 22 | RSPM: ${{ matrix.config.rspm }} 23 | 24 | steps: 25 | - uses: actions/checkout@v2 26 | 27 | - uses: r-lib/actions/setup-r@master 28 | with: 29 | r-version: ${{ matrix.config.r }} 30 | 31 | - uses: r-lib/actions/setup-pandoc@v1 32 | 33 | - name: Get admb and put in path 34 | run: | 35 | wget https://github.com/admb-project/admb/releases/download/admb-12.2/admb-12.2-linux.zip 36 | sudo unzip admb-12.2-linux.zip -d /usr/local/bin 37 | sudo chmod 755 /usr/local/bin/admb-12.2/bin/admb 38 | rm admb-12.2-linux.zip 39 | echo "/usr/local/bin/admb-12.2/bin" >> $GITHUB_PATH 40 | 41 | # for some reason need a comment here to pass syntax check 42 | 43 | - name: Query dependencies 44 | run: | 45 | install.packages('remotes') 46 | saveRDS(remotes::dev_package_deps(dependencies = TRUE), ".github/depends.Rds", version = 2) 47 | writeLines(sprintf("R-%i.%i", getRversion()$major, getRversion()$minor), ".github/R-version") 48 | shell: Rscript {0} 49 | 50 | - name: Cache R packages 51 | if: runner.os != 'Windows' 52 | uses: actions/cache@v2 53 | with: 54 | path: ${{ env.R_LIBS_USER }} 55 | key: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1-${{ hashFiles('.github/depends.Rds') }} 56 | restore-keys: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1- 57 | 58 | - name: Install system dependencies 59 | if: runner.os == 'Linux' 60 | run: | 61 | while read -r cmd 62 | do 63 | eval sudo $cmd 64 | done < <(Rscript -e 'writeLines(remotes::system_requirements("ubuntu", "18.04"))') 65 | # 66 | 67 | - name: Install dependencies 68 | run: | 69 | remotes::install_deps(dependencies = TRUE) 70 | remotes::install_cran("covr") 71 | shell: Rscript {0} 72 | 73 | - name: Test coverage 74 | run: covr::codecov() 75 | shell: Rscript {0} 76 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .Rproj.user 2 | .Rhistory 3 | .RData 4 | inst/doc 5 | *.bar 6 | *.cor 7 | *.cpp 8 | *.ecm 9 | *.eva 10 | *.exe 11 | *.hst 12 | *.htp 13 | *.log 14 | *.mc2 15 | *.mcm 16 | *.obj 17 | *.par 18 | *.psv 19 | *.std 20 | *.cov 21 | *sims 22 | adaptation.csv 23 | admodel.* 24 | *pin 25 | rwm_lp.txt 26 | unbounded.csv 27 | *adapted_metric.txt 28 | tests/simple_chain* 29 | tests/testthat/Rplots.pdf 
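# Note: most of the patterns above are ADMB build artifacts (*.obj, *.exe, admodel.*)
# or model run outputs (*.psv, *.std, adaptation.csv, unbounded.csv) rather than package source.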
-------------------------------------------------------------------------------- /DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: adnuts 2 | Title: No-U-Turn MCMC Sampling for 'ADMB' Models 3 | Version: 1.1.2 4 | Authors@R: person("Cole", "Monnahan", email = "monnahc@uw.edu", role = c("aut", "cre"), 5 | comment = c(ORCID = "0000-0003-0871-6700")) 6 | Description: Bayesian inference using the no-U-turn (NUTS) algorithm by 7 | Hoffman and Gelman (2014) . 8 | Designed for 'AD Model Builder' ('ADMB') models, 9 | or when R functions for log-density and log-density gradient 10 | are available, such as 'Template Model Builder' 11 | models and other special cases. Functionality is similar to 'Stan', 12 | and the 'rstan' and 'shinystan' packages are used for diagnostics and 13 | inference. 14 | Depends: 15 | R (>= 3.6.0), 16 | snowfall (>= 1.84.6.1) 17 | URL: https://github.com/Cole-Monnahan-NOAA/adnuts 18 | BugReports: https://github.com/Cole-Monnahan-NOAA/adnuts/issues 19 | License: GPL-3 | file LICENSE 20 | Encoding: UTF-8 21 | LazyData: true 22 | RoxygenNote: 7.1.1 23 | ByteCompile: true 24 | Suggests: 25 | shinystan (>= 2.5.0), 26 | matrixcalc (>= 1.0.3), 27 | stats, 28 | knitr, 29 | TMB, 30 | rmarkdown, 31 | withr, 32 | testthat (>= 2.1.0) 33 | Imports: ellipse, rstan, R2admb, ggplot2, rlang 34 | VignetteBuilder: knitr 35 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | “Software code created by U.S. Government employees is not subject to copyright in the United States (17 U.S.C. §105). The United States/Department of Commerce reserve all rights to seek and obtain copyright protection in countries other than the United States for Software authored in its entirety by the Department of Commerce. 
To this end, the Department of Commerce hereby grants to Recipient a royalty-free, nonexclusive license to use, copy, and create derivative works of the Software outside of the United States.” 2 | -------------------------------------------------------------------------------- /NAMESPACE: -------------------------------------------------------------------------------- 1 | # Generated by roxygen2: do not edit by hand 2 | 3 | S3method(as.data.frame,adfit) 4 | S3method(plot,adfit) 5 | S3method(print,adfit) 6 | S3method(summary,adfit) 7 | export(adfit) 8 | export(check_identifiable) 9 | export(extract_sampler_params) 10 | export(extract_samples) 11 | export(is.adfit) 12 | export(launch_shinyadmb) 13 | export(pairs_admb) 14 | export(plot_marginals) 15 | export(plot_sampler_params) 16 | export(plot_uncertainties) 17 | export(sample_admb) 18 | export(sample_inits) 19 | export(sample_nuts) 20 | export(sample_rwm) 21 | export(sample_tmb) 22 | import(snowfall) 23 | importFrom(ellipse,ellipse) 24 | importFrom(ggplot2,aes) 25 | importFrom(ggplot2,facet_wrap) 26 | importFrom(ggplot2,geom_point) 27 | importFrom(ggplot2,ggplot) 28 | importFrom(ggplot2,theme_bw) 29 | importFrom(grDevices,gray) 30 | importFrom(grDevices,rgb) 31 | importFrom(graphics,abline) 32 | importFrom(graphics,axis) 33 | importFrom(graphics,box) 34 | importFrom(graphics,hist) 35 | importFrom(graphics,legend) 36 | importFrom(graphics,lines) 37 | importFrom(graphics,mtext) 38 | importFrom(graphics,par) 39 | importFrom(graphics,plot) 40 | importFrom(graphics,points) 41 | importFrom(graphics,text) 42 | importFrom(rlang,.data) 43 | importFrom(rstan,monitor) 44 | importFrom(stats,acf) 45 | importFrom(stats,cor) 46 | importFrom(stats,cov) 47 | importFrom(stats,dnorm) 48 | importFrom(stats,qnorm) 49 | importFrom(stats,rnorm) 50 | importFrom(stats,runif) 51 | importFrom(stats,sd) 52 | importFrom(utils,read.csv) 53 | importFrom(utils,read.table) 54 | importFrom(utils,write.table) 55 | -------------------------------------------------------------------------------- /NEWS.md: -------------------------------------------------------------------------------- 1 | ------------------------------------------------------------------------ 2 | adnuts 1.1.2 (2021-03-02) 3 | ------------------------------------------------------------------------ 4 | 5 | * Improve console output for RStudio users. Was broken for NUTS 6 | chains and in serial. 7 | 8 | * Add new argument 'verbose' which suppresses almost all console 9 | output when set to FALSE 10 | 11 | * Update demo file, vignette and README in preparation for 12 | submission to CRAN 13 | 14 | * Add new function plot_uncertainties 15 | 16 | * Expand continuous testing 17 | 18 | * Add slot 'par_names' to objects of type adfit 19 | 20 | ------------------------------------------------------------------------ 21 | adnuts 1.1.1 (2021-02-19) 22 | ------------------------------------------------------------------------ 23 | 24 | * Add slot for par_names to adfit objects 25 | 26 | * Add method `as.data.frame` for class `adfit` 27 | 28 | * Improved and expanded testing via continuous integration 29 | 30 | * Print the ADMB command to console when it fails to run properly 31 | to help user diagnose issues 32 | 33 | * Improve console output for RStudio users. It will now print at 34 | conclusion of parallel runs. 
35 | 36 | * Fix bugs in model names for MacOS (use ./model instead of model 37 | internally) 38 | 39 | * Fix small bug with mceval=TRUE for newest version of stock 40 | synthesis 41 | 42 | * Fix `sample_tmb` to work again for short-term use 43 | 44 | ------------------------------------------------------------------------ 45 | adnuts 1.1.0 (2020-07-13) 46 | ------------------------------------------------------------------------ 47 | 48 | * Change from `sample_admb` to `sample_nuts` and `sample_rwm` to 49 | run the NUTS and RWM algorithms, respectively. 50 | 51 | * Rework metric options to allow user to access ADMB 12.2's new 52 | dense mass matrix adaptation scheme. Added new section 53 | demonstrating this in the vignette. 54 | 55 | * Add more control to via 'skip_monitor', 56 | 'skip_unbounded', and 'skip_optimization' arguments 57 | 58 | * Remove TMB references from documentation and vignette, 59 | instead pointing users to package 'tmbstan', and collate 60 | deprecated R code into a single file 61 | 62 | * Migrate to new github repo: github.com/Cole-Monnahan-NOAA per 63 | NOAA's policy 64 | 65 | * Add testing via testthat package 66 | 67 | * Turn on calculation of ESS and Rhat manually, which get used in 68 | subsequent functions 69 | 70 | * Created S3 class 'adfit' and generic methods print, summary, 71 | and plot 72 | 73 | * Updated `pairs_admb` to have an 'order' argument for quickly 74 | plotting slow/fast parameters 75 | 76 | * Add new function `plot_marginals` for quickly plotting posterior 77 | histograms 78 | 79 | * Add new function `plot_sampler_params` to plot NUTS sampling 80 | 81 | * Make parallel the default and deprecate the 'parallel' 82 | argument. 83 | 84 | * Fix bug in parallel path which failed when it was absolute. Now 85 | can be relative or absolute. 86 | 87 | * Add check for valid version of ADMB 88 | 89 | * Minor bug fixes and documentation updates 90 | 91 | * Improve error handling and testing routines 92 | 93 | ------------------------------------------------------------------------ 94 | adnuts 1.0.1 (2019-03-15) 95 | ------------------------------------------------------------------------ 96 | 97 | * Update ADMB algorithms to use "-maxfn 0 -phase 1000" instead of 98 | "-noest". This helps with Stock Synthesis and likely other 99 | models where some initialization is skipped with -noest which 100 | can lead to unusual and undesirable behavior. Also changed 101 | behavior with inits=NULL to pull MLE values from the 102 | admodel.hes file instead of pulling from the .par file for 103 | inits. This fixes some models when negative phases are used. 104 | 105 | * Add function check_identifiable which examines a .hes file and 106 | reports which parameters are not well identified. 107 | 108 | * Add function sample_inits to generate inits from a previous 109 | fitted object. 110 | 111 | * Read in MLE values from the .hes file when inits=NULL, instead 112 | of from the .par file. 113 | 114 | * Add informative errors for common issues. 115 | 116 | * Minor bug fixes and updates. 117 | 118 | 119 | ------------------------------------------------------------------------ 120 | adnuts 1.0.0 (2018-02-04) 121 | ------------------------------------------------------------------------ 122 | 123 | Initial release. 
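For orientation, here is a minimal post-fitting workflow using the example fit bundled
with the package. This is only a sketch: it assumes the packaged 'fit.RDS' object and the
exported functions listed in NAMESPACE above, called with their default arguments.

library(adnuts)
fit <- readRDS(system.file('examples', 'fit.RDS', package='adnuts'))
summary(fit)                              # S3 summary method for 'adfit' objects
pairs_admb(fit, pars=1:2, order='slow')   # pairwise posteriors, slowest-mixing parameters first
post <- extract_samples(fit)              # posterior draws, one column per parameter
sp <- extract_sampler_params(fit)         # per-iteration NUTS diagnostics (e.g. divergent__)
## launch_shinyadmb(fit)                  # interactive diagnostics (requires 'shinystan')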
124 | -------------------------------------------------------------------------------- /R/adnuts.R: -------------------------------------------------------------------------------- 1 | #' adnuts: No-U-turn sampling for AD Model Builder (ADMB) 2 | #' 3 | #' Draw Bayesian posterior samples from an ADMB model using the 4 | #' no-U-turn MCMC sampler. Adaptation schemes are used so specifying tuning 5 | #' parameters is not necessary, and parallel execution reduces overall run 6 | #' time. 7 | #' 8 | #' @details 9 | #' The software package Stan pioneered the use of no-U-turn (NUTS) sampling 10 | #' for Bayesian models (Hoffman and Gelman 2014, Carpenter et 11 | #' al. 2017). This algorithm provides fast, efficient sampling across a 12 | #' wide range of models, including hierarchical ones, and thus can be used 13 | #' as a generic modeling tool (Monnahan et al. 2017). The functionality 14 | #' provided by \pkg{adnuts} is based loosely off Stan and \R package 15 | #' \pkg{rstan} 16 | #' 17 | #' The \pkg{adnuts} \R package provides an \R workflow for NUTS 18 | #' sampling for ADMB models (Fournier et al. 2011), including 19 | #' adaptation of step size and metric (mass matrix), parallel 20 | #' execution, and links to diagnostic and inference tools 21 | #' provided by \pkg{rstan} and \pkg{shinystan}. The ADMB 22 | #' implementation of NUTS code is bundled into the ADMB source 23 | #' itself (as of version 12.0). Thus, when a user builds an 24 | #' ADMB model the NUTS code is incorporated into the model 25 | #' executable. Thus, \pkg{adnuts} simply provides a convenient 26 | #' set of wrappers to more easily execute, diagnose, and make 27 | #' inference on a model. More details can be found in the 28 | #' package vignette. 29 | #' 30 | #' Note that previous versions of \pkg{adnuts} included 31 | #' functionality for TMB models, but this has been replaced by 32 | #' \pkg{tmbstan} (Kristensen et al. 2016, Monnahan and 33 | #' Kristensen 2018). 34 | #' 35 | #' @references 36 | #' Carpenter, B., Gelman, A., Hoffman, M.D., Lee, D., Goodrich, B., 37 | #' Betancourt, M., Riddell, A., Guo, J.Q., Li, P., Riddell, A., 38 | #' 2017. Stan: A Probabilistic Programming Language. J Stat 39 | #' Softw. 76:1-29. 40 | #' 41 | #' Fournier, D.A., Skaug, H.J., Ancheta, J., Ianelli, J., Magnusson, A., 42 | #' Maunder, M.N., Nielsen, A., Sibert, J., 2012. AD Model Builder: using 43 | #' automatic differentiation for statistical inference of highly 44 | #' parameterized complex nonlinear models. Optim Method 45 | #' Softw. 27:233-249. 46 | #' 47 | #' Hoffman, M.D., Gelman, A., 2014. The no-U-turn sampler: adaptively 48 | #' setting path lengths in Hamiltonian Monte Carlo. J Mach Learn 49 | #' Res. 15:1593-1623. 50 | #' 51 | #' Kristensen, K., Nielsen, A., Berg, C.W., Skaug, H., Bell, B.M., 52 | #' 2016. TMB: Automatic differentiation and Laplace approximation. J 53 | #' Stat Softw. 70:21. 54 | #' 55 | #' Kristensen, K., 2017. TMB: General random effect model builder tool 56 | #' inspired by ADMB. R package version 1.7.11. 57 | #' 58 | #' Monnahan, C.C., Thorson, J.T., Branch, T.A., 2017. Faster estimation of 59 | #' Bayesian models in ecology using Hamiltonian Monte Carlo. Methods in 60 | #' Ecology and Evolution. 8:339-348. 61 | #' 62 | #' Monnahan C.C., Kristensen K. (2018). No-U-turn sampling for fast 63 | #' Bayesian inference in ADMB and TMB: Introducing the adnuts and 64 | #' tmbstan R packages PLoS ONE 13(5): e0197954. 
65 | #' https://doi.org/10.1371/journal.pone.0197954 66 | #' 67 | #' Stan Development Team, 2016. Stan modeling language users guide and 68 | #' reference manual, version 2.11.0. 69 | #' 70 | #' Stan Development Team, 2016. RStan: The R interface to Stan. R package 71 | #' version 2.14.1. http://mc-stan.org. 72 | #' 73 | #' @docType package 74 | #' @name adnuts 75 | #' @import snowfall 76 | #' @importFrom stats rnorm runif cov acf cor dnorm qnorm sd 77 | #' @importFrom utils read.csv read.table write.table 78 | #' @importFrom grDevices gray rgb 79 | #' @importFrom graphics axis box hist legend lines mtext par plot 80 | #' points text abline 81 | #' @importFrom ellipse ellipse 82 | #' @importFrom rstan monitor 83 | #' @importFrom ggplot2 ggplot aes geom_point theme_bw facet_wrap 84 | ## #' @importFrom withr defer 85 | NULL 86 | -------------------------------------------------------------------------------- /R/pairs_admb.R: -------------------------------------------------------------------------------- 1 | #' Plot pairwise parameter posteriors and optionally the MLE points and 2 | #' confidence ellipses. 3 | #' 4 | #' @param fit A list as returned by \code{sample_admb}. 5 | #' @param pars A vector of parameter names or integers 6 | #' representing which parameters to subset. Useful if the model 7 | #' has a larger number of parameters and you just want to show 8 | #' a few key ones. 9 | #' @param label.cex Control size of outer and diagonal labels (default 1) 10 | #' @param order The order to consider the parameters. Options are 11 | #' NULL (default) to use the order declared in the model, or 12 | #' 'slow' and 'fast' which are based on the effective sample 13 | #' sizes ordered by slowest or fastest mixing respectively. See 14 | #' example for usage. 15 | #' @param diag What type of plot to include on the diagonal, 16 | #' options are 'acf' which plots the autocorrelation function 17 | #' \code{acf}, 'hist' shows marginal posterior histograms, and 18 | #' 'trace' the trace plot. 19 | #' @param acf.ylim If using the acf function on the diagonal, 20 | #' specify the y limit. The default is c(-1,1). 21 | #' @param ymult A vector of length ncol(posterior) specifying how 22 | #' much room to give when using the hist option for the 23 | #' diagonal. For use if the label is blocking part of the 24 | #' plot. The default is 1.3 for all parameters. 25 | #' @param axis.col Color of axes 26 | #' @param ... Arguments to be passed to plot call in lower 27 | #' diagonal panels 28 | #' @param limits A list containing the ranges for each parameter 29 | #' to use in plotting. 30 | #' @param add.monitor Boolean whether to print effective sample 31 | #' @param add.mle Boolean whether to add 95\% confidence ellipses 32 | #' @param unbounded Whether to use the bounded or unbounded 33 | #' version of the parameters. 34 | #' size (ESS) and Rhat values on the diagonal. 35 | #' @return Produces a plot, and returns nothing. 36 | #' @details This function is modified from the base \code{pairs} 37 | #' code to work specifically with fits from the 38 | #' 'adnuts' package using either the NUTS or RWM MCMC 39 | #' algorithms. If an invertible Hessian was found (in 40 | #' \code{fit$mle}) then estimated covariances are available to 41 | #' compare and added automatically (red ellipses). Likewise, a 42 | #' "monitor" object from \code{rstan::monitor} is attached as 43 | #' \code{fit$monitor} and provides effective sample sizes (ESS) 44 | #' and Rhat values. 
The ESS are used to potentially order the 45 | #' parameters via argument \code{order}, but also printed on 46 | #' the diagonal. 47 | #' @export 48 | #' @author Cole Monnahan 49 | #' @examples 50 | #' fit <- readRDS(system.file('examples', 'fit.RDS', package='adnuts')) 51 | #' pairs_admb(fit) 52 | #' pairs_admb(fit, pars=1:2) 53 | #' pairs_admb(fit, pars=c('b', 'a')) 54 | #' pairs_admb(fit, pars=1:2, order='slow') 55 | #' pairs_admb(fit, pars=1:2, order='fast') 56 | #' 57 | pairs_admb <- function(fit, order=NULL, 58 | diag=c("trace","acf","hist"), 59 | acf.ylim=c(-1,1), ymult=NULL, axis.col=gray(.5), 60 | pars=NULL, label.cex=.8, limits=NULL, 61 | add.mle=TRUE, add.monitor=TRUE, unbounded=FALSE, 62 | ...){ 63 | if(!is.adfit(fit)) 64 | stop("Argument 'fit' is not a valid object returned by 'sample_admb'") 65 | if(unbounded | !add.mle){ 66 | mle <- NULL 67 | } else { 68 | mle <- fit$mle 69 | } 70 | posterior <- extract_samples(fit, inc_lp=TRUE, unbounded=unbounded) 71 | chains <- rep(1:dim(fit$samples)[2], each=dim(fit$samples)[1]-fit$warmup) 72 | divs <- if(fit$algorithm=="NUTS") 73 | extract_sampler_params(fit)$divergent__ else NULL 74 | ptcex <- .2 75 | divcex <- .75 76 | chaincols <- 1:length(unique(chains)) 77 | ## reset to old par when exiting 78 | old.par <- par(no.readonly=TRUE) 79 | on.exit(par(old.par)) 80 | diag <- match.arg(diag) 81 | par.names <- names(posterior) 82 | ess <- fit$monitor$n_eff 83 | Rhat <- fit$monitor$Rhat 84 | if(is.null(ess)) 85 | warning("No monitor information found in fitted object so ESS and Rhat not available. See details of help.") 86 | if(!is.null(order)){ 87 | if(! order %in% c('slow', 'fast')){ 88 | stop("Invalid 'order' argument, should be 'slow', 'fast', or NULL") 89 | } 90 | if(is.null(ess)){ 91 | stop("No effective sample sizes found so cannot order by slow/fast.") 92 | } 93 | if(!is.numeric(pars[1])){ 94 | warning("Ignoring 'order' argument because parameter names supplied in 'pars'") 95 | } else { 96 | ## Get slowest or fastest parameter names 97 | ind <- order(ess, decreasing=(order=='fast')) 98 | par.names <- par.names[ind] 99 | } 100 | } 101 | ## if(!(NCOL(posterior) %in% c(mle$nopar, mle$nopar+1))) 102 | ## stop("Number of parameters in posterior and mle not the same") 103 | ## pars will either be NULL, so use all parameters. OR a vector of 104 | ## indices OR a vector of characters. Want to force lp__ to be the very 105 | ## last one stylistically, and b/c there is no ellipse for it. 106 | if(is.null(pars)){ 107 | ## Use all or first 10 108 | if(NCOL(posterior)>10){ 109 | warning("Only showing first 10 parameters, use 'pars' argument to adjust") 110 | pars <- par.names[1:10] 111 | } else { 112 | pars <- par.names[1:NCOL(posterior)] 113 | } 114 | } else if(is.numeric(pars[1])){ 115 | ## Index can be used instead of character names. 
Note this 116 | ## can be sorted from above 117 | pars <- par.names[pars] 118 | } 119 | ## Now pars is character and possibly sorted by fast/slow 120 | pars.bad <- match(x=pars, table=names(posterior)) 121 | if(any(is.na(pars.bad))){ 122 | warning("Some par names did not match -- dropped") 123 | print(pars.bad) 124 | pars <- pars[!is.na(pars.bad)] 125 | } 126 | ## Converts character to index which is used throughout to 127 | ## subset when looping 128 | pars.ind <- match(x=pars, table=names(posterior)) 129 | n <- length(pars.ind) 130 | n.mle <- ifelse(is.null(mle), 0, nrow(mle$cor)) 131 | if(n==1) stop("This function is only meaningful for >1 parameter") 132 | if(is.null(ymult)) ymult <- rep(1.3, n) 133 | ## If no limits given, calculate the max range of the posterior samples and 134 | ## parameter confidence interval. 135 | if(is.null(limits)){ 136 | limits <- list() 137 | for(i in 1:n){ 138 | if(pars.ind[i]<=n.mle){ 139 | limit.temp <- mle$est[pars.ind[i]] + 140 | c(-1,1)*1.96*mle$se[pars.ind[i]] 141 | } else { 142 | limit.temp <- c(NA,NA) 143 | } 144 | ## multiplier for the ranges, adjusts the whitespace around the 145 | ## plots 146 | min.temp <- min(posterior[,pars.ind[i]], limit.temp[1], na.rm=TRUE) 147 | max.temp <- max(posterior[,pars.ind[i]], limit.temp[2], na.rm=TRUE) 148 | margin <- .15*(max.temp-min.temp) 149 | limits[[i]] <- c(min.temp-margin, max.temp+margin) 150 | } 151 | } 152 | ## Change posterior point look depending on how many samples. Makes 153 | ## easier to read. 154 | N <- NROW(posterior) 155 | mypch <- 16; mycol <- 1 156 | if(N>=1000){ 157 | mycol <- rgb(0,0,0,.5) 158 | } else if(N>=10000){ 159 | mycol <- rgb(0,0,0,.05) 160 | } 161 | if(is.null(divs)) divs <- rep(0, N) 162 | par(mfrow=c(n,n), mar=0*c(.1,.1,.1,.1), yaxs="i", xaxs="i", mgp=c(.25, .25,0), 163 | tck=-.02, cex.axis=.65, col.axis=axis.col, oma=c(2, 2, 2,2)) 164 | temp.box <- function() box(col=axis.col, lwd=.5) 165 | ## Row and col here are not the posterior, but the matrix of pairwise 166 | ## combinations 167 | for(row in 1:n){ 168 | for(col in 1:n){ 169 | ii <- pars.ind[row] 170 | jj <- pars.ind[col] 171 | ## Diagonal, so add user choice 172 | if(row==col){ 173 | if(diag=="hist"){ 174 | h <- hist(posterior[,ii], plot=F) 175 | ## Annoyingling you can't pass NULL to xlim in hist. So 176 | ## have to split up for two cases depending on limits. 
177 | if(is.null(limits)){ 178 | hist(posterior[,ii], axes=F, freq=FALSE, ann=F, 179 | ylim=c(0, ymult[row]*max(h$density)), 180 | col=gray(.8), border=gray(.5)) 181 | } else { 182 | ## Else use the user provided limits 183 | hist(posterior[,ii], axes=F, freq=FALSE, ann=F, 184 | ylim=c(0, ymult[row]*max(h$density)), 185 | col=gray(.8), border=gray(.5), xlim=limits[[row]]) 186 | } 187 | temp.box() 188 | } else if(diag=="acf") { 189 | acf(posterior[,ii], axes=F, ann=F, ylim=acf.ylim) 190 | temp.box() 191 | } else if(diag=="trace") { 192 | ## Trace plots for each chain separately 193 | xlim <- c(1, length(chains[chains==1])) 194 | plot(x=0, y=0, type="n", axes=FALSE, 195 | ann=FALSE, ylim=limits[[row]], xlim=xlim) 196 | for(ll in unique(chains)){ 197 | lines(posterior[chains==ll,ii], col=chaincols[ll], lwd=.1) 198 | } 199 | temp.box() 200 | } 201 | ## Add ESS and Rhat info to diagonal 202 | if(!is.null(ess) & !is.null(Rhat) & add.monitor) 203 | mtext(paste0('ESS=', round(ess[ii], 0), " Rhat=", format(round(Rhat[ii],2),nsmall=2)), 204 | cex=.8*label.cex, line=-1) 205 | } 206 | ## If lower triangle and covariance known, add scatterplot 207 | if(row>col){ 208 | par(xaxs="r", yaxs="r") 209 | plot(x=posterior[,jj], y=posterior[,ii], axes=FALSE, ann=FALSE, 210 | pch=mypch, cex=ptcex, col=mycol, xlim=limits[[col]], 211 | ylim=limits[[row]], ...) 212 | ## replot divegences on top so they are always visible 213 | points(x=posterior[which(divs==1),jj], y=posterior[which(divs==1),ii], 214 | pch=mypch, cex=divcex, col='green') 215 | ## can only add MLE stuff if not lp__ parameter which 216 | ## doesn'th ave one 217 | if(ii<=n.mle & jj <=n.mle){ 218 | ## Add bivariate 95% normal levels from MLE 219 | points(x=mle$est[jj], y=mle$est[ii], 220 | pch=16, cex=.5, col='red') 221 | ## Get points of a bivariate normal 95% confidence contour 222 | if(!requireNamespace("ellipse", quietly=TRUE)){ 223 | warning("ellipse package needs to be installed to show ellipses") 224 | } else { 225 | ellipse.temp <- ellipse(x=mle$cor[jj, ii], 226 | scale=mle$se[c(jj, ii)], 227 | centre= mle$est[c(jj, ii)], npoints=1000, 228 | level=.95) 229 | lines(ellipse.temp , lwd=.5, lty=1, col="red") 230 | } 231 | } 232 | par(xaxs="i", yaxs="i") 233 | temp.box() 234 | } 235 | if(row1) { 254 | par( mgp=c(.05, ifelse(row %% 2 ==1, .15, .65),0) ) 255 | axis(2, col=axis.col, lwd=.5) 256 | } 257 | if(col==1 & row ==1){ 258 | par( mgp=c(.05, ifelse(row %% 2 ==1, .15, .65),0) ) 259 | axis(2, col=axis.col, lwd=.5) 260 | } 261 | if(row==1) mtext(pars[col], line=ifelse(col %% 2 ==1, .1, 1.1), 262 | cex=label.cex) 263 | if(col==n) 264 | mtext(pars[ii], side=4, line=ifelse(row %% 2 ==1, 0, 1), cex=label.cex) 265 | } 266 | } 267 | } 268 | -------------------------------------------------------------------------------- /R/parallel.R: -------------------------------------------------------------------------------- 1 | 2 | ## #' Combine multiple fits as returned from \code{sample_tmb} or 3 | ## #' \code{sample_admb} run as a single chain. 4 | ## #' 5 | ## #' @param fits A list of fits, each having a single chain 6 | ## #' @return A merged fit across chains. 
7 | ## combine_fits <- function(fits){ 8 | ## z <- list() 9 | ## test <- lapply(fits, function(x) x$samples) 10 | ## samples <- array(NA, dim=c(nrow(test[[1]]), length(test), ncol(test[[1]]))) 11 | ## dimnames(samples) <- dimnames(fits[[1]]$samples) 12 | ## for(i in 1:length(test)) samples[,i,] <- test[[i]] 13 | ## z$samples <- samples 14 | ## sp <- sapply(fits, function(x) x$sampler_params) 15 | ## z$sampler_params <- sp 16 | ## z$time.warmup <- unlist(lapply(fits, function(x) x$time.warmup)) 17 | ## z$time.total <- unlist(lapply(fits, function(x) x$time.total)) 18 | ## z$algorithm <- fits[[1]]$algorithm 19 | ## z$warmup <- fits[[1]]$warmup 20 | ## z$model <- fits[[1]]$model 21 | ## z$max_treedepth <- fits[[1]]$max_treedepth 22 | ## return(z) 23 | ## } 24 | 25 | ## A wrapper for running ADMB models in parallel 26 | sample_admb_parallel <- function(parallel_number, path, algorithm, ...){ 27 | olddir <- getwd() 28 | on.exit(setwd(olddir)) 29 | newdir <- paste0(path,"_chain_",parallel_number) 30 | if(dir.exists(newdir)){ 31 | unlink(newdir, TRUE) 32 | if(dir.exists(newdir)) stop(paste("Could not remove folder:", newdir)) 33 | } 34 | dir.create(newdir) 35 | if(!dir.exists(newdir)) stop(paste("Could not create parallel folder:", newdir)) 36 | trash <- file.copy(from=list.files(path, full.names=TRUE), to=newdir) 37 | if(algorithm=="NUTS") 38 | fit <- sample_admb_nuts(path=newdir, chain=parallel_number, ...) 39 | if(algorithm=="RWM") 40 | fit <- sample_admb_rwm(path=newdir, chain=parallel_number, ...) 41 | unlink(newdir, TRUE) 42 | return(fit) 43 | } 44 | 45 | ## A wrapper for running TMB models in parallel 46 | sample_tmb_parallel <- function(parallel_number, obj, init, path, 47 | algorithm, lower, upper, seed, laplace, ...){ 48 | ## Each node starts in a random work directory. Rebuild TMB model obj so 49 | ## can link it in each session. 50 | setwd(path) 51 | dyn.load(TMB::dynlib(obj$env$DLL)) 52 | ## Use 'shape' attribute to obtain full length of 'map'ped parameters. 53 | map.index <- which(names(obj$env$parameters) %in% names(obj$env$map)) 54 | new.par <- obj$env$parameters 55 | new.par[map.index] <- lapply(obj$env$parameters[map.index], function(x) attr(x, "shape")) 56 | obj <- TMB::MakeADFun(data=obj$env$data, parameters=new.par, random=obj$env$random, 57 | map=obj$env$map, DLL=obj$env$DLL, silent=TRUE) 58 | obj$env$beSilent() 59 | 60 | ## Ignore parameters declared as random? Borrowed from tmbstan. 61 | if(laplace){ 62 | par <- obj$env$last.par.best[-obj$env$random] 63 | fn0 <- obj$fn 64 | gr0 <- obj$gr 65 | } else { 66 | par <- obj$env$last.par.best 67 | fn0 <- obj$env$f 68 | gr0 <- function(x) obj$env$f(x, order=1) 69 | } 70 | ## Parameter constraints, if provided, require the fn and gr functions to 71 | ## be modified to account for differents in volume. There are four cases: 72 | ## no constraints, bounded below, bounded above, or both (box 73 | ## constraint). 
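## (Change of variables: if x = T(y) maps the unbounded sampling variable y to the
## bounded parameter x, the log-density in y-space is log p(T(y)) + log|T'(y)|.
## As an illustration -- not necessarily the transform .transform() uses -- a lower
## bound a alone could use x = a + exp(y), whose correction log(exp(y)) is just y.
## In the code below, 'scales' is the transform derivative and sum(log(scales))
## adds that Jacobian term.)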
74 | bounded <- !(is.null(lower) & is.null(upper)) 75 | if(bounded){ 76 | if(is.null(lower)) lower <- rep(-Inf, len=length(upper)) 77 | if(is.null(upper)) upper <- rep(Inf, len=length(lower)) 78 | cases <- .transform.cases(lower, upper) 79 | fn <- function(y){ 80 | x <- .transform(y, lower, upper, cases) 81 | scales <- .transform.grad(y, lower, upper, cases) 82 | -fn0(x) + sum(log(scales)) 83 | } 84 | gr <- function(y){ 85 | x <- .transform(y, lower, upper, cases) 86 | scales <- .transform.grad(y, lower, upper, cases) 87 | scales2 <- .transform.grad2(y, lower, upper, cases) 88 | -as.vector(gr0(x))*scales + scales2 89 | } 90 | ## Don't need to adjust this b/c init is already backtransformed in 91 | ## sample_tmb. 92 | ## init <- .transform.inv(x=unlist(init), a=lower, b=upper, cases=cases) 93 | } else { 94 | fn <- function(y) -fn0(y) 95 | gr <- function(y) -as.vector(gr0(y)) 96 | } 97 | if(algorithm=="NUTS") 98 | fit <- sample_tmb_nuts(chain=parallel_number, fn=fn, gr=gr, 99 | init=init, seed=seed, ...) 100 | if(algorithm=="RWM") 101 | fit <- sample_tmb_rwm(chain=parallel_number, fn=fn, init=init, 102 | seed=seed, ...) 103 | return(fit) 104 | } 105 | -------------------------------------------------------------------------------- /R/sample_admb.R: -------------------------------------------------------------------------------- 1 | 2 | 3 | #' @rdname wrappers 4 | #' @export 5 | sample_nuts <- function(model, path=getwd(), iter=2000, init=NULL, chains=3, warmup=NULL, 6 | seeds=NULL, thin=1, mceval=FALSE, duration=NULL, 7 | parallel=FALSE, cores=NULL, control=NULL, 8 | skip_optimization=TRUE, verbose=TRUE, 9 | skip_monitor=FALSE, skip_unbounded=TRUE, 10 | admb_args=NULL, extra.args=NULL){ 11 | ## Argument checking and processing 12 | if (!missing(parallel)) { 13 | warning("Argument parallel is deprecated, set cores=1 for serial, and cores>1 for parallel.", 14 | call. = FALSE) 15 | } 16 | if (!missing(extra.args)) { 17 | warning("Argument extra.args is deprecated, use admb_args instead", 18 | call. = FALSE) 19 | admb_args <- extra.args 20 | } 21 | if(is.null(init) & verbose) 22 | warning('Default init of MLE used for each chain. Consider using dispersed inits') 23 | .sample_admb(model=model, path=path, iter=iter, init=init, 24 | chains=chains, warmup=warmup, seeds=seeds, 25 | thin=thin, mceval=mceval, duration=duration, 26 | cores=cores, control=control, algorithm="NUTS", 27 | skip_optimization=skip_optimization, 28 | skip_monitor=skip_monitor, 29 | skip_unbounded=skip_unbounded, 30 | admb_args=admb_args, verbose=verbose) 31 | } 32 | 33 | #' @rdname wrappers 34 | #' @export 35 | sample_rwm <- function(model, path=getwd(), iter=2000, init=NULL, chains=3, warmup=NULL, 36 | seeds=NULL, thin=1, mceval=FALSE, duration=NULL, 37 | parallel=FALSE, cores=NULL, control=NULL, 38 | skip_optimization=TRUE, verbose=TRUE, 39 | skip_monitor=FALSE, skip_unbounded=TRUE, 40 | admb_args=NULL, extra.args=NULL){ 41 | ## Argument checking and processing 42 | if (!missing(parallel)) { 43 | warning("Argument parallel is deprecated, set cores=1 for serial, and cores>1 for parallel.", 44 | call. = FALSE) 45 | } 46 | if (!missing(extra.args)) { 47 | warning("Argument extra.args is deprecated, use admb_args instead", 48 | call. = FALSE) 49 | admb_args <- extra.args 50 | } 51 | if(is.null(init) & verbose) 52 | warning('Default init of MLE used for each chain. 
Consider using dispersed inits') 53 | .sample_admb(model=model, path=path, iter=iter, init=init, 54 | chains=chains, warmup=warmup, seeds=seeds, 55 | thin=thin, mceval=mceval, duration=duration, 56 | cores=cores, control=control, algorithm="RWM", 57 | skip_optimization=skip_optimization, 58 | skip_monitor=skip_monitor, verbose=verbose, 59 | skip_unbounded=skip_unbounded, 60 | admb_args=admb_args) 61 | } 62 | 63 | 64 | 65 | #' Bayesian inference of an ADMB model using the no-U-turn 66 | #' sampler (NUTS) or random walk Metropolis (RWM) algorithms. 67 | #' 68 | #' Draw Bayesian posterior samples from an AD Model Builder 69 | #' (ADMB) model using an MCMC algorithm. `sample_nuts` and 70 | #' `sample_rwm` generates posterior samples from which inference 71 | #' can be made. 72 | #' 73 | #' Adaptation schemes are used with NUTS so specifying tuning 74 | #' parameters is not necessary. See vignette for options for 75 | #' adaptation of step size and mass matrix. The RWM algorithm 76 | #' provides no new functionality not available from previous 77 | #' versions of ADMB. However, `sample_rwm` has an improved 78 | #' console output, is setup for parallel execution, and a smooth 79 | #' workflow for diagnostics. 80 | #' 81 | #' Parallel chains will be run if argument `cores` is greater 82 | #' than one. This entails copying the folder, and starting a new 83 | #' R session to run that chain, which are then merged back 84 | #' together. Note that console output is inconsistent when using 85 | #' parallel, and may not show. On Windows the R terminal shows 86 | #' output live, but the GUI does not. RStudio is a special case 87 | #' and will not show live, and instead is captured and returned 88 | #' at the end. It is strongly recommended to start with serial 89 | #' execution as debugging parallel chains is very difficult. 90 | #' 91 | #' Note that the algorithm code is in the ADMB source code, and 92 | #' 'adnuts' provides a wrapper for it. The command line arguments 93 | #' are returned and can be examined by the user. See vignette for 94 | #' more information. 95 | #' 96 | #' @details This function implements algorithm 6 of Hoffman and Gelman (2014), 97 | #' and loosely follows package \code{rstan}. The step size can be 98 | #' adapted or specified manually. The metric (i.e., mass matrix) can be 99 | #' unit diagonal, adapted diagonal (default and recommended), a dense 100 | #' matrix specified by the user, or an adapted dense matrix. 101 | #' Further control of algorithms can be 102 | #' specified with the \code{control} argument. Elements are: 103 | #' \describe{ 104 | #' \item{adapt_delta}{The target acceptance rate. D} 105 | #' \item{metric}{The mass metric to use. Options are: "unit" for a unit diagonal 106 | #' matrix; \code{NULL} to estimate a diagonal matrix during warmup; a matrix 107 | #' to be used directly (in untransformed space).} 108 | #' \item{adapt_delta}{Whether adaptation of step size is turned on.} 109 | #' \item{adapt_mass}{Whether adaptation of mass matrix is turned 110 | #' on. Currently only allowed for diagonal metric.} 111 | #' \item{adapt_mass_dense}{Whether dense adaptation of mass 112 | #' matrix is turned on.} 113 | #' \item{max_treedepth}{Maximum treedepth for the NUTS algorithm.} 114 | #' \item{stepsize}{The stepsize for the NUTS algorithm. 
If \code{NULL} it 115 | #' will be adapted during warmup.} 116 | #' \item{adapt_init_buffer}{The initial buffer size during mass matrix 117 | #' adaptation where sample information is not used (default 118 | #' 50)} 119 | #' \item{adapt_term_buffer}{The terminal buffer size (default 75) 120 | #' during mass 121 | #' matrix adaptation (final fast phase)} 122 | #' \item{adapt_window}{The initial size of the mass matrix 123 | #' adaptation window, which gets doubled each time thereafter.} 124 | #' \item{refresh}{The rate at which to refresh progress to the 125 | #' console. Defaults to even 10%. A value of 0 turns off 126 | #' progress updates.} 127 | #' } 128 | #' The adaptation scheme (step size and mass matrix) is based heavily on those by the 129 | #' software Stan, and more details can be found in that 130 | #' documentation and this vignette. 131 | #' 132 | #' @author Cole Monnahan 133 | #' @name wrappers 134 | #' @param model Name of model (i.e., 'model' for model.tpl). For 135 | #' non-Windows systems this will automatically be converted to 136 | #' './model' internally. For Windows, long file names are 137 | #' sometimes shortened from e.g., 'long_model_filename' to 138 | #' 'LONG_~1'. This should work, but will throw warnings. Please 139 | #' shorten the model name. See 140 | #' https://en.wikipedia.org/wiki/8.3_filename. 141 | #' @param path Path to model executable. Defaults to working 142 | #' directory. Often best to have model files in a separate 143 | #' subdirectory, particularly for parallel. 144 | #' @param iter The number of samples to draw. 145 | #' @param init A list of lists containing the initial parameter 146 | #' vectors, one for each chain or a function. It is strongly 147 | #' recommended to initialize multiple chains from dispersed 148 | #' points. A of NULL signifies to use the starting values 149 | #' present in the model (i.e., \code{obj$par}) for all chains. 150 | #' @param chains The number of chains to run. 151 | #' @param warmup The number of warmup iterations. 152 | #' @param seeds A vector of seeds, one for each chain. 153 | #' @param thin The thinning rate to apply to samples. Typically 154 | #' not used with NUTS. 155 | #' @param mceval Whether to run the model with \code{-mceval} on 156 | #' samples from merged chains. 157 | #' @param duration The number of minutes after which the model 158 | #' will quit running. 159 | #' @param parallel A deprecated argument, use cores=1 for serial 160 | #' execution or cores>1 for parallel (default is to parallel 161 | #' with cores equal to the available-1) 162 | #' @param cores The number of cores to use for parallel 163 | #' execution. Default is number available in the system minus 164 | #' 1. If \code{cores=1}, serial execution occurs (even if 165 | #' \code{chains>1}), otherwise parallel execution via package 166 | #' snowfall is used. For slow analyses it is recommended to set 167 | #' \code{chains}<=\code{cores} so each core needs to run only a 168 | #' single chain. 169 | #' @param control A list to control the sampler. See details for 170 | #' further use. 171 | #' @param skip_optimization Whether to run the optimizer before 172 | #' running MCMC. This is rarely need as it is better to run it 173 | #' once before to get the covariance matrix, or the estimates 174 | #' are not needed with adaptive NUTS. 175 | #' @param skip_monitor Whether to skip calculating diagnostics 176 | #' (effective sample size, Rhat) via the \code{rstan::monitor} 177 | #' function. 
This can be slow for models with high dimension or 178 | #' many iterations. The result is used in plots and summaries 179 | #' so it is recommended to turn on. If model run with 180 | #' \code{skip_monitor=FALSE} you can recreate it post-hoc by 181 | #' setting \code{fit$monitor=rstan::monitor(fit$samples, 182 | #' fit$warmup, print=FALSE)}. 183 | #' @param skip_unbounded Whether to skip returning the unbounded 184 | #' version of the posterior samples in addition to the bounded 185 | #' ones. It may be advisable to set to FALSE for very large 186 | #' models to save space. 187 | #' @param verbose Flag whether to show console output (default) 188 | #' or suppress it completely except for warnings and 189 | #' errors. Works for serial or parallel execution. 190 | #' @param admb_args A character string which gets passed to the 191 | #' command line, allowing finer control 192 | #' @param extra.args Deprecated, use a \code{admb_args} instead. 193 | #' @section Warning: The user is responsible for specifying the 194 | #' model properly (priors, starting values, desired parameters 195 | #' fixed, etc.), as well as assessing the convergence and 196 | #' validity of the resulting samples (e.g., through the 197 | #' \code{coda} package), or with function 198 | #' \code{\link{launch_shinytmb}} before making 199 | #' inference. Specifically, priors must be specified in the 200 | #' template file for each parameter. Unspecified priors will be 201 | #' implicitly uniform. 202 | #' @examples 203 | #' \dontrun{ 204 | #' ## This is the packaged simple regression model 205 | #' path.simple <- system.file('examples', 'simple', package='adnuts') 206 | #' ## It is best to have your ADMB files in a separate folder and provide that 207 | #' ## path, so make a copy of the model folder locally. 208 | #' path <- 'simple' 209 | #' dir.create(path) 210 | #' trash <- file.copy(from=list.files(path.simple, full.names=TRUE), to=path) 211 | #' ## Compile and run model 212 | #' oldwd <- getwd() 213 | #' setwd(path) 214 | #' system('admb simple.tpl') 215 | #' system('simple') 216 | #' setwd('..') 217 | #' init <- function() rnorm(2) 218 | #' ## Run NUTS with defaults 219 | #' fit <- sample_nuts(model='simple', init=init, path=path) 220 | #' unlink(path, TRUE) # cleanup folder 221 | #' setwd(oldwd) 222 | #' } 223 | #' 224 | NULL 225 | 226 | #' Deprecated version of wrapper function. Use sample_nuts or 227 | #' sample_rwm instead. 228 | #' 229 | #' @inheritParams wrappers 230 | #' @param algorithm The algorithm to use, one of "NUTS" or "RWM" 231 | #' @section Warning: This is deprecated and will cease to exist 232 | #' in future releases 233 | #' @export 234 | sample_admb <- function(model, path=getwd(), iter=2000, init=NULL, chains=3, warmup=NULL, 235 | seeds=NULL, thin=1, mceval=FALSE, duration=NULL, 236 | parallel=FALSE, cores=NULL, control=NULL, 237 | skip_optimization=TRUE, algorithm='NUTS', 238 | skip_monitor=FALSE, skip_unbounded=TRUE, 239 | admb_args=NULL){ 240 | ## Argument checking and processing 241 | if (!missing(parallel)) { 242 | warning("Argument parallel is deprecated, set cores=1 for serial, and cores>1 for parallel.", 243 | call. = FALSE) 244 | } 245 | warning("Function sample_admb is deprecated, use sample_nuts or sample_rwm instead", 246 | call. 
= FALSE) 247 | .sample_admb(model=model, path=path, iter=iter, init=init, 248 | chains=chains, warmup=warmup, seeds=seeds, 249 | thin=thin, mceval=mceval, duration=duration, 250 | cores=cores, control=control, algorithm=algorithm, 251 | skip_optimization=skip_optimization, 252 | skip_monitor=skip_monitor, 253 | skip_unbounded=skip_unbounded, 254 | admb_args=admb_args) 255 | } 256 | 257 | 258 | #' Hidden wrapper function for sampling from ADMB models 259 | #' 260 | #' @inheritParams wrappers 261 | #' @param algorithm The algorithm to use, one of "NUTS" or "RWM" 262 | #' 263 | .sample_admb <- function(model, path=getwd(), iter=2000, init=NULL, chains=3, warmup=NULL, 264 | seeds=NULL, thin=1, mceval=FALSE, duration=NULL, 265 | cores=NULL, control=NULL, verbose=TRUE, 266 | algorithm="NUTS", skip_optimization=TRUE, 267 | skip_monitor=FALSE, skip_unbounded=TRUE, 268 | admb_args=NULL){ 269 | if(is.null(cores)) cores <- parallel::detectCores()-1 270 | cores.max <- parallel::detectCores() 271 | if(cores > cores.max) { 272 | cores <- cores.max-1 273 | warning(paste('Specified cores larger than available, using total-1=', cores)) 274 | } 275 | stopifnot(is.numeric(cores)) 276 | if(!is.null(control) & !is.list(control)) 277 | stop("control argument invalid, must be a list") 278 | if(cores<1) stop(paste("Cores must be >=1, but is", cores)) 279 | parallel <- ifelse(cores==1 | chains ==1, FALSE, TRUE) 280 | if(parallel & cores < chains) 281 | if(verbose) message(paste("Recommend using chains < cores=", cores)) 282 | stopifnot(thin >=1); stopifnot(chains >= 1) 283 | if(is.null(seeds)) seeds <- sample.int(1e7, size=chains) 284 | if(length(seeds) != chains) stop("Length of seeds must match chains") 285 | if(iter < 1 | !is.numeric(iter)) stop("iter must be > 1") 286 | 287 | ## Catch path and model name errors early 288 | .check_model_path(model=model, path=path) 289 | ## Check verison; warnings only meaningful for NUTS at the moment. 290 | v <- .check_ADMB_version(model=model, path=path, warn= (algorithm=='NUTS')) 291 | if(v<=12.0 & !skip_unbounded) { 292 | warning(paste('Version', v, 'of ADMB is incompatible with skip_unbounded=FALSE, ignoring')) 293 | skip_unbounded <- TRUE 294 | } 295 | 296 | ## Update control with defaults 297 | if(is.null(warmup)) warmup <- floor(iter/2) 298 | if(!(algorithm %in% c('NUTS', 'RWM'))) 299 | stop("Invalid algorithm specified") 300 | if(algorithm=='NUTS') 301 | control <- .update_control(control) 302 | if(is.null(init)){ 303 | ## warning moved to higher functions 304 | } else 305 | if(is.function(init)){ 306 | init <- lapply(1:chains, function(x) init()) 307 | } else if(!is.list(init)){ 308 | stop("init must be NULL, a list, or a function") 309 | } 310 | if(!is.null(init) & length(init) != chains){ 311 | stop("Length of init does not equal number of chains.") 312 | } 313 | ## Delete any psv files in case something goes wrong we dont use old 314 | ## values by accident. Also windows short names might cause 315 | ## there to be two 316 | ff <- list.files(path)[grep('.psv', x=list.files(path))] 317 | if(length(ff)>1){ 318 | if(.Platform$OS.type == "windows" & length(grep("~", ff))>0){ 319 | warning("It appears a shortened Windows filename exists,", 320 | "which occurs with long\nmodel names. Try shortening it.", 321 | " See help for argument 'model'") 322 | } else { 323 | warning("Found more than one .psv file. 
Deleting: ", paste(ff, collapse=' ')) 324 | } 325 | } 326 | trash <- suppressWarnings(file.remove(file.path(path, ff))) 327 | trash <- suppressWarnings(file.remove(file.path(path, 'adaptation.csv'), 328 | file.path(path, 'unbounded.csv'))) 329 | ## Run in serial 330 | if(!parallel){ 331 | if(algorithm=="NUTS"){ 332 | mcmc.out <- lapply(1:chains, function(i) 333 | sample_admb_nuts(path=path, model=model, warmup=warmup, duration=duration, 334 | iter=iter, init=init[[i]], chain=i, 335 | seed=seeds[i], thin=thin, verbose=verbose, 336 | control=control, admb_args=admb_args, 337 | skip_optimization=skip_optimization, 338 | parallel=parallel)) 339 | } else { 340 | mcmc.out <- lapply(1:chains, function(i) 341 | sample_admb_rwm(path=path, model=model, warmup=warmup, duration=duration, 342 | iter=iter, init=init[[i]], chain=i, 343 | seed=seeds[i], thin=thin, 344 | control=control, verbose=verbose, 345 | skip_optimization=skip_optimization, 346 | admb_args=admb_args, 347 | parallel=parallel)) 348 | } 349 | ## Parallel execution 350 | } else { 351 | console <- .check_console_printing(parallel) 352 | snowfall::sfStop() 353 | snowfall::sfInit(parallel=TRUE, cpus=cores) 354 | if(verbose){ 355 | if(console) 356 | message("Parallel output to console is inconsistent between consoles.\n", 357 | "For live updates try using Rterm. See help for info on console output") 358 | else 359 | message("RStudio detected so output will display at conclusion. \n", 360 | "For live updates try using Rterm. See help for info on console output") 361 | } 362 | ## errors out with empty workspace 363 | if(length(ls(envir=globalenv()))>0) 364 | snowfall::sfExportAll() 365 | on.exit(snowfall::sfStop()) 366 | mcmc.out <- snowfall::sfLapply(1:chains, function(i) 367 | sample_admb_parallel(parallel_number=i, path=path, model=model, 368 | duration=duration, 369 | algorithm=algorithm, 370 | iter=iter, init=init[[i]], warmup=warmup, 371 | seed=seeds[i], thin=thin, 372 | control=control, verbose=verbose, 373 | skip_optimization=skip_optimization, 374 | admb_args=admb_args, 375 | parallel=TRUE)) 376 | if(!console & !is.null(mcmc.out[[1]]$progress)){ 377 | trash <- lapply(mcmc.out, function(x) writeLines(x$progress)) 378 | } 379 | } 380 | 381 | ## Build output list 382 | warmup <- mcmc.out[[1]]$warmup 383 | mle <- .read_mle_fit(model=model, path=path) 384 | if(is.null(mle)){ 385 | par.names <- dimnames(mcmc.out[[1]]$samples)[[2]] 386 | par.names <- par.names[-length(par.names)] 387 | } else { 388 | par.names <- mle$par.names 389 | } 390 | iters <- unlist(lapply(mcmc.out, function(x) dim(x$samples)[1])) 391 | if(any(iters!=iter/thin)){ 392 | N <- min(iters) 393 | warning(paste("Variable chain lengths, truncating to minimum=", N)) 394 | } else { 395 | N <- iter/thin 396 | } 397 | samples <- array(NA, dim=c(N, chains, 1+length(par.names)), 398 | dimnames=list(NULL, NULL, c(par.names,'lp__'))) 399 | if(!skip_unbounded){ 400 | samples.unbounded <- samples 401 | } else { 402 | samples.unbounded= NULL 403 | } 404 | for(i in 1:chains){ 405 | samples[,i,] <- mcmc.out[[i]]$samples[1:N,] 406 | if(!skip_unbounded) 407 | samples.unbounded[,i,] <- cbind(mcmc.out[[i]]$unbounded[1:N,], 408 | mcmc.out[[i]]$samples[,1+length(par.names)]) 409 | } 410 | if(algorithm=="NUTS") 411 | sampler_params <- 412 | lapply(mcmc.out, function(x) x$sampler_params[1:N,]) 413 | else sampler_params <- NULL 414 | time.warmup <- unlist(lapply(mcmc.out, function(x) as.numeric(x$time.warmup))) 415 | time.total <- unlist(lapply(mcmc.out, function(x) as.numeric(x$time.total))) 416 
| cmd <- unlist(lapply(mcmc.out, function(x) x$cmd)) 417 | if(N < warmup) warning("Duration too short to finish warmup period") 418 | ## When running multiple chains the psv files will either be overwritten 419 | ## or in different folders (if parallel is used). Thus mceval needs to be 420 | ## done posthoc by recombining chains AFTER thinning and warmup and 421 | ## discarded into a single chain, written to file, then call -mceval. 422 | ## Merge all chains together and run mceval 423 | if(verbose) message(paste("Merging post-warmup chains into main folder:", path)) 424 | samples2 <- do.call(rbind, lapply(1:chains, function(i) 425 | samples[-(1:warmup), i, -dim(samples)[3]])) 426 | .write_psv(fn=model, samples=samples2, model.path=path) 427 | ## These already exclude warmup 428 | unbounded <- do.call(rbind, lapply(mcmc.out, function(x) x$unbounded)) 429 | oldwd <- getwd(); on.exit(setwd(oldwd)) 430 | setwd(path) 431 | write.table(unbounded, file='unbounded.csv', sep=",", col.names=FALSE, row.names=FALSE) 432 | if(mceval){ 433 | if(verbose) message("Running -mceval on merged chains...") 434 | system(paste(.update_model(model), admb_args, "-mceval"), ignore.stdout=FALSE) 435 | } 436 | covar.est <- cov(unbounded) 437 | if(!skip_monitor){ 438 | if(!requireNamespace("rstan", quietly = TRUE)) 439 | stop("Package 'rstan' is required to calculate diagnostics.\n Install it and try again, or set skip_monitor=FALSE.") 440 | if(verbose) message('Calculating ESS and Rhat (skip_monitor=TRUE will skip)...') 441 | mon <- rstan::monitor(samples, warmup, print=FALSE) 442 | } else { 443 | if(verbose) message('Skipping ESS and Rhat statistics...') 444 | mon <- NULL 445 | } 446 | par_names <- dimnames(samples)[[3]] 447 | result <- list(samples=samples, sampler_params=sampler_params, 448 | par_names=par_names, 449 | samples_unbounded=samples.unbounded, 450 | time.warmup=time.warmup, time.total=time.total, 451 | algorithm=algorithm, warmup=warmup, 452 | model=model, max_treedepth=mcmc.out[[1]]$max_treedepth, 453 | cmd=cmd, init=init, covar.est=covar.est, mle=mle, 454 | monitor=mon) 455 | result <- adfit(result) 456 | return(result) 457 | } 458 | 459 | -------------------------------------------------------------------------------- /R/samplers.R: -------------------------------------------------------------------------------- 1 | 2 | ## #' Run a single NUTS chain for an ADMB model 3 | ## #' 4 | ## #' A low level function to run a single chain. Unlikely to be used by a 5 | ## #' user, instead prefer \code{\link{sample_nuts}} 6 | ## #' @inheritParams wrappers 7 | ## #' @param chain Chain number, for printing purposes only. 8 | ## #' @param admb_args Character string of extra command line argument to 9 | ## #' pass to ADMB. 10 | ## #' @param extra.args Deprecated, use \code{admb_args} instead 11 | ## #' @param verbose Boolean for whether to print ADMB output to console. 
12 | ## #' @seealso \code{\link{sample_nuts}} 13 | sample_admb_nuts <- function(path, model, iter=2000, 14 | init=NULL, chain=1, 15 | thin=1, warmup=ceiling(iter/2), 16 | seed=NULL, duration=NULL, 17 | control=NULL, 18 | skip_optimization=TRUE, 19 | verbose=TRUE, admb_args=NULL, 20 | parallel){ 21 | 22 | wd.old <- getwd(); on.exit(setwd(wd.old)) 23 | setwd(path) 24 | ## Now contains all required NUTS arguments 25 | control <- .update_control(control) 26 | eps <- control$stepsize 27 | stopifnot(iter >= 1) 28 | stopifnot(warmup <= iter) 29 | stopifnot(duration > 0) 30 | stopifnot(thin >=1) 31 | if(is.null(warmup)) stop("Must provide warmup") 32 | if(thin < 1 | thin > iter) stop("Thin must be >1 and < iter") 33 | max_td <- control$max_treedepth 34 | adapt_delta <- control$adapt_delta 35 | 36 | ## Build the command to run the model 37 | model2 <- .update_model(model) 38 | if(skip_optimization){ 39 | cmd <- paste(model2,"-nox -nohess -maxfn 0 -phase 1000 -nuts -mcmc ",iter) 40 | } else { 41 | cmd <- paste(model2,"-hbf -nuts -mcmc ",iter) 42 | } 43 | cmd <- paste(cmd, "-warmup", warmup, "-chain", chain) 44 | if(!is.null(seed)) cmd <- paste(cmd, "-mcseed", seed) 45 | if(!is.null(duration)) cmd <- paste(cmd, "-duration", duration) 46 | cmd <- paste(cmd, "-max_treedepth", max_td, "-adapt_delta", adapt_delta) 47 | if(!is.null(eps)) cmd <- paste(cmd, "-hyeps", eps) 48 | if(!is.null(control$adapt_init_buffer)) 49 | cmd <- paste(cmd, "-adapt_init_buffer", control$adapt_init_buffer) 50 | if(!is.null(control$adapt_term_buffer)) 51 | cmd <- paste(cmd, "-adapt_term_buffer", control$adapt_term_buffer) 52 | if(!is.null(control$adapt_window)) 53 | cmd <- paste(cmd, "-adapt_window", control$adapt_window) 54 | if(!is.null(control$refresh)) 55 | cmd <- paste(cmd, "-refresh", control$refresh) 56 | if(control$adapt_mass) 57 | cmd <- paste(cmd, "-adapt_mass") 58 | if(control$adapt_mass_dense) 59 | cmd <- paste(cmd, "-adapt_mass_dense") 60 | 61 | ## Three options for metric. (1) 'mle' is to use the MLE estimates in 62 | ## admodel.cov without mass adaptation. (2) If a matrix is passed, this 63 | ## is written to file admodel.cov and no adaptation is done. (3) (default) 64 | ## Adaptation starting with diagonal. (4) Diagonal without mass adaptation. 65 | metric <- control$metric 66 | stopifnot(!is.null(metric)) 67 | if(is.matrix(metric)){ 68 | ## User defined one will be writen to admodel.cov 69 | if(!requireNamespace("matrixcalc", quietly = TRUE)) 70 | stop("Package 'matrixcalc' is required to pass a matrix.\n Install it and try again.") 71 | cor.user <- metric/ sqrt(diag(metric) %o% diag(metric)) 72 | if(!matrixcalc::is.positive.definite(x=cor.user)) 73 | stop("Invalid mass matrix passed: it is not positive definite.\n Check 'metric' argument or use different option.") 74 | .write.admb.cov(metric, hbf=1) 75 | warning("admodel.cov overwritten, revert admodel_original.cov if needed") 76 | } else if(is.character(metric) && metric == 'unit') { 77 | ## The default: Start from unit diag. 78 | cmd <- paste(cmd, '-mcdiag') 79 | } else if(is.character(metric) && metric=='mle') { 80 | ## ADMB default so do nothing special. No adaptation, will use 81 | ## estimated MLE covariance matrix in unbounded space (read from 82 | ## admodel.cov) 83 | } else { 84 | stop("Invalid metric option") 85 | } 86 | ## Write the starting values to file. 
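  ## (The init vector is flattened with unlist() and written one value per
  ## line to init.pin, which ADMB reads via the -mcpin flag.)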
A NULL value means to use the MLE, 87 | ## so need to run model 88 | if(!is.null(init)){ 89 | cmd <- paste(cmd, "-mcpin init.pin") 90 | write.table(file="init.pin", x=unlist(init), row.names=F, col.names=F) 91 | } else { 92 | ## Use MLE values which are read in from the admodel.hes file 93 | ## which is the default behavior 94 | } 95 | if(!is.null(admb_args)) cmd <- paste(cmd, admb_args) 96 | 97 | ## Run it and get results 98 | model2 <- .update_model(model) 99 | console <- .check_console_printing(parallel) 100 | progress <- NULL 101 | if(console){ 102 | ## Normal case 103 | time <- system.time(system2(model2, cmd, stdout=ifelse(verbose, '', FALSE)))[3] 104 | } else { 105 | ## RStudio won't print output so capture it and print at 106 | ## end. Better than nothing 107 | fn <- 'mcmc_progress.txt' 108 | if(file.exists(fn)) file.remove(fn) 109 | time <- system.time(system2(model2, cmd, stdout=ifelse(verbose, fn, FALSE)))[3] 110 | if(file.exists(fn)){ 111 | progress <- readLines('mcmc_progress.txt') 112 | ## trash <- suppressWarnings(file.remove('mcmc_progress.txt')) 113 | } else { 114 | warning("Progress output file not found. Try troubleshooting in serial model") 115 | } 116 | } 117 | 118 | if(!file.exists('adaptation.csv') | !file.exists('unbounded.csv')) 119 | stop(paste0("NUTS failed to run. Command attempted was:\n", cmd)) 120 | sampler_params <- as.matrix(read.csv("adaptation.csv")) 121 | unbounded <- as.matrix(read.csv("unbounded.csv", header=FALSE)) 122 | dimnames(unbounded) <- NULL 123 | pars <- .get_psv(model) 124 | par.names <- names(pars) 125 | if(!"lp__" %in% dimnames(sampler_params)[[2]]){ 126 | ## Previous version had a bug where energy__ was stored as 127 | ## the log-posterior. So energy is wrong, but log-posterior 128 | ## is right here. 129 | ## warning("ADMB version <= 12.0 has a bug where the energy statistic is wrong. Please consider updating") 130 | pars[,'log-posterior'] <- sampler_params[,'energy__'] 131 | } else { 132 | ## Later versions has a 7th column containing the LP and 6 is 133 | ## the energy. Both enegy and lp are correct 134 | pars[,'log-posterior'] <- sampler_params[,'lp__'] 135 | ## Drop the lp__ here since not used and may cause issues 136 | ## downstream. 137 | sampler_params <- sampler_params[,-7] 138 | } 139 | pars <- as.matrix(pars) 140 | ## Thin samples and adaptation post hoc for NUTS 141 | pars <- pars[seq(1, nrow(pars), by=thin),] 142 | unbounded <- unbounded[seq(1, nrow(unbounded), by=thin),] 143 | sampler_params <- sampler_params[seq(1, nrow(sampler_params), by=thin),] 144 | time.total <- time; time.warmup <- NA 145 | warmup <- warmup/thin 146 | return(list(samples=pars, sampler_params=sampler_params, 147 | time.total=time.total, time.warmup=time.warmup, 148 | warmup=warmup, max_treedepth=max_td, 149 | model=model, par.names=par.names, cmd=cmd, 150 | unbounded=unbounded, progress=progress)) 151 | } 152 | 153 | 154 | ## #' Run a single random walk Metropolis chain for an ADMB model 155 | ## #' 156 | ## #' A low level function to run a single chain. 
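## #' It runs the compiled ADMB executable with the -rwm flag and reads the
## #' resulting .psv, unbounded.csv, and rwm_lp.txt files back into R.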
Unlikely to be used by a 157 | ## #' user, instead prefer \code{\link{sample_rwm}} 158 | ## #' @inheritParams wrappers 159 | ## #' @seealso \code{\link{sample_rwm}} 160 | sample_admb_rwm <- function(path, model, iter=2000, thin=1, warmup=ceiling(iter/2), 161 | init=NULL, chain=1, seed=NULL, control=NULL, 162 | verbose=TRUE, duration=NULL, 163 | admb_args=NULL, 164 | skip_optimization=TRUE, 165 | parallel=FALSE){ 166 | 167 | wd.old <- getwd(); on.exit(setwd(wd.old)) 168 | setwd(path) 169 | ## Only refresh is used by RWM 170 | if(any(names(control) !='refresh')) 171 | warning("Only refresh control argument is used with RWM, ignoring: ", 172 | paste(names(control)[names(control)!='refresh'], 173 | collapse=', '), call.=FALSE) 174 | refresh <- control$refresh 175 | if(!is.null(refresh) & !is.numeric(refresh)) 176 | stop("Invalid refresh value ", refresh) 177 | metric <- 'mle' ## only one allowed 178 | stopifnot(iter >= 1) 179 | stopifnot(warmup <= iter) 180 | stopifnot(duration > 0) 181 | stopifnot(thin >=1) 182 | if(is.null(warmup)) stop("Must provide warmup") 183 | if(thin < 1 | thin > iter) stop("Thin must be >1 and < iter") 184 | 185 | 186 | ## Build the command to run the model 187 | if(skip_optimization){ 188 | cmd <- paste("-nox -nohess -maxfn 0 -phase 1000 -rwm -mcmc ",iter) 189 | } else { 190 | cmd <- paste("-rwm -mcmc ",iter) 191 | } 192 | 193 | cmd <- paste(cmd, "-mcscale", warmup, "-chain", chain) 194 | if(!is.null(seed)) cmd <- paste(cmd, "-mcseed", seed) 195 | if(!is.null(duration)) cmd <- paste(cmd, "-duration", duration) 196 | cmd <- paste(cmd, "-mcsave", thin) 197 | 198 | ## Three options for metric. NULL (default) is to use the MLE estimates 199 | ## in admodel.cov. If a matrix is passed, this is written to file and 200 | ## no scaling is done. Option 'unit' means identity. Note: these are 201 | ## all in unbounded space. 202 | if(is.matrix(metric)){ 203 | ## User defined one will be writen to admodel.cov 204 | cor.user <- metric/ sqrt(diag(metric) %o% diag(metric)) 205 | if(!matrixcalc::is.positive.definite(x=cor.user)) 206 | stop("Invalid mass matrix, not positive definite") 207 | .write.admb.cov(metric) 208 | } else if(is.null(metric)){ 209 | ## NULL means default of MLE 210 | } else if(metric=='mle'){ 211 | ## also use mle (i.e., do nothing) 212 | } else if(metric=='unit') { 213 | ## Identity in unbounded space 214 | cmd <- paste(cmd, "-mcdiag") 215 | } else { 216 | stop("Invalid metric option") 217 | } 218 | ## Write the starting values to file. A NULL value means to use the MLE, 219 | ## so need to run model 220 | if(!is.null(init)){ 221 | cmd <- paste(cmd, "-mcpin init.pin") 222 | write.table(file="init.pin", x=unlist(init), row.names=F, col.names=F) 223 | } 224 | if(!is.null(refresh)) cmd <- paste(cmd, "-refresh", refresh) 225 | if(!is.null(admb_args)) cmd <- paste(cmd, admb_args) 226 | 227 | 228 | ## Run it and get results 229 | model2 <- .update_model(model) 230 | console <- .check_console_printing(parallel) 231 | progress <- NULL 232 | if(console){ 233 | ## Normal case 234 | time <- system.time(system2(model2, cmd, stdout=ifelse(verbose, '', FALSE)))[3] 235 | } else { 236 | ## RStudio won't print output so capture it and print at 237 | ## end. 
Better than nothing 238 | fn <- 'mcmc_progress.txt' 239 | if(file.exists(fn)) file.remove(fn) 240 | time <- system.time(system2(model2, cmd, stdout=ifelse(verbose, fn, FALSE)))[3] 241 | if(file.exists(fn)){ 242 | progress <- readLines('mcmc_progress.txt') 243 | ## trash <- suppressWarnings(file.remove('mcmc_progress.txt')) 244 | } else { 245 | warning("Progress output file not found. Try troubleshooting in serial model") 246 | } 247 | } 248 | 249 | if(!file.exists('unbounded.csv')) 250 | stop(paste0("RWM failed to run. Command attempted was:\n", cmd)) 251 | unbounded <- as.matrix(read.csv("unbounded.csv", header=FALSE)) 252 | dimnames(unbounded) <- NULL 253 | pars <- .get_psv(model) 254 | par.names <- names(pars) 255 | lp <- as.vector(read.table('rwm_lp.txt', header=TRUE)[,1]) 256 | pars[,'log-posterior'] <- lp 257 | pars <- as.matrix(pars) 258 | ## Thinning is done interally for RWM (via -mcsave) so don't need to do 259 | ## it here 260 | time.total <- time; time.warmup <- NA 261 | warmup <- warmup/thin 262 | return(list(samples=pars, sampler_params=NULL, time.total=time.total, 263 | time.warmup=time.warmup, warmup=warmup, model=model, 264 | par.names=par.names, cmd=cmd, unbounded=unbounded, 265 | progress=progress)) 266 | } 267 | 268 | 269 | 270 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # adnuts 2 | 3 | main: [![R-CMD-check](https://github.com/Cole-Monnahan-NOAA/adnuts/workflows/R-CMD-check/badge.svg?branch=main)](https://github.com/Cole-Monnahan-NOAA/adnuts/actions?query=workflow%3AR-CMD-check) 4 | dev: [![R-CMD-check](https://github.com/Cole-Monnahan-NOAA/adnuts/workflows/R-CMD-check/badge.svg?branch=dev)](https://github.com/Cole-Monnahan-NOAA/adnuts/actions?query=workflow%3AR-CMD-check) [![codecov](https://codecov.io/gh/Cole-Monnahan-NOAA/adnuts/branch/dev/graph/badge.svg)](https://codecov.io/gh/Cole-Monnahan-NOAA/adnuts) 5 | [![](https://www.r-pkg.org/badges/version/adnuts)](https://www.r-pkg.org/pkg/adnuts) 6 | [![CRAN RStudio mirror downloads](https://cranlogs.r-pkg.org/badges/adnuts)](https://www.r-pkg.org/pkg/adnuts) 7 | 8 | The aim of 'adnuts' (pronounced A-D NUTS like A-D MB) is to provide 9 | advanced MCMC sampling for 'ADMB' and 'TMB' models. It mimics 'Stan' in 10 | functionality and feel, specifically providing no-U-turn (NUTS) sampling 11 | with adaptive mass matrix and parallel execution. 12 | 13 | The R package 'tmbstan' (available on CRAN) has largely replaced the TMB 14 | capabilities since original development. As such, adnuts is primarily used 15 | for ADMB models. See the following paper for an introduction to the package 16 | capabilities, and contrast with tmbstan: 17 | 18 | Monnahan CC, Kristensen K (2018) No-U-turn sampling for fast Bayesian 19 | inference in ADMB and TMB: Introducing the adnuts and tmbstan R 20 | packages. PLoS ONE 13(5):e0197954. 21 | https://doi.org/10.1371/journal.pone.0197954 22 | 23 | 'adnuts' was designed specifically for use in fisheries stock assessments, 24 | and interested authors are referred to: 25 | 26 | Monnahan, C.C., T.A. Branch, J.T. Thorson, I.J. Stewart, C.S. Szuwalksi 27 | (2020) Overcoming long Bayesian run times in integrated fisheries stock 28 | assessments. ICES Journal of Marine 29 | Science. 
https://dx.doi.org/10.1093/icesjms/fsz059 30 | 31 | 32 | ## Usage 33 | The 'sample_rwm' and 'sample_nuts' functions draw posterior samples from an 34 | ADMB model using an MCMC algorithm (random walk Metropolis or no-U-turn 35 | sampler). The returned fitted object contains samples and other 36 | information. The function 'extract_samples' can be used to get posterior 37 | samples (post warmup and thinning) into a data frame for inference, while 38 | 'launch_shinyadmb' can be used for interactive diagnostics based on 39 | 'ShinyStan'. 40 | 41 | A brief [demonstration 42 | file](https://github.com/Cole-Monnahan-NOAA/adnuts/blob/master/inst/demo.R) 43 | is the best place to help get you started, and there is also a user guide: 44 | `vignette('adnuts')` for more detailed information. 45 | 46 | ## Installation 47 | 48 | To use the ADMB functionality you need to build your model with version 49 | 12.0 (released December 2017) or later, otherwise this functionality is not 50 | available. See [the ADMB installation 51 | instructions](https://www.admb-project.org/docs/install/) for more 52 | information. ADMB 12.2 is highly recommended because it provides better 53 | console output, fixes bugs, and adds improved adaptation capabilities as 54 | compared to 12.0. You can check the ADMB version of a compiled model from 55 | the command line with a command `model.exe -version` which prints the 56 | version among other things. 57 | 58 | The adnuts R package version 1.1.2 can be installed from CRAN: 59 | `install.packages('adnuts')`. Future minor releases [listed 60 | here](https://github.com/Cole-Monnahan-NOAA/adnuts/releases) may not be 61 | released on CRAN so the latest stable version can be installed as: 62 | 63 | `devtools::install_github('Cole-Monnahan-NOAA/adnuts')` 64 | 65 | The development version can be installed as: 66 | `devtools::install_github('Cole-Monnahan-NOAA/adnuts', ref='dev')` 67 | 68 | ## Known issues 69 | Windows users may experience issues if their model name is too long. In 70 | some cases the OS will rename the output files using a "short" 71 | version. You'll see files like "MODEL~1.par". The package tries to handle 72 | this but it is **highly recommended** to simply shorten your filename. So 73 | instead of 'model_filename_2021.tpl' use e.g. 'model_21'. 74 | 75 | Analyses are reproducible by setting the same initial values and a seed in 76 | `sample_rwm` or `sample_nuts` (passed to ADMB as '-mcseed'). However, they 77 | may not be entirely consistent across OS platforms. The chains will start 78 | the same but may eventually diverge. This is likely due to minuscule 79 | differences in the gradient and log-posterior calculations between systems 80 | and compilers. 81 | 82 | ## Disclaimer 83 | 84 | “The United States Department of Commerce (DOC) GitHub project code is 85 | provided on an ‘as is’ basis and the user assumes responsibility for its 86 | use. DOC has relinquished control of the information and no longer has 87 | responsibility to protect the integrity, confidentiality, or availability 88 | of the information. Any claims against the Department of Commerce stemming 89 | from the use of its GitHub project will be governed by all applicable 90 | Federal law. Any reference to specific commercial products, processes, or 91 | services by service mark, trademark, manufacturer, or otherwise, does not 92 | constitute or imply their endorsement, recommendation or favoring by the 93 | Department of Commerce. 
The Department of Commerce seal and logo, or the 94 | seal and logo of a DOC bureau, shall not be used in any manner to imply 95 | endorsement of any commercial product or activity by DOC or the United 96 | States Government.” 97 | 98 | -------------------------------------------------------------------------------- /adnuts.Rproj: -------------------------------------------------------------------------------- 1 | Version: 1.0 2 | 3 | RestoreWorkspace: No 4 | SaveWorkspace: No 5 | AlwaysSaveHistory: No 6 | 7 | EnableCodeIndexing: Yes 8 | UseSpacesForTab: Yes 9 | NumSpacesForTab: 2 10 | Encoding: UTF-8 11 | 12 | RnwWeave: Sweave 13 | LaTeX: pdfLaTeX 14 | 15 | AutoAppendNewline: Yes 16 | StripTrailingWhitespace: Yes 17 | 18 | BuildType: Package 19 | PackageUseDevtools: Yes 20 | PackageInstallArgs: --no-multiarch --with-keep.source 21 | PackageRoxygenize: rd,collate,namespace 22 | -------------------------------------------------------------------------------- /cran-comments.md: -------------------------------------------------------------------------------- 1 | ## Resubmission 2 | This is a resubmission where I have corrected more potentially invalid URL in the README file. 3 | 4 | This is a minor release fixing bugs and adding minor new capabilities. 5 | 6 | ## Test environments 7 | * local Windows 10, R 4.0.3 8 | * ubuntu 18.04 (on Github Actions), R 4.0.4 9 | * macOS 10.15.7 (on Github Actions), R 4.0.4 10 | * Windows server 2019 10.0.17763 (on Github Actions), R 4.0.4 11 | * win-builder (release and devel) 12 | 13 | ## R CMD check results 14 | There were no errors, warnings or notes: 15 | 0 errors | 0 warnings | 0 notes 16 | 17 | ## Downstream dependencies 18 | There are currently no downstream dependencies for this package. 19 | -------------------------------------------------------------------------------- /inst/CITATION: -------------------------------------------------------------------------------- 1 | citHeader("To cite adnuts in publications use:") 2 | 3 | year <- 2018##sub(".*(2[[:digit:]]{3})-.*", "\\1", meta$Date, perl = TRUE) 4 | vers <- paste("R package version", meta$Version) 5 | 6 | 7 | citEntry(entry = "Article", 8 | title = "No-U-turn sampling for fast Bayesian inference in ADMB and TMB: Introducing the adnuts and tmbstan R packages.", 9 | author = personList(as.person("Cole C. Monnahan"), 10 | as.person("Kasper Kristensen")), 11 | year = "2018", 12 | journal = "PLoS ONE", 13 | volume = "13", 14 | number = "5", 15 | pages = "e0197954", 16 | textVersion = "Monnahan CC, Kristensen K (2018) No-U-turn sampling for fast Bayesian inference in ADMB and TMB: Introducing the adnuts and tmbstan R packages. PLoS ONE 13(5): e0197954. 17 | https://doi.org/10.1371/journal.pone.0197954" 18 | ) 19 | 20 | citEntry(entry = "Manual", 21 | title = "adnuts: No-U-Turn MCMC Sampling for ADMB Models. ", 22 | author = personList(as.person("Cole C. Monnahan")), 23 | year = year, 24 | note = vers, 25 | textVersion = paste0("Monnahan CC (", year, 26 | "). adnuts: No-U-Turn MCMC Sampling for 'ADMB' and 'TMB' Models. 
", vers, ".") 27 | ) 28 | -------------------------------------------------------------------------------- /inst/demo.R: -------------------------------------------------------------------------------- 1 | ### A very quick demonstration of no-U-turn sampling in ADMB 2 | library(adnuts) 3 | 4 | ### ----------- ADMB example 5 | ## This is the packaged simple regression model 6 | path.simple <- system.file('examples', 'simple', package='adnuts') 7 | ## It is best to have your ADMB files in a separate folder and provide that 8 | ## path, so make a copy of the model folder locally. 9 | path <- 'simple' 10 | dir.create(path) 11 | trash <- file.copy(from=list.files(path.simple, full.names=TRUE), to=path) 12 | 13 | ## Compile and run model 14 | setwd(path) 15 | ## If admb is not in the PATH this line will fail and you need to manually compile the model. 16 | system('admb simple.tpl') 17 | system('simple') 18 | setwd('..') 19 | 20 | ## 3 options to specify inits: list, NULL (uses MLE), or a function that 21 | ## returns a list. 22 | init <- lapply(1:3, function(i) rnorm(2)) 23 | init <- NULL # uses MLEs -- not recommended! 24 | init <- function() rnorm(2) 25 | fit <- sample_nuts(model='simple', init=init, path=path, cores=1) 26 | fit$cmd[1] # this is the command line arguments used 27 | ## Merged chains after discarding warmup phase 28 | post <- extract_samples(fit) 29 | str(post) 30 | ## A list with MLE fit 31 | str(fit$mle) 32 | 33 | ## Can also run in parallel which is the default when 34 | ## chains>1. Here we also execute the mceval phase 35 | ## afterward. Note that the .psv file only has post-warmup and 36 | ## thinned (if used) samples in it. Chains are rbind'ed together, 37 | ## and thus the -mceval call runs on all chains and can be used 38 | ## directly for inference. You can set mceval=TRUE here, or run 39 | ## your model later manually as typically done. 40 | fit <- sample_nuts(model='simple', init=init, path=path, mceval=TRUE) 41 | 42 | 43 | ## Can also specify a duration argument for capping the run 44 | ## time. This is useful e.g. if you want to run overnight but 45 | ## have results by 8am. Here we do 0.5 minutes just to 46 | ## demonstrate. This period needs to be long enough to do the 47 | ## warmup or it'll throw an error. Normally we wouldn't use 48 | ## thinning for NUTS but only to demonstrate here since the model 49 | ## runs so fast. I recommended setting a reasonable warmup, then 50 | ## an unreasonably big iter. It will truncate to the shortest 51 | ## chains. 52 | fit <- sample_nuts(model='simple', init=init, path=path, 53 | warmup=200, iter=20000000, thin=100, 54 | duration=.5) 55 | 56 | ## The default is use an adaptive mass matrix. If we want to use 57 | ## the MLE covariance as the mass matrix, set it using 58 | ## control. This *should* make for more efficient sampling in 59 | ## most cases. For technical reasons, must optimize the model 60 | ## with flag -hbf 1. Here setting skip_optimization=FALSE will 61 | ## rerun the model before starting the chains. Or you could 62 | ## manually optimize the model with the flag '-hbf'. 63 | fit <- sample_nuts(model='simple', init=init, path=path, 64 | skip_optimization=FALSE, 65 | control=list(metric='mle')) 66 | ## See the vignette for a discussion on the options for the 67 | ## metric (mass matrix adaptation and such). 68 | 69 | ## Can also use a slightly modified version of the original 70 | ## Metropolis MCMC algorithm (Random Walk Metropolis or 71 | ## RWM). Here we really want to use the MLE covariance. 
Since we 72 | ## ran above with hbf=1 we need to rerun model to recreate the 73 | ## .hes and .cov files. Can do this with skip_optimization 74 | ## argument again. 75 | fit.rwm <- sample_rwm(model='simple', init=init, path=path, 76 | skip_optimization=FALSE, 77 | iter=200000, thin=100, mceval=TRUE) 78 | 79 | ### ------------------------------------------------------------ 80 | ### Convergence diagnostics 81 | ## Key convergence diagnostics (effective sample size and Rhat) 82 | ## are available in the summary of the fit. It is your 83 | ## responsibility to check for signs of non-convergence before 84 | ## using the output for inference!! 85 | summary(fit) 86 | ## or from the rstan::monitor output 87 | str(fit$monitor) 88 | 89 | ### ------------------------------------------------------------ 90 | ### Plotting options 91 | pairs_admb(fit) # modified pairs just for ADMB fits like this 92 | ## Can also use ShinyStan (make sure to exit it) 93 | ## launch_shinyadmb(fit) 94 | plot_sampler_params(fit) # NUTS adaptation 95 | ## Compare MLE and posterior of marginals. See help for 96 | ## recommendation for creating multipage PDF for high dimensional 97 | ## parameters. 98 | plot_marginals(fit) 99 | 100 | ### ------------------------------------------------------------ 101 | ### Extracting posterior samples 102 | ## Get post-warmup, merged chains into data frame (these two are 103 | ## identical) 104 | str(as.data.frame(fit)) 105 | str(extract_samples(fit)) 106 | ## If you want it in list form, e.g., to put into coda package 107 | str(extract_samples(fit, as.list=TRUE)) 108 | ## If you want to see warmup samples, and the log-posterior (lp__ column) 109 | str(extract_samples(fit, inc_warmup=TRUE, inc_lp=TRUE)) 110 | 111 | ## Remove folder 112 | unlink(path, TRUE) 113 | 114 | 115 | ### End of demo 116 | 117 | 118 | -------------------------------------------------------------------------------- /inst/examples/fit.RDS: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Cole-Monnahan-NOAA/adnuts/ff0c8d3d56006e61ea3651ea27fedc97f2a02afe/inst/examples/fit.RDS -------------------------------------------------------------------------------- /inst/examples/simple/simple.dat: -------------------------------------------------------------------------------- 1 | # number of observations 2 | 10 3 | # observed Y values 4 | 1.4 4.7 5.1 8.3 9.0 14.5 14.0 13.4 19.2 18 5 | # observed x values 6 | -1 0 1 2 3 4 5 6 7 8 7 | 8 | -------------------------------------------------------------------------------- /inst/examples/simple/simple.tpl: -------------------------------------------------------------------------------- 1 | // Copyright (c) 2008, 2009, 2010 Regents of the University of California. 2 | // 3 | // ADModelbuilder and associated libraries and documentations are 4 | // provided under the general terms of the "BSD" license. 5 | // 6 | // Redistribution and use in source and binary forms, with or without 7 | // modification, are permitted provided that the following conditions are 8 | // met: 9 | // 10 | // 1. Redistributions of source code must retain the above copyright 11 | // notice, this list of conditions and the following disclaimer. 12 | // 13 | // 2. Redistributions in binary form must reproduce the above copyright 14 | // notice, this list of conditions and the following disclaimer in the 15 | // documentation and/or other materials provided with the distribution. 16 | // 17 | // 3. 
Neither the name of the University of California, Otter Research, 18 | // nor the ADMB Foundation nor the names of its contributors may be used 19 | // to endorse or promote products derived from this software without 20 | // specific prior written permission. 21 | // 22 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 25 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 28 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 32 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 33 | 34 | DATA_SECTION 35 | init_int nobs 36 | init_vector Y(1,nobs) 37 | init_vector x(1,nobs) 38 | PARAMETER_SECTION 39 | init_bounded_number a(-10,8); 40 | init_bounded_number b(-19,15); 41 | vector pred_Y(1,nobs) 42 | sdreport_number aa 43 | objective_function_value f 44 | PROCEDURE_SECTION 45 | aa=a; 46 | pred_Y=a*x+b; 47 | f=(norm2(pred_Y-Y)); 48 | f=nobs/2.*log(f); // make it a likelihood function so that 49 | // covariance matrix is correct 50 | 51 | -------------------------------------------------------------------------------- /man/adfit.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{adfit} 4 | \alias{adfit} 5 | \title{Constructor for the "adfit" (A-D fit) class} 6 | \usage{ 7 | adfit(x) 8 | } 9 | \arguments{ 10 | \item{x}{Fitted object from \code{\link{sample_admb}}} 11 | } 12 | \value{ 13 | An object of class "adfit" 14 | } 15 | \description{ 16 | Constructor for the "adfit" (A-D fit) class 17 | } 18 | -------------------------------------------------------------------------------- /man/adnuts.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/adnuts.R 3 | \docType{package} 4 | \name{adnuts} 5 | \alias{adnuts} 6 | \title{adnuts: No-U-turn sampling for AD Model Builder (ADMB)} 7 | \description{ 8 | Draw Bayesian posterior samples from an ADMB model using the 9 | no-U-turn MCMC sampler. Adaptation schemes are used so specifying tuning 10 | parameters is not necessary, and parallel execution reduces overall run 11 | time. 12 | } 13 | \details{ 14 | The software package Stan pioneered the use of no-U-turn (NUTS) sampling 15 | for Bayesian models (Hoffman and Gelman 2014, Carpenter et 16 | al. 2017). This algorithm provides fast, efficient sampling across a 17 | wide range of models, including hierarchical ones, and thus can be used 18 | as a generic modeling tool (Monnahan et al. 2017). The functionality 19 | provided by \pkg{adnuts} is based loosely off Stan and \R package 20 | \pkg{rstan} 21 | 22 | The \pkg{adnuts} \R package provides an \R workflow for NUTS 23 | sampling for ADMB models (Fournier et al. 
2011), including 24 | adaptation of step size and metric (mass matrix), parallel 25 | execution, and links to diagnostic and inference tools 26 | provided by \pkg{rstan} and \pkg{shinystan}. The ADMB 27 | implementation of NUTS code is bundled into the ADMB source 28 | itself (as of version 12.0). Thus, when a user builds an 29 | ADMB model the NUTS code is incorporated into the model 30 | executable. Thus, \pkg{adnuts} simply provides a convenient 31 | set of wrappers to more easily execute, diagnose, and make 32 | inference on a model. More details can be found in the 33 | package vignette. 34 | 35 | Note that previous versions of \pkg{adnuts} included 36 | functionality for TMB models, but this has been replaced by 37 | \pkg{tmbstan} (Kristensen et al. 2016, Monnahan and 38 | Kristensen 2018). 39 | } 40 | \references{ 41 | Carpenter, B., Gelman, A., Hoffman, M.D., Lee, D., Goodrich, B., 42 | Betancourt, M., Riddell, A., Guo, J.Q., Li, P., Riddell, A., 43 | 2017. Stan: A Probabilistic Programming Language. J Stat 44 | Softw. 76:1-29. 45 | 46 | Fournier, D.A., Skaug, H.J., Ancheta, J., Ianelli, J., Magnusson, A., 47 | Maunder, M.N., Nielsen, A., Sibert, J., 2012. AD Model Builder: using 48 | automatic differentiation for statistical inference of highly 49 | parameterized complex nonlinear models. Optim Method 50 | Softw. 27:233-249. 51 | 52 | Hoffman, M.D., Gelman, A., 2014. The no-U-turn sampler: adaptively 53 | setting path lengths in Hamiltonian Monte Carlo. J Mach Learn 54 | Res. 15:1593-1623. 55 | 56 | Kristensen, K., Nielsen, A., Berg, C.W., Skaug, H., Bell, B.M., 57 | 2016. TMB: Automatic differentiation and Laplace approximation. J 58 | Stat Softw. 70:21. 59 | 60 | Kristensen, K., 2017. TMB: General random effect model builder tool 61 | inspired by ADMB. R package version 1.7.11. 62 | 63 | Monnahan, C.C., Thorson, J.T., Branch, T.A., 2017. Faster estimation of 64 | Bayesian models in ecology using Hamiltonian Monte Carlo. Methods in 65 | Ecology and Evolution. 8:339-348. 66 | 67 | Monnahan C.C., Kristensen K. (2018). No-U-turn sampling for fast 68 | Bayesian inference in ADMB and TMB: Introducing the adnuts and 69 | tmbstan R packages PLoS ONE 13(5): e0197954. 70 | https://doi.org/10.1371/journal.pone.0197954 71 | 72 | Stan Development Team, 2016. Stan modeling language users guide and 73 | reference manual, version 2.11.0. 74 | 75 | Stan Development Team, 2016. RStan: The R interface to Stan. R package 76 | version 2.14.1. http://mc-stan.org. 77 | } 78 | -------------------------------------------------------------------------------- /man/as.data.frame.adfit.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{as.data.frame.adfit} 4 | \alias{as.data.frame.adfit} 5 | \title{Convert object of class adfit to data.frame. Calls 6 | \code{\link{extract_samples}}} 7 | \usage{ 8 | \method{as.data.frame}{adfit}(x, row.names = NULL, optional = FALSE, ...) 9 | } 10 | \arguments{ 11 | \item{x}{Fitted object from \code{\link{sample_rwm}}} 12 | 13 | \item{row.names}{Ignored} 14 | 15 | \item{optional}{Ignored} 16 | 17 | \item{...}{Ignored} 18 | } 19 | \value{ 20 | A data frame with parameters as columns and samples as 21 | rows. 22 | } 23 | \description{ 24 | Convert object of class adfit to data.frame. 
Calls 25 | \code{\link{extract_samples}} 26 | } 27 | \details{ 28 | This calls the default settings of 29 | \code{\link{extract_samples}}, no warmup samples and no 30 | column for the log-posterior (lp__). Use this function 31 | directly for finer control. 32 | } 33 | -------------------------------------------------------------------------------- /man/check_identifiable.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{check_identifiable} 4 | \alias{check_identifiable} 5 | \title{Check identifiability from model Hessian} 6 | \usage{ 7 | check_identifiable(model, path = getwd()) 8 | } 9 | \arguments{ 10 | \item{model}{Model name without file extension} 11 | 12 | \item{path}{Path to model folder, defaults to working directory} 13 | } 14 | \value{ 15 | Prints output of bad parameters and invisibly returns it. 16 | } 17 | \description{ 18 | Check identifiability from model Hessian 19 | } 20 | \details{ 21 | Read in the admodel.hes file and check the eigenvalues to 22 | determine which parameters are not identifiable and thus cause the 23 | Hessian to be non-invertible. Use this to identify which parameters 24 | are problematic. This function was converted from a version in the 25 | \code{FishStatsUtils} package. 26 | } 27 | -------------------------------------------------------------------------------- /man/dot-check_ADMB_version.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{.check_ADMB_version} 4 | \alias{.check_ADMB_version} 5 | \title{Check that the model is compiled with the right version 6 | of ADMB which is 12.0 or later} 7 | \usage{ 8 | .check_ADMB_version(model, path = getwd(), min.version = 12, warn = TRUE) 9 | } 10 | \arguments{ 11 | \item{model}{Model name without file extension} 12 | 13 | \item{path}{Path to model folder, defaults to working 14 | directory. NULL value specifies working directory (default).} 15 | 16 | \item{min.version}{Minimum valid version (numeric). Defaults 17 | to 12.0.} 18 | 19 | \item{warn}{Boolean whether to throw warnings or not} 20 | } 21 | \value{ 22 | Nothing, errors out if either model could not be run 23 | or the version is incompatible. If compatible nothing 24 | happens. 25 | } 26 | \description{ 27 | Check that the model is compiled with the right version 28 | of ADMB which is 12.0 or later 29 | } 30 | \details{ 31 | Some functionality of packages \pkg{adnuts} is 32 | imbedded in the ADMB source code so that when a model is 33 | compiled it is contained in the model executable. If this 34 | code does not exist adnuts will fail. The solution is to 35 | update ADMB and recompile the model. 
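
As a quick manual check, a compiled model prints its ADMB version from the
command line; for example (illustrative only, with 'model' standing in for
your executable name):

\preformatted{
model -version
}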
36 | } 37 | -------------------------------------------------------------------------------- /man/dot-check_console_printing.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{.check_console_printing} 4 | \alias{.check_console_printing} 5 | \title{Check if the session is interactive or Rstudio which has 6 | implications for parallel output} 7 | \usage{ 8 | .check_console_printing(parallel) 9 | } 10 | \arguments{ 11 | \item{parallel}{Boolean whether chain is executed in parallel 12 | mode or not.} 13 | } 14 | \value{ 15 | Boolean whether output should be printed to console 16 | progressively, or saved to file and printed at the end. 17 | } 18 | \description{ 19 | Check if the session is interactive or Rstudio which has 20 | implications for parallel output 21 | } 22 | \details{ 23 | When using RStudio and RGui, the parallel output does 24 | not show on the console. As a workaround it is captured in 25 | each cluster into a file and then read in and printed. 26 | } 27 | -------------------------------------------------------------------------------- /man/dot-check_model_path.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{.check_model_path} 4 | \alias{.check_model_path} 5 | \title{Check that the file can be found} 6 | \usage{ 7 | .check_model_path(model, path) 8 | } 9 | \arguments{ 10 | \item{model}{Model name without file extension} 11 | 12 | \item{path}{Path to model folder, defaults to working} 13 | } 14 | \description{ 15 | Check that the file can be found 16 | } 17 | -------------------------------------------------------------------------------- /man/dot-getADMBHessian.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{.getADMBHessian} 4 | \alias{.getADMBHessian} 5 | \title{Read in admodel.hes file} 6 | \usage{ 7 | .getADMBHessian(path) 8 | } 9 | \arguments{ 10 | \item{path}{Path to folder containing the admodel.hes file} 11 | } 12 | \value{ 13 | The Hessian matrix 14 | } 15 | \description{ 16 | Read in admodel.hes file 17 | } 18 | -------------------------------------------------------------------------------- /man/dot-sample_admb.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/sample_admb.R 3 | \name{.sample_admb} 4 | \alias{.sample_admb} 5 | \title{Hidden wrapper function for sampling from ADMB models} 6 | \usage{ 7 | .sample_admb( 8 | model, 9 | path = getwd(), 10 | iter = 2000, 11 | init = NULL, 12 | chains = 3, 13 | warmup = NULL, 14 | seeds = NULL, 15 | thin = 1, 16 | mceval = FALSE, 17 | duration = NULL, 18 | cores = NULL, 19 | control = NULL, 20 | verbose = TRUE, 21 | algorithm = "NUTS", 22 | skip_optimization = TRUE, 23 | skip_monitor = FALSE, 24 | skip_unbounded = TRUE, 25 | admb_args = NULL 26 | ) 27 | } 28 | \arguments{ 29 | \item{model}{Name of model (i.e., 'model' for model.tpl). For 30 | non-Windows systems this will automatically be converted to 31 | './model' internally. For Windows, long file names are 32 | sometimes shortened from e.g., 'long_model_filename' to 33 | 'LONG_~1'. This should work, but will throw warnings. 
Please 34 | shorten the model name. See 35 | https://en.wikipedia.org/wiki/8.3_filename.} 36 | 37 | \item{path}{Path to model executable. Defaults to working 38 | directory. Often best to have model files in a separate 39 | subdirectory, particularly for parallel.} 40 | 41 | \item{iter}{The number of samples to draw.} 42 | 43 | \item{init}{A list of lists containing the initial parameter 44 | vectors, one for each chain or a function. It is strongly 45 | recommended to initialize multiple chains from dispersed 46 | points. A of NULL signifies to use the starting values 47 | present in the model (i.e., \code{obj$par}) for all chains.} 48 | 49 | \item{chains}{The number of chains to run.} 50 | 51 | \item{warmup}{The number of warmup iterations.} 52 | 53 | \item{seeds}{A vector of seeds, one for each chain.} 54 | 55 | \item{thin}{The thinning rate to apply to samples. Typically 56 | not used with NUTS.} 57 | 58 | \item{mceval}{Whether to run the model with \code{-mceval} on 59 | samples from merged chains.} 60 | 61 | \item{duration}{The number of minutes after which the model 62 | will quit running.} 63 | 64 | \item{cores}{The number of cores to use for parallel 65 | execution. Default is number available in the system minus 66 | 1. If \code{cores=1}, serial execution occurs (even if 67 | \code{chains>1}), otherwise parallel execution via package 68 | snowfall is used. For slow analyses it is recommended to set 69 | \code{chains}<=\code{cores} so each core needs to run only a 70 | single chain.} 71 | 72 | \item{control}{A list to control the sampler. See details for 73 | further use.} 74 | 75 | \item{verbose}{Flag whether to show console output (default) 76 | or suppress it completely except for warnings and 77 | errors. Works for serial or parallel execution.} 78 | 79 | \item{algorithm}{The algorithm to use, one of "NUTS" or "RWM"} 80 | 81 | \item{skip_optimization}{Whether to run the optimizer before 82 | running MCMC. This is rarely need as it is better to run it 83 | once before to get the covariance matrix, or the estimates 84 | are not needed with adaptive NUTS.} 85 | 86 | \item{skip_monitor}{Whether to skip calculating diagnostics 87 | (effective sample size, Rhat) via the \code{rstan::monitor} 88 | function. This can be slow for models with high dimension or 89 | many iterations. The result is used in plots and summaries 90 | so it is recommended to turn on. If model run with 91 | \code{skip_monitor=FALSE} you can recreate it post-hoc by 92 | setting \code{fit$monitor=rstan::monitor(fit$samples, 93 | fit$warmup, print=FALSE)}.} 94 | 95 | \item{skip_unbounded}{Whether to skip returning the unbounded 96 | version of the posterior samples in addition to the bounded 97 | ones. 
It may be advisable to set to FALSE for very large 98 | models to save space.} 99 | 100 | \item{admb_args}{A character string which gets passed to the 101 | command line, allowing finer control} 102 | } 103 | \description{ 104 | Hidden wrapper function for sampling from ADMB models 105 | } 106 | -------------------------------------------------------------------------------- /man/dot-update_model.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{.update_model} 4 | \alias{.update_model} 5 | \title{Convert model name depending on system} 6 | \usage{ 7 | .update_model(model) 8 | } 9 | \arguments{ 10 | \item{model}{Model name without file extension} 11 | } 12 | \value{ 13 | Updated model name to use with system call 14 | } 15 | \description{ 16 | Convert model name depending on system 17 | } 18 | -------------------------------------------------------------------------------- /man/extract_sampler_params.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{extract_sampler_params} 4 | \alias{extract_sampler_params} 5 | \title{Extract sampler parameters from a fit.} 6 | \usage{ 7 | extract_sampler_params(fit, inc_warmup = FALSE) 8 | } 9 | \arguments{ 10 | \item{fit}{A list returned by \code{sample_admb}.} 11 | 12 | \item{inc_warmup}{Whether to extract the warmup samples or not 13 | (default). Warmup samples should never be used for inference, but may 14 | be useful for diagnostics.} 15 | } 16 | \value{ 17 | An invisible data.frame containing samples (rows) of each 18 | parameter (columns). If multiple chains exist they will be rbinded 19 | together. 20 | } 21 | \description{ 22 | Extract information about NUTS trajectories, such as acceptance ratio 23 | and treedepth, from a fitted object. 24 | } 25 | \details{ 26 | Each trajectory (iteration) in NUTS has associated information 27 | about the trajectory: stepsize, acceptance ratio, treedepth, and number of 28 | leapfrog steps. This function extracts these into a data.frame, which 29 | may be useful for diagnosing issues in certain cases. In general, the 30 | user should not need to examine them, or preferably should via 31 | \code{\link{plot_sampler_params}} or \code{\link{launch_shinyadmb}}. 32 | } 33 | \examples{ 34 | fit <- readRDS(system.file('examples', 'fit.RDS', package='adnuts')) 35 | sp <- extract_sampler_params(fit, inc_warmup=TRUE) 36 | str(sp) 37 | 38 | } 39 | \seealso{ 40 | \code{\link{launch_shinyadmb}}. 41 | } 42 | -------------------------------------------------------------------------------- /man/extract_samples.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{extract_samples} 4 | \alias{extract_samples} 5 | \title{Extract posterior samples from a model fit.} 6 | \usage{ 7 | extract_samples( 8 | fit, 9 | inc_warmup = FALSE, 10 | inc_lp = FALSE, 11 | as.list = FALSE, 12 | unbounded = FALSE 13 | ) 14 | } 15 | \arguments{ 16 | \item{fit}{A list returned by \code{sample_admb}.} 17 | 18 | \item{inc_warmup}{Whether to extract the warmup samples or not 19 | (default). 
Warmup samples should never be used for inference, but may 20 | be useful for diagnostics.} 21 | 22 | \item{inc_lp}{Whether to include a column for the log posterior density 23 | (last column). For diagnostics it can be useful.} 24 | 25 | \item{as.list}{Whether to return the samples as a list (one element per 26 | chain). This could then be converted to a CODA mcmc object.} 27 | 28 | \item{unbounded}{Boolean flag whether to return samples in 29 | unbounded (untransformed) space. Will only be differences 30 | when init_bounded types are used in the ADMB template. This 31 | can be useful for model debugging.} 32 | } 33 | \value{ 34 | If as.list is FALSE, an invisible data.frame containing samples 35 | (rows) of each parameter (columns). If multiple chains exist they will 36 | be rbinded together, maintaining order within each chain. If as.list 37 | is TRUE, samples are returned as a list of matrices. 38 | } 39 | \description{ 40 | A helper function to extract posterior samples across multiple chains 41 | into a single data.frame. 42 | } 43 | \details{ 44 | This function is loosely based on the \pkg{rstan} function 45 | \code{extract}. Merging samples across chains should only be used for 46 | inference after appropriate diagnostic checks. Do not calculate 47 | diagnostics like Rhat or effective sample size after using this 48 | function, instead, use \code{\link[rstan]{monitor}}. Likewise, warmup 49 | samples are not valid and should never be used for inference, but may 50 | be useful in some cases for diagnosing issues. 51 | } 52 | \examples{ 53 | ## A previously run fitted ADMB model 54 | fit <- readRDS(system.file('examples', 'fit.RDS', package='adnuts')) 55 | post <- extract_samples(fit) 56 | tail(apply(post, 2, median)) 57 | } 58 | -------------------------------------------------------------------------------- /man/is.adfit.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{is.adfit} 4 | \alias{is.adfit} 5 | \title{Check object of class adfit} 6 | \usage{ 7 | is.adfit(x) 8 | } 9 | \arguments{ 10 | \item{x}{Returned list from \code{\link{sample_admb}}} 11 | } 12 | \description{ 13 | Check object of class adfit 14 | } 15 | -------------------------------------------------------------------------------- /man/launch_shinyadmb.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{launch_shinyadmb} 4 | \alias{launch_shinyadmb} 5 | \title{Launch shinystan for an ADMB fit.} 6 | \usage{ 7 | launch_shinyadmb(fit) 8 | } 9 | \arguments{ 10 | \item{fit}{A named list returned by \code{sample_admb}.} 11 | } 12 | \description{ 13 | Launch shinystan for an ADMB fit. 14 | } 15 | \seealso{ 16 | \code{launch_shinytmb} 17 | } 18 | -------------------------------------------------------------------------------- /man/launch_shinytmb.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{launch_shinytmb} 4 | \alias{launch_shinytmb} 5 | \title{Launch shinystan for a TMB fit.} 6 | \usage{ 7 | launch_shinytmb(fit) 8 | } 9 | \arguments{ 10 | \item{fit}{A named list returned by \code{sample_tmb}.} 11 | } 12 | \description{ 13 | Launch shinystan for a TMB fit. 
14 | } 15 | \seealso{ 16 | \code{launch_shinyadmb} 17 | } 18 | -------------------------------------------------------------------------------- /man/pairs_admb.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/pairs_admb.R 3 | \name{pairs_admb} 4 | \alias{pairs_admb} 5 | \title{Plot pairwise parameter posteriors and optionally the MLE points and 6 | confidence ellipses.} 7 | \usage{ 8 | pairs_admb( 9 | fit, 10 | order = NULL, 11 | diag = c("trace", "acf", "hist"), 12 | acf.ylim = c(-1, 1), 13 | ymult = NULL, 14 | axis.col = gray(0.5), 15 | pars = NULL, 16 | label.cex = 0.8, 17 | limits = NULL, 18 | add.mle = TRUE, 19 | add.monitor = TRUE, 20 | unbounded = FALSE, 21 | ... 22 | ) 23 | } 24 | \arguments{ 25 | \item{fit}{A list as returned by \code{sample_admb}.} 26 | 27 | \item{order}{The order to consider the parameters. Options are 28 | NULL (default) to use the order declared in the model, or 29 | 'slow' and 'fast' which are based on the effective sample 30 | sizes ordered by slowest or fastest mixing respectively. See 31 | example for usage.} 32 | 33 | \item{diag}{What type of plot to include on the diagonal, 34 | options are 'acf' which plots the autocorrelation function 35 | \code{acf}, 'hist' shows marginal posterior histograms, and 36 | 'trace' the trace plot.} 37 | 38 | \item{acf.ylim}{If using the acf function on the diagonal, 39 | specify the y limit. The default is c(-1,1).} 40 | 41 | \item{ymult}{A vector of length ncol(posterior) specifying how 42 | much room to give when using the hist option for the 43 | diagonal. For use if the label is blocking part of the 44 | plot. The default is 1.3 for all parameters.} 45 | 46 | \item{axis.col}{Color of axes} 47 | 48 | \item{pars}{A vector of parameter names or integers 49 | representing which parameters to subset. Useful if the model 50 | has a larger number of parameters and you just want to show 51 | a few key ones.} 52 | 53 | \item{label.cex}{Control size of outer and diagonal labels (default 1)} 54 | 55 | \item{limits}{A list containing the ranges for each parameter 56 | to use in plotting.} 57 | 58 | \item{add.mle}{Boolean whether to add 95\% confidence ellipses} 59 | 60 | \item{add.monitor}{Boolean whether to print effective sample} 61 | 62 | \item{unbounded}{Whether to use the bounded or unbounded 63 | version of the parameters. 64 | size (ESS) and Rhat values on the diagonal.} 65 | 66 | \item{...}{Arguments to be passed to plot call in lower 67 | diagonal panels} 68 | } 69 | \value{ 70 | Produces a plot, and returns nothing. 71 | } 72 | \description{ 73 | Plot pairwise parameter posteriors and optionally the MLE points and 74 | confidence ellipses. 75 | } 76 | \details{ 77 | This function is modified from the base \code{pairs} 78 | code to work specifically with fits from the 79 | 'adnuts' package using either the NUTS or RWM MCMC 80 | algorithms. If an invertible Hessian was found (in 81 | \code{fit$mle}) then estimated covariances are available to 82 | compare and added automatically (red ellipses). Likewise, a 83 | "monitor" object from \code{rstan::monitor} is attached as 84 | \code{fit$monitor} and provides effective sample sizes (ESS) 85 | and Rhat values. The ESS are used to potentially order the 86 | parameters via argument \code{order}, but also printed on 87 | the diagonal. 
88 | } 89 | \examples{ 90 | fit <- readRDS(system.file('examples', 'fit.RDS', package='adnuts')) 91 | pairs_admb(fit) 92 | pairs_admb(fit, pars=1:2) 93 | pairs_admb(fit, pars=c('b', 'a')) 94 | pairs_admb(fit, pars=1:2, order='slow') 95 | pairs_admb(fit, pars=1:2, order='fast') 96 | 97 | } 98 | \author{ 99 | Cole Monnahan 100 | } 101 | -------------------------------------------------------------------------------- /man/plot.adfit.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{plot.adfit} 4 | \alias{plot.adfit} 5 | \title{Plot object of class adfit} 6 | \usage{ 7 | \method{plot}{adfit}(x, y, ...) 8 | } 9 | \arguments{ 10 | \item{x}{Fitted object from \code{\link{sample_admb}}} 11 | 12 | \item{y}{Ignored} 13 | 14 | \item{...}{Ignored} 15 | } 16 | \value{ 17 | Plot created 18 | } 19 | \description{ 20 | Plot object of class adfit 21 | } 22 | -------------------------------------------------------------------------------- /man/plot_marginals.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{plot_marginals} 4 | \alias{plot_marginals} 5 | \title{Plot marginal distributions for a fitted model} 6 | \usage{ 7 | plot_marginals( 8 | fit, 9 | pars = NULL, 10 | mfrow = NULL, 11 | add.mle = TRUE, 12 | add.monitor = TRUE, 13 | breaks = 30 14 | ) 15 | } 16 | \arguments{ 17 | \item{fit}{A fitted object returned by 18 | \code{\link{sample_admb}}.} 19 | 20 | \item{pars}{A numeric or character vector of parameters which 21 | to plot, for plotting a subset of the total (defaults to all)} 22 | 23 | \item{mfrow}{A custom grid size (vector of two) to be called 24 | as \code{par(mfrow)}, overriding the defaults.} 25 | 26 | \item{add.mle}{Whether to add marginal normal distributions 27 | determined from the inverse Hessian file} 28 | 29 | \item{add.monitor}{Whether to add ESS and Rhat information} 30 | 31 | \item{breaks}{The number of breaks to use in \code{hist()}, 32 | defaulting to 30} 33 | } 34 | \description{ 35 | Plot marginal distributions for a fitted model 36 | } 37 | \details{ 38 | This function plots grid cells of all parameters 39 | in a model, comparing the marginal posterior histogram vs 40 | the asymptotic normal (red lines) from the inverse 41 | Hessian. Its intended use is to quickly gauge differences 42 | between frequentist and Bayesian inference on the same 43 | model. 44 | 45 | If \code{fit$monitor} exists the effective sample size 46 | (ESS) and R-hat estimates are printed in the top right 47 | corner. See 48 | \url{https://mc-stan.org/rstan/reference/Rhat.html} for more 49 | information. Generally Rhat>1.05 or ESS<100 (per chain) 50 | suggest inference may be unreliable. 51 | 52 | This function is customized to work with multipage PDFs, 53 | specifically: 54 | \code{pdf('marginals.pdf', onefile=TRUE, width=7,height=5)} 55 | produces a nice readable file. 
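
For example, a minimal sketch of that workflow (assuming a fitted object
\code{fit} from \code{\link{sample_admb}}):

\preformatted{
pdf('marginals.pdf', onefile=TRUE, width=7, height=5)
plot_marginals(fit)  # all parameters, spread across pages
dev.off()
}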
56 | } 57 | \examples{ 58 | fit <- readRDS(system.file('examples', 'fit.RDS', package='adnuts')) 59 | plot_marginals(fit, pars=1:2) 60 | 61 | } 62 | -------------------------------------------------------------------------------- /man/plot_sampler_params.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{plot_sampler_params} 4 | \alias{plot_sampler_params} 5 | \title{Plot adaptation metrics for a fitted model.} 6 | \usage{ 7 | plot_sampler_params(fit, plot = TRUE) 8 | } 9 | \arguments{ 10 | \item{fit}{A fitted object returned by 11 | \code{\link{sample_admb}}.} 12 | 13 | \item{plot}{Whether to plot the results} 14 | } 15 | \value{ 16 | Prints and invisibly returns a ggplot object 17 | } 18 | \description{ 19 | Plot adaptation metrics for a fitted model. 20 | } 21 | \details{ 22 | This utility function quickly plots the adaptation output of NUTS 23 | chains. 24 | } 25 | \examples{ 26 | fit <- readRDS(system.file('examples', 'fit.RDS', package='adnuts')) 27 | plot_sampler_params(fit) 28 | } 29 | -------------------------------------------------------------------------------- /man/plot_uncertainties.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{plot_uncertainties} 4 | \alias{plot_uncertainties} 5 | \title{Plot MLE vs MCMC marginal standard deviations for each 6 | parameter} 7 | \usage{ 8 | plot_uncertainties(fit, log = TRUE, plot = TRUE) 9 | } 10 | \arguments{ 11 | \item{fit}{A fitted object returned by 12 | \code{\link{sample_admb}}} 13 | 14 | \item{log}{Whether to plot the logarithm or not.} 15 | 16 | \item{plot}{Whether to plot it or not.} 17 | } 18 | \value{ 19 | Invisibly returns data.frame with parameter name and 20 | estimated uncertainties. 21 | } 22 | \description{ 23 | Plot MLE vs MCMC marginal standard deviations for each 24 | parameter 25 | } 26 | \details{ 27 | It can be helpful to compare uncertainty estimates 28 | between the two paradigms. This plots the marginal posterior 29 | standard deviation vs the frequentist standard error 30 | estimated from the .cor file. Large differences often 31 | indicate issues with one estimation method. 32 | } 33 | \examples{ 34 | fit <- readRDS(system.file('examples', 'fit.RDS', package='adnuts')) 35 | x <- plot_uncertainties(fit, plot=FALSE) 36 | head(x) 37 | } 38 | -------------------------------------------------------------------------------- /man/print.adfit.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{print.adfit} 4 | \alias{print.adfit} 5 | \title{Print summary of adfit object} 6 | \usage{ 7 | \method{print}{adfit}(x, ...) 
8 | } 9 | \arguments{ 10 | \item{x}{Fitted object from \code{\link{sample_admb}}} 11 | 12 | \item{...}{Ignored} 13 | } 14 | \value{ 15 | Summary printed to console 16 | } 17 | \description{ 18 | Print summary of adfit object 19 | } 20 | -------------------------------------------------------------------------------- /man/sample_admb.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/sample_admb.R 3 | \name{sample_admb} 4 | \alias{sample_admb} 5 | \title{Deprecated version of wrapper function. Use sample_nuts or 6 | sample_rwm instead.} 7 | \usage{ 8 | sample_admb( 9 | model, 10 | path = getwd(), 11 | iter = 2000, 12 | init = NULL, 13 | chains = 3, 14 | warmup = NULL, 15 | seeds = NULL, 16 | thin = 1, 17 | mceval = FALSE, 18 | duration = NULL, 19 | parallel = FALSE, 20 | cores = NULL, 21 | control = NULL, 22 | skip_optimization = TRUE, 23 | algorithm = "NUTS", 24 | skip_monitor = FALSE, 25 | skip_unbounded = TRUE, 26 | admb_args = NULL 27 | ) 28 | } 29 | \arguments{ 30 | \item{model}{Name of model (i.e., 'model' for model.tpl). For 31 | non-Windows systems this will automatically be converted to 32 | './model' internally. For Windows, long file names are 33 | sometimes shortened from e.g., 'long_model_filename' to 34 | 'LONG_~1'. This should work, but will throw warnings. Please 35 | shorten the model name. See 36 | https://en.wikipedia.org/wiki/8.3_filename.} 37 | 38 | \item{path}{Path to model executable. Defaults to working 39 | directory. Often best to have model files in a separate 40 | subdirectory, particularly for parallel.} 41 | 42 | \item{iter}{The number of samples to draw.} 43 | 44 | \item{init}{A list of lists containing the initial parameter 45 | vectors, one for each chain or a function. It is strongly 46 | recommended to initialize multiple chains from dispersed 47 | points. A of NULL signifies to use the starting values 48 | present in the model (i.e., \code{obj$par}) for all chains.} 49 | 50 | \item{chains}{The number of chains to run.} 51 | 52 | \item{warmup}{The number of warmup iterations.} 53 | 54 | \item{seeds}{A vector of seeds, one for each chain.} 55 | 56 | \item{thin}{The thinning rate to apply to samples. Typically 57 | not used with NUTS.} 58 | 59 | \item{mceval}{Whether to run the model with \code{-mceval} on 60 | samples from merged chains.} 61 | 62 | \item{duration}{The number of minutes after which the model 63 | will quit running.} 64 | 65 | \item{parallel}{A deprecated argument, use cores=1 for serial 66 | execution or cores>1 for parallel (default is to parallel 67 | with cores equal to the available-1)} 68 | 69 | \item{cores}{The number of cores to use for parallel 70 | execution. Default is number available in the system minus 71 | 1. If \code{cores=1}, serial execution occurs (even if 72 | \code{chains>1}), otherwise parallel execution via package 73 | snowfall is used. For slow analyses it is recommended to set 74 | \code{chains}<=\code{cores} so each core needs to run only a 75 | single chain.} 76 | 77 | \item{control}{A list to control the sampler. See details for 78 | further use.} 79 | 80 | \item{skip_optimization}{Whether to run the optimizer before 81 | running MCMC. 
This is rarely need as it is better to run it 82 | once before to get the covariance matrix, or the estimates 83 | are not needed with adaptive NUTS.} 84 | 85 | \item{algorithm}{The algorithm to use, one of "NUTS" or "RWM"} 86 | 87 | \item{skip_monitor}{Whether to skip calculating diagnostics 88 | (effective sample size, Rhat) via the \code{rstan::monitor} 89 | function. This can be slow for models with high dimension or 90 | many iterations. The result is used in plots and summaries 91 | so it is recommended to turn on. If model run with 92 | \code{skip_monitor=FALSE} you can recreate it post-hoc by 93 | setting \code{fit$monitor=rstan::monitor(fit$samples, 94 | fit$warmup, print=FALSE)}.} 95 | 96 | \item{skip_unbounded}{Whether to skip returning the unbounded 97 | version of the posterior samples in addition to the bounded 98 | ones. It may be advisable to set to FALSE for very large 99 | models to save space.} 100 | 101 | \item{admb_args}{A character string which gets passed to the 102 | command line, allowing finer control} 103 | } 104 | \description{ 105 | Deprecated version of wrapper function. Use sample_nuts or 106 | sample_rwm instead. 107 | } 108 | \section{Warning}{ 109 | This is deprecated and will cease to exist 110 | in future releases 111 | } 112 | 113 | -------------------------------------------------------------------------------- /man/sample_inits.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{sample_inits} 4 | \alias{sample_inits} 5 | \title{Function to generate random initial values from a previous fit using 6 | adnuts} 7 | \usage{ 8 | sample_inits(fit, chains) 9 | } 10 | \arguments{ 11 | \item{fit}{An outputted list from \code{\link{sample_admb}}} 12 | 13 | \item{chains}{The number of chains for the subsequent run, which 14 | determines the number to return.} 15 | } 16 | \value{ 17 | A list of lists which can be passed back into 18 | \code{\link{sample_admb}}. 19 | } 20 | \description{ 21 | Function to generate random initial values from a previous fit using 22 | adnuts 23 | } 24 | -------------------------------------------------------------------------------- /man/sample_tmb.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/sample_tmb_deprecated.R 3 | \name{sample_tmb} 4 | \alias{sample_tmb} 5 | \title{Bayesian inference of a TMB model using the no-U-turn sampler.} 6 | \usage{ 7 | sample_tmb( 8 | obj, 9 | iter = 2000, 10 | init, 11 | chains = 3, 12 | seeds = NULL, 13 | warmup = floor(iter/2), 14 | lower = NULL, 15 | upper = NULL, 16 | thin = 1, 17 | parallel = FALSE, 18 | cores = NULL, 19 | path = NULL, 20 | algorithm = "NUTS", 21 | laplace = FALSE, 22 | control = NULL, 23 | ... 24 | ) 25 | } 26 | \arguments{ 27 | \item{obj}{A TMB model object.} 28 | 29 | \item{iter}{The number of samples to draw.} 30 | 31 | \item{init}{A list of lists containing the initial parameter 32 | vectors, one for each chain or a function. It is strongly 33 | recommended to initialize multiple chains from dispersed 34 | points. 
A of NULL signifies to use the starting values 35 | present in the model (i.e., \code{obj$par}) for all chains.} 36 | 37 | \item{chains}{The number of chains to run.} 38 | 39 | \item{seeds}{A vector of seeds, one for each chain.} 40 | 41 | \item{warmup}{The number of warmup iterations.} 42 | 43 | \item{lower}{A vector of lower bounds for parameters. Allowed values are 44 | -Inf and numeric.} 45 | 46 | \item{upper}{A vector of upper bounds for parameters. Allowed values are 47 | Inf and numeric.} 48 | 49 | \item{thin}{The thinning rate to apply to samples. Typically 50 | not used with NUTS.} 51 | 52 | \item{parallel}{A deprecated argument, use cores=1 for serial 53 | execution or cores>1 for parallel (default is to parallel 54 | with cores equal to the available-1)} 55 | 56 | \item{cores}{The number of cores to use for parallel 57 | execution. Default is number available in the system minus 58 | 1. If \code{cores=1}, serial execution occurs (even if 59 | \code{chains>1}), otherwise parallel execution via package 60 | snowfall is used. For slow analyses it is recommended to set 61 | \code{chains}<=\code{cores} so each core needs to run only a 62 | single chain.} 63 | 64 | \item{path}{Path to model executable. Defaults to working 65 | directory. Often best to have model files in a separate 66 | subdirectory, particularly for parallel.} 67 | 68 | \item{algorithm}{The algorithm to use. NUTS is the default and 69 | recommended one, but "RWM" for the random walk Metropolis sampler and 70 | "HMC" for the static HMC sampler are available. These last two are 71 | deprecated but may be of use in some situations. These algorithms 72 | require different arguments; see their help files for more 73 | information.} 74 | 75 | \item{laplace}{Whether to use the Laplace approximation if some 76 | parameters are declared as random. Default is to turn off this 77 | functionality and integrate across all parameters with MCMC.} 78 | 79 | \item{control}{A list to control the sampler. See details for 80 | further use.} 81 | 82 | \item{...}{Further arguments to be passed to samplers} 83 | } 84 | \value{ 85 | A list containing the samples, and properties of the sampler 86 | useful for diagnosing behavior and efficiency. 87 | } 88 | \description{ 89 | Draw Bayesian posterior samples from a Template Model Builder (TMB) 90 | model using an MCMC algorithm. This function generates posterior samples 91 | from which inference can be made. Adaptation schemes are used so 92 | specification tuning parameters are not necessary, and parallel 93 | execution reduces overall run time. 94 | } 95 | \details{ 96 | This function implements algorithm 6 of Hoffman and Gelman (2014), 97 | and loosely follows package \code{rstan}. The step size can be 98 | adapted or specified manually. The metric (i.e., mass matrix) can be 99 | unit diagonal, adapted diagonal (default and recommended), or a dense 100 | matrix specified by the user. Further control of algorithms can be 101 | specified with the \code{control} argument. Elements are: 102 | \describe{ 103 | \item{adapt_delta}{The target acceptance rate.} 104 | \item{metric}{The mass metric to use. Options are: "unit" for a unit diagonal 105 | matrix; "diag" to estimate a diagonal matrix during warmup; a matrix 106 | to be used directly (in untransformed space).} 107 | \item{adapt_engaged}{Whether adaptation of step size and metric is turned on.} 108 | \item{max_treedepth}{Maximum treedepth for the NUTS algorithm.} 109 | \item{stepsize}{The stepsize for the NUTS algorithm. 
If \code{NULL} it 110 | will be adapted during warmup.} 111 | } 112 | } 113 | \section{Warning}{ 114 | This is deprecated and will cease to exist 115 | in future releases 116 | } 117 | 118 | \examples{ 119 | ## Build a fake TMB object with objective & gradient functions and some 120 | ## other flags 121 | \dontrun{ 122 | f <- function(x, order=0){ 123 | if(order != 1) # negative log density 124 | -sum(dnorm(x=x, mean=0, sd=1, log=TRUE)) 125 | else x # gradient of negative log density 126 | } 127 | init <- function() rnorm(2) 128 | obj <- list(env=list(DLL='demo', last.par.best=c(x=init()), f=f, 129 | beSilent=function() NULL)) 130 | ## Run NUTS for this object 131 | fit <- sample_tmb(obj, iter=1000, chains=3, init=init) 132 | ## Check basic diagnostics 133 | mon <- rstan::monitor(fit$samples, print=FALSE) 134 | Rhat <- mon[,"Rhat"] 135 | max(Rhat) 136 | ess <- mon[, 'n_eff'] 137 | min(ess) 138 | ## Or do it interactively with ShinyStan 139 | launch_shinytmb(fit) 140 | } 141 | 142 | } 143 | \seealso{ 144 | \code{\link{extract_samples}} to extract samples and 145 | \code{\link{launch_shinytmb}} to explore the results graphically which 146 | is a wrapper for the \code{\link[shinystan]{launch_shinystan}} function. 147 | } 148 | \author{ 149 | Cole Monnahan 150 | } 151 | -------------------------------------------------------------------------------- /man/sample_tmb_hmc.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/sample_tmb_deprecated.R 3 | \name{sample_tmb_hmc} 4 | \alias{sample_tmb_hmc} 5 | \title{Draw MCMC samples from a model posterior using a static HMC sampler.} 6 | \usage{ 7 | sample_tmb_hmc( 8 | iter, 9 | fn, 10 | gr, 11 | init, 12 | L, 13 | eps, 14 | warmup = floor(iter/2), 15 | seed = NULL, 16 | chain = 1, 17 | thin = 1, 18 | control = NULL 19 | ) 20 | } 21 | \arguments{ 22 | \item{iter}{The number of samples to draw.} 23 | 24 | \item{fn}{A function that returns the log of the posterior density.} 25 | 26 | \item{gr}{A function that returns a vector of gradients of the log of 27 | the posterior density (same as \code{fn}).} 28 | 29 | \item{init}{A list of lists containing the initial parameter 30 | vectors, one for each chain or a function. It is strongly 31 | recommended to initialize multiple chains from dispersed 32 | points. A of NULL signifies to use the starting values 33 | present in the model (i.e., \code{obj$par}) for all chains.} 34 | 35 | \item{L}{The number of leapfrog steps to take. The NUTS algorithm does 36 | not require this as an input. If \code{L=1} this function will perform 37 | Langevin sampling. In some contexts \code{L} can roughly be thought of 38 | as a thinning rate.} 39 | 40 | \item{eps}{The step size. If a numeric value is passed, it will be used 41 | throughout the entire chain. A \code{NULL} value will initiate 42 | sampler_params of \code{eps} using the dual averaging algorithm during 43 | the first \code{warmup} steps.} 44 | 45 | \item{warmup}{The number of warmup iterations.} 46 | 47 | \item{seed}{The random seed to use.} 48 | 49 | \item{chain}{The chain number, for printing only.} 50 | 51 | \item{thin}{The thinning rate to apply to samples. Typically 52 | not used with NUTS.} 53 | 54 | \item{control}{A list to control the sampler. 
See details for 55 | further use.} 56 | } 57 | \value{ 58 | A list containing samples ('par') and algorithm details such as 59 | step size adaptation and acceptance probabilities per iteration 60 | ('sampler_params'). 61 | } 62 | \description{ 63 | Draw MCMC samples from a model posterior using a static HMC sampler. 64 | } 65 | \details{ 66 | This function implements algorithm 5 of Hoffman and Gelman 67 | (2014), which includes adaptive step sizes (\code{eps}) via an 68 | algorithm called dual averaging. 69 | } 70 | \references{ 71 | \itemize{ \item{Neal, R. M. (2011). MCMC using Hamiltonian 72 | dynamics. Handbook of Markov Chain Monte Carlo.} \item{Hoffman and 73 | Gelman (2014). The No-U-Turn sampler: Adaptively setting path lengths 74 | in Hamiltonian Monte Carlo. J. Mach. Learn. Res. 15:1593-1623.} } 75 | 76 | Hoffman and Gelman (2014). The No-U-Turn sampler: Adaptively setting 77 | path lengths in Hamiltonian Monte Carlo. J. Mach. Learn. Res. 78 | 15:1593-1623. 79 | } 80 | \seealso{ 81 | \code{\link{sample_tmb}} 82 | 83 | \code{\link{sample_tmb}} 84 | } 85 | -------------------------------------------------------------------------------- /man/sample_tmb_nuts.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/sample_tmb_deprecated.R 3 | \name{sample_tmb_nuts} 4 | \alias{sample_tmb_nuts} 5 | \title{Draw MCMC samples from a model posterior using the No-U-Turn (NUTS) 6 | sampler with dual averaging.} 7 | \usage{ 8 | sample_tmb_nuts( 9 | iter, 10 | fn, 11 | gr, 12 | init, 13 | warmup = floor(iter/2), 14 | chain = 1, 15 | thin = 1, 16 | seed = NULL, 17 | control = NULL 18 | ) 19 | } 20 | \arguments{ 21 | \item{iter}{The number of samples to draw.} 22 | 23 | \item{fn}{A function that returns the log of the posterior density.} 24 | 25 | \item{gr}{A function that returns a vector of gradients of the log of 26 | the posterior density (same as \code{fn}).} 27 | 28 | \item{init}{A list of lists containing the initial parameter 29 | vectors, one for each chain or a function. It is strongly 30 | recommended to initialize multiple chains from dispersed 31 | points. A of NULL signifies to use the starting values 32 | present in the model (i.e., \code{obj$par}) for all chains.} 33 | 34 | \item{warmup}{The number of warmup iterations.} 35 | 36 | \item{chain}{The chain number, for printing only.} 37 | 38 | \item{thin}{The thinning rate to apply to samples. Typically 39 | not used with NUTS.} 40 | 41 | \item{seed}{The random seed to use.} 42 | 43 | \item{control}{A list to control the sampler. See details for 44 | further use.} 45 | } 46 | \description{ 47 | Draw MCMC samples from a model posterior using the No-U-Turn (NUTS) 48 | sampler with dual averaging. 49 | } 50 | \details{ 51 | This function implements algorithm 6 of Hoffman and Gelman 52 | (2014), which includes adaptive step sizes (\code{eps}) via an 53 | algorithm called dual averaging. It also includes an adaptation scheme 54 | to tune a diagonal mass matrix (metric) during warmup. 55 | 56 | These \code{fn} and \code{gr} functions must have Jacobians already 57 | applied if there are transformations used. 58 | } 59 | \references{ 60 | Hoffman and Gelman (2014). The No-U-Turn sampler: Adaptively setting 61 | path lengths in Hamiltonian Monte Carlo. J. Mach. Learn. Res. 62 | 15:1593-1623. 
63 | } 64 | \seealso{ 65 | \code{sample_tmb} 66 | } 67 | -------------------------------------------------------------------------------- /man/sample_tmb_rwm.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/sample_tmb_deprecated.R 3 | \name{sample_tmb_rwm} 4 | \alias{sample_tmb_rwm} 5 | \title{[Deprecated] Draw MCMC samples from a model posterior using a 6 | Random Walk Metropolis (RWM) sampler.} 7 | \usage{ 8 | sample_tmb_rwm( 9 | iter, 10 | fn, 11 | init, 12 | alpha = 1, 13 | chain = 1, 14 | warmup = floor(iter/2), 15 | thin = 1, 16 | seed = NULL, 17 | control = NULL 18 | ) 19 | } 20 | \arguments{ 21 | \item{iter}{The number of samples to draw.} 22 | 23 | \item{fn}{A function that returns the log of the posterior density.} 24 | 25 | \item{init}{A list of lists containing the initial parameter 26 | vectors, one for each chain or a function. It is strongly 27 | recommended to initialize multiple chains from dispersed 28 | points. A of NULL signifies to use the starting values 29 | present in the model (i.e., \code{obj$par}) for all chains.} 30 | 31 | \item{alpha}{The amount to scale the proposal, i.e, 32 | Xnew=Xcur+alpha*Xproposed where Xproposed is generated from a mean-zero 33 | multivariate normal. Varying \code{alpha} varies the acceptance rate.} 34 | 35 | \item{chain}{The chain number, for printing only.} 36 | 37 | \item{warmup}{The number of warmup iterations.} 38 | 39 | \item{thin}{The thinning rate to apply to samples. Typically 40 | not used with NUTS.} 41 | 42 | \item{seed}{The random seed to use.} 43 | 44 | \item{control}{A list to control the sampler. See details for 45 | further use.} 46 | } 47 | \value{ 48 | A list containing samples and other metadata. 49 | } 50 | \description{ 51 | [Deprecated] Draw MCMC samples from a model posterior using a 52 | Random Walk Metropolis (RWM) sampler. 53 | } 54 | \details{ 55 | This algorithm does not yet contain adaptation of \code{alpha} 56 | so some trial and error may be required for efficient sampling. 57 | } 58 | \references{ 59 | Metropolis, N., Rosenbluth, A.W., Rosenbluth, M.N., Teller, A.H., 60 | Teller, E., 1953. Equation of state calculations by fast computing 61 | machines. J Chem Phys. 21:1087-1092. 62 | } 63 | \seealso{ 64 | \code{\link{sample_tmb}} 65 | } 66 | -------------------------------------------------------------------------------- /man/summary.adfit.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{summary.adfit} 4 | \alias{summary.adfit} 5 | \title{Print summary of object of class adfit} 6 | \usage{ 7 | \method{summary}{adfit}(object, ...) 
8 | } 9 | \arguments{ 10 | \item{object}{Fitted object from \code{\link{sample_admb}}} 11 | 12 | \item{...}{Ignored} 13 | } 14 | \value{ 15 | Summary printed to screen 16 | } 17 | \description{ 18 | Print summary of object of class adfit 19 | } 20 | -------------------------------------------------------------------------------- /man/wrappers.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/sample_admb.R 3 | \name{sample_nuts} 4 | \alias{sample_nuts} 5 | \alias{sample_rwm} 6 | \alias{wrappers} 7 | \title{Bayesian inference of an ADMB model using the no-U-turn 8 | sampler (NUTS) or random walk Metropolis (RWM) algorithms.} 9 | \usage{ 10 | sample_nuts( 11 | model, 12 | path = getwd(), 13 | iter = 2000, 14 | init = NULL, 15 | chains = 3, 16 | warmup = NULL, 17 | seeds = NULL, 18 | thin = 1, 19 | mceval = FALSE, 20 | duration = NULL, 21 | parallel = FALSE, 22 | cores = NULL, 23 | control = NULL, 24 | skip_optimization = TRUE, 25 | verbose = TRUE, 26 | skip_monitor = FALSE, 27 | skip_unbounded = TRUE, 28 | admb_args = NULL, 29 | extra.args = NULL 30 | ) 31 | 32 | sample_rwm( 33 | model, 34 | path = getwd(), 35 | iter = 2000, 36 | init = NULL, 37 | chains = 3, 38 | warmup = NULL, 39 | seeds = NULL, 40 | thin = 1, 41 | mceval = FALSE, 42 | duration = NULL, 43 | parallel = FALSE, 44 | cores = NULL, 45 | control = NULL, 46 | skip_optimization = TRUE, 47 | verbose = TRUE, 48 | skip_monitor = FALSE, 49 | skip_unbounded = TRUE, 50 | admb_args = NULL, 51 | extra.args = NULL 52 | ) 53 | } 54 | \arguments{ 55 | \item{model}{Name of model (i.e., 'model' for model.tpl). For 56 | non-Windows systems this will automatically be converted to 57 | './model' internally. For Windows, long file names are 58 | sometimes shortened from e.g., 'long_model_filename' to 59 | 'LONG_~1'. This should work, but will throw warnings. Please 60 | shorten the model name. See 61 | https://en.wikipedia.org/wiki/8.3_filename.} 62 | 63 | \item{path}{Path to model executable. Defaults to working 64 | directory. Often best to have model files in a separate 65 | subdirectory, particularly for parallel.} 66 | 67 | \item{iter}{The number of samples to draw.} 68 | 69 | \item{init}{A list of lists containing the initial parameter 70 | vectors, one for each chain or a function. It is strongly 71 | recommended to initialize multiple chains from dispersed 72 | points. A of NULL signifies to use the starting values 73 | present in the model (i.e., \code{obj$par}) for all chains.} 74 | 75 | \item{chains}{The number of chains to run.} 76 | 77 | \item{warmup}{The number of warmup iterations.} 78 | 79 | \item{seeds}{A vector of seeds, one for each chain.} 80 | 81 | \item{thin}{The thinning rate to apply to samples. Typically 82 | not used with NUTS.} 83 | 84 | \item{mceval}{Whether to run the model with \code{-mceval} on 85 | samples from merged chains.} 86 | 87 | \item{duration}{The number of minutes after which the model 88 | will quit running.} 89 | 90 | \item{parallel}{A deprecated argument, use cores=1 for serial 91 | execution or cores>1 for parallel (default is to parallel 92 | with cores equal to the available-1)} 93 | 94 | \item{cores}{The number of cores to use for parallel 95 | execution. Default is number available in the system minus 96 | 1. If \code{cores=1}, serial execution occurs (even if 97 | \code{chains>1}), otherwise parallel execution via package 98 | snowfall is used. 
For slow analyses it is recommended to set
99 | \code{chains}<=\code{cores} so each core needs to run only a
100 | single chain.}
101 |
102 | \item{control}{A list to control the sampler. See details for
103 | further use.}
104 |
105 | \item{skip_optimization}{Whether to run the optimizer before
106 | running MCMC. This is rarely needed, as it is better to run the
107 | optimizer once beforehand to get the covariance matrix, or the
108 | estimates are simply not needed with adaptive NUTS.}
109 |
110 | \item{verbose}{Flag whether to show console output (default)
111 | or suppress it completely except for warnings and
112 | errors. Works for serial or parallel execution.}
113 |
114 | \item{skip_monitor}{Whether to skip calculating diagnostics
115 | (effective sample size, Rhat) via the \code{rstan::monitor}
116 | function. This can be slow for models with high dimension or
117 | many iterations. The result is used in plots and summaries,
118 | so it is recommended to leave monitoring turned on. If the model was run with
119 | \code{skip_monitor=TRUE} you can recreate it post-hoc by
120 | setting \code{fit$monitor=rstan::monitor(fit$samples,
121 | fit$warmup, print=FALSE)}.}
122 |
123 | \item{skip_unbounded}{Whether to skip returning the unbounded
124 | version of the posterior samples in addition to the bounded
125 | ones. It may be advisable to set to FALSE for very large
126 | models to save space.}
127 |
128 | \item{admb_args}{A character string which gets passed to the
129 | command line, allowing finer control}
130 |
131 | \item{extra.args}{Deprecated, use \code{admb_args} instead.}
132 | }
133 | \description{
134 | Draw Bayesian posterior samples from an AD Model Builder
135 | (ADMB) model using an MCMC algorithm. `sample_nuts` and
136 | `sample_rwm` generate posterior samples from which inference
137 | can be made.
138 | }
139 | \details{
140 | Adaptation schemes are used with NUTS so specifying tuning
141 | parameters is not necessary. See the vignette for options for
142 | adaptation of step size and mass matrix. The RWM algorithm
143 | provides no new functionality beyond that available in previous
144 | versions of ADMB. However, `sample_rwm` has improved
145 | console output, is set up for parallel execution, and offers a smooth
146 | workflow for diagnostics.
147 |
148 | Parallel chains will be run if argument `cores` is greater
149 | than one. This entails copying the model folder and starting a new
150 | R session for each chain; the chains are then merged back
151 | together. Note that console output is inconsistent when running in
152 | parallel, and may not show at all. On Windows the R terminal shows
153 | output live, but the GUI does not. RStudio is a special case:
154 | output will not show live, and is instead captured and returned
155 | at the end. It is strongly recommended to start with serial
156 | execution, as debugging parallel chains is very difficult.
157 |
158 | Note that the algorithm code is in the ADMB source code, and
159 | 'adnuts' provides a wrapper for it. The command line arguments
160 | are returned and can be examined by the user. See the vignette for
161 | more information.
162 |
163 | This function implements algorithm 6 of Hoffman and Gelman (2014),
164 | and loosely follows package \code{rstan}. The step size can be
165 | adapted or specified manually. The metric (i.e., mass matrix) can be
166 | unit diagonal, adapted diagonal (default and recommended), a dense
167 | matrix specified by the user, or an adapted dense matrix.
168 | Further control of algorithms can be
169 | specified with the \code{control} argument.
Elements are:
170 | \describe{
171 | \item{adapt_delta}{The target acceptance rate.}
172 | \item{metric}{The mass metric to use. Options are: "unit" for a unit diagonal
173 | matrix; \code{NULL} to estimate a diagonal matrix during warmup; a matrix
174 | to be used directly (in untransformed space).}
176 | \item{adapt_mass}{Whether adaptation of the mass matrix is turned
177 | on. Currently only allowed for a diagonal metric.}
178 | \item{adapt_mass_dense}{Whether dense adaptation of the mass
179 | matrix is turned on.}
180 | \item{max_treedepth}{Maximum treedepth for the NUTS algorithm.}
181 | \item{stepsize}{The stepsize for the NUTS algorithm. If \code{NULL} it
182 | will be adapted during warmup.}
183 | \item{adapt_init_buffer}{The initial buffer size during mass matrix
184 | adaptation where sample information is not used (default
185 | 50)}
186 | \item{adapt_term_buffer}{The terminal buffer size (default 75)
187 | during mass
188 | matrix adaptation (final fast phase)}
189 | \item{adapt_window}{The initial size of the mass matrix
190 | adaptation window, which gets doubled each time thereafter.}
191 | \item{refresh}{The rate at which to refresh progress in the
192 | console. Defaults to every 10\% of iterations. A value of 0 turns off
193 | progress updates.}
194 | }
195 | The adaptation scheme (step size and mass matrix) is based heavily on that used by the
196 | software Stan, and more details can be found in that
197 | documentation and this vignette.
198 | }
199 | \section{Warning}{
200 | The user is responsible for specifying the
201 | model properly (priors, starting values, desired parameters
202 | fixed, etc.), as well as assessing the convergence and
203 | validity of the resulting samples (e.g., through the
204 | \code{coda} package), or with function
205 | \code{\link{launch_shinyadmb}} before making
206 | inference. Specifically, priors must be specified in the
207 | template file for each parameter. Unspecified priors will be
208 | implicitly uniform.
209 | }
210 |
211 | \examples{
212 | \dontrun{
213 | ## This is the packaged simple regression model
214 | path.simple <- system.file('examples', 'simple', package='adnuts')
215 | ## It is best to have your ADMB files in a separate folder and provide that
216 | ## path, so make a copy of the model folder locally.
217 | path <- 'simple'
218 | dir.create(path)
219 | trash <- file.copy(from=list.files(path.simple, full.names=TRUE), to=path)
220 | ## Compile and run model
221 | oldwd <- getwd()
222 | setwd(path)
223 | system('admb simple.tpl')
224 | system('simple')
225 | setwd('..')
226 | init <- function() rnorm(2)
227 | ## Run NUTS with defaults
228 | fit <- sample_nuts(model='simple', init=init, path=path)
229 | unlink(path, TRUE) # cleanup folder
230 | setwd(oldwd)
231 | }
232 |
233 | }
234 | \author{
235 | Cole Monnahan
236 | }
237 |
-------------------------------------------------------------------------------- /tests/simple/simple.dat: --------------------------------------------------------------------------------
1 | # number of observations
2 | 10
3 | # observed Y values
4 | 1.4 4.7 5.1 8.3 9.0 14.5 14.0 13.4 19.2 18
5 | # observed x values
6 | -1 0 1 2 3 4 5 6 7 8
7 |
8 |
-------------------------------------------------------------------------------- /tests/simple/simple.tpl: --------------------------------------------------------------------------------
1 | // Copyright (c) 2008, 2009, 2010 Regents of the University of California.
2 | // 3 | // ADModelbuilder and associated libraries and documentations are 4 | // provided under the general terms of the "BSD" license. 5 | // 6 | // Redistribution and use in source and binary forms, with or without 7 | // modification, are permitted provided that the following conditions are 8 | // met: 9 | // 10 | // 1. Redistributions of source code must retain the above copyright 11 | // notice, this list of conditions and the following disclaimer. 12 | // 13 | // 2. Redistributions in binary form must reproduce the above copyright 14 | // notice, this list of conditions and the following disclaimer in the 15 | // documentation and/or other materials provided with the distribution. 16 | // 17 | // 3. Neither the name of the University of California, Otter Research, 18 | // nor the ADMB Foundation nor the names of its contributors may be used 19 | // to endorse or promote products derived from this software without 20 | // specific prior written permission. 21 | // 22 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 23 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 24 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 25 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 26 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 27 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 28 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 29 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 30 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 31 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 32 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
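// Simple linear regression example model: it estimates a slope 'a' and an
// intercept 'b' for the (x, Y) data read in below, with f the negative
// log likelihood minimized by ADMB.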
33 | 34 | DATA_SECTION 35 | init_int nobs 36 | init_vector Y(1,nobs) 37 | init_vector x(1,nobs) 38 | PARAMETER_SECTION 39 | init_bounded_number a(-10,8); 40 | init_bounded_number b(-19,15); 41 | vector pred_Y(1,nobs) 42 | sdreport_number aa 43 | objective_function_value f 44 | PROCEDURE_SECTION 45 | aa=a; 46 | pred_Y=a*x+b; 47 | f=(norm2(pred_Y-Y)); 48 | f=nobs/2.*log(f); // make it a likelihood function so that 49 | // covariance matrix is correct 50 | 51 | -------------------------------------------------------------------------------- /tests/testthat.R: -------------------------------------------------------------------------------- 1 | library(testthat) 2 | library(adnuts) 3 | 4 | test_check("adnuts") 5 | -------------------------------------------------------------------------------- /tests/testthat/_expect_monitor: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Cole-Monnahan-NOAA/adnuts/ff0c8d3d56006e61ea3651ea27fedc97f2a02afe/tests/testthat/_expect_monitor -------------------------------------------------------------------------------- /tests/testthat/_expect_nuts: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Cole-Monnahan-NOAA/adnuts/ff0c8d3d56006e61ea3651ea27fedc97f2a02afe/tests/testthat/_expect_nuts -------------------------------------------------------------------------------- /tests/testthat/_expect_nuts_mle: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Cole-Monnahan-NOAA/adnuts/ff0c8d3d56006e61ea3651ea27fedc97f2a02afe/tests/testthat/_expect_nuts_mle -------------------------------------------------------------------------------- /tests/testthat/_expect_simple_rwm: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Cole-Monnahan-NOAA/adnuts/ff0c8d3d56006e61ea3651ea27fedc97f2a02afe/tests/testthat/_expect_simple_rwm -------------------------------------------------------------------------------- /tests/testthat/_expect_sp: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Cole-Monnahan-NOAA/adnuts/ff0c8d3d56006e61ea3651ea27fedc97f2a02afe/tests/testthat/_expect_sp -------------------------------------------------------------------------------- /tests/testthat/fit.RDS: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Cole-Monnahan-NOAA/adnuts/ff0c8d3d56006e61ea3651ea27fedc97f2a02afe/tests/testthat/fit.RDS -------------------------------------------------------------------------------- /tests/testthat/setup.R: -------------------------------------------------------------------------------- 1 | ## Skip consistency and reproducibility tests? Only need to run 2 | ## these locally when ADMB changes. 3 | skip_consistency <- TRUE 4 | skip_reproducibility <- TRUE 5 | 6 | ### Skip all this if on CRAN. Otherwise locally or on CI, need to 7 | ### build the executables and run them so they're available for 8 | ### the tests. Then cleanup. On CRAN only a .RDS file is read in 9 | ### and really simple tests are performed. 
10 | if(Sys.getenv("NOT_CRAN")=='true'){ 11 | oldwd <- getwd() 12 | setwd('../simple') 13 | system("admb simple") 14 | system('./simple') 15 | expect_equal(readLines('simple.par')[2], '# a:') # hack to test something 16 | dir.create('../simple_long_filename') 17 | trash <- file.copy('../simple/simple.tpl', 18 | to='../simple_long_filename/simple_long_filename.tpl') 19 | trash <- file.copy('../simple/simple.dat', 20 | to='../simple_long_filename/simple_long_filename.dat') 21 | setwd('../simple_long_filename') 22 | system("admb simple_long_filename") 23 | system('./simple_long_filename') 24 | setwd(oldwd) 25 | 26 | ## Clean up files to pass checks locally 27 | if(requireNamespace('withr')){ 28 | withr::defer({ 29 | files <- list.files('../simple', full.names = TRUE) 30 | ignore <- file.remove(files[-grep('.dat|.tpl', x=files)]) 31 | unlink('../simple_long_filename', TRUE) 32 | unlink("../simple_chain_1", TRUE) 33 | unlink("../simple_chain_2", TRUE) 34 | unlink("../simple_chain_3", TRUE) 35 | ## dev.off() 36 | ## plotout <- 'Rplots.pdf' 37 | ## trash <- if(file.exists(plotout)) file.remove(plotout) 38 | }, teardown_env()) 39 | } 40 | 41 | } 42 | -------------------------------------------------------------------------------- /tests/testthat/test-class-methods.R: -------------------------------------------------------------------------------- 1 | ## Note these were tested with ADMB 12.2 on 8/3/2020 2 | test_that("class methods working", { 3 | fit <- readRDS('fit.RDS') 4 | x <- as.data.frame(fit) 5 | expect_is(x, 'data.frame') 6 | summary(fit) 7 | print(fit) 8 | plot_marginals(fit) 9 | }) 10 | -------------------------------------------------------------------------------- /tests/testthat/test-consistency.R: -------------------------------------------------------------------------------- 1 | test_that("consistency of algorithms within platform", { 2 | skip_on_cran() 3 | skip_if(skip_consistency) 4 | ## These will work across platforms b/c they compare within 5 | ## each. Running the same chains repeatedly to make sure the 6 | ## same answer each time. 7 | 8 | ## Check consistency given same init and seeds 9 | inits.fn <- function() list(c(0,0)) 10 | chains <- 10 11 | cores <- NULL # use parallel which probably catches errors better 12 | iter <- 2000 # slightly longer ones to detect subtle divergences 13 | myequal <- function(fit) length(unique(fit$samples[iter,,3]))==1 14 | 15 | test <- sample_rwm('simple', path='../simple', iter=10, init=inits.fn, 16 | skip_optimization=FALSE, chains=1) 17 | fit <- sample_rwm('simple', path='../simple', chains=chains, 18 | iter=iter*10, cores=cores, 19 | seeds=rep(45,chains), init=inits.fn, 20 | skip_monitor=TRUE, 21 | control=list(refresh=-1)) 22 | expect_identical(myequal(fit), TRUE) 23 | ## These correspond to the 6 options in the metric table in the 24 | ## vignette. 
25 | seeds <- rep(123,chains) 26 | ## Initialize with diagonal for first three 27 | ignore <- file.remove('../simple/admodel.cov') # dont need this 28 | fit1 <- sample_nuts('simple', path='../simple', chains=chains, iter=iter, 29 | seeds=seeds, init=inits.fn, 30 | skip_monitor=TRUE, 31 | control=list(refresh=-1, adapt_mass=FALSE), 32 | cores=cores) 33 | expect_identical(myequal(fit1), TRUE) 34 | fit2 <- sample_nuts('simple', path='../simple', chains=chains, iter=iter, 35 | seeds=seeds, init=inits.fn, 36 | control=list(refresh=-1), 37 | skip_monitor=TRUE, 38 | cores=cores) 39 | expect_identical(myequal(fit2), TRUE) 40 | fit3 <- sample_nuts('simple', path='../simple', chains=chains, iter=iter, 41 | seeds=seeds, init=inits.fn, 42 | skip_monitor=TRUE, 43 | control=list(refresh=-1, adapt_mass_dense=TRUE), 44 | cores=cores) 45 | expect_identical(myequal(fit3), TRUE) 46 | ## Next three initialize from MLE, need to rerun model to get these 47 | test <- sample_nuts('simple', path='../simple', iter=100, 48 | init=inits.fn, chains=1, skip_optimization=FALSE) 49 | fit4 <- sample_nuts('simple', path='../simple', chains=chains, iter=iter, 50 | seeds=seeds, init=inits.fn, 51 | skip_monitor=TRUE, 52 | control=list(refresh=-1, metric='mle'), 53 | cores=cores) 54 | expect_identical(myequal(fit4), TRUE) 55 | fit5 <- sample_nuts('simple', path='../simple', chains=chains, iter=iter, 56 | seeds=seeds, init=inits.fn, 57 | skip_monitor=TRUE, 58 | control=list(refresh=-1, metric='mle', adapt_mass=TRUE), 59 | cores=cores) 60 | expect_identical(myequal(fit5), TRUE) 61 | fit6 <- sample_nuts('simple', path='../simple', chains=chains, iter=iter, 62 | seeds=seeds, init=inits.fn, 63 | skip_monitor=TRUE, 64 | control=list(refresh=-1, metric='mle', adapt_mass_dense=TRUE), 65 | cores=cores) 66 | expect_identical(myequal(fit6), TRUE) 67 | ## In addition test passing a user matrix, here unit diag 68 | fit7 <- suppressWarnings(sample_nuts('simple', path='../simple', chains=chains, iter=iter, 69 | seeds=seeds, init=inits.fn, 70 | skip_monitor=TRUE, 71 | control=list(refresh=-1, metric=diag(2)), 72 | cores=cores)) 73 | expect_identical(myequal(fit7), TRUE) 74 | fit8 <- suppressWarnings(sample_nuts('simple', path='../simple', chains=chains, iter=iter, 75 | seeds=seeds, init=inits.fn, 76 | skip_monitor=TRUE, 77 | control=list(refresh=-1, metric=diag(2), adapt_mass=TRUE), 78 | cores=cores)) 79 | expect_identical(myequal(fit8), TRUE) 80 | fit9 <- suppressWarnings(sample_nuts('simple', path='../simple', chains=chains, iter=iter, 81 | seeds=seeds, init=inits.fn, 82 | skip_monitor=TRUE, 83 | control=list(refresh=-1, metric=diag(2), adapt_mass_dense=TRUE), 84 | cores=cores)) 85 | expect_identical(myequal(fit9), TRUE) 86 | fit10 <- sample_nuts('simple', path='../simple', chains=chains, iter=iter, 87 | seeds=seeds, init=inits.fn, 88 | skip_optimization = FALSE, 89 | skip_monitor=TRUE, 90 | control=list(refresh=-1, metric='mle', stepsize=.1), 91 | cores=cores) 92 | expect_identical(myequal(fit10), TRUE) 93 | }) 94 | -------------------------------------------------------------------------------- /tests/testthat/test-diagnostics.R: -------------------------------------------------------------------------------- 1 | test_that("diagnostics and plotting", { 2 | fit <- readRDS('fit.RDS') 3 | sp <- extract_sampler_params(fit) 4 | expect_is(sp, 'data.frame') 5 | plot_sampler_params(fit, TRUE) 6 | pairs_admb(fit) 7 | pairs_admb(fit, pars=1:3, order='slow') 8 | pairs_admb(fit, pars=1:3, order='fast') 9 | pairs_admb(fit, pars=c('a', 'lp__', 'b'), 
add.monitor=FALSE) 10 | expect_warning(pairs_admb(fit, pars=c('a', 'b', 'c')), 'Some par names did not match') 11 | expect_error(pairs_admb(fit, pars=c('a')), 'only meaningful for >1 parameter') 12 | pairs_admb(fit, add.mle=FALSE) 13 | pairs_admb(fit, add.mle=FALSE, diag='hist') 14 | pairs_admb(fit, add.mle=FALSE, diag='acf') 15 | expect_error(pairs_admb(fit, add.mle=FALSE, diag='bad'), 'should be one of') 16 | plot_marginals(fit) 17 | plot_marginals(fit, add.monitor=FALSE) 18 | plot_marginals(fit, add.mle=FALSE) 19 | x <- plot_uncertainties(fit, plot=FALSE) 20 | }) 21 | -------------------------------------------------------------------------------- /tests/testthat/test-reproducibility.R: -------------------------------------------------------------------------------- 1 | test_that("reproducibility of algorithms between versions", { 2 | skip_on_cran() 3 | ## due to compiler differences these wont match between 4 | ## platforms, so useful only for testing that new releases of 5 | ## ADMB don't break something unexpectedly 6 | skip_on_ci() 7 | skip_if(skip_reproducibility) 8 | 9 | ## Check reproducibility given same init and seeds 10 | inits.fn <- function() list(c(0,0)) 11 | chains <- 1 12 | fit <- sample_rwm('simple', path='../simple', chains=chains, 13 | iter=400, cores=1, 14 | seeds=rep(45,chains), init=inits.fn, 15 | skip_optimization=FALSE, 16 | control=list(refresh=-1)) 17 | expect_identical(unique(fit$samples[400,,3]), -16.0439) 18 | ## These correspond to the 6 options in the metric table in the 19 | ## vignette. 20 | seeds <- rep(123,chains) 21 | ## Initialize with diagonal for first three 22 | ignore <- file.remove('../simple/admodel.cov') # dont need this 23 | fit1 <- sample_nuts('simple', path='../simple', chains=chains, iter=400, 24 | seeds=seeds, init=inits.fn, 25 | control=list(refresh=-1, adapt_mass=FALSE), 26 | cores=1) 27 | expect_identical(unique(fit1$samples[400,,3]), -12.9319) 28 | fit2 <- sample_nuts('simple', path='../simple', chains=chains, iter=400, 29 | seeds=seeds, init=inits.fn, 30 | control=list(refresh=-1), 31 | cores=1) 32 | expect_identical(unique(fit2$samples[400,,3]), -13.2107) 33 | fit3 <- sample_nuts('simple', path='../simple', chains=chains, iter=400, 34 | seeds=seeds, init=inits.fn, 35 | control=list(refresh=-1, adapt_mass_dense=TRUE), 36 | cores=1) 37 | # expect_identical(unique(fit3$samples[400,,3]), -14.2902) 38 | ## Next three initialize from MLE, need to rerun model to get these 39 | fit4 <- sample_nuts('simple', path='../simple', chains=chains, iter=400, 40 | seeds=seeds, init=inits.fn, 41 | skip_optimization=FALSE, 42 | control=list(refresh=-1, metric='mle'), 43 | cores=1) 44 | expect_identical(unique(fit4$samples[400,,3]), -12.1684) 45 | fit5 <- sample_nuts('simple', path='../simple', chains=chains, iter=400, 46 | seeds=seeds, init=inits.fn, 47 | control=list(refresh=-1, metric='mle', adapt_mass=TRUE), 48 | cores=1) 49 | expect_identical(unique(fit5$samples[400,,3]), -12.2534) 50 | fit6 <- sample_nuts('simple', path='../simple', chains=chains, iter=400, 51 | seeds=seeds, init=inits.fn, 52 | control=list(refresh=-1, metric='mle', adapt_mass_dense=TRUE), 53 | cores=1) 54 | expect_identical(unique(fit6$samples[400,,3]), -12.4441) 55 | ## In addition test passing a user matrix, here unit diag 56 | fit7 <- suppressWarnings(sample_nuts('simple', path='../simple', chains=chains, iter=400, 57 | seeds=seeds, init=inits.fn, 58 | control=list(refresh=-1, metric=diag(2)), 59 | cores=1)) 60 | expect_identical(unique(fit7$samples[400,,3]), -12.9319) 61 | 
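## fit8 and fit9 repeat the user-supplied unit-diagonal metric, but with
## diagonal and dense mass-matrix adaptation turned on, respectively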
fit8 <- suppressWarnings(sample_nuts('simple', path='../simple', chains=chains, iter=400, 62 | seeds=seeds, init=inits.fn, 63 | control=list(refresh=-1, metric=diag(2), adapt_mass=TRUE), 64 | cores=1)) 65 | expect_identical(unique(fit8$samples[400,,3]), -13.2107) 66 | fit9 <- suppressWarnings(sample_nuts('simple', path='../simple', chains=chains, iter=400, 67 | seeds=seeds, init=inits.fn, 68 | control=list(refresh=-1, metric=diag(2), adapt_mass_dense=TRUE), 69 | cores=1)) 70 | expect_identical(unique(fit9$samples[400,,3]), -14.2902) 71 | ## All of these test might fail if changes to the adaptation 72 | ## schemes (stepsize or mass matrix) are done in the ADMB 73 | ## source. So one last tests which uses no adaptation so should 74 | ## be consistent between ADMB versions. Also need to reoptimize 75 | ## since I overwrite the admodel.cov file above 76 | fit10 <- sample_nuts('simple', path='../simple', chains=chains, iter=400, 77 | seeds=seeds, init=inits.fn, 78 | skip_optimization = FALSE, 79 | control=list(refresh=-1, metric='mle', stepsize=.1), 80 | cores=1) 81 | expect_identical(unique(fit10$samples[400,,3]), -13.6047) 82 | 83 | }) 84 | -------------------------------------------------------------------------------- /tests/testthat/test-sample_admb.R: -------------------------------------------------------------------------------- 1 | 2 | ## Note these were tested with ADMB 12.2 on 8/3/2020 3 | test_that("simple example works", { 4 | skip_on_cran() 5 | inits.fn <- function() list(c(0,0)) 6 | fit <- sample_rwm('simple', path='../simple', chains=1, 7 | seeds=1, init=inits.fn, 8 | skip_optimization=FALSE, 9 | control=list(refresh=-1), skip_monitor=TRUE) 10 | expect_known_output(extract_samples(fit)[1000,], 11 | file='_expect_simple_rwm') 12 | fit <- sample_rwm('simple', path='../simple', chains=1, 13 | seeds=1, init=inits.fn, 14 | skip_optimization=FALSE, 15 | control=list(refresh=-1), skip_monitor=TRUE) 16 | expect_known_output(extract_samples(fit)[1000,], 17 | file='_expect_simple_rwm') 18 | fit <- sample_nuts('simple', path='../simple', chains=1, 19 | seeds=1, init=inits.fn, 20 | skip_optimization=FALSE, 21 | control=list(refresh=-1), skip_monitor=TRUE) 22 | expect_known_output(extract_samples(fit)[1000,], 23 | file='_expect_nuts') 24 | fit <- sample_nuts('simple', path='../simple', chains=1, 25 | seeds=1, init=inits.fn, 26 | control=list(metric='mle', refresh=-1), 27 | skip_monitor = TRUE) 28 | expect_known_output(extract_samples(fit)[1000,], 29 | file='_expect_nuts_mle') 30 | fit <- sample_nuts('simple', path='../simple', chains=1, 31 | seeds=1, init=inits.fn, 32 | control=list(metric='mle', refresh=-1), 33 | skip_monitor = TRUE) 34 | expect_known_output(extract_samples(fit)[1000,], 35 | file='_expect_nuts_mle') 36 | }) 37 | 38 | test_that("mceval works",{ 39 | skip_on_cran() 40 | inits.fn <- function() list(c(0,0)) 41 | fit <- sample_nuts('simple', path='../simple', chains=1, 42 | seeds=1, init=inits.fn, 43 | control=list(metric='mle', refresh=-1), 44 | skip_monitor = TRUE, 45 | mceval=TRUE) 46 | }) 47 | 48 | test_that("parallel works",{ 49 | skip_on_cran() 50 | message("Starting parallel tests") 51 | inits.fn <- function() list(c(0,0)) 52 | fit <- sample_nuts('simple', path='../simple', chains=3, 53 | seeds=1:3, init=inits.fn, iter=1000, 54 | control=list(refresh=-1), 55 | skip_monitor = TRUE) 56 | ## expect_equal(extract_samples(fit)[1500,2], 3.483071) 57 | fit <- sample_rwm('simple', path='../simple', chains=3, 58 | seeds=1:3, init=inits.fn, iter=1000, 59 | control=list(refresh=-1), 
60 | skip_monitor = TRUE) 61 | }) 62 | 63 | 64 | test_that("warnings and errors in sample_nuts and sample_rwm",{ 65 | skip_on_cran() 66 | inits <- function() list(1,1) 67 | test <- expect_warning(sample_nuts('simple', path='../simple', 68 | iter=1000, init=inits, 69 | extra.args='-test', 70 | control=list(refresh=-1), 71 | chains=1, warmup=500), 72 | regexp='extra.args is deprecated') 73 | test <- expect_warning(sample_rwm('simple', path='../simple', 74 | iter=1000, init=inits, 75 | extra.args='-test', 76 | control=list(refresh=-1), 77 | chains=1, warmup=500), 78 | regexp='extra.args is deprecated') 79 | test <- expect_warning(sample_nuts('simple', path='../simple', 80 | iter=1000, init=inits, 81 | parallel=TRUE, 82 | control=list(refresh=-1), 83 | chains=1, warmup=500), 84 | regexp='parallel is deprecated') 85 | test <- expect_warning(sample_rwm('simple', path='../simple', 86 | iter=1000, init=inits, 87 | parallel=TRUE, 88 | control=list(refresh=-1), 89 | chains=1, warmup=500), 90 | regexp='parallel is deprecated') 91 | test <- expect_warning(sample_rwm('simple', path='../simple', 92 | iter=1000, init=inits, 93 | control=list(refresh=-1, metric='mle'), 94 | chains=1, warmup=500), 95 | regexp='Only refresh control argument is used') 96 | test <- expect_warning(sample_nuts('simple', path='../simple', 97 | iter=1000, init=inits, 98 | chains=1, warmup=500, 99 | control=list(refresh=-1, metric=diag(2))), 100 | regexp='admodel.cov overwritten') 101 | test <- expect_warning(sample_rwm('simple', path='../simple', 102 | iter=1000, init=inits, 103 | parallel=TRUE, 104 | control=list(refresh=-1), 105 | chains=1, warmup=500), 106 | regexp='parallel is deprecated') 107 | test <- expect_warning(sample_admb('simple', path='../simple', 108 | iter=1000, init=inits, 109 | control=list(refresh=-1), 110 | chains=1, warmup=500), 111 | regexp='sample_admb is deprecated') 112 | test <- expect_error(sample_nuts('simple', path='../simple', 113 | iter=1000, init=inits, 114 | control=list(refresh=-1), 115 | chains=1, warmup=2000), 116 | regexp='warmup <= iter') 117 | test <- expect_error(sample_nuts('simple', path='../simple', 118 | iter=1000, init=inits, algorithm='NUTS', 119 | chains=1, warmup=2000), 120 | regexp='unused argument \\(algorithm') 121 | test <- expect_error(sample_rwm('simple', path='../simple', 122 | iter=1000, init=inits, algorithm='RWM', 123 | chains=1, warmup=2000), 124 | regexp='unused argument \\(algorithm') 125 | test <- expect_error(sample_nuts('simple', path='../simple', 126 | iter=1000, init=inits, 127 | chains=-1), 128 | regexp='chains >= 1') 129 | test <- expect_error(sample_nuts('simple', path='../simple55', 130 | iter=1000, init=inits, 131 | chains=1), 132 | regexp="does not exist. 
Check argument \'path\'") 133 | test <- expect_error(sample_nuts('simple3', path='../simple', 134 | iter=1000, init=inits, 135 | chains=1), 136 | regexp="not found in specified folder") 137 | test <- expect_error(sample_nuts('simple', path='../simple', 138 | iter=1000, init=inits, 139 | chains=1, 140 | control=list(adapt_init_buffer=-20)), 141 | regexp='NUTS failed to run') 142 | test <- expect_error(sample_nuts('simple', path='../simple', 143 | iter=1000, init=inits, 144 | chains=3, 145 | control=list(adapt_delta=1.5)), 146 | regexp='NUTS failed to run') 147 | test <- expect_error(sample_nuts('simple', path='../simple', 148 | iter=1000, init=inits, 149 | chains=3, cores=1, 150 | control=list(adapt_delta=1.5)), 151 | regexp='NUTS failed to run') 152 | test <- expect_error(sample_rwm('simple', path='../simple', 153 | iter=1000, init=inits, 154 | chains=1, 155 | admb_args='-refresh -2'), 156 | regexp='RWM failed to run') 157 | test <- expect_error(sample_rwm('simple', path='../simple', 158 | iter=1000, init=inits, 159 | chains=1, 160 | control=list(refresh='a')), 161 | regexp='Invalid refresh value') 162 | test <- expect_error(sample_rwm('simple', path='../simple', 163 | iter=1000, init=inits, 164 | chains=3, seeds=1), 165 | regexp='Length of seeds must match chains') 166 | }) 167 | 168 | test_that("verbose option works", { 169 | skip_on_cran() 170 | inits.fn <- function() list(c(0,0)) 171 | message("Should be no console output between here....") 172 | message("Starting verbose NUTS in parallel..") 173 | fit <- sample_nuts('simple', path='../simple', chains=3, 174 | seeds=1:3, init=inits.fn, iter=800, 175 | skip_monitor = TRUE, verbose=FALSE) 176 | message("Starting verbose NUTS in serial..") 177 | fit <- sample_nuts('simple', path='../simple', chains=1, 178 | seeds=1, init=inits.fn, iter=800, 179 | skip_monitor = TRUE, verbose=FALSE) 180 | message("Starting verbose RWM in parallel..") 181 | fit <- sample_rwm('simple', path='../simple', chains=3, 182 | seeds=1:3, init=inits.fn, iter=800, 183 | skip_monitor = TRUE, verbose=FALSE) 184 | message("Starting verbose RWM in serial..") 185 | fit <- sample_rwm('simple', path='../simple', chains=1, 186 | seeds=1, init=inits.fn, iter=800, 187 | skip_monitor = TRUE, verbose=FALSE) 188 | message("... and here") 189 | }) 190 | 191 | 192 | test_that("long file names work ok on Windows",{ 193 | skip_on_cran() 194 | inits.fn <- function() list(1,1) 195 | p <- '../simple_long_filename' 196 | m <- 'simple_long_filename' 197 | if(.Platform$OS.type=='windows'){ 198 | ## Should give warning 199 | test <- expect_warning(sample_nuts(m, path=p, chains=3, cores=1, 200 | seeds=1:3, init=inits.fn, iter=1000, 201 | control=list(refresh=-1), 202 | skip_monitor = TRUE), 203 | regexp='It appears a shortened') 204 | test <- expect_warning(sample_nuts(m, path=p, chains=3, cores=3, 205 | seeds=1:3, init=inits.fn, iter=1000, 206 | control=list(refresh=-1), 207 | skip_monitor = TRUE), 208 | regexp='It appears a shortened') 209 | } else { 210 | test <- sample_nuts(m, path=p, chains=3, cores=1, 211 | seeds=1:3, init=inits.fn, iter=1000, 212 | control=list(refresh=-1), 213 | skip_monitor = TRUE) 214 | } 215 | }) 216 | -------------------------------------------------------------------------------- /vignettes/adnuts.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "No-U-turn sampling for ADMB models" 3 | author: "Cole C. 
Monnahan" 4 | date: "`r Sys.Date()`" 5 | bibliography: refs.bib 6 | output: 7 | rmarkdown::html_vignette: 8 | toc: true 9 | vignette: > 10 | %\VignetteIndexEntry{No-U-turn sampling for ADMB models} 11 | %\VignetteEngine{knitr::rmarkdown} 12 | %\VignetteEncoding{UTF-8} 13 | --- 14 | 15 | ## Summary 16 | 17 | `adnuts` (pronounced A-D NUTS like A-D MB) main purpose is to provide a 18 | wrapper for performing Bayesian analyses using the no-U-turn (NUTS) 19 | algorithm [@hoffman2014] for ADMB models [@fournier2012]. The ADMB model 20 | itself contains the algorithm code, but this package provides the user a 21 | convenient environment to run and diagnose Markov chains, and make 22 | inference. In addition, NUTS capabilities are provided for any posterior 23 | whose log-density and log-density gradient can be written as R 24 | functions. This includes TMB models [@kristensen2016] but also other 25 | special cases, although TMB users should prefer the package `tmbstan`. 26 | This package aims to give ADMB models similar functionality to the software 27 | Stan and `rstan` [@carpenter2017; @stan2017]. 28 | 29 | Key features of the packages: 30 | 31 | - Run no-U-turn sampler (NUTS) or random walk Metropolis (RWM) MCMC chains 32 | from within R using the `sample_nuts` and 'sample_rwm' functions. 33 | - Parallel execution with automatic merging of chains and linking to other 34 | R packages provides a smooth, efficient workflow for ADMB users. 35 | - Adaptation of the NUTS stepsize is automatically done during the warmup phase. 36 | - The mass matrix options are: diagonal or dense adaptation during warmup, 37 | the estimated covariance (from admodel.cov file), or an arbitrary dense 38 | matrix can be passed from R. 39 | - Easy diagnostic checking using functionality provided by packages `rstan` 40 | and `shinystan`. 41 | - A 'duration' argument to stop the chains running after a specified period 42 | of time (e.g., 2 hours), returning whatever samples were generated in 43 | that period. 44 | - When running multiple chains, whether in parallel or serial, samples are 45 | merged and written to the '.psv' file. Thus, executing the model in the 46 | '-mceval' phase uses all chains, including with an 'mceval' 47 | argument dictating whether to run in this phase when the sampling is 48 | finished. 49 | - A modified pairs plot designed to help facilitate comparison between MLE 50 | estimates and covariances, and the posterior samples. 51 | 52 | Typically, for well-designed models, NUTS works efficiently with default 53 | settings and no user intervention. However, in some cases you may need to 54 | modify the settings. See below for a brief description of NUTS and how you 55 | can modify its behavior and when needed. Guidance and performance 56 | specifically designed for fisheries stock assessment is given in 57 | [@monnahan2019]. 58 | 59 | ## Sampling for ADMB models 60 | ### Setting up the model 61 | 62 | In general very little is needed to prepare an ADMB model for use with 63 | `adnuts`. As with any model, the user must build the template file to 64 | return a negative log likelihood value for given data and parameters. The 65 | user is responsible for ensuring a valid and reasonable model is 66 | specified. Typical model building practices such as building complexity 67 | slowly and validating with simulated data are strongly encouraged. 
Users 68 | must manually specify priors; otherwise there are implicit improper uniform 69 | distributions for unbounded parameters, and proper uniform distributions 70 | for bounded parameters (see below for more details). 71 | 72 | The ADMB model is an executable file that contains the code necessary for 73 | NUTS and RWM. When run, it typically has various input files and generates 74 | many output files. As such, **I strongly recommend putting the model into a 75 | subdirectory below the directory containing the R script** (passed as the 76 | `path` argument). **This is required for parallel execution** but is 77 | recommended in general. 78 | 79 | ### Sampling with sample_nuts and sample_rwm 80 | 81 | Sampling for ADMB models is accomplished with the R functions `sample_nuts` 82 | and `sample_rwm`, which replace the deprecated function `sample_admb`. 83 | These functions are designed to be similar to Stan's `stan` function in 84 | naming conventions and behavior. Some differences are necessary, such as 85 | passing a model name and path. The two MCMC algorithms, NUTS and RWM, are 86 | built into the ADMB source code, so these are just wrapper functions. Also 87 | note that these functions do not do optimization or variational inference. 88 | 89 | The default behavior for NUTS is to run 3 chains with 2000 iterations, with 90 | a warmup (i.e., burn-in) phase during the first 1000. There is no external 91 | thinning (in a sense it is done automatically within the algorithm), and 92 | thus the `-mcsave` option does not work with NUTS by design. These defaults 93 | work well in the case where diagonal mass matrix adaptation is done (e.g., 94 | hierarchical models). This adaptation often requires a long warmup 95 | period. For models starting with a good mass matrix (e.g., from the MLE 96 | covariance or a previous run), a much shorter warmup period can be used. For 97 | instance `warmup=200` and `iter=800` with multiple chains may work 98 | sufficiently well during model development (a short example call is sketched below). Users of the RWM algorithm will be 99 | accustomed to running millions of iterations with a high thinning 100 | rate. **Do not do that!** The key thing to understand is that NUTS runs as 101 | long as it needs to get nearly independent samples. Consult the Stan 102 | documentation for advice on a workflow for NUTS models (e.g., [this 103 | guide](https://mc-stan.org/users/documentation/case-studies/rstan_workflow.html)). 104 | 105 | 106 | For poorly-constructed or over-parameterized models, the NUTS algorithm 107 | can be catastrophically slow. This is likely common in many 108 | existing fisheries stock assessment models. In these cases it can be very 109 | informative to run the RWM algorithm with `sample_rwm` because it often 110 | provides fast feedback from which the user can determine the cause of poor 111 | mixing (see [@monnahan2019]). Consult the ADMB documentation for more 112 | information on a workflow with these samplers. `adnuts` provides no new 113 | options for RWM compared to the command line from previous ADMB versions 114 | (besides better console output), but the parallel execution 115 | and integration with MCMC diagnostic tools provided by `adnuts` should be 116 | sufficiently appealing to users. Once a model is more appropriately 117 | parameterized, NUTS should be used. Further work on optimal 118 | parameterizations for fisheries models is needed. This vignette only covers 119 | the functionality of the package.
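To make the calling convention above concrete, here is a minimal sketch of a NUTS and an RWM run during model development with a shortened warmup, as suggested above. The model name `'simple'`, its `path` subdirectory, and the two-parameter initial-value function are placeholders for your own executable and parameters.

```` {r, eval=FALSE, echo=TRUE}
library(adnuts)
## Function returning dispersed random starting values, here for a
## hypothetical model with two active parameters
inits <- function() list(rnorm(2))
## Shortened warmup during model development; the defaults are
## chains=3, iter=2000, and warmup=1000
fit <- sample_nuts('simple', path='simple', chains=3, seeds=1:3,
                   iter=800, warmup=200, init=inits)
## The RWM sampler uses the same interface
fit.rwm <- sample_rwm('simple', path='simple', chains=3, seeds=1:3,
                      iter=2000, init=inits)
````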
120 | 121 | One important overlap with Stan is the `control` argument, which 122 | allows the user to control the NUTS algorithm: 123 | 124 | - Metric or mass matrix (adapted diagonal or dense matrix) [`metric`] 125 | 126 | - Maximum treedepth for trajectories [`max_treedepth`] 127 | 128 | - Target acceptance rate [`adapt_delta`] 129 | 130 | - Step size, which if NULL (recommended) is adapted [`stepsize`] 131 | 132 | - Mass matrix adaptation tuning parameters (not recommended to change) 133 | [`adapt_init_buffer`, `adapt_term_buffer`, `adapt_window`] 134 | 135 | 136 | These functions return a list (of class `adfit`) whose elements mimic some 137 | of those returned by `stan`, so the fit can be plugged into some `rstan` 138 | tools (see below). 139 | 140 | ### mceval phase and posterior outputs 141 | 142 | No special output files are required to run the model with `adnuts`. In 143 | addition, the user can still use the `mceval_phase` flag to run specific 144 | code on saved samples. ADMB saves posterior draws to a .psv file. When 145 | executing the model with `-mceval` it will loop through those samples and 146 | execute the procedure section with the flag `mceval_phase()` evaluating 147 | to 1. This behavior is unchanged with `adnuts`, but it is complicated when 148 | running multiple chains because there will be multiple .psv files. Thus, 149 | `sample_nuts` combines chains in R and writes a single .psv file containing 150 | samples from all chains (after warmup and thinned samples are 151 | discarded). This also works in parallel (see below). Consequently, the user 152 | only has to set `mceval=TRUE`, or run `-mceval` from the command line after 153 | `adnuts` finishes sampling, in order to generate the desired output 154 | files. 155 | 156 | Previously, ADMB required an estimated covariance matrix to use the 157 | random walk Metropolis (RWM) algorithm. Thus, models without a valid 158 | mode, or with a Hessian that could not be inverted, could not use MCMC 159 | methods. With `adnuts` neither an MLE nor a covariance estimate is needed 160 | because NUTS adapts these tuning parameters automatically (see 161 | below). However, if a mode exists I recommend estimating the model normally 162 | before running MCMC. 163 | 164 | `sample_nuts` and `sample_rwm` are strongly recommended for running the 165 | MCMC. However, they are convenience functions that run the chains from the 166 | command line. The list returned contains an element `cmd` which shows the 167 | user the exact command used to call the ADMB model from the command 168 | line. The command line can also be useful for quick tests. 169 | 170 | 171 | ### Bounds & Priors 172 | 173 | Parameter priors must be specified manually in the ADMB template file. For 174 | instance, a standard normal prior on parameter `B` would be added to 175 | the objective as `f+=dnorm(B,0.0,1.0)`. Note that contributed statistical 176 | functions in ADMB, such as `dnorm`, return the negative log density and 177 | thus must be added to the objective function. 178 | 179 | Parameter transformations are limited to box constraints within the ADMB 180 | template (e.g., `init_bounded_number`). When used, this puts an implicit 181 | uniform prior on the parameter over the bounds. Implicit improper uniform 182 | priors occur when an unbounded parameter has no explicit prior.
The 183 | analysis can proceed if the data contain information to update the prior, 184 | but if not the chains will wander between negative and positive infinity 185 | and fail diagnostic checks. 186 | 187 | Variance parameters are common and require bounds of (0, Inf). To implement 188 | such a bound in ADMB, specify the model parameter as the log of the 189 | standard deviation, and then in the template exponentiate it and use that 190 | throughout. Because of this parameter transformation, the Jacobian 191 | adjustment is needed. This can be accomplished by subtracting the parameter 192 | in log space from the negative log-likelihood. For instance, use parameter 193 | `log_sd` in the template, then let `sigma=exp(log_sd)`, and update the 194 | objective function with the Jacobian: `f-=log_sd;`. The recommended 195 | half-normal prior for standard deviations can then be added as, e.g., 196 | `f+=dnorm(sigma,0,2)`. This also holds for any positively constrained 197 | parameters, of which there are many in ecology and fisheries: somatic growth 198 | rates, maximum length, unfished recruits, etc. 199 | 200 | ### Initializing chains 201 | 202 | It is generally recommended to initialize multiple chains from "dispersed" 203 | values relative to the typical set of the posterior. The sampling functions 204 | can accept a list of lists (one for each chain), or a function which returns 205 | a list of parameters (e.g., `init <- function() list(a=rnorm(1), 206 | eta=rnorm(10))`). If no initial values are specified (`init=NULL`), then ADMB 207 | will attempt to read in the optimized values stored in the admodel.hes 208 | file. Typically these are the MLE (really MPD) values. Starting all chains 209 | from the mode is discouraged because it makes diagnostic tools like Rhat 210 | (see below) inefficient. From [this 211 | discussion](https://discourse.mc-stan.org/t/overdispersed-initial-values-general-questions/3966): 212 | "...Rhat is ratio of overestimate and underestimate of variance, but that 213 | overestimate is overestimate only if the starting points are diffuse." 214 | Consequently I **strongly encourage creating a function to generate 215 | reasonable random initial values**. 216 | 217 | If your model has inactive parameters (those with negative phases) they are 218 | completely ignored in the MCMC analysis (sampling, inputs, outputs, etc.), 219 | so the initial values are only for active parameters. This means you cannot 220 | read in the .par file and use it for initial values if there are inactive 221 | parameters. 222 | 223 | ### Parallel sampling 224 | 225 | Parallel sampling is done by default as of version 1.1.0. It is done by 226 | parallelizing multiple chains, not calculations within a chain. The 227 | `snowfall` package is used. One chain is run per core by making 228 | temporary copies of the directory `path` (which contains the model 229 | executable, data inputs, and any other required files). Then separate R 230 | sessions do the sampling, and when done the results are merged together 231 | and the temporary folders are deleted. If errors occur, these temporary folders 232 | may need to be deleted manually. The default behavior is to use one fewer 233 | core than is available on the system, but the user can override 234 | this; setting `cores=1` runs the chains in serial, which can 235 | be useful for debugging purposes.
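Putting the pieces of this section together, a minimal sketch of combining dispersed initial values with parallel sampling might look like the following; the model name, path, and the parameters `a` and `eta` are hypothetical placeholders.

```` {r, eval=FALSE, echo=TRUE}
## Random, dispersed starting values for the active parameters of a
## hypothetical model with a scalar 'a' and a vector 'eta' of length 10
init <- function() list(a=rnorm(1), eta=rnorm(10))
## One temporary copy of 'path' is made per core; cores=1 runs in serial
fit <- sample_nuts('mymodel', path='mymodel', chains=4, cores=4,
                   seeds=1:4, init=init)
````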
236 | 237 | ## Diagnostics and plotting results 238 | 239 | ### Diagnosing MCMC chains 240 | 241 | MCMC diagnostics refers to checking for signs of non-convergence of the 242 | Markov chains before using them for inference, and is a key step in 243 | any Bayesian analysis. There is a large literature related to this, and I 244 | refer unfamiliar readers to the Stan manual [chapter on 245 | convergence](https://mc-stan.org/docs/2_23/reference-manual/convergence.html). 246 | Note that the user is entirely responsible for this component of the 247 | analysis; `adnuts` only provides tools to help with it. 248 | 249 | The `rstan` package provides improved calculations of effective 250 | sample size and $\hat{R}$ statistics via the function 251 | `rstan::monitor`. This function is automatically run on completed fits and 252 | its results are stored in the output. For very large models (either many parameters or many 253 | iterations) this operation can be slow, and thus a user may disable it with 254 | the argument `skip_monitor`; however, this situation should be rare because these 255 | quantities should always be checked. 256 | 257 | I use a hierarchical mark-recapture model of swallows to demonstrate 258 | functionality, taken from the examples in [@monnahan2017] and read in as an RDS 259 | file from a previous run. 260 | 261 | ```` {r, eval=TRUE, echo=FALSE} 262 | library(adnuts) 263 | ```` 264 | 265 | The diagnostic information can be directly accessed via the fitted 266 | object `fit`: 267 | 268 | ````{r} 269 | fit <- readRDS('fit.RDS') 270 | print(fit) 271 | summary(fit$monitor$n_eff) 272 | summary(fit$monitor$Rhat) 273 | ```` 274 | 275 | The Rhat values are sufficiently close to 1, but the minimum 276 | effective sample size is 71, which is too small for inference, so 277 | longer chains should be run. Both the model parameters 278 | and the NUTS sampler parameters can be extracted as a data 279 | frame. 280 | 281 | ````{r} 282 | post <- extract_samples(fit) 283 | str(post[,1:5]) 284 | sp <- extract_sampler_params(fit) 285 | str(sp) 286 | ```` 287 | 288 | These functions have options for whether to include the warmup and 289 | log-posterior (lp) column, and whether to return the unbounded 290 | parameters. The latter can be useful for debugging parameters 291 | with high density near the bounds, or poor mixing when using RWM 292 | chains. 293 | 294 | The object returned by `sample_nuts` and `sample_rwm` can also be plugged 295 | directly into the ShinyStan interactive tool environment by calling the 296 | wrapper function `launch_shinyadmb(fit)` after loading the `shinystan` 297 | library. See the ShinyStan documentation for more information on this. It is 298 | designed to provide NUTS-specific diagnostics, but also serves as a more 299 | general tool for MCMC diagnostics and thus is beneficial for RWM chains as 300 | well. If desired, the output samples can be converted into `mcmc` objects 301 | for use with the CODA R package. For instance, CODA traceplots can be 302 | accessed like this: 303 | 304 | ```` {r, eval=FALSE, echo=TRUE} 305 | post <- extract_samples(fit, as.list=TRUE) 306 | postlist <- coda::mcmc.list(lapply(post, coda::mcmc)) 307 | coda::traceplot(postlist) 308 | ```` 309 | 310 | Or into `bayesplot` with a little massaging. Future versions of 311 | adnuts may link these more directly.
But for now it can be done 312 | manually such as with the energy diagnostic: 313 | 314 | ```` {r, eval=FALSE, echo=TRUE} 315 | library(bayesplot) 316 | library(dplyr) 317 | library(tidyr) 318 | library(ggplot2) 319 | color_scheme_set("red") 320 | np <- extract_sampler_params(fit) %>% 321 | pivot_longer(-c(chain, iteration), names_to='Parameter', values_to='Value') %>% 322 | select(Iteration=iteration, Parameter, Value, Chain=chain) %>% 323 | mutate(Parameter=factor(Parameter), 324 | Iteration=as.integer(Iteration), 325 | Chain=as.integer(Chain)) %>% as.data.frame() 326 | mcmc_nuts_energy(np) + ggtitle("NUTS Energy Diagnostic") + theme_minimal() 327 | ```` 328 | 329 | ## Plotting output 330 | 331 | A convenience function `plot_marginals` is provided to quickly plot 332 | marginal posterior distributions with options to overlay the 333 | asymptotic estimates. 334 | 335 | ```` {r fig1, fig.width=6, fig.height=4.5} 336 | plot_marginals(fit, pars=1:9) 337 | ```` 338 | 339 | Many ADMB models have well defined modes and estimated covariance matrices 340 | used to quantify uncertainty. The `pairs_admb` function can be used to plot 341 | pairwise posterior draws vs the MLE estimate and confidence ellipses. Major 342 | discrepancies between the two are cause for concern. As such, this can be a 343 | good diagnostic tool for both frequentist and Bayesian inference. In 344 | particular, it often is informative to plot the slowest mixing parameters 345 | or key ones by name as follows. 346 | 347 | ```` {r fig2, fig.width=6, fig.height=4.5} 348 | pairs_admb(fit, pars=1:3, order='slow') 349 | pairs_admb(fit, pars=c('sigmaphi', 'sigmap', 'sigmayearphi')) 350 | ```` 351 | 352 | The last plot shows the three hypervariances of this hierarchical 353 | model. The diagonal shows traces of two chains (colors), where alternative 354 | options for argument `diag` are 'trace' (default), 'hist' for histogram, 355 | and 'acf' for the autocorrelation function. The remaining plots show 356 | pairwise posterior samples (black points) for the remaining 357 | parameters. Divergences are shown as green points if they exist (none do 358 | here). A red point shows the posterior mode and an ellipse shows the 95% 359 | bivariate confidence region, taken from the inverse Hessian calculated by 360 | ADMB. Since the log-posterior (lp__) is not a parameter there is no 361 | ellipse. Note that the posterior samples and asymptotic approximations for 362 | the two fixed effects `a` match closely, whereas for the `sigmaphi` 363 | hypervariance parameter there is a notable mismatch. This mismatch is not 364 | surprising as estimates from optimizing hierarchical models are not 365 | reliable. Since adaptive NUTS was used for sampling, the information 366 | contained in red was not used and is only shown for illustration. The 367 | option `metric='mle'` would use the inverse Hessian as a tuning parameter 368 | (see section on metric below). More options for plotting fits like these 369 | are available in the help file `?pairs_admb`. 370 | 371 | ## Mass matrix adaptation 372 | 373 | I assume the reader is familiar with the basics of the mass matrix and its 374 | effect on sampling with NUTS (if not, see section below). Note that the 375 | mass matrix represents the geometry of the posterior in untransformed 376 | (unbounded) space, not the parameter space defined by the user. This space 377 | is typically hidden from the user but nonetheless is important to recognize 378 | when thinking about the mass matrix. 
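Before adjusting the metric, it can also be useful to check how the sampler behaved in a previous fit. The sketch below summarizes divergences, tree depths, and step sizes from the sampler parameters; the column names (`divergent__`, `treedepth__`, `stepsize__`) are assumed to follow the Stan naming convention used by `extract_sampler_params`.

```` {r, eval=FALSE, echo=TRUE}
sp <- extract_sampler_params(fit)
## Proportion of saved iterations that diverged, by chain
tapply(sp$divergent__, sp$chain, mean)
## Tree depths; a pile-up at the maximum means trajectories were cut short
table(sp$treedepth__)
## Average step size per chain after adaptation
tapply(sp$stepsize__, sp$chain, mean)
````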
379 | 380 | ADMB has the capability to do both diagonal and dense (full matrix, as of 381 | version 12.2) estimation during warmup (adaptation). The mass matrix can 382 | likewise be initialized in two ways. The first is a unit diagonal, and the 383 | second is the "MLE" option, which more accurately is the covariance matrix 384 | calculated by inverting the Hessian at the maximum posterior density (the 385 | mode, often informally referred to as the MLE). I refer to this as 386 | $\Sigma$. As such there are 6 options for the mass matrix, summarized in 387 | the subsequent table. Note that options 3 and 6 were not available before 388 | `adnuts` version 1.1.0 and are only available for ADMB >= 389 | 12.2. Also note the differences in default behavior when running from 390 | `sample_nuts` vs. the command line. 391 | 392 | Note that dense estimation should be considered an experimental feature. 393 | This option is in Stan but is rarely used. Stan users almost always use 394 | option 2 below (the default in `adnuts`). As more models are fit this 395 | advice will evolve. For now this is my best guess: 396 | 397 | | Initial
matrix | Adaptation | adnuts | Command
line | Recommended usage | 398 | |----------------|-----------------|---------------|---------------------|----------------| 399 | | Unit | None | `adapt_mass=FALSE`| `-mcdiag` | Rarely if ever used | 400 | | Unit | Diagonal | (default) | `-mcdiag`
`-adapt_mass` | Use with minimal correlations, $\Sigma$ is unavailable, or $d$>1000. Often the best choice for hierarchical models | 401 | | Unit | Dense | `adapt_mass_dense=TRUE`| `-mcdiag`
`-adapt_mass_dense`| Use when strong correlations exist but $\Sigma$ is unavailable and $d$<500 | 402 | | $\Sigma$ | None | `metric='mle'` | (default) | When $\Sigma$ is good and $d$<1000 | 403 | | $\Sigma$ | Diagonal | `metric='mle'` `adapt_mass=TRUE` | `-adapt_mass` | When $\Sigma$ is OK and $d$>500 | 404 | | $\Sigma$ | Dense | `metric='mle'`
`adapt_mass_dense=TRUE` | `-adapt_mass_dense` | When $\Sigma$ is OK but $d$<500 | 405 | 406 | $d$ refers to the dimensionality (# of parameters), and this guidance is a 407 | very rough guess. The reason dimensionality matters is that there is a 408 | numerical cost to using a dense matrix over a diagonal one, and one that 409 | scales poorly with dimensionality. However, the more computationally 410 | expensive the model is (the prediction and log-density calculations), the 411 | smaller the relative cost of the dense calculations. Thus there is an interplay 412 | between the mass matrix form, dimensionality, model computational cost, and 413 | MCMC sampling efficiency. 414 | 415 | In addition to these options, an arbitrary matrix `M` can be passed via 416 | `metric=M`. This works by using R to overwrite the admodel.cov file so that 417 | when ADMB runs it reads the matrix in as if it were the estimated one. The 418 | file admodel_original.cov is copied in case the user wants to revert 419 | it. Probably the only realistic usage of this feature is when you have 420 | already run a pilot chain and want to rerun it for longer, and wish to use 421 | the samples to generate an estimated mass matrix. In this case use 422 | `M=fit$covar.est`, which is the estimate in unbounded space (see 423 | below). Note that if `M=diag(d)` it is equivalent to the first three rows, 424 | and if `M`=$\Sigma$ it is equivalent to the last three rows. A short sketch of this pilot-chain workflow is given at the end of this section. 425 | 426 | The following figure shows the step size of a single chain during warmup 427 | for these six options for a simple linear model, and demonstrates the 428 | general differences among them. 429 | 430 | ![Effects of mass matrix adaptation on step size adaptation](metric_adaptation.png){width=90%} 431 | 432 | The three which start with a unit diagonal matrix are apparent by their 433 | small initial step sizes, while the three which start with a dense matrix have 434 | larger ones. There is a clear shift in the step size at iteration 125, which is 435 | when the first mass matrix update happens (and with improved knowledge of 436 | the geometry the optimal step size changes). A second update happens later 437 | but is not apparent, indicating the first was sufficient. All fits with a 438 | dense matrix end with the same approximate step size, which is larger than 439 | any without it. Option 3 starts off diagonal but after updating to a dense 440 | matrix performs equivalently. For the diagonal options, option 1 does not 441 | update, while option 2 starts at a unit diagonal and, after updating the 442 | diagonal, performs as well as option 5, which starts from a good matrix. 443 | 444 | This is the behavior on a trivial model, but it is often hard to estimate a 445 | good dense mass matrix, especially in the first few phases of warmup with 446 | very few samples. In such cases the Cholesky decomposition of the estimated 447 | matrix may fail. Instead of crashing the run, I coded ADMB to do a 448 | diagonal estimate in this case and try a dense update at the next phase, 449 | repeating until warmup is over. Warnings are printed to the console when 450 | this happens. 451 | 452 | Which option to use in which situation is still an open question. Certainly 453 | for hierarchical models, where $\Sigma$ is not helpful or doesn't exist, 454 | option 2 is likely the best. For fisheries stock assessment models, which 455 | already rely on $\Sigma$, options 4-6 are worth exploring.
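As a sketch of the pilot-chain workflow mentioned above (the model name and run lengths are placeholders; `covar.est` is the estimated covariance in unbounded space stored in the fit):

```` {r, eval=FALSE, echo=TRUE}
## Short pilot run with the default diagonal adaptation
pilot <- sample_nuts('mymodel', path='mymodel', chains=3, iter=1000)
## Longer run reusing the pilot's estimated covariance as a dense metric
fit <- sample_nuts('mymodel', path='mymodel', chains=3, iter=2000,
                   control=list(metric=pilot$covar.est))
````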
456 | 457 | ## Sampling for TMB models 458 | 459 | Previous versions of `adnuts` included explicit 460 | demonstrations of use for TMB models. Now, however, the package `tmbstan` 461 | ([@monnahan2018]) has replaced this functionality and users should 462 | exclusively use that package except for very rare cases. Consequently I 463 | deleted the TMB section from this vignette, but older versions on CRAN may 464 | be helpful for those looking for guidance. 465 | 466 | ## The no-U-turn sampler implementation 467 | 468 | ### Brief review of Hamiltonian Monte Carlo 469 | 470 | Hamiltonian Monte Carlo is a powerful family of MCMC algorithms that use 471 | gradients to propose efficient transitions. We review the basics here but 472 | refer interested readers to 473 | [@neal2011; @betancourt2017intro; @monnahan2017]. Instead of randomly 474 | generating a proposed point to be rejected/accepted, HMC generates 475 | *trajectories* from which a point is chosen to be rejected/accepted. These 476 | trajectories use gradient information, and an analogy of a ball rolling on a 477 | surface is often used. These trajectories are efficient when they can 478 | transition to nearly anywhere on the posterior (in stark contrast with random 479 | walk algorithms). However, to do this they need to be well tuned. Generally 480 | there are three aspects of the algorithm that need to be tuned. 481 | 482 | 1. The step size: how big the steps between points on a single 483 | trajectory are. Bigger steps mean fewer calculations (and thus faster sampling), 484 | but at the cost of rejecting more points. 485 | 2. The trajectory length: how long a trajectory should be depends 486 | on many factors, and is not constant over the posterior. If it is too 487 | short, HMC resembles inefficient random walk behavior. If it is too 488 | long, computations are wasted. 489 | 3. The "mass matrix" used: this matrix tells the algorithm about the 490 | global shape of the posterior so that it can generate better 491 | trajectories. When large discrepancies between marginal variances 492 | exist, the trajectories will be less efficient (e.g., one parameter has 493 | a marginal variance of 1, and another a marginal variance of 1000). 494 | 495 | The no-U-turn sampler is a powerful sampler because it automates the tuning 496 | of the first two of these aspects [@hoffman2014]. During warmup it tunes 497 | the step size to a target acceptance rate (default of 0.8), which has been 498 | shown to be optimal [@betancourt2014]. Most importantly, though, it 499 | uses a recursive tree-building algorithm to continue doubling the 500 | trajectory until a "U-turn" occurs, meaning that going any further would be 501 | computationally wasteful. Thus, trajectory lengths are automatically 502 | optimal. 503 | 504 | The original algorithm was implemented in the Bayesian statistical 505 | software Stan [@carpenter2017; @stan2017]. In addition to the automation of 506 | NUTS, Stan provides a scheme for adapting the step size during the warmup 507 | phase. Estimated diagonal mass matrices correct for global differences in 508 | scale, but not correlations. A dense matrix can also be adapted, and 509 | corrects for global correlations, but comes at a higher computational 510 | cost. Typically a diagonal matrix is best and thus is the default in both Stan 511 | and `adnuts`.
512 | 513 | These three extensions lead to efficient HMC sampling with little to no 514 | user intervention for a wide class of statistical models, including 515 | hierarchical ones [@monnahan2017]. Since publication, further developments 516 | have been made in both the theory and practice of HMC. For instance, 517 | Stan now includes an update called "exhaustive" HMC [@betancourt2016] that 518 | more efficiently samples from the points in a trajectory. 519 | 520 | ### Algorithm implementation details 521 | 522 | For both ADMB and TMB models, `adnuts` uses the original algorithm 523 | presented in [@hoffman2014]. However, it also uses a mass matrix 524 | adaptation scheme similar to that used in Stan. 525 | 526 | The algorithm is initiated with a unit diagonal mass matrix. During 527 | the first 50 iterations only the step size is adapted. After the next 75 528 | iterations an estimated variance for each parameter (in untransformed 529 | space) is calculated and used as the new mass matrix. The next update 530 | occurs after twice as many iterations as the previous update. This process 531 | repeats until the last 25 iterations of the warmup phase, during which 532 | the mass matrix is held constant and only the step size is adapted. See the Stan 533 | manual [@stan2017] for more details. The step size is adapted during all 534 | warmup iterations. No information is currently returned about mass matrix 535 | adaptation. 536 | 537 | Once the warmup phase is over, no adaptation is done. Because of the 538 | adaptation, the warmup samples are not valid samples from the posterior and 539 | *must* be discarded and not used for inference. 540 | 541 | ### User intervention 542 | 543 | In some cases you will need to adjust the behavior of the NUTS algorithm to 544 | improve sampling. Here I review the three options for intervention (step 545 | size, trajectory length, mass matrix) that a user can take, and when and 546 | why they might need to. 547 | 548 | A maximum tree depth argument is used to prevent excessively long 549 | trajectories (which can occur with poorly specified models). This is set to 550 | 12 (i.e., a length of $2^{12}=4096$ steps) by default, which typically is 551 | long enough that a U-turn would occur. However, in some cases a model may 552 | need to make longer trajectories to maintain efficient sampling. In this 553 | case you will get warnings about exceeding the maximum tree depth. Rerun the 554 | model with `control=list(max_treedepth=14)` or higher, as needed. 555 | 556 | Recall that a single NUTS trajectory consists of a set of posterior 557 | samples, resulting from a numerical approximation to a path along the 558 | posterior. The step size controls how closely the approximation follows the 559 | true path. When the step size is too large and the trajectory encounters extreme curvature 560 | in the posterior, a divergence will occur. Divergences should not be ignored 561 | because they could lead to bias in inference. Instead, you can force the algorithm 562 | to take smaller step sizes by increasing the target acceptance rate. Thus, 563 | when you get warnings about divergences, rerun the model with 564 | `control=list(adapt_delta=.9)` or higher, as necessary. If the divergences 565 | do not go away, investigate the cause and try to eliminate the extreme 566 | curvature from the model, for example with a reparameterization 567 | [@stan2017; @monnahan2017]. 568 | 569 | If there are extreme global correlations in your model, NUTS will be 570 | inefficient when using a diagonal mass matrix (the default).
In this case, 571 | you can pass a dense matrix, estimated externally or from previous runs 572 | (previous fits contain an element `covar.est` which can be passed to the 573 | next call). Do this with `control=list(metric=M)` where `M` is a matrix in 574 | untransformed space that approximates the posterior. For ADMB models, you 575 | can try using the MLE covariance by setting 576 | `control=list(metric="mle")`. Note that, for technical reasons, you need to 577 | reoptimize the model with the command line argument `-hbf`. (ADMB uses 578 | different transformation functions for HMC, so the covariance would be 579 | mismatched otherwise.) Note that when using a dense mass matrix there is 580 | additional computational overhead, particularly in higher dimensions. That 581 | is, a dense matrix leads to shorter trajectories, but they take longer to 582 | calculate. Whether a dense metric is worth the increase in sampling 583 | efficiency will depend on the model. 584 | 585 | The following figure demonstrates the effect of the mass matrix on a 2d 586 | normal model with box constraints. The columns denote the different model 587 | "spaces" and the rows different mass matrices. Random, arbitrary NUTS 588 | trajectories are shown in red on top of posterior draws (points). The 589 | right column is the model space, the middle the untransformed space, and the far 590 | left the untransformed space after being rotated by the mass matrix. Note the 591 | differences in axis scales among the plots. The key here is the 592 | rightmost column. The top panel is with no mass matrix (i.e., a unit 593 | diagonal), and the trajectories undulate back and forth as they move across 594 | the posterior; the path from one end to the other is thus not very 595 | direct. When a diagonal matrix is used, the trajectories become 596 | noticeably straighter. Finally, with the dense matrix the trajectories are 597 | even better. This is the effect of the mass matrix: trajectories can move 598 | between regions of the posterior more easily. 599 | 600 | 601 | ![Effects of mass matrix on trajectories](tree_trajectories.png) 602 | 603 | ## Algorithm validity 604 | 605 | Software bugs in the MCMC algorithms can manifest as biased sampling from 606 | the target distribution. This is a serious issue and one that can be hard 607 | to detect. One way to check for this is to run the algorithms on known 608 | distributions and compare estimated properties against the analytical values. I 609 | checked this with normal, Student-t (df=4 and df=10), gamma, inverse gamma, 610 | truncated normal, and multivariate normal distributions. I ran long chains (1 million 611 | samples) with a thinning rate of 10 for NUTS, and compared this to an 612 | equivalent set of points from RWM and to IID Monte Carlo samples 613 | (e.g., `rnorm`). Long chains and thinning ensured no residual 614 | autocorrelation. I repeated this for 20 chains for RWM, NUTS with the MLE 615 | metric, adaptive NUTS, and Monte Carlo (mc). Results are plotted as 616 | relative error of different probabilities (via `pnorm` etc.). 617 | 618 | ![Validity of ADMB MCMC algorithms](validity_tests.png){width=100%} 619 | 620 | In all cases the relative errors are centered on 0, and MC is indistinguishable from MCMC. This provides 621 | strong evidence that the algorithms are coded correctly. This **does not** 622 | mean that finite samples from target distributions are unbiased.
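The idea behind these checks can be sketched in a few lines of R. The snippet below uses IID draws in place of actual MCMC output and a standard normal target; it only illustrates the comparison of estimated and analytical probabilities, and is not the code used to produce the figure.

```` {r, eval=FALSE, echo=TRUE}
## IID draws stand in for (thinned) MCMC output from a N(0,1) target
draws <- rnorm(1e6)
## Analytical probabilities at a few quantiles, and their estimates
p <- c(0.025, 0.25, 0.5, 0.75, 0.975)
p.hat <- sapply(qnorm(p), function(q) mean(draws <= q))
## Relative error, as plotted in the figure above
(p.hat - p)/p
````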
623 | 624 | 625 | ## References 626 | 627 | -------------------------------------------------------------------------------- /vignettes/auto/refs.el: -------------------------------------------------------------------------------- 1 | (TeX-add-style-hook 2 | "refs" 3 | (lambda () 4 | (LaTeX-add-bibitems 5 | "monnahan2018" 6 | "monnahan2019" 7 | "betancourt2014" 8 | "kristensen2016" 9 | "fournier2012" 10 | "monnahan2017" 11 | "carpenter2017" 12 | "hoffman2014" 13 | "neal2011" 14 | "betancourt2016" 15 | "betancourt2017intro" 16 | "stan2017")) 17 | :bibtex) 18 | 19 | -------------------------------------------------------------------------------- /vignettes/fit.RDS: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Cole-Monnahan-NOAA/adnuts/ff0c8d3d56006e61ea3651ea27fedc97f2a02afe/vignettes/fit.RDS -------------------------------------------------------------------------------- /vignettes/metric_adaptation.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Cole-Monnahan-NOAA/adnuts/ff0c8d3d56006e61ea3651ea27fedc97f2a02afe/vignettes/metric_adaptation.png -------------------------------------------------------------------------------- /vignettes/refs.bib: -------------------------------------------------------------------------------- 1 | @article{monnahan2018, 2 | title={No-U-turn sampling for fast Bayesian inference in ADMB and TMB: Introducing the adnuts and tmbstan R packages}, 3 | author={Monnahan, C. C and Kristensen, Kasper}, 4 | journal={PloS one}, 5 | volume={13}, 6 | number={5}, 7 | year={2018}, 8 | publisher={Public Library of Science} 9 | } 10 | 11 | @article{monnahan2019, 12 | author = {Monnahan, C. C. and Branch, T. A. and Thorson, J. T. and Stewart, I. J., and Szuwalksi, C. S.}, 13 | title = {Overcoming long Bayesian run times in integrated fisheries stock assessments}, 14 | journal = {ICES Journal of Marine Science (in press)}, 15 | year = {2019}, 16 | doi= {10.1093/icesjms/fsz059} 17 | } 18 | 19 | 20 | @article{betancourt2014, 21 | author = {Betancourt, MJ and Byrne, Simon and Girolami, Mark}, 22 | title = {Optimizing the integrator step size for Hamiltonian Monte Carlo}, 23 | journal = {arXiv preprint arXiv:1411.6669}, 24 | year = {2014} 25 | } 26 | 27 | 28 | 29 | @article{kristensen2016, 30 | author = {Kristensen, Kasper and Nielsen, Anders and Berg, Casper W. and Skaug, Hans and Bell, Bradley M.}, 31 | title = {TMB: Automatic differentiation and Laplace approximation}, 32 | journal = {Journal of Statistical Software}, 33 | volume = {70}, 34 | number = {5}, 35 | pages = {21}, 36 | year = {2016} 37 | } 38 | 39 | 40 | 41 | @article{fournier2012, 42 | author = {Fournier, D. A. and Skaug, H. J. and Ancheta, J. and Ianelli, J. and Magnusson, A. and Maunder, M. N. and Nielsen, A. and Sibert, J.}, 43 | title = {AD Model Builder: using automatic differentiation for statistical inference of highly parameterized complex nonlinear models}, 44 | journal = {Optimization Methods & Software}, 45 | volume = {27}, 46 | number = {2}, 47 | pages = {233-249}, 48 | year = {2012} 49 | } 50 | 51 | 52 | @article{monnahan2017, 53 | author = {Monnahan, C. C. and Thorson, J. T. and Branch, T. 
A.}, 54 | title = {Faster estimation of Bayesian models in ecology using Hamiltonian Monte Carlo}, 55 | journal = {Methods in Ecology and Evolution}, 56 | volume = {8}, 57 | number = {3}, 58 | pages = {339-348}, 59 | year = {2017} 60 | } 61 | 62 | @article{carpenter2017, 63 | author = {Carpenter, B. and Gelman, A. and Hoffman, M. D. and Lee, D. and Goodrich, B. and Betancourt, M. and Riddell, A. and Guo, J. Q. and Li, P. and Riddell, A.}, 64 | title = {Stan: A Probabilistic Programming Language}, 65 | journal = {Journal of Statistical Software}, 66 | volume = {76}, 67 | number = {1}, 68 | pages = {1-29}, 69 | year = {2017} 70 | } 71 | 72 | 73 | @article{hoffman2014, 74 | author = {Hoffman, M. D. and Gelman, A.}, 75 | title = {The no-U-turn sampler: adaptively setting path lengths in Hamiltonian Monte Carlo}, 76 | journal = {Journal of Machine Learning Research}, 77 | volume = {15}, 78 | number = {1}, 79 | pages = {1593-1623}, 80 | year = {2014} 81 | } 82 | 83 | 84 | @article{neal2011, 85 | author = {Neal, Radford M}, 86 | title = {MCMC using Hamiltonian dynamics}, 87 | journal = {Handbook of Markov Chain Monte Carlo}, 88 | volume = {2}, 89 | year = {2011} 90 | } 91 | 92 | @article{betancourt2016, 93 | author = {Betancourt, Michael}, 94 | title = {Identifying the optimal integration time in Hamiltonian Monte Carlo}, 95 | journal = {arXiv preprint arXiv:1601.00225}, 96 | year = {2016} 97 | } 98 | 99 | 100 | @article{betancourt2017intro, 101 | author = {Betancourt, Michael}, 102 | title = {A Conceptual Introduction to Hamiltonian Monte Carlo}, 103 | journal = {arXiv preprint arXiv:1701.02434}, 104 | year = {2017} 105 | } 106 | 107 | @misc{stan2017, 108 | author = {Stan Development Team,}, 109 | title = {Stan modeling language users guide and reference manual, version 2.17.0.}, 110 | year = {2017} 111 | } 112 | 113 | 114 | -------------------------------------------------------------------------------- /vignettes/tree_trajectories.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Cole-Monnahan-NOAA/adnuts/ff0c8d3d56006e61ea3651ea27fedc97f2a02afe/vignettes/tree_trajectories.png -------------------------------------------------------------------------------- /vignettes/validity_tests.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Cole-Monnahan-NOAA/adnuts/ff0c8d3d56006e61ea3651ea27fedc97f2a02afe/vignettes/validity_tests.png --------------------------------------------------------------------------------