├── .Rbuildignore ├── data ├── us_states.rda └── cigarettes.rda ├── R ├── 90_other.R ├── bsreg-package.R ├── 92_zzz.R ├── 80_methods.R ├── 81_coda.R ├── 42_set_mh.R ├── 91_data.R ├── 13_stochvol.R ├── 50_execute.R ├── 00_aux.R ├── 60_interface.R ├── 12_slx.R ├── 11_shrinkage.R ├── 40_setup.R ├── 15_sem.R ├── 15_sar.R ├── 10_lm.R ├── 20_mh.R └── 41_set_options.R ├── tests └── tinytest.R ├── NEWS.md ├── paper ├── 1b_estimate_dist.R ├── 1a_estimate_cont.R ├── 0_data.R ├── 3_extra.R ├── 9_paper.R ├── 5_vis.R ├── 2b_outputs_dist.R └── 2a_outputs_cont.R ├── DESCRIPTION ├── NAMESPACE ├── .gitignore ├── README.md └── LICENSE /.Rbuildignore: -------------------------------------------------------------------------------- 1 | ^.*\.Rproj$ 2 | ^\.Rproj\.user$ 3 | ^paper.* -------------------------------------------------------------------------------- /data/us_states.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nk027/bsreg/HEAD/data/us_states.rda -------------------------------------------------------------------------------- /data/cigarettes.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/nk027/bsreg/HEAD/data/cigarettes.rda -------------------------------------------------------------------------------- /R/90_other.R: -------------------------------------------------------------------------------- 1 | 2 | #' @noRd 3 | self <- NULL 4 | #' @noRd 5 | private <- NULL 6 | #' @noRd 7 | super <- NULL 8 | -------------------------------------------------------------------------------- /R/bsreg-package.R: -------------------------------------------------------------------------------- 1 | #' Bayesian Spatial Regression Models 2 | #' 3 | #' Fit Bayesian models with a focus on the spatial econometric models. 4 | #' 5 | #' @docType package 6 | #' 7 | #' @name bsreg-package 8 | #' 9 | NULL -------------------------------------------------------------------------------- /tests/tinytest.R: -------------------------------------------------------------------------------- 1 | 2 | # if(requireNamespace("tinytest", quietly = TRUE)) { 3 | # set.seed(42) 4 | # home <- length(unclass(packageVersion("bsreg"))[[1]]) == 4 # 0.0.0.9000 5 | # # home <- TRUE 6 | # if(home) { # Only run locally, let CRAN test examples and the vignette 7 | # tinytest::test_package("bsreg", at_home = home, pattern = "^.*\\.[rR]$") 8 | # } 9 | # } 10 | -------------------------------------------------------------------------------- /NEWS.md: -------------------------------------------------------------------------------- 1 | 2 | # v0.0.2, CRAN Update 1 3 | 4 | - Introduce an `as.mcmc` method to interface with **coda** 5 | - Add a CC-BY-NC-SA dataset on US state boundaries (**sf** format, `us_states`) 6 | - Improve the ordering of the `cigarettes` dataset, add location information 7 | - Improve examples and documentation 8 | - Add concise README 9 | 10 | # v0.0.1, CRAN Submission 11 | 12 | - Prepare for release with basic functionality 13 | - `R CMD check --as-cran`: No errors or warnings, one note (New submission) 14 | -------------------------------------------------------------------------------- /R/92_zzz.R: -------------------------------------------------------------------------------- 1 | 2 | .onLoad <- function(...) 
{
 3 | 
 4 |   register_s3("coda", "as.mcmc", "bm")
 5 |   invisible()
 6 | }
 7 | 
 8 | register_s3 <- function(pkg, generic, class) {
 9 | 
10 |   fun <- get(paste0(generic, ".", class), envir = parent.frame())
11 |   stopifnot(is.function(fun))
12 | 
13 |   if(pkg %in% loadedNamespaces()) {
14 |     registerS3method(generic, class, fun, envir = asNamespace(pkg))
15 |   }
16 | 
17 |   setHook(packageEvent(pkg, "onLoad"), function(...) {
18 |     registerS3method(generic, class, fun, envir = asNamespace(pkg))
19 |   })
20 | }
21 | 
--------------------------------------------------------------------------------
/R/80_methods.R:
--------------------------------------------------------------------------------
 1 | 
 2 | #' @export
 3 | print.bm <- function(x, ...) {
 4 |   cat("\nCall:\n", paste(deparse(x$call), sep = "\n", collapse = "\n"), "\n\n", sep = "")
 5 |   cat("Draws (total): ", NROW(x$draws), " (", x$model$get_meta, ")\n", sep = "")
 6 |   coefs <- colMeans(x$draws)
 7 |   cat("Coefficients (mean):\n")
 8 |   print.default(format(coefs, digits = 3L), print.gap = 2L, quote = FALSE)
 9 |   cat("\n")
10 |   invisible(x)
11 | }
12 | 
13 | 
14 | #' @export
15 | summary.bm <- function(object, ...) {
16 |   summary(object$draws)
17 | }
18 | 
19 | 
20 | #' @export
21 | #' @importFrom stats plot.ts
22 | plot.bm <- function(x, ...) {
23 |   plot.ts(x$draws)
24 | }
--------------------------------------------------------------------------------
/paper/1b_estimate_dist.R:
--------------------------------------------------------------------------------
 1 | 
 2 | # Load packages ---
 3 | devtools::load_all()
 4 | 
 5 | # Settings ---
 6 | n_save <- 50000L
 7 | n_burn <- 10000L
 8 | 
 9 | # Estimate ---
10 | out_slxd2 <- bslx(y ~ X, W = Psi(2), X_SLX = X_lag,
11 |   n_save = n_save, n_burn = n_burn)
12 | 
13 | out_slxd3 <- bslx(y ~ X, W = Psi(3), X_SLX = X_lag,
14 |   n_save = n_save, n_burn = n_burn)
15 | 
16 | out_slxd4 <- bslx(y ~ X, W = Psi(4), X_SLX = X_lag,
17 |   n_save = n_save, n_burn = n_burn)
18 | 
19 | out_slxdx <- bslx(y ~ X, W = Psi, X_SLX = X_lag,
20 |   n_save = n_save, n_burn = n_burn, options = set_options(
21 |   SLX = set_SLX(delta = 3, delta_scale = 0.05, delta_a = 2, delta_b = 2)))
22 | 
23 | # Store results
24 | save.image("paper/cigarettes_dist.Rda")
--------------------------------------------------------------------------------
/DESCRIPTION:
--------------------------------------------------------------------------------
 1 | Package: bsreg
 2 | Type: Package
 3 | Title: Bayesian Spatial Regression Models
 4 | Version: 0.0.2
 5 | Date: 2022-02-25
 6 | Authors@R: 
 7 |   person(given = "Nikolas", family = "Kuschnig", role = c("aut", "cre"), email = "nikolas.kuschnig@wu.ac.at", comment = c(ORCID = "0000-0002-6642-2543"))
 8 | Author: Nikolas Kuschnig [aut, cre] (<https://orcid.org/0000-0002-6642-2543>)
 9 | Maintainer: Nikolas Kuschnig <nikolas.kuschnig@wu.ac.at>
10 | Description: Fit Bayesian models with a focus on the spatial econometric models.
11 | Depends: R (>= 3.5.0) 12 | Imports: R6, Matrix, stats, graphics, utils, grDevices 13 | Suggests: stochvol, coda 14 | License: GPL-3 | file LICENSE 15 | Encoding: UTF-8 16 | LazyData: true 17 | Roxygen: list(markdown = TRUE) 18 | RoxygenNote: 7.1.2 19 | -------------------------------------------------------------------------------- /NAMESPACE: -------------------------------------------------------------------------------- 1 | # Generated by roxygen2: do not edit by hand 2 | 3 | S3method(bm,bm) 4 | S3method(bm,formula) 5 | S3method(plot,bm) 6 | S3method(print,bm) 7 | S3method(summary,bm) 8 | export(blm) 9 | export(bm) 10 | export(bsar) 11 | export(bsdem) 12 | export(bsdm) 13 | export(bsem) 14 | export(bslx) 15 | export(bsv) 16 | export(set_HS) 17 | export(set_NG) 18 | export(set_SAR) 19 | export(set_SEM) 20 | export(set_SLX) 21 | export(set_SNG) 22 | export(set_SV) 23 | export(set_mh) 24 | export(set_options) 25 | importFrom(R6,R6Class) 26 | importFrom(stats,model.frame) 27 | importFrom(stats,model.matrix) 28 | importFrom(stats,model.response) 29 | importFrom(stats,plot.ts) 30 | importFrom(stats,rnorm) 31 | importFrom(stats,splinefun) 32 | importFrom(utils,setTxtProgressBar) 33 | importFrom(utils,txtProgressBar) 34 | -------------------------------------------------------------------------------- /R/81_coda.R: -------------------------------------------------------------------------------- 1 | 2 | #' Methods for \pkg{coda} Markov chain Monte Carlo objects 3 | #' 4 | #' Methods to convert parameter and/or coefficient draws to \pkg{coda}'s 5 | #' \code{\link[coda]{mcmc}} format for further processing. 6 | #' 7 | #' @name coda 8 | #' 9 | #' @param x A \code{bm} object, obtained from \code{\link{bm}}. 10 | #' @param ... Other parameters for \code{\link[coda]{as.mcmc}}. 11 | #' 12 | #' @return Returns a \pkg{coda} \code{\link[coda]{mcmc}} object. 13 | NULL 14 | 15 | 16 | #' @rdname coda 17 | as.mcmc.bm <- function( # Dynamic export (zzz.R) 18 | x, ...) { 19 | 20 | # Checks --- 21 | 22 | if(!inherits(x, "bm")) { 23 | stop("Please provide a `bm` object.") 24 | } 25 | has_package("coda") 26 | 27 | out <- coda::as.mcmc(x[["draws"]], ...) 
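  # The draws are a plain matrix with one named column per parameter, so coda's
  # diagnostics and plots apply directly, e.g. coda::geweke.diag(as.mcmc(x)).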
28 | 29 | return(out) 30 | } 31 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | paper/data/* 2 | R/depr/* 3 | 4 | # Documentation files 5 | man/* 6 | 7 | # History files 8 | .Rhistory 9 | .Rapp.history 10 | 11 | # Session Data files 12 | .RData 13 | 14 | # User-specific files 15 | .Ruserdata 16 | 17 | # Example code in package build process 18 | *-Ex.R 19 | 20 | # Output files from R CMD build 21 | /*.tar.gz 22 | 23 | # Output files from R CMD check 24 | /*.Rcheck/ 25 | 26 | # RStudio files 27 | .Rproj.user/ 28 | 29 | # produced vignettes 30 | vignettes/*.html 31 | vignettes/*.pdf 32 | 33 | # OAuth2 token, see https://github.com/hadley/httr/releases/tag/v0.3 34 | .httr-oauth 35 | 36 | # knitr and R markdown default cache directories 37 | *_cache/ 38 | /cache/ 39 | 40 | # Temporary files created by R markdown 41 | *.utf8.md 42 | *.knit.md 43 | 44 | # R Environment Variables 45 | .Renviron 46 | 47 | # pkgdown site 48 | docs/ 49 | 50 | # translation temp files 51 | po/*~ 52 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | bsreg: Bayesian Spatial Regression Models 3 | ======= 4 | 5 | [![CRAN](https://www.r-pkg.org/badges/version/bsreg)](https://cran.r-project.org/package=bsreg) 6 | [![month](https://cranlogs.r-pkg.org/badges/bsreg)](https://www.r-pkg.org/pkg/bsreg) 7 | [![total](https://cranlogs.r-pkg.org/badges/grand-total/bsreg)](https://www.r-pkg.org/pkg/bsreg) 8 | 9 | Estimation of Bayesian spatial models. 10 | 11 | Installation 12 | ------- 13 | 14 | **bsreg** is available on [CRAN](https://CRAN.R-project.org/package=bsreg). The development version can be installed from GitHub. 15 | ``` r 16 | install.packages("bsreg") 17 | devtools::install_github("nk027/bsreg") 18 | ``` 19 | 20 | Demonstration 21 | ------- 22 | 23 | ``` r 24 | # Load the package 25 | library("bsreg") 26 | 27 | # Estimate a Bayesian linear model using cigarette demand data 28 | x <- blm(log(sales) ~ log(price), data = cigarettes, lags = 1) 29 | 30 | # Check convergence via trace and density plots 31 | plot(x) 32 | ``` 33 | 34 | References 35 | ------- 36 | 37 | Nikolas Kuschnig (2021). Bayesian spatial econometrics and the need for software. *Working Paper*, DOI: [10.13140/RG.2.2.11269.68328](https://doi.org/10.13140/RG.2.2.11269.68328). 38 | -------------------------------------------------------------------------------- /R/42_set_mh.R: -------------------------------------------------------------------------------- 1 | 2 | #' Settings to tune a Metropolis-Hastings step 3 | #' 4 | #' @param adjust_burn Numeric scalar with the percentage of burn-in that should be used to tune the MH step. 5 | #' @param acc_target Numeric vector with the lower and upper bound of the target acceptance rate for the MH step. 6 | #' @param acc_change Numeric scalar with the percentage adjustment to the proposal scale for tuning. 7 | #' 8 | #' @return Returns a list with settings to tune the Metropolis-Hastings step of a Bayesian model. 
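#' @details The tuning happens during burn-in (see the internal \code{tune} function): every
#'   tenth draw within the first \code{adjust_burn} share of the burn-in, a proposal scale with
#'   an acceptance rate below \code{acc_target[1]} is multiplied by \code{1 - acc_change}, and
#'   one above \code{acc_target[2]} by \code{1 + acc_change}.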
 9 | #' @export
10 | #'
11 | #' @examples
12 | #' set_mh(0.5, c(0.1, 0.5), .05)
13 | set_mh <- function(
14 |   adjust_burn = 0.8,
15 |   acc_target = c(0.20, 0.45),
16 |   acc_change = 0.01) {
17 | 
18 |   structure(list(
19 |     adjust_burn = num_check(adjust_burn, min = 0, max = 1,
20 |       msg = "Please provide a valid length for the MH tuning period (in percent of burn-in) via 'adjust_burn'."),
21 |     acc_target = vapply(acc_target, num_check, numeric(1L), min = 0, max = 1,
22 |       msg = "Please provide a valid target range for the MH tuning (in percent of acceptance) via 'acc_target'."),
23 |     acc_change = num_check(acc_change, min = 0, max = 1e6,
24 |       msg = "Please provide a valid scale adjustment factor for the MH tuning period (in percent) via 'acc_change'.")
25 |   ), class = "mh_settings")
26 | }
--------------------------------------------------------------------------------
/paper/1a_estimate_cont.R:
--------------------------------------------------------------------------------
 1 | 
 2 | # Load packages ---
 3 | devtools::load_all()
 4 | library("spatialreg")
 5 | 
 6 | # Settings ---
 7 | n_save <- 25000L
 8 | n_burn <- 5000L
 9 | # Prepare the full matrix with lagged explanatories
10 | X_SLX <- cbind(X, X_cont)
11 | listw <- spdep::mat2listw(W_cont)
12 | 
13 | # Estimate ---
14 | out_blm <- blm(y ~ X - 1,
15 |   n_save = n_save, n_burn = n_burn)
16 | out_lm <- lm(y ~ X - 1)
17 | 
18 | out_bslx <- bslx(y ~ X - 1, W = W_cont, X_SLX = X_lag,
19 |   n_save = n_save, n_burn = n_burn)
20 | out_slx <- lm(y ~ X_SLX - 1)
21 | 
22 | out_bsar <- bsar(y ~ X - 1, W = W_cont,
23 |   n_save = n_save, n_burn = n_burn, ldet_SAR = list(reps = n_time))
24 | out_sar <- lagsarlm(y ~ X - 1, listw = listw)
25 | 
26 | out_bsem <- bsem(y ~ X, W = W_cont,
27 |   n_save = n_save, n_burn = n_burn, ldet_SEM = list(reps = n_time))
28 | out_sem <- errorsarlm(y ~ X - 1, listw = listw)
29 | 
30 | out_bsdm <- bsdm(y ~ X, W = W_cont, X_SLX = X_lag,
31 |   n_save = n_save, n_burn = n_burn, ldet_SAR = list(reps = n_time))
32 | out_sdm <- lagsarlm(y ~ X_SLX - 1, listw = listw)
33 | 
34 | out_bsdem <- bsdem(y ~ X, W = W_cont, X_SLX = X_lag,
35 |   n_save = n_save, n_burn = n_burn, ldet_SEM = list(reps = n_time))
36 | out_sdem <- errorsarlm(y ~ X_SLX - 1, listw = listw)
37 | 
38 | # Store results
39 | save.image("paper/cigarettes_contig.Rda")
--------------------------------------------------------------------------------
/R/91_data.R:
--------------------------------------------------------------------------------
 1 | 
 2 | #' Cigarette demand
 3 | #'
 4 | #' Panel dataset on cigarette demand in 46 US states from 1963 until 1992,
 5 | #' see Baltagi and Levin (1992) and Baltagi and Li (2004). Extended with
 6 | #' longitude and latitude from the \code{us_states} dataset.
 7 | #'
 8 | #' @docType data
 9 | #'
10 | #' @format A \code{data.frame} object.
11 | #'
12 | #' @references
13 | #' Baltagi, B. H. and Levin, D. (1992). Cigarette taxation: raising revenues
14 | #' and reducing consumption, \emph{Structural Change and Economic Dynamics},
15 | #' \bold{3(2)}, 321-335. \doi{10.1016/0954-349X(92)90010-4}.
16 | #' Baltagi, B. H. and Li, D. (2004). Prediction in the panel data model with
17 | #' spatial correlation. \emph{Advances in Spatial Econometrics}, 283-295.
18 | #' Springer, Berlin. \doi{10.1007/978-3-662-05617-2_13}.
19 | "cigarettes"
20 | 
21 | 
22 | #' United States Historical States
23 | #'
24 | #' Polygons of US state boundaries for the period 1960--2000. Subset from
25 | #' Siczewicz (2011).
Licensed under CC BY-NC-SA 2.5 by the Atlas of Historical 26 | #' County Boundaries. 27 | #' 28 | #' @docType data 29 | #' 30 | #' @format A \code{sf} object. 31 | #' 32 | #' @references 33 | #' Siczewicz, P. (2011) U.S. Historical States and Territories (Generalized 34 | #' 0.01 deg). Dataset. Atlas of Historical County Boundaries, edited by 35 | #' Long, J. H. . Chicago: The Newberry Library, 2011. Available online from 36 | #' \url{https://publications.newberry.org/ahcbp/}. 37 | #' 38 | #' @source \url{https://publications.newberry.org/ahcbp/} 39 | "us_states" 40 | -------------------------------------------------------------------------------- /paper/0_data.R: -------------------------------------------------------------------------------- 1 | 2 | # Prepare data --- 3 | library("dplyr") 4 | library("readxl") 5 | 6 | # Data kindly made available by Paul Elhorst at 7 | download.file( 8 | "https://spatial-panels.com/wp-content/uploads/2017/06/Files-SLX-paper.zip", 9 | destfile = "paper/data/halleckvega2015.zip") 10 | unzip("paper/data/halleckvega2015.zip", exdir = "paper/data", 11 | files = c("cigarette+2var.xls", "Spat-Sym-US.xls", "cigar_states.xls")) 12 | 13 | # Prepare the data 14 | df <- read_excel("paper/data/cigarette+2var.xls") %>% 15 | mutate(year = factor(year + 1963), state = factor(state)) 16 | contig <- read_excel("paper/data/Spat-Sym-US.xls", 17 | col_names = paste0(1:46)) %>% as.matrix() 18 | xy <- read_excel("paper/data/cigar_states.xls", col_names = TRUE) 19 | 20 | # Utah has a wrong longitude 21 | xy %>% filter(Name == "UTAH") # 11.9 is off the Irish coast 22 | xy <- xy %>% mutate(longitude = ifelse(Name == "UTAH", 111.7, longitude)) 23 | 24 | # Connectivities --- 25 | n_time <- length(unique(df$year)) 26 | # Contiguity matrix 27 | W_cont <- kronecker(diag(n_time), contig / rowSums(contig)) 28 | # Inverse-distance decay function 29 | dist <- as.matrix(dist(xy)) 30 | diag(dist) <- Inf # Diagonal elements will be 0 31 | Psi <- function(delta) { 32 | W_dist <- dist ^ (-delta) # Build 33 | W_dist <- W_dist / max(eigen(W_dist, symmetric = TRUE)$values) # Scale 34 | kronecker(diag(n_time), W_dist) 35 | } 36 | W_dist <- Psi(3) 37 | 38 | # Prepare variables --- 39 | y <- cbind(logc = df$logc) 40 | X <- model.matrix(logc ~ logp + logy + year + state, data = df) 41 | X_lag <- X[, c("logp", "logy")] 42 | colnames(X_lag) <- c("wlogp", "wlogy") 43 | X_cont <- W_cont %*% X_lag 44 | -------------------------------------------------------------------------------- /paper/3_extra.R: -------------------------------------------------------------------------------- 1 | 2 | # Lazy BIC for comparing fixed SLX(delta) variants --- 3 | bics <- c( 4 | "cont" = BIC(lm(y ~ cbind(X, X_cont))), 5 | "dist2" = BIC(lm(y ~ cbind(X, Psi(2) %*% X_lag))), 6 | "dist3" = BIC(lm(y ~ cbind(X, Psi(3) %*% X_lag))), 7 | "dist4" = BIC(lm(y ~ cbind(X, Psi(4) %*% X_lag))) 8 | ) 9 | bics_s <- bics - min(bics) # Standardise for numerics 10 | exp(bics_s / -2) / sum(exp(bics_s / -2)) # Posterior probabilities 11 | 12 | # Visualise inverse-distance connectivity 13 | distances <- seq(1, 5, length.out = 1000) 14 | deltas <- seq(0.5, 5, length.out = 1000) 15 | d_conn <- expand.grid(distance = distances, delta = deltas) 16 | idd <- function(delta, distance) distance^(-delta) 17 | d_conn$connectivity <- NA_real_ 18 | for(i in seq_len(nrow(d_conn))) 19 | d_conn[i, "connectivity"] <- idd(d_conn[i, "delta"], d_conn[i, "distance"]) 20 | 21 | # Plot the connectivity as function of delta and the distance 22 | p_idd <- ggplot(d_conn, 
aes(x = distance, y = delta, fill = connectivity)) + 23 | geom_tile(col = "transparent", alpha = 1) + 24 | ggtitle("Distance-decay connectivity strength") + 25 | ylab("δ") + xlab("distance") + 26 | theme_minimal(base_size = 14) + 27 | theme( 28 | plot.background = element_rect(fill = "white", color = "transparent"), 29 | plot.title = element_text(color = "#333333", size = 18, face = "bold"), 30 | axis.title.x = element_text(color = "#333333", size = 14, face = "bold"), 31 | axis.title.y = element_text(color = "#333333", size = 14, face = "bold"), 32 | legend.title = element_blank(), 33 | legend.background = element_rect(fill = "white", color = "#333333"), 34 | legend.text = element_text(color = "#333333", size = 14, face = "bold"), 35 | legend.position = c(0.90, 0.85), 36 | text = element_text(family = "Helvetica") 37 | ) + 38 | scale_fill_viridis_c() 39 | 40 | # Save the plot 41 | p_idd 42 | # png("dist-decay.png", # Base device to avoid bugged outputs 43 | # height = 1800, width = 1800, res = 300, bg = "transparent") 44 | # p_idd 45 | # dev.off() 46 | -------------------------------------------------------------------------------- /R/13_stochvol.R: -------------------------------------------------------------------------------- 1 | 2 | #' Bayesian stochastic volatility model 3 | #' 4 | #' @docType class 5 | #' 6 | #' @param parent \code{\link{R6Class}} object to inherit from. 7 | #' 8 | #' @importFrom R6 R6Class 9 | #' 10 | #' @noRd 11 | get_sv_class <- function(parent = NormalGamma) { 12 | 13 | has_package("stochvol") 14 | StochasticVolatility <- R6Class("StochasticVolatility", inherit = parent, 15 | 16 | public = list( 17 | 18 | initialize_SV = function(priors, ...) { 19 | 20 | # Store prior settings --- 21 | 22 | if(missing(priors) || is.null(priors$SV)) {priors <- list(SV = set_SV())} 23 | private$SV$priors <- priors$SV$priors 24 | private$SV$parameters <- priors$SV$parameters 25 | 26 | # Build SV object --- 27 | 28 | # Function to update the normalizer and cache with a new latent 29 | private$SV$set_latent <- function(latent) { 30 | private$SV$latent <- latent 31 | normalizer <- exp(-latent / 2) 32 | private$SV$y <- super$y * normalizer 33 | private$SV$X <- super$X * normalizer 34 | # Cache 35 | private$SV$XX <- crossprod(private$SV$X) 36 | private$SV$Xy <- crossprod(private$SV$X, private$SV$y) 37 | } 38 | 39 | return(NULL) 40 | }, 41 | 42 | setup_SV = function(...) 
{ 43 | 44 | # Set up the latent --- 45 | 46 | private$SV$set_latent(rep(private$SV$parameters$latent0, private$cache$N)) 47 | 48 | return(NULL) 49 | }, 50 | 51 | # Sample latent quantity 52 | sample_volatility = function() { 53 | 54 | # Use stochvol's one step sampler 55 | sv <- stochvol::svsample_fast_cpp(self$residuals, 56 | startpara = private$SV$parameters, startlatent = private$SV$latent, priorspec = private$SV$priors) 57 | 58 | private$SV$parameters <- sv$para 59 | private$SV$set_latent(drop(sv$latent)) 60 | } 61 | ), 62 | 63 | active = list( 64 | 65 | # Variables that are adapted using the normalizer --- 66 | y = function() {private$SV$y}, 67 | X = function() {private$SV$X}, 68 | XX = function() {private$SV$XX}, 69 | Xy = function() {private$SV$Xy}, 70 | 71 | # Access functions --- 72 | get_SV = function() {private$SV} 73 | ), 74 | 75 | private = list( 76 | SV = NULL 77 | ) 78 | 79 | ) 80 | 81 | } -------------------------------------------------------------------------------- /paper/9_paper.R: -------------------------------------------------------------------------------- 1 | 2 | set.seed(42) 3 | library("bsreg") 4 | 5 | # Code in the paper ----- 6 | 7 | # Plain linear model (Independent Normal-Gamma) 8 | x <- blm(log(sales) ~ log(price), data = cigarettes) 9 | 10 | # Conjugate linear model (Dependent Normal-Gamma) 11 | x <- blm(log(sales) ~ log(price), data = cigarettes, 12 | options = set_options("Conjugate", 13 | NG = set_NG(prec = 1e-4, shape = 1, rate = 1))) 14 | 15 | # Construct an inverse distance-decay matrix --- 16 | xy <- cigarettes[cigarettes[["year"]] == 1980, c("longitude", "latitude")] 17 | dist <- as.matrix(dist(xy)) 18 | diag(dist) <- Inf 19 | delta <- 3 # Decay parameter 20 | W_decay <- dist ^ -delta 21 | W_scaled <- W_decay / max(eigen(W_decay, symmetric = TRUE)[["values"]]) 22 | n_time <- length(unique(cigarettes[["year"]])) 23 | W <- kronecker(diag(n_time), W_scaled) # Repeated for every year 24 | 25 | # Spatial Durbin model (Uniform lambda) 26 | x <- bsdm(log(sales) ~ log(price), data = cigarettes, 27 | W = W, options = set_options( 28 | SAR = set_SAR(lambda_a = 1, lambda_b = 1)), 29 | n_save = 5000L, n_burn = 1000L) 30 | 31 | # Outputs --- 32 | 33 | # Convergence 34 | plot(x) 35 | plot(as.mcmc(x)) 36 | coda::geweke.diag(as.mcmc(x)) 37 | 38 | # Analysis 39 | print(x) 40 | apply(x[[1]], 2, quantile, c(0.025, 0.5, 0.975)) 41 | coda::HPDinterval(as.mcmc(x)) 42 | plot(density(x[[1]][, "lambda_SAR"])) 43 | 44 | # Add more draws 45 | x <- bm(x, n_save = 5000L) 46 | 47 | 48 | # Extra code ----- 49 | 50 | # The results in Section 5 use the data by Halleck-Vega and Elhorst (2015), 51 | # including their choices with respect to spatial connectivity. These results 52 | # can be reproduced with external code attached. 
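
# The decay parameter delta could also be sampled rather than fixed at 3 above.
# A sketch, mirroring paper/0_data.R and paper/1b_estimate_dist.R and reusing
# the 'dist' and 'n_time' objects from above; the settings are illustrative.
Psi <- function(delta) {
  W_decay <- dist ^ (-delta) # Inverse distance-decay
  W_scaled <- W_decay / max(eigen(W_decay, symmetric = TRUE)[["values"]]) # Scale
  kronecker(diag(n_time), W_scaled) # Repeated for every year
}
# The function can then be passed in place of a fixed matrix, e.g.
# x <- bslx(log(sales) ~ log(price), data = cigarettes, W = Psi,
#   options = set_options(SLX = set_SLX(delta = 3, delta_scale = 0.05)),
#   n_save = 5000L, n_burn = 1000L)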
53 | 54 | # However, we can reproduce the results just with 'bsreg' to some extent 55 | 56 | # The linear model reproduces 57 | blm(log(sales) ~ log(price / cpi) + log(ndi / cpi) + 58 | factor(name) + factor(year), data = cigarettes) 59 | 60 | # There are some discrepancies for inverse distance-decay 61 | y <- log(cigarettes$sales) 62 | X <- model.matrix(log(sales) ~ log(price / cpi) + log(ndi / cpi) + 63 | factor(name) + factor(year), data = cigarettes) 64 | X_lag <- X[, 2:3] # To lag with the W from above 65 | bslx(y ~ X, X_SLX = X_lag, W = W, data = cigarettes) 66 | # Other packages are needed to construct a contiguity matrix 67 | -------------------------------------------------------------------------------- /R/50_execute.R: -------------------------------------------------------------------------------- 1 | 2 | #' Obtain draws from a Bayesian model sampler 3 | #' 4 | #' @param x Bayesian model 5 | #' @param n_save,n_burn Integer scalar with number of draws to save / burn. 6 | #' @param mh Settings to tune the Metropolis-Hastings step. See \code{\link{set_mh}}. 7 | #' @param verbose Logical scalar. Whether to print status updates. 8 | #' 9 | #' @return Returns a numeric matrix with stored draws. The Bayesian model is modified in place. 10 | #' 11 | #' @importFrom utils setTxtProgressBar txtProgressBar 12 | sample <- function(x, n_save = 1000L, n_burn = 0L, mh = set_mh(), verbose = TRUE) { 13 | 14 | if(n_burn > 0) {tune(x, n_burn = n_burn, mh = mh, verbose = verbose)} 15 | 16 | if(n_save > 0) { 17 | 18 | if(verbose) { 19 | timer <- Sys.time() 20 | cat("Starting sampler with", n_save, "draws.\n") 21 | pb <- txtProgressBar(min = 0, max = n_save, style = 3) 22 | } 23 | 24 | draw <- unlist(x$get_parameters) 25 | storage <- matrix(NA_real_, n_save, length(draw), dimnames = list(NULL, names(draw))) 26 | 27 | for(i in seq.int(n_save)) { 28 | x$sample() 29 | storage[i, ] <- unlist(x$get_parameters) 30 | 31 | if(verbose) {setTxtProgressBar(pb, i)} 32 | } 33 | 34 | if(verbose) { 35 | cat("\nFinished sampling after ", format(round(Sys.time() - timer, 2)), ".\n", sep = "") 36 | close(pb) 37 | } 38 | 39 | } 40 | 41 | return(storage) 42 | } 43 | 44 | 45 | #' Burn-in and tune a Bayesian model sampler 46 | #' 47 | #' @inheritParams sample 48 | #' 49 | #' @return Modifies the Bayesian model in place and returns it invisibly. 
50 | tune <- function(x, n_burn = 1000L, mh = set_mh(), verbose = TRUE) { 51 | 52 | if(verbose) { 53 | timer <- Sys.time() 54 | cat("Starting burn-in with", n_burn, "draws.\n") 55 | pb <- txtProgressBar(min = 0, max = n_burn, style = 3) 56 | } 57 | 58 | for(i in seq.int(n_burn)) { 59 | 60 | x$sample() 61 | 62 | if(i %% 10 == 0 && i <= mh$adjust_burn * n_burn) { # Every tenth step we consider tuning the MH step 63 | 64 | for(obj in x$MH) { # Loop over each Metropolis-Hastings object 65 | acc_rate <- obj$get_tuning 66 | if(acc_rate < mh$acc_target[1L]) { # Loosen 67 | obj$set_scale <- max(obj$get_scale * (1 - mh$acc_change), 1e-12) 68 | } else if(acc_rate > mh$acc_target[2L]) { # Tighten 69 | obj$set_scale <- obj$get_scale * (1 + mh$acc_change) 70 | } 71 | } 72 | } 73 | 74 | if(verbose) {setTxtProgressBar(pb, i)} 75 | } 76 | 77 | if(verbose) { 78 | cat("\nFinished burn-in after ", format(round(Sys.time() - timer, 2)), ".\n", sep = "") 79 | close(pb) 80 | } 81 | 82 | return(invisible(x)) 83 | } 84 | 85 | 86 | #' @rdname tune 87 | burn <- function(x, n_burn = 1000L, verbose = TRUE) { 88 | 89 | return(tune(x, n_burn = n_burn, mh = set_mh(adjust_burn = 0), verbose = verbose)) 90 | } 91 | -------------------------------------------------------------------------------- /R/00_aux.R: -------------------------------------------------------------------------------- 1 | 2 | #' Check numeric scalar 3 | #' 4 | #' Check whether an object is bounded and coercible to a numeric value. 5 | #' 6 | #' @param x Numeric scalar. 7 | #' @param min Numeric scalar. Minimum value of \emph{x}. 8 | #' @param max Numeric scalar. Maximum value of \emph{x}. 9 | #' @param fun Function to apply to \emph{x} before returning. 10 | #' @param msg String fed to \code{\link[base]{stop}} if an error occurs. 11 | #' 12 | #' @return Returns \code{fun(x)}. 13 | #' 14 | #' @noRd 15 | num_check <- function( 16 | x, min = 0, max = Inf, 17 | msg = "Please check the numeric parameters.", 18 | fun = as.numeric) { 19 | 20 | if(!is.numeric(x) || length(x) != 1 || x < min || x > max) {stop(msg)} 21 | 22 | return(fun(x)) 23 | } 24 | 25 | 26 | #' @noRd 27 | int_check <- function( 28 | x, min = 0L, max = Inf, 29 | msg = "Please check the integer parameters.") { 30 | 31 | num_check(x, min, max, msg, fun = as.integer) 32 | } 33 | 34 | #' @noRd 35 | num_default <- function(x, default, min, max, msg) { 36 | if(missing(x)) {return(default)} 37 | num_check(x, min, max, msg) 38 | } 39 | 40 | 41 | #' Multivariate Normal 42 | #' 43 | #' Draw from a multivariate Normal using the precision instead of variance. 44 | #' 45 | #' @param n Integer scalar. Number of draws. 46 | #' @param mu Numeric vector. 47 | #' @param precision Numeric matrix. 48 | #' 49 | #' @return Returns a matrix with \emph{n} rows of draws. 50 | #' 51 | #' @importFrom stats rnorm 52 | #' 53 | #' @noRd 54 | rmvn <- function(n, mu, precision) { 55 | 56 | # Spectral --- 57 | # ev <- eigen(precision, symmetric = TRUE) 58 | # m <- length(ev[["values"]]) 59 | # R <- t(ev[["vectors"]] %*% (t(ev[["vectors"]]) * sqrt(1 / pmax(ev[["values"]], 0)))) 60 | # out <- matrix(rnorm(n * m), nrow = n, ncol = m, byrow = TRUE) %*% R 61 | 62 | # Cholesky --- 63 | m <- ncol(precision) 64 | R <- chol(precision) 65 | out <- t(backsolve(R, matrix(rnorm(n * m), nrow = m, ncol = n, byrow = TRUE))) 66 | 67 | if(!missing(mu)) {out <- sweep(out, 2, mu, "+")} 68 | 69 | return(out) 70 | } 71 | 72 | 73 | #' Check Sparsity 74 | #' 75 | #' @param x Matrix. 
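#'
#' @details Connectivity functions can flag sparsity (and symmetry, below) via an
#'   attribute, e.g. \code{attr(Psi, "sparse") <- TRUE}.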
 76 | #'
 77 | #' @return Returns a logical scalar indicating sparsity.
 78 | #'
 79 | #' @noRd
 80 | is_sparse <- function(x) {
 81 |   if(inherits(x, "function")) {return(isTRUE(attr(x, "sparse")))} # Allow setting via attribute
 82 |   isTRUE(inherits(x, "dgCMatrix"))
 83 | }
 84 | 
 85 | #' Check Symmetry
 86 | #'
 87 | #' @param x A numeric matrix.
 88 | #'
 89 | #' @return Returns a logical scalar indicating symmetry.
 90 | #'
 91 | #' @noRd
 92 | is_symmetric <- function(x) {
 93 |   if(inherits(x, "function")) {return(isTRUE(attr(x, "symmetric")))} # Allow setting via attribute
 94 |   isSymmetric(x)
 95 | }
 96 | 
 97 | 
 98 | #' Construct integer interval from vector
 99 | #'
100 | #' @param x A numeric vector with the start, end, and length of the interval.
101 | #'
102 | #' @return Returns an integer interval.
103 | #'
104 | #' @noRd
105 | i_seq <- function(x) {
106 |   seq.int(x[1], x[2], length.out = x[3])
107 | }
108 | 
109 | 
110 | #' Sum of squares
111 | #'
112 | #' @param ... Numeric vectors or scalars that are squared and summed.
113 | #'
114 | #' @return Returns a numeric scalar.
115 | #'
116 | #' @noRd
117 | sq_sum <- function(...) {
118 |   sum((...)^2)
119 | }
120 | 
121 | 
122 | #' Check whether a package is installed
123 | #'
124 | #' @param package Character scalar.
125 | #'
126 | #' @noRd
127 | has_package <- function(package) {
128 | 
129 |   if(!requireNamespace(package, quietly = TRUE)) {
130 |     stop("Package \'", package, "\' required for this method.", call. = FALSE)
131 |   }
132 | 
133 |   return(NULL)
134 | }
--------------------------------------------------------------------------------
/R/60_interface.R:
--------------------------------------------------------------------------------
  1 | 
  2 | #' Fit a Bayesian model
  3 | #'
  4 | #' @param x Formula or \code{bm} object to sample with.
  5 | #' @param data A \code{\link{data.frame}} containing the variables in the model.
  6 | #' @param n_save,n_burn Integer scalar. Number of draws to store for inference and for the burn-in period.
  8 | #' @param options Settings for the prior setup. See \code{\link{set_options}}.
  9 | #' @param mh Settings to tune the Metropolis-Hastings step. See \code{\link{set_mh}}.
 10 | #' @param verbose Logical scalar. Whether to print status updates.
 11 | #' @param W Numeric matrix (or function to construct one) with the spatial connectivities.
 12 | #' @param X_SLX Numeric matrix with explanatory variables that should be lagged spatially.
 13 | #' @param type Character scalar used to specify the desired model.
 14 | #' @param ... Not used.
 15 | #'
 16 | #' @return Returns a list with draws from the specified Bayesian model and an object to obtain further samples.
 17 | #' @export
 18 | #'
 19 | #' @examples
 20 | #' N <- 100L
 21 | #' beta <- 1:5
 22 | #' X <- matrix(rnorm(N * 5), N, 5)
 23 | #' y <- X %*% beta + rnorm(N)
 24 | #'
 25 | #' bm(y ~ X, n_burn = 100, n_save = 100)
 26 | #'
 27 | #' \donttest{
 28 | #' # Reproduce the linear model in Kuschnig (2022)
 29 | #' blm(log(sales) ~ log(price / cpi) + log(ndi / cpi) +
 30 | #'   factor(name) + factor(year), data = cigarettes)
 31 | #'}
 32 | bm <- function(x, ...) {UseMethod("bm", x)}
 33 | 
 34 | 
 35 | #' @export
 36 | #' @importFrom stats model.frame model.response model.matrix
 37 | #' @rdname bm
 38 | bm.formula <- function(x, data = NULL,
 39 |   n_save = 1000L, n_burn = 500L,
 40 |   options = set_options(), mh = set_mh(), verbose = TRUE,
 41 |   W, X_SLX,
 42 |   type = c("lm", "slx", "sar", "sem", "sdm", "sdem", "sv"),
 43 |   ...)
{ 44 | 45 | # Check inputs --- 46 | call <- match.call() 47 | type <- match.arg(type) 48 | getter <- switch(type, lm = get_blm, slx = get_bslx, sar = get_bsar, sem = get_bsem, 49 | sdm = get_bsdm, sdem = get_bsdem, sv = get_bsv) 50 | 51 | # Prepare data --- 52 | mf <- model.frame(x, data = data) 53 | y <- model.response(mf, "numeric") 54 | X <- model.matrix(attr(mf, "terms"), mf, contrasts = NULL) 55 | 56 | if(all(X[, 1] == X[, 2])) {X <- X[, -1]} # Drop double intercept 57 | if(type %in% c("slx", "sdm", "sdem") && missing(X_SLX)) {X_SLX <- X[, -1]} # Use all regressors except the intercept 58 | 59 | # Get model and estimate --- 60 | mdl <- getter(y = y, X = X, options = options, Psi = W, X_SLX = X_SLX, ...) 61 | 62 | draws <- sample(mdl, n_save = n_save, n_burn = n_burn, mh = mh, verbose = verbose) 63 | 64 | # Done --- 65 | return(structure(list("draws" = draws, "model" = mdl, "call" = call), class = "bm")) 66 | } 67 | 68 | 69 | #' @export 70 | #' @rdname bm 71 | bm.bm <- function(x, n_save = 1000L, n_burn = 0L, verbose = TRUE, ...) { 72 | 73 | draws <- rbind(x$draws, sample(x$model, n_save = n_save, n_burn = n_burn, verbose = verbose)) 74 | 75 | # Done --- 76 | return(structure(list("draws" = draws, "model" = x$model, "call" = x$call), class = "bm")) 77 | 78 | } 79 | 80 | 81 | #' @export 82 | #' @rdname bm 83 | blm <- function(...) {bm(..., type = "lm")} 84 | #' @export 85 | #' @rdname bm 86 | bslx <- function(...) {bm(..., type = "slx")} 87 | #' @export 88 | #' @rdname bm 89 | bsar <- function(...) {bm(..., type = "sar")} 90 | #' @export 91 | #' @rdname bm 92 | bsem <- function(...) {bm(..., type = "sem")} 93 | #' @export 94 | #' @rdname bm 95 | bsdm <- function(...) {bm(..., type = "sdm")} 96 | #' @export 97 | #' @rdname bm 98 | bsdem <- function(...) {bm(..., type = "sdem")} 99 | #' @export 100 | #' @rdname bm 101 | bsv <- function(...) {bm(..., type = "sv")} 102 | -------------------------------------------------------------------------------- /R/12_slx.R: -------------------------------------------------------------------------------- 1 | 2 | #' Bayesian spatially lagged explanatories model 3 | #' 4 | #' @docType class 5 | #' 6 | #' @param parent \code{\link{R6Class}} object to inherit from. 7 | #' 8 | #' @importFrom R6 R6Class 9 | #' 10 | #' @noRd 11 | get_slx_class <- function(parent = NormalGamma) { 12 | 13 | SpatialLX <- R6Class("SpatialLX", inherit = parent, 14 | 15 | public = list( 16 | 17 | initialize_SLX = function(priors, ...) 
{ 18 | 19 | # Store prior settings --- 20 | 21 | if(missing(priors) || is.null(priors$SLX)) {priors <- list(SLX = set_SLX())} 22 | private$SLX$priors <- priors$SLX 23 | 24 | # Build SLX object --- 25 | 26 | # Function to set a new delta and update W(delta) and cache 27 | private$SLX$set_delta <- function(delta) { 28 | if(private$SLX$Psi_fixed) {stop("Connectivity function 'Psi' (SLX) required to update delta.")} 29 | private$SLX$delta <- delta 30 | private$SLX$W <- private$SLX$Psi(delta) 31 | # Update the cache 32 | private$SLX$WX <- private$SLX$W %*% private$SLX$X_SLX 33 | private$SLX$X <- cbind(super$X, private$SLX$WX) 34 | private$SLX$XX <- crossprod(self$X) 35 | private$SLX$Xy <- crossprod(self$X, self$y) 36 | } 37 | 38 | # Initialise MH --- 39 | 40 | # Only sample delta if a proposal scale is set 41 | if(priors$SLX$delta_scale > 0) { 42 | self$MH$SLX_delta <- MH_SLX_delta$new(value = priors$SLX$delta, scale = priors$SLX$delta_scale, 43 | shape_a = priors$SLX$delta_a, shape_b = priors$SLX$delta_b, 44 | lower = priors$SLX$delta_min, upper = priors$SLX$delta_max) 45 | } 46 | 47 | return(NULL) 48 | }, 49 | 50 | # Priority 2 since it updates 'M' (the number of columns in the design matrix) 51 | setup_2SLX = function(X_SLX, Psi_SLX = NULL, ...) { 52 | 53 | # Add new data and adapt the cache --- 54 | private$SLX$X_SLX <- X_SLX 55 | private$cache$M <- private$cache$M + NCOL(X_SLX) # Add columns of spatial lag 56 | 57 | # Work out connectivity --- 58 | 59 | if(is.null(Psi_SLX)) {stop("Please provide a connecitivity function or matrix 'Psi_SLX'.")} 60 | 61 | if(is.matrix(Psi_SLX)) { 62 | private$SLX$Psi_fixed <- TRUE 63 | private$SLX$W <- Psi_SLX 64 | # Set cache manually 65 | private$SLX$WX <- private$SLX$W %*% private$SLX$X_SLX 66 | private$SLX$X <- cbind(super$X, private$SLX$WX) 67 | private$SLX$XX <- crossprod(private$SLX$X) 68 | private$SLX$Xy <- crossprod(private$SLX$X, self$y) 69 | } else { 70 | private$SLX$Psi_fixed <- FALSE 71 | private$SLX$Psi <- Psi_SLX 72 | # Set delta to obtain spatially lagged X 73 | private$SLX$set_delta(private$SLX$priors$delta) 74 | } 75 | 76 | # Set up MH --- 77 | 78 | if(!is.null(self$MH$SLX_delta)) { 79 | if(private$SLX$Psi_fixed) {stop("Connectivity function 'Psi' (SLX) required to sample 'delta'.")} 80 | self$MH$SLX_delta$setup(N = private$cache$N, M = private$cache$M) 81 | } 82 | 83 | return(NULL) 84 | }, 85 | 86 | # Sample connectivity parameter delta --- 87 | sample_extra3 = function() { 88 | 89 | if(!is.null(self$MH$SLX_delta)) { 90 | # Prepare RSS as a function of delta 91 | get_rss <- function(value) { 92 | X <- cbind(super$X, private$SLX$Psi(value) %*% private$SLX$X_SLX) 93 | beta <- solve(private$NG$prec0 + crossprod(X) / self$sigma, 94 | (crossprod(X, self$y) / self$sigma + self$prior_precision %*% private$NG$mu0)) 95 | sq_sum(self$y - X %*% beta) 96 | # sq_sum(self$y - cbind(super$X, private$SLX$Psi(value) %*% private$SLX$X_SLX) %*% self$beta) 97 | } 98 | 99 | # Metropolis-Hastings step for delta 100 | self$MH$SLX_delta$propose() 101 | self$MH$SLX_delta$acceptance(get_rss = get_rss) 102 | self$MH$SLX_delta$finalize() 103 | delta <- self$MH$SLX_delta$get_value # Assign and recompute 104 | if(abs(delta - private$SLX$delta) > 1e-12) {private$SLX$set_delta(delta)} 105 | } 106 | } 107 | ), 108 | 109 | active = list( 110 | 111 | # Variables that are updated using the connectivity --- 112 | X = function() {private$SLX$X}, 113 | XX = function() {private$SLX$XX}, 114 | Xy = function() {crossprod(private$SLX$X, self$y)}, 115 | 116 | # Acessor functions --- 
117 | get_parameters = function() { 118 | pars <- super$get_parameters 119 | pars$delta_SLX <- private$SLX$delta 120 | return(pars) 121 | }, 122 | get_SLX = function() {private$SLX} 123 | ), 124 | 125 | private = list( 126 | SLX = NULL 127 | ) 128 | 129 | ) 130 | } -------------------------------------------------------------------------------- /R/11_shrinkage.R: -------------------------------------------------------------------------------- 1 | 2 | #' Bayesian model with Normal-Gamma shrinkage prior (Polson and Scott, 2010) 3 | #' 4 | #' @docType class 5 | #' 6 | #' @importFrom R6 R6Class 7 | #' 8 | #' @noRd 9 | ShrinkageNormalGamma <- R6Class("ShrinkageNormalGamma", inherit = NormalGamma, 10 | 11 | public = list( 12 | 13 | initialize_SNG = function(priors, ...) { 14 | 15 | # Store prior settings --- 16 | 17 | if(missing(priors) || is.null(priors$SNG)) {priors <- list(SNG = set_SNG())} 18 | private$SNG$priors <- priors$SNG 19 | 20 | # Update meta info --- 21 | 22 | private$meta$priortype <- "Normal-Gamma shrinkage" 23 | 24 | # Initialise MH --- 25 | 26 | # Only sample theta if a proposal scale is set 27 | if(priors$SNG$theta_scale > 0) { 28 | self$MH$SNG_theta <- MH_SNG_theta$new(value = priors$SNG$theta, scale = priors$SNG$theta_scale, 29 | rate = priors$SNG$theta_a) 30 | } 31 | 32 | return(NULL) 33 | }, 34 | 35 | setup_SNG = function(...) { 36 | 37 | # Set up quantities --- 38 | 39 | private$SNG$tau <- rep(private$SNG$priors$tau, times = private$cache$M) 40 | private$SNG$lambda <- private$SNG$priors$lambda 41 | private$SNG$theta <- private$SNG$priors$theta 42 | # Overwrite prior precision 43 | self$prior_precision <- diag(1 / private$SNG$tau, nrow = private$cache$M) 44 | 45 | # Set up MH --- 46 | if(!is.null(self$MH$SNG_theta)) {self$MH$SNG_theta$setup()} 47 | 48 | return(NULL) 49 | }, 50 | 51 | sample_shrinkage = function() { 52 | 53 | # Sample Normal-Gamma shrinkage --- 54 | 55 | # Lambda from Gamma 56 | private$SNG$lambda <- rgamma(1L, shape = private$SNG$priors$lambda_a + private$cache$M * private$SNG$theta, 57 | rate = private$SNG$priors$lambda_b + (private$SNG$theta * sum(private$SNG$tau)) / 2) 58 | # Tau from GIG 59 | for(i in seq_along(private$SNG$tau)) { 60 | private$SNG$tau[i] <- max(GIGrvg::rgig(1L, lambda = private$SNG$theta - 0.5, 61 | chi = (self$beta[i] - self$prior_mean[i])^2, psi = private$SNG$lambda * private$SNG$theta), 1e-12) 62 | } 63 | 64 | # Update prior precision 65 | self$prior_precision <- diag(1 / private$SNG$tau, nrow = private$cache$M) 66 | 67 | # Metropolis-Hastings step for theta 68 | if(!is.null(self$MH$SNG_theta)) { 69 | self$MH$SNG_theta$sample(tau = private$SNG$tau, lambda = private$SNG$lambda) 70 | private$SNG$theta <- self$MH$SNG_theta$get_value 71 | } 72 | } 73 | ), 74 | 75 | active = list( 76 | # Access functions --- 77 | get_SNG = function() {private$SNG} 78 | ), 79 | 80 | private = list( 81 | SNG = NULL 82 | ) 83 | 84 | ) 85 | 86 | 87 | #' Bayesian model with Horseshoe shrinkage prior (Makalic and Schmidt, 2015) 88 | #' 89 | #' @docType class 90 | #' 91 | #' @importFrom R6 R6Class 92 | #' 93 | #' @noRd 94 | Horseshoe <- R6Class("Horseshoe", inherit = NormalGamma, 95 | 96 | public = list( 97 | 98 | initialize_HS = function(priors, ...) 
{ 99 | 100 | # Store prior settings --- 101 | 102 | if(missing(priors) || is.null(priors$HS)) {priors <- list(HS = set_HS())} 103 | private$HS$priors <- priors$HS 104 | 105 | # Update meta info --- 106 | 107 | private$meta$priortype <- "Horseshoe" 108 | 109 | return(NULL) 110 | }, 111 | 112 | setup_HS = function(...) { 113 | 114 | # Set up quantities --- 115 | 116 | private$HS$lambda <- rep(private$HS$priors$lambda, times = private$cache$M) 117 | private$HS$nu <- rep(private$HS$priors$nu, times = private$cache$M) 118 | private$HS$tau <- private$HS$priors$tau 119 | private$HS$zeta <- private$HS$priors$zeta 120 | # Overwrite prior precision 121 | self$prior_precision <- diag(1 / c(private$HS$lambda * private$HS$tau), nrow = private$cache$M) 122 | }, 123 | 124 | sample_shrinkage = function() { 125 | 126 | # Sample Horseshoe shrinkage --- 127 | 128 | # Lambda from inverse Gamma 129 | private$HS$lambda <- 1 / rgamma(private$cache$M, shape = 1, 130 | rate = 1 / private$HS$nu + self$beta^2 / (2 * private$HS$tau * self$sigma)) 131 | # Tau from inverse Gamma 132 | private$HS$tau <- 1 / rgamma(1L, shape = (private$cache$M + 1L) / 2, 133 | rate = 1 / private$HS$zeta + sum(self$beta^2 / private$HS$lambda) / (2 * self$sigma)) 134 | # Tau from inverse Gamma 135 | private$HS$nu <- 1 / rgamma(private$cache$M, shape = 1, rate = 1 + 1 / private$HS$lambda) 136 | # Zeta from inverse Gamma 137 | private$HS$zeta <- 1 / rgamma(1L, shape = 1, rate = 1 + 1 / private$HS$tau) 138 | 139 | # Update prior precision 140 | self$prior_precision <- diag(1 / pmax((private$HS$lambda * private$HS$tau), 1e-12), nrow = private$cache$M) 141 | } 142 | ), 143 | 144 | active = list( 145 | # Access functions --- 146 | get_HS = function() {private$HS} 147 | ), 148 | 149 | private = list( 150 | HS = NULL 151 | ) 152 | 153 | ) 154 | -------------------------------------------------------------------------------- /R/40_setup.R: -------------------------------------------------------------------------------- 1 | 2 | #' Build a Bayesian linear model 3 | #' 4 | #' @param y Numeric vector with the dependent variable. 5 | #' @param X Numeric matrix with the explanatory variables. 6 | #' @param options List with settings and prior information. 7 | #' 8 | #' @return Returns an object with the desired Bayesian model. 9 | #' 10 | #' @noRd 11 | get_blm <- function(y, X, options = set_options(), ...) { 12 | 13 | class <- switch(options$type, Independent = NormalGamma, 14 | Conjugate = ConjugateNormalGamma, Shrinkage = ShrinkageNormalGamma, Horseshoe = Horseshoe) 15 | 16 | mdl <- class$new(priors = options$priors) 17 | mdl$setup(y = y, X = X, ...) 18 | 19 | return(mdl) 20 | } 21 | 22 | 23 | #' Build a Bayesian spatially lagged explanatories model 24 | #' 25 | #' @inheritParams get_blm 26 | #' @param Psi Numeric matrix (or function to construct one) with the spatial connectivities. 27 | #' @param X_SLX Numeric matrix with explanatory variables that should be lagged spatially. 28 | #' 29 | #' @return Returns an object with the desired Bayesian model. 30 | #' 31 | #' @noRd 32 | get_bslx <- function(y, X, options = set_options(), Psi, X_SLX, ...) { 33 | 34 | class <- get_slx_class(parent = switch(options$type, Independent = NormalGamma, 35 | Conjugate = ConjugateNormalGamma, Shrinkage = ShrinkageNormalGamma, Horseshoe = Horseshoe)) 36 | 37 | mdl <- class$new(priors = options$priors) 38 | mdl$setup(y = y, X = X, Psi_SLX = Psi, X_SLX = X_SLX, ...) 
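  # If 'Psi' is a function of the decay parameter delta (rather than a fixed
  # matrix), delta is sampled via Metropolis-Hastings when the prior's
  # 'delta_scale' is positive, see R/12_slx.R.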
39 | 40 | return(mdl) 41 | } 42 | 43 | 44 | #' Build a Bayesian spatial autoregressive model 45 | #' 46 | #' @inheritParams get_blm 47 | #' @param Psi Numeric matrix (or function to construct one) with the spatial connectivities. 48 | #' 49 | #' @return Returns an object with the desired Bayesian model. 50 | #' 51 | #' @noRd 52 | get_bsar <- function(y, X, options = set_options(), Psi, ...) { 53 | 54 | class <- get_sar_class(parent = switch(options$type, Independent = NormalGamma, 55 | Conjugate = ConjugateNormalGamma, Shrinkage = ShrinkageNormalGamma, Horseshoe = Horseshoe)) 56 | 57 | mdl <- class$new(priors = options$priors) 58 | mdl$setup(X = X, y = y, Psi_SAR = Psi, ...) 59 | 60 | return(mdl) 61 | } 62 | 63 | 64 | #' Build a Bayesian spatial error model 65 | #' 66 | #' @inheritParams get_blm 67 | #' @param Psi Numeric matrix (or function to construct one) with the spatial connectivities. 68 | #' 69 | #' @return Returns an object with the desired Bayesian model. 70 | #' 71 | #' @noRd 72 | get_bsem <- function(y, X, options = set_options(), Psi, ...) { 73 | 74 | class <- get_sem_class(parent = switch(options$type, Independent = NormalGamma, 75 | Conjugate = ConjugateNormalGamma, Shrinkage = ShrinkageNormalGamma, Horseshoe = Horseshoe)) 76 | 77 | mdl <- class$new(priors = options$priors) 78 | mdl$setup(X = X, y = y, Psi_SEM = Psi, ...) 79 | 80 | return(mdl) 81 | } 82 | 83 | 84 | #' Build a Bayesian spatial Durbin model 85 | #' 86 | #' @inheritParams get_blm 87 | #' @inheritParams get_bslx 88 | #' @param Psi,Psi_SLX Numeric matrix (or function to construct one) with the spatial connectivities. 89 | #' 90 | #' @return Returns an object with the desired Bayesian model. 91 | #' 92 | #' @noRd 93 | get_bsdm <- function(y, X, options = set_options(), X_SLX, Psi, Psi_SLX, ...) { 94 | 95 | class <- get_sar_class(parent = get_slx_class(parent = switch(options$type, Independent = NormalGamma, 96 | Conjugate = ConjugateNormalGamma, Shrinkage = ShrinkageNormalGamma, Horseshoe = Horseshoe))) 97 | 98 | mdl <- class$new(priors = options$priors) 99 | mdl$setup(X = X, y = y, X_SLX = X_SLX, Psi_SAR = Psi, Psi_SLX = if(missing(Psi_SLX)) Psi else Psi_SLX, ...) 100 | 101 | return(mdl) 102 | } 103 | 104 | 105 | #' Build a Bayesian spatial Durbin error model 106 | #' 107 | #' @inheritParams get_blm 108 | #' @inheritParams get_bslx 109 | #' @param Psi,Psi_SLX Numeric matrix (or function to construct one) with the spatial connectivities. 110 | #' 111 | #' @return Returns an object with the desired Bayesian model. 112 | #' 113 | #' @noRd 114 | get_bsdem <- function(y, X, options = set_options(), X_SLX, Psi, Psi_SLX, ...) { 115 | 116 | class <- get_sem_class(parent = get_slx_class(parent = switch(options$type, Independent = NormalGamma, 117 | Conjugate = ConjugateNormalGamma, Shrinkage = ShrinkageNormalGamma, Horseshoe = Horseshoe))) 118 | 119 | mdl <- class$new(priors = options$priors) 120 | mdl$setup(X = X, y = y, X_SLX = X_SLX, Psi_SEM = Psi, Psi_SLX = if(missing(Psi_SLX)) Psi else Psi_SLX, ...) 121 | 122 | return(mdl) 123 | } 124 | 125 | 126 | #' Build a Bayesian stochastic volatility model 127 | #' 128 | #' @inheritParams get_blm 129 | #' 130 | #' @return Returns an object with the desired Bayesian model. 131 | #' 132 | #' @noRd 133 | get_bsv <- function(y, X, options = set_options(), ...) 
{ 134 | 135 | class <- get_sv_class(parent = switch(options$type, Independent = NormalGamma, 136 | Conjugate = ConjugateNormalGamma, Shrinkage = ShrinkageNormalGamma, Horseshoe = Horseshoe)) 137 | 138 | mdl <- class$new(priors = options$priors) 139 | mdl$setup(y = y, X = X) 140 | 141 | return(mdl) 142 | } 143 | -------------------------------------------------------------------------------- /paper/5_vis.R: -------------------------------------------------------------------------------- 1 | 2 | library("dplyr") # or dplyr 3 | 4 | # Beta-Gamma shrinkage --- 5 | 6 | n_draw <- 1000 7 | n_beta <- 1000 8 | x_vals <- seq(0, 1, length.out = n_beta + 2)[-c(1, n_beta + 2)] 9 | 10 | gamma_coef <- \(mu, var) c("shape" = mu^2 / var, "rate" = 1 / (var / mu)) 11 | gamma_shape <- \(mu, var) gamma_coef(mu, var)["shape"] 12 | gamma_rate <- \(mu, var) gamma_coef(mu, var)["rate"] 13 | 14 | pars <- expand.grid( 15 | "mu" = c(0.01, 1, 10, 100), 16 | "var" = c(0.01, 0.1, 1, 10, 100) 17 | ) |> rowwise() |> 18 | mutate(shape = gamma_shape(mu, var), rate = gamma_rate(mu, var)) 19 | 20 | out <- vector("list", nrow(pars)) 21 | for(i in seq_along(out)) { 22 | out[[i]] <- matrix(NA_real_, n_draw, n_beta) 23 | for(j in seq_len(n_draw)) { 24 | tau <- rgamma(1L, pars[i, ][["shape"]], pars[i, ][["rate"]]) 25 | out[[i]][j, ] <- dbeta(x_vals, 1 + tau, 1 + tau) 26 | } 27 | } 28 | 29 | op <- par(mfrow = c(5, 4), mar = c(2, 2, 2, 2)) 30 | for(i in seq_along(out)) { 31 | plot(x = (x_vals - .5) * 2, y = colMeans(out[[i]]), type = "l", 32 | main = paste0("mu = ", pars[[i, 1]], ", var = ", pars[[i, 2]]), 33 | # main = paste0("shape = ", pars[[i, 3]], ", rate = ", pars[[i, 4]]), 34 | ylim = c(0, 5) 35 | # ylim = range(apply(out[[i]], 2, quantile, c(0.01, 0.99))) 36 | ) 37 | lines(x = (x_vals - .5) * 2, y = apply(out[[i]], 2, min), lty = 2, col = "darkred") 38 | lines(x = (x_vals - .5) * 2, y = apply(out[[i]], 2, max), lty = 2, col = "darkgreen") 39 | lines(x = (x_vals - .5) * 2, y = apply(out[[i]], 2, quantile, 0.9), lty = 3) 40 | lines(x = (x_vals - .5) * 2, y = apply(out[[i]], 2, quantile, 0.1), lty = 3) 41 | } 42 | 43 | op <- par(mfrow = c(5, 4), mar = c(2, 2, 2, 2)) 44 | for(i in seq_along(out)) { 45 | plot(x = (x_vals - .5) * 2, y = colMeans(out[[i]]), type = "l", 46 | main = paste0("mu = ", pars[[i, 1]], ", var = ", pars[[i, 2]]), 47 | # main = paste0("shape = ", pars[[i, 3]], ", rate = ", pars[[i, 4]]), 48 | ylim = c(0, 5) 49 | # ylim = range(apply(out[[i]], 2, quantile, c(0.01, 0.99))) 50 | ) 51 | lines(x = (x_vals - .5) * 2, y = apply(out[[i]], 2, min), lty = 2, col = "darkred") 52 | lines(x = (x_vals - .5) * 2, y = apply(out[[i]], 2, max), lty = 2, col = "darkgreen") 53 | lines(x = (x_vals - .5) * 2, y = apply(out[[i]], 2, quantile, 0.9), lty = 3) 54 | lines(x = (x_vals - .5) * 2, y = apply(out[[i]], 2, quantile, 0.1), lty = 3) 55 | } 56 | 57 | 58 | mean <- shapes / rates 59 | variance <- shapes / rates^2 60 | 61 | x <- matrix(NA_real_, ndraw, nbeta) 62 | out1 <- out2 <- out3 <- list(x, x, x, x, x) 63 | 64 | for(i in seq(ndraw)) { 65 | tau <- rgamma(1, 1, .1) 66 | out1[[1]][i, ] <- dbeta(x_vals, 1 + tau, 1 + tau) 67 | tau <- rgamma(1, 1, .5) 68 | out1[[2]][i, ] <- dbeta(x_vals, 1 + tau, 1 + tau) 69 | tau <- rgamma(1, 1, 1) 70 | out1[[3]][i, ] <- dbeta(x_vals, 1 + tau, 1 + tau) 71 | tau <- rgamma(1, 1, 1.5) 72 | out1[[4]][i, ] <- dbeta(x_vals, 1 + tau, 1 + tau) 73 | tau <- rgamma(1, 1, 2) 74 | out1[[5]][i, ] <- dbeta(x_vals, 1 + tau, 1 + tau) 75 | } 76 | for(i in seq(ndraw)) { 77 | tau <- rgamma(1, .1, 1) 78 | out2[[1]][i, 
] <- dbeta(x_vals, 1 + tau, 1 + tau) 79 | tau <- rgamma(1, .5, 1) 80 | out2[[2]][i, ] <- dbeta(x_vals, 1 + tau, 1 + tau) 81 | tau <- rgamma(1, 1, 1) 82 | out2[[3]][i, ] <- dbeta(x_vals, 1 + tau, 1 + tau) 83 | tau <- rgamma(1, 1.5, 1) 84 | out2[[4]][i, ] <- dbeta(x_vals, 1 + tau, 1 + tau) 85 | tau <- rgamma(1, 2, 1) 86 | out2[[5]][i, ] <- dbeta(x_vals, 1 + tau, 1 + tau) 87 | } 88 | for(i in seq(ndraw)) { 89 | tau <- rgamma(1, .5, 1) 90 | out3[[1]][i, ] <- dbeta(x_vals, 1 + tau, 1 + tau) 91 | tau <- rgamma(1, 1, .5) 92 | out3[[2]][i, ] <- dbeta(x_vals, 1 + tau, 1 + tau) 93 | tau <- rgamma(1, 1, 1) 94 | out3[[3]][i, ] <- dbeta(x_vals, 1 + tau, 1 + tau) 95 | tau <- rgamma(1, 2, 1) 96 | out3[[4]][i, ] <- dbeta(x_vals, 1 + tau, 1 + tau) 97 | tau <- rgamma(1, 1, 2) 98 | out3[[5]][i, ] <- dbeta(x_vals, 1 + tau, 1 + tau) 99 | } 100 | 101 | cols <- viridisLite::viridis(5, option = "D", end = 0.8) 102 | ltys <- c(1, 2, 1, 2, 1) 103 | 104 | cols <- c("#4B0055", "#006290", "#00AC8E", "#A6DA42") 105 | ltys <- c(4, 3, 2, 1) 106 | 107 | pdf("output/beta-gamma.pdf", width = 7, height = 3.4) 108 | png("output/beta-exponential.png", width = 1000, height = 400, pointsize = 24) 109 | op <- par(mfrow = c(1, 1), mar = c(2, 2, 0.1, 0.1), bg = "transparent") 110 | 111 | out <- out1 112 | out[[4]] <- NULL 113 | y <- lapply(out, function(x) colSums(x) / ndraw) 114 | plot(y[[1]], x = x_vals * 2 - 1, type = "l", col = cols[1], lty = ltys[1], lwd = 7, 115 | ylim = c(0, max(sapply(y, max))), ylab = "Density", xlab = "Value") 116 | for(i in seq_along(out)[-1]) { 117 | lines(y[[i]], x = x_vals * 2 - 1, col = cols[i], lty = ltys[i], lwd = 7) 118 | } 119 | legend("topright", title = "rate", lwd = 7, bg = "transparent", 120 | legend = c("0.1", "0.5", "1.0", "2.0"), cex = 1.1, 121 | y.intersp = 0.75, x.intersp = 0.75, 122 | col = cols, lty = ltys) 123 | dev.off() 124 | 125 | out <- out1 126 | y <- lapply(out, function(x) colSums(x) / ndraw) 127 | plot(y[[1]], x = x_vals, type = "l", col = cols[1], lty = ltys[1], lwd = 2, 128 | ylim = c(0, max(sapply(y, max))), ylab = "Density", xlab = "Value") 129 | for(i in seq_along(out)[-1]) { 130 | lines(y[[i]], x = x_vals, col = cols[i], lty = ltys[i], lwd = 2) 131 | } 132 | legend("topleft", title = "Rate", lwd = 2, bg = "white", 133 | legend = c(".1", ".5", "1", "1.5", "2"), 134 | col = cols, lty = ltys) 135 | 136 | out <- out3 137 | y <- lapply(out, function(x) colSums(x) / ndraw) 138 | plot(y[[1]], x = x_vals, type = "l", col = cols[1], lty = ltys[1], lwd = 2, 139 | ylim = c(0, max(sapply(y, max))), ylab = "Density", xlab = "Value") 140 | for(i in seq_along(out)[-1]) { 141 | lines(y[[i]], x = x_vals, col = cols[i], lty = ltys[i], lwd = 2) 142 | } 143 | legend("topleft", title = "Shape-Rate", lwd = 2, bg = "white", 144 | legend = c(".5-1", "1-.5", "1-1", "2-1", "1-2"), 145 | col = cols, lty = ltys) 146 | 147 | -------------------------------------------------------------------------------- /R/15_sem.R: -------------------------------------------------------------------------------- 1 | 2 | #' Bayesian spatial error model 3 | #' 4 | #' @docType class 5 | #' 6 | #' @param parent \code{\link{R6Class}} object to inherit from. 7 | #' 8 | #' @importFrom R6 R6Class 9 | #' @importFrom stats splinefun 10 | #' 11 | #' @noRd 12 | get_sem_class <- function(parent = NormalGamma) { 13 | 14 | SpatialEM <- R6Class("SpatialEM", inherit = parent, 15 | 16 | public = list( 17 | 18 | initialize_SEM = function(priors, ...) 
{ 19 | 20 | # Store prior settings --- 21 | 22 | if(missing(priors) || is.null(priors$SEM)) {priors <- list(SEM = set_SEM())} 23 | private$SEM$priors <- priors$SEM 24 | 25 | # Build SEM object --- 26 | 27 | # Function to update the normalizer, i.e. s(lambda) = I - lambda W, with new parameters 28 | private$SEM$set_normalizer <- function(lambda = private$SEM$lambda, W = private$SEM$W) { 29 | private$SEM$normalizer <- diag(private$cache$N) - lambda * W 30 | # Apply to y and X 31 | private$SEM$y <- private$SEM$normalizer %*% super$y 32 | private$SEM$X <- private$SEM$normalizer %*% super$X 33 | # Update the cache 34 | private$SEM$Xy <- crossprod(private$SEM$X, private$SEM$y) 35 | private$SEM$XX <- crossprod(private$SEM$X) 36 | } 37 | 38 | # Function to set a new lambda and update the normalizer 39 | private$SEM$set_lambda <- function(lambda) { 40 | private$SEM$lambda <- lambda 41 | # Update the normalizer 42 | private$SEM$set_normalizer(lambda = lambda) 43 | } 44 | 45 | # Initialise MH --- 46 | 47 | self$MH$SEM_lambda <- MH_SEM_lambda$new(value = priors$SEM$lambda, scale = priors$SEM$lambda_scale, 48 | shape_a = priors$SEM$lambda_a, shape_b = priors$SEM$lambda_b, 49 | lower = priors$SEM$lambda_min, upper = priors$SEM$lambda_max) 50 | }, 51 | 52 | # Medium priority 8 for volatility filter 53 | setup_8SEM = function(Psi_SEM = NULL, ldet_SEM = list(grid = FALSE, i_lambda = c(-1, 1 - 1e-12, 100L), reps = 1L), 54 | ...) { 55 | 56 | # Work out connectivity --- 57 | 58 | if(is.null(Psi_SEM)) {stop("Please provide a connecitivity matrix 'Psi_SEM'.")} 59 | 60 | private$SEM$Psi_fixed <- TRUE 61 | private$SEM$W <- private$SEM$Psi <- Psi_SEM 62 | # Set cache 63 | private$SEM$Wy <- private$SEM$W %*% super$y 64 | private$SEM$WX <- private$SEM$W %*% super$X 65 | # Set lambda and obtain the normalizer 66 | private$SEM$set_lambda(private$SEM$priors$lambda) 67 | 68 | # Set up MH --- 69 | 70 | self$MH$SEM_lambda$setup(N = private$cache$N, M = private$cache$M) 71 | 72 | if(!is.null(self$MH$SEM_delta)) { 73 | if(private$SEM$Psi_fixed) {stop("Connectivity function 'Psi' (SEM) required to sample 'delta'.")} 74 | self$MH$SEM_delta$setup(N = private$cache$N, M = private$cache$M) 75 | } 76 | 77 | # Set up the log determinant --- 78 | 79 | # Initialise object with options 80 | private$SEM$ldet <- list(size = private$cache$N / ldet_SEM$reps, reps = ldet_SEM$reps, # Kronecker settings 81 | grid = isTRUE(ldet_SEM$grid), i_lambda = ldet_SEM$i_lambda) # Grid settings 82 | 83 | # If W is repeated via Kronecker product we only need one submatrix and scale up using `reps` later 84 | private$SEM$ldet$get_W <- if(private$SEM$ldet$reps == 1) { 85 | function(W = private$SEM$W) {W} 86 | } else { # Just retrieve the sub-matrix 87 | function(W = private$SEM$W) {W[seq(private$SEM$ldet$size), seq(private$SEM$ldet$size)]} 88 | } 89 | 90 | # Provide a function for the log-determinant 91 | if(private$SEM$ldet$grid) { # If a grid is requested we fit a spline to a grid over lambda 92 | 93 | pars <- i_seq(private$SEM$ldet$i_lambda) 94 | ldets <- vapply(pars, function(x) { 95 | determinant(diag(private$SEM$ldet$size) - x * private$SEM$ldet$get_W(), 96 | logarithm = TRUE)$modulus * private$SEM$ldet$reps 97 | }, numeric(1L)) 98 | private$SEM$ldet$splinefun <- splinefun(pars, y = ldets) 99 | private$SEM$ldet$get_ldet <- function(lambda, ...) 
{private$SEM$ldet$splinefun(x = lambda)} 100 | 101 | } else { 102 | 103 | private$SEM$ldet$ev <- eigen(private$SEM$ldet$get_W(), 104 | symmetric = is_symmetric(private$SEM$Psi), only.values = TRUE)$values 105 | # The log-determinant of I - lambda W is just the sum of log(1 - lambda * omega) 106 | private$SEM$ldet$get_ldet <- function(lambda, ...) { 107 | Re(sum(log(1 - lambda * private$SEM$ldet$ev))) * private$SEM$ldet$reps 108 | } 109 | } 110 | }, 111 | 112 | # Sample spatial error parameter lambda --- 113 | sample_volatility = function() { 114 | 115 | # Prepare RSS and the log-determinant as functions of lambda 116 | get_rss <- function(value) { 117 | y_s <- super$y - value * private$SEM$Wy 118 | X_s <- super$X - value * private$SEM$WX 119 | sq_sum(y_s) - sq_sum(crossprod(qr.Q(qr(X_s)), y_s)) # See e.g. Bivand and Piras, 2015 120 | } 121 | get_ldet <- function(value) {private$SEM$ldet$get_ldet(lambda = value)} 122 | 123 | # Metropolis-Hastings step for lambda 124 | self$MH$SEM_lambda$propose() 125 | self$MH$SEM_lambda$acceptance(get_rss = get_rss, get_ldet = get_ldet) 126 | self$MH$SEM_lambda$finalize() 127 | lambda <- self$MH$SEM_lambda$get_value # Assign and recompute 128 | private$SEM$set_lambda(lambda) # Do it always to accommodate changing X 129 | } 130 | 131 | ), 132 | 133 | active = list( 134 | 135 | # Variables that are adapted using the normalizer --- 136 | y = function() {private$SEM$y}, 137 | X = function() {private$SEM$X}, 138 | XX = function() {private$SEM$XX}, 139 | Xy = function() {private$SEM$Xy}, 140 | 141 | # Access functions --- 142 | get_parameters = function() { 143 | pars <- super$get_parameters 144 | pars$lambda_SEM <- private$SEM$lambda 145 | return(pars) 146 | }, 147 | get_SEM = function() {private$SEM} 148 | ), 149 | 150 | private = list( 151 | SEM = NULL 152 | ) 153 | 154 | ) 155 | } 156 | -------------------------------------------------------------------------------- /R/15_sar.R: -------------------------------------------------------------------------------- 1 | 2 | #' Bayesian spatial autoregressive model 3 | #' 4 | #' @docType class 5 | #' 6 | #' @param parent \code{\link{R6Class}} object to inherit from. 7 | #' 8 | #' @importFrom R6 R6Class 9 | #' @importFrom stats splinefun 10 | #' 11 | #' @noRd 12 | get_sar_class <- function(parent = NormalGamma) { 13 | 14 | SpatialAR <- R6Class("SpatialAR", inherit = parent, 15 | 16 | public = list( 17 | 18 | initialize_SAR = function(priors, ...) { 19 | 20 | # Store prior settings --- 21 | 22 | if(missing(priors) || is.null(priors$SAR)) {priors <- list(SAR = set_SAR())} 23 | private$SAR$priors <- priors$SAR 24 | 25 | # Build SAR object --- 26 | 27 | # Function to update the latent, i.e. 
z(lambda) = y - lambda W y, with new parameters 28 | private$SAR$set_latent <- function(lambda = private$SAR$lambda, Wy = self$Wy) { 29 | private$SAR$z <- super$y - lambda * Wy 30 | } 31 | 32 | # Function to set a new lambda and update the latent 33 | private$SAR$set_lambda <- function(lambda) { 34 | private$SAR$lambda <- lambda 35 | # Update the latent 36 | private$SAR$set_latent(lambda = lambda) 37 | } 38 | 39 | # Initialise MH --- 40 | 41 | self$MH$SAR_lambda <- MH_SAR_lambda$new(value = priors$SAR$lambda, scale = priors$SAR$lambda_scale, 42 | shape_a = priors$SAR$lambda_a, shape_b = priors$SAR$lambda_b, 43 | lower = priors$SAR$lambda_min, upper = priors$SAR$lambda_max) 44 | }, 45 | 46 | # High priority 9 for latent 47 | setup_9SAR = function(Psi_SAR = NULL, ldet_SAR = list(grid = FALSE, i_lambda = c(-1, 1 - 1e-12, 100L), reps = 1L), 48 | ...) { 49 | 50 | # Work out connectivity --- 51 | 52 | if(is.null(Psi_SAR)) {stop("Please provide a connectivity matrix 'Psi_SAR'.")} 53 | 54 | private$SAR$Psi_fixed <- TRUE 55 | private$SAR$W <- private$SAR$Psi <- Psi_SAR 56 | # # Set cache 57 | private$SAR$Wy <- private$SAR$W %*% super$y 58 | # Set lambda and obtain the latent 59 | private$SAR$set_lambda(private$SAR$priors$lambda) 60 | 61 | # Set up MH --- 62 | 63 | self$MH$SAR_lambda$setup(N = private$cache$N, M = private$cache$M) 64 | 65 | # Set up the log determinant --- 66 | 67 | # Initialise object with options 68 | private$SAR$ldet <- list(size = private$cache$N / ldet_SAR$reps, reps = ldet_SAR$reps, # Kronecker settings 69 | grid = isTRUE(ldet_SAR$grid), i_lambda = ldet_SAR$i_lambda) # Grid settings 70 | 71 | # If W is repeated via Kronecker product we only need one submatrix and scale up using `reps` later 72 | private$SAR$ldet$get_W <- if(private$SAR$ldet$reps == 1) { 73 | function(W = private$SAR$W) {W} 74 | } else { # Just retrieve the sub-matrix 75 | function(W = private$SAR$W) {W[seq(private$SAR$ldet$size), seq(private$SAR$ldet$size)]} 76 | } 77 | 78 | # Provide a function for the log-determinant 79 | if(private$SAR$ldet$grid) { # If a grid is requested we fit a spline to a grid over lambda 80 | 81 | pars <- i_seq(private$SAR$ldet$i_lambda) 82 | ldets <- vapply(pars, function(x) { 83 | determinant(diag(private$SAR$ldet$size) - x * private$SAR$ldet$get_W(), 84 | logarithm = TRUE)$modulus * private$SAR$ldet$reps 85 | }, numeric(1L)) 86 | private$SAR$ldet$splinefun <- splinefun(pars, y = ldets) 87 | private$SAR$ldet$get_ldet <- function(lambda, ...) {private$SAR$ldet$splinefun(x = lambda)} 88 | 89 | } else { # Otherwise we use a spectral decomposition 90 | 91 | private$SAR$ldet$ev <- eigen(private$SAR$ldet$get_W(), 92 | symmetric = is_symmetric(private$SAR$Psi), only.values = TRUE)$values 93 | # The log-determinant of I - lambda W is just the sum of log(1 - lambda * omega) 94 | private$SAR$ldet$get_ldet <- function(lambda, ...) 
{ 95 | Re(sum(log(1 - lambda * private$SAR$ldet$ev))) * private$SAR$ldet$reps 96 | } 97 | } 98 | }, 99 | 100 | # Sample spatial autoregressive parameter lambda --- 101 | sample_latent = function() { 102 | 103 | # Prepare RSS and the log-determinant as functions of lambda 104 | get_rss <- {function() { 105 | prec_ch <- chol(private$NG$prec0 + self$XX / self$sigma) 106 | b0 <- backsolve(prec_ch, forwardsolve(prec_ch, (private$NG$prec0 %*% private$NG$mu0 + crossprod(self$X, super$y) / self$sigma), 107 | upper.tri = TRUE, transpose = TRUE)) 108 | b1 <- backsolve(prec_ch, forwardsolve(prec_ch, (private$NG$prec0 %*% private$NG$mu0 + self$XWy / self$sigma), 109 | upper.tri = TRUE, transpose = TRUE)) 110 | e0 <- super$y - self$X %*% b0 111 | e1 <- self$Wy - self$X %*% b1 112 | e0e0 <- sum(e0^2) 113 | e1e0 <- sum(e1 * e0) 114 | e1e1 <- sum(e1^2) 115 | return(function(value) {(e0e0) - (2 * value * e1e0) + (value^2 * e1e1)}) 116 | }}() 117 | get_ldet <- function(value) {private$SAR$ldet$get_ldet(lambda = value)} 118 | 119 | # Metropolis-Hastings step for lambda 120 | self$MH$SAR_lambda$propose() 121 | self$MH$SAR_lambda$acceptance(get_rss = get_rss, get_ldet = get_ldet) 122 | self$MH$SAR_lambda$finalize() 123 | lambda <- self$MH$SAR_lambda$get_value # Assign and recompute 124 | if(abs(lambda - private$SAR$lambda) > 1e-12) {private$SAR$set_lambda(lambda)} 125 | } 126 | 127 | ), 128 | 129 | active = list( 130 | 131 | # Variables that are adapted using the latent --- 132 | y = function() {private$SAR$z}, 133 | Xy = function() {crossprod(self$X, private$SAR$z)}, 134 | Wy = function() {private$SAR$Wy}, 135 | XWy = function() {crossprod(self$X, private$SAR$Wy)}, 136 | 137 | # Access functions --- 138 | get_parameters = function() { 139 | pars <- super$get_parameters 140 | pars$lambda_SAR <- private$SAR$lambda 141 | return(pars) 142 | }, 143 | get_effects = function() { # To-do: use eigendecomposition or provide alternative methods to be more efficient 144 | total <- as.numeric(self$beta / (1 - private$SAR$lambda)) 145 | direct <- as.numeric(sum(diag(solve(diag(private$cache$N) - private$SAR$lambda * private$SAR$W))) / 146 | private$cache$N * self$beta) 147 | 148 | list("total" = total, "direct" = direct, "indirect" = total - direct) 149 | }, 150 | get_SAR = function() {private$SAR} 151 | ), 152 | private = list( 153 | SAR = NULL 154 | ) 155 | 156 | ) 157 | } 158 | -------------------------------------------------------------------------------- /paper/2b_outputs_dist.R: -------------------------------------------------------------------------------- 1 | 2 | # Generate outputs --- 3 | library("dplyr") 4 | library("spatialreg") 5 | library("ggplot2") 6 | library("ggdist") # Dotplots 7 | 8 | # Prepare object --- 9 | d_te <- rbind( 10 | as_tibble(out_bslx$draws) %>% transmute(model = "SLX", 11 | price = beta2, price_i = beta78, 12 | income = beta3, income_i = beta79, delta = NA_real_), 13 | as_tibble(out_slxd2$draws) %>% transmute(model = "SLX(2)", 14 | price = beta2, price_i = beta78, 15 | income = beta3, income_i = beta79, delta = 2), 16 | as_tibble(out_slxd3$draws) %>% transmute(model = "SLX(3)", 17 | price = beta2, price_i = beta78, 18 | income = beta3, income_i = beta79, delta = 3), 19 | as_tibble(out_slxd4$draws) %>% transmute(model = "SLX(4)", 20 | price = beta2, price_i = beta78, 21 | income = beta3, income_i = beta79, delta = 4), 22 | as_tibble(out_slxdx$draws) %>% transmute(model = "SLX(delta)", 23 | price = beta2, price_i = beta78, 24 | income = beta3, income_i = beta79, delta = delta_SLX) 25 | ) 26 
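# Hypothetical sanity check (not part of the original script): the draw
# matrices carry named columns, so the beta2/beta78 and beta3/beta79 mapping
# used above can be verified with, e.g., colnames(out_bslx$draws).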
| 27 | # Build a table of coefficients --- 28 | tbl <- d_te %>% filter(model != "SLX") %>% 29 | tidyr::pivot_longer(cols = 2:6) %>% 30 | group_by(model, name) %>% 31 | summarise_all(list(mean = mean, sd = sd, # Report mean, sd, and quantiles 32 | qu01 = \(x) quantile(x, .01), qu99 = \(x) quantile(x, .99))) %>% 33 | tidyr::pivot_longer(cols = 3:6, names_to = "measure") %>% 34 | mutate(value = round(value, 3)) %>% 35 | mutate(value = ifelse(measure == "sd", # Brackets around sd 36 | gsub("(.*)", "(\\1)", value), value)) %>% 37 | mutate(value = ifelse(measure == "qu01", # Square brackets around quantiles 38 | gsub("(.*)", "[\\1, ", value), value)) %>% 39 | mutate(value = ifelse(measure == "qu99", 40 | gsub("(.*)", " \\1]", value), value)) %>% 41 | tidyr::pivot_wider(names_from = model) 42 | 43 | # Fix order 44 | tbl <- tbl[c(13:16, 5:8, 17:20, 9:12, 1:4), ] 45 | # Move the quantiles to one row with the credible interval 46 | for(r in c(3, 7, 11, 15, 19)) { 47 | tbl[r, 2:6] <- as.list(c("ci", paste0(tbl[r, 3:6], tbl[r + 1, 3:6]))) 48 | } 49 | tbl <- tbl[-c(4, 8, 12, 16, 20), ] 50 | 51 | # Export to LaTeX 52 | tbl %>% as.data.frame() %>% memisc:::toLatex.data.frame() 53 | 54 | # Total effect plots --- 55 | 56 | # The size of indirect effects depends on delta via Psi 57 | N <- nrow(X) 58 | delta <- seq(2, 4, 0.01) 59 | avg_weight <- sapply(delta, \(x) sum(Psi(x)) / N) 60 | s <- splinefun(delta, avg_weight) 61 | 62 | # Scaling factor for the table 63 | round(sapply(c(2, 3, 4, 3.01, 2.54, 3.74), s), 2) 64 | 65 | d_te <- d_te %>% mutate( 66 | price_t = price + price_i * s(delta), 67 | income_t = income + income_i * s(delta)) %>% 68 | mutate( # Row-stochastic is simpler 69 | price_t = ifelse(model == "SLX", price + price_i, price_t), 70 | income_t = ifelse(model == "SLX", income + income_i, income_t) 71 | ) 72 | 73 | # Check summaries to manually limit the y axis 74 | d_te %>% tidyr::pivot_longer(cols = 7:8) %>% 75 | group_by(model, name) %>% 76 | summarise(min = min(value), max = max(value), 77 | q9 = quantile(value, .99), q1 = quantile(value, .01)) 78 | 79 | # Fix colour of SLX with binary contiguity 80 | cols <- ggthemes::colorblind_pal()(7)[c(5, 6, 7, 4, 3, 2)] 81 | 82 | # Plots 83 | p1 <- d_te %>% transmute(model, price_t) %>% 84 | tidyr::pivot_longer(cols = 2) %>% 85 | # Use unicode delta 86 | mutate(model = ifelse(model == "SLX(delta)", "SLX(δ)", model)) %>% 87 | ggplot(aes(x = model, y = value, fill = model, col = model)) + 88 | geom_hline(yintercept = 0, lwd = 1.5, col = "#a4a4a4") + 89 | stat_dots(quantiles = 250, width = .75, justification = -0.2) + 90 | geom_boxplot(col = "#444444", alpha = 1, # No points for outliers 91 | width = .2, size = .8, outlier.color = NA) + 92 | coord_cartesian(ylim = c(-1.5, -0.6)) + # Manually set limits 93 | ggtitle("Price effect by connectivity") + ylab("total effect") + xlab("") + 94 | theme_minimal(base_size = 14) + 95 | theme( 96 | plot.title = element_text(color = "#333333", size = 18, face = "bold"), 97 | axis.title.x = element_text(color = "#333333", size = 14, face = "bold"), 98 | axis.title.y = element_text(color = "#333333", size = 14, face = "bold"), 99 | text = element_text(family = "Helvetica"), 100 | legend.position = "none" 101 | ) + 102 | scale_colour_manual(values = cols) + 103 | scale_fill_manual(values = cols) 104 | 105 | p2 <- d_te %>% transmute(model, income_t) %>% 106 | tidyr::pivot_longer(cols = 2) %>% 107 | # Use unicode delta 108 | mutate(model = ifelse(model == "SLX(delta)", "SLX(δ)", model)) %>% 109 | ggplot(aes(x = model, y = 
value, fill = model, col = model)) + 110 | geom_hline(yintercept = 0, lwd = 1.5, col = "#a4a4a4") + 111 | stat_dots(quantiles = 250, width = .75, justification = -0.2) + 112 | geom_boxplot(col = "#444444", alpha = 1, # No points for outliers 113 | width = .2, size = .8, outlier.color = NA) + 114 | coord_cartesian(ylim = c(0.1, 0.8)) + 115 | ggtitle("Income effect by connectivity") + ylab("total effect") + xlab("") + 116 | theme_minimal(base_size = 14) + 117 | theme( 118 | plot.title = element_text(color = "#333333", size = 18, face = "bold"), 119 | axis.title.x = element_text(color = "#333333", size = 14, face = "bold"), 120 | axis.title.y = element_text(color = "#333333", size = 14, face = "bold"), 121 | text = element_text(family = "Helvetica"), 122 | legend.position = "none", 123 | ) + 124 | scale_colour_manual(values = cols) + 125 | scale_fill_manual(values = cols) 126 | 127 | # Merge and save the plots 128 | gridExtra::grid.arrange(p1, p2, ncol = 2) 129 | ggsave(file = "cigar-dist_te.eps", # Cairo for unicode delta 130 | plot = gridExtra::arrangeGrob(p1, p2, nrow = 1, ncol = 2), 131 | device = cairo_pdf, width = 9, height = 4) 132 | 133 | # Diagnostic plot for delta --- 134 | 135 | # Visualise connectivity --- 136 | d_delta <- as_tibble(list( 137 | "iteration" = seq_len(nrow(out_slxdx$draws)), 138 | "delta" = out_slxdx$draws[, "delta_SLX"] 139 | )) 140 | 141 | # Trace plot 142 | p1 <- ggplot(d_delta) + 143 | geom_line(aes(y = delta, x = iteration)) + 144 | ggtitle("Connectivity parameter") + ylab("δ") + xlab("iteration") + 145 | theme_minimal(base_size = 14) + 146 | theme( 147 | plot.title = element_text(color = "#333333", size = 18, face = "bold"), 148 | axis.title.x = element_text(color = "#333333", size = 14, face = "bold"), 149 | axis.title.y = element_text(color = "#333333", size = 14, face = "bold"), 150 | text = element_text(family = "Helvetica"), 151 | legend.position = "none" 152 | ) 153 | # Density plot 154 | p2 <- ggplot(d_delta) + 155 | stat_halfeye(aes(y = delta)) + 156 | ggtitle("") + ylab("δ") + xlab("density") + 157 | theme_minimal(base_size = 14) + 158 | theme( 159 | plot.title = element_text(color = "#333333", size = 18, face = "bold"), 160 | axis.title.x = element_text(color = "#333333", size = 14, face = "bold"), 161 | axis.title.y = element_text(color = "#333333", size = 14, face = "bold"), 162 | text = element_text(family = "Helvetica"), 163 | legend.position = "none" 164 | ) 165 | 166 | # Merge and save the plots 167 | gridExtra::grid.arrange(p1, p2, ncol = 2) 168 | ggsave(file = "cigar-dist_delta.eps", # Cairo for unicode delta 169 | plot = gridExtra::arrangeGrob(p1, p2, nrow = 1, ncol = 2), 170 | device = cairo_pdf, width = 9, height = 4) 171 | -------------------------------------------------------------------------------- /paper/2a_outputs_cont.R: -------------------------------------------------------------------------------- 1 | 2 | # Generate outputs --- 3 | library("dplyr") 4 | library("spatialreg") 5 | library("ggplot2") 6 | library("ggdist") # Dotplots 7 | library("qqplotr") # QQ plot 8 | 9 | # Compute total effects --- 10 | d_te1 <- rbind( 11 | as_tibble(out_blm$draws) %>% transmute(model = "LM", 12 | price = beta2, income = beta3), 13 | as_tibble(out_bslx$draws) %>% transmute(model = "SLX", 14 | price = beta2 + beta78, income = beta3 + beta79), 15 | as_tibble(out_bsar$draws) %>% transmute(model = "SAR", 16 | price = beta2 / (1 - lambda_SAR), income = beta3 / (1 - lambda_SAR)), 17 | as_tibble(out_bsem$draws) %>% transmute(model = "SEM", 18 | price = 
beta2, income = beta3), 19 | as_tibble(out_bsdm$draws) %>% transmute(model = "SDM", 20 | price = (beta2 + beta78) / (1 - lambda_SAR), 21 | income = (beta3 + beta79) / (1 - lambda_SAR)), 22 | as_tibble(out_bsdem$draws) %>% transmute(model = "SDEM", 23 | price = beta2 + beta78, income = beta3 + beta79) 24 | ) %>% tidyr::pivot_longer(cols = 2:3) %>% 25 | mutate(model = factor(model, # Set ordering 26 | levels = c("LM", "SEM", "SDEM", "SLX", "SDM", "SAR"))) 27 | d_te2 <- rbind( 28 | tibble(model = "LM", 29 | price = coef(out_lm)[2], income = coef(out_lm)[3]), 30 | tibble(model = "SLX", 31 | price = sum(coef(out_slx)[c(2, 78)]), 32 | income = sum(coef(out_slx)[c(3, 79)])), 33 | tibble(model = "SAR", 34 | price = coef(out_sar)[3] / (1 - coef(out_sar)[1]), 35 | income = coef(out_sar)[4] / (1 - coef(out_sar)[1])), 36 | tibble(model = "SEM", 37 | price = coef(out_sem)[3], income = coef(out_sem)[4]), 38 | tibble(model = "SDM", 39 | price = sum(coef(out_sdm)[c(3, 79)]) / (1 - coef(out_sdm)[1]), 40 | income = sum(coef(out_sdm)[c(4, 80)]) / (1 - coef(out_sdm)[1])), 41 | tibble(model = "SDEM", 42 | price = sum(coef(out_sdem)[c(3, 79)]), 43 | income = sum(coef(out_sdem)[c(4, 80)])) 44 | ) %>% tidyr::pivot_longer(cols = 2:3) 45 | 46 | # Manually limit the y axis, ignoring the SDM's fat tails 47 | d_te1 %>% filter(model != "SDM") %>% 48 | group_by(name) %>% summarise(min = min(value), max = max(value), 49 | q9 = quantile(value, .999), q1 = quantile(value, .001)) 50 | 51 | # Total effect plots --- 52 | p1 <- d_te1 %>% filter(name == "price") %>% 53 | ggplot(aes(x = model, y = value, fill = model)) + 54 | geom_hline(yintercept = 0, lwd = 1.5, col = "#a4a4a4") + 55 | stat_dots(aes(col = model), quantiles = 250, 56 | width = .75, justification = -0.2) + # 250 dots, narrower & shifted right 57 | geom_boxplot(col = "#444444", alpha = 1, # No points for outliers 58 | width = .2, size = .8, outlier.color = NA) + 59 | geom_point(data = d_te2, aes(x = model, y = value), # Add freq. 
estimates 60 | col = "#444444", shape = 4, stroke = 1.5, size = 3) + 61 | coord_cartesian(ylim = c(-1.8, -.7)) + # Manually set limits 62 | ggtitle("Price effect by model") + ylab("total effect") + xlab("") + 63 | theme_minimal(base_size = 14) + 64 | theme( 65 | plot.title = element_text(color = "#333333", size = 18, face = "bold"), 66 | axis.title.x = element_text(color = "#333333", size = 14, face = "bold"), 67 | axis.title.y = element_text(color = "#333333", size = 14, face = "bold"), 68 | text = element_text(family = "Helvetica"), 69 | legend.position = "none" 70 | ) + 71 | scale_colour_manual(values = ggthemes::colorblind_pal()(7)[-1]) + 72 | scale_fill_manual(values = ggthemes::colorblind_pal()(7)[-1]) 73 | 74 | p2 <- d_te1 %>% filter(name == "income") %>% 75 | ggplot(aes(x = model, y = value, fill = model)) + 76 | geom_hline(yintercept = 0, lwd = 1.5, col = "#a4a4a4") + 77 | stat_dots(aes(col = model), quantiles = 250, 78 | width = .75, justification = -0.2) + # 250 dots, narrower & shifted right 79 | geom_boxplot(col = "#444444", alpha = 1, # No points for outliers 80 | width = .2, size = .8, outlier.color = NA) + 81 | geom_point(data = d_te2, aes(x = model, y = value), # Add frequentist estimates 82 | col = "#444444", shape = 4, stroke = 1.5, size = 3) + 83 | coord_cartesian(ylim = c(-0.1, 1.1)) + # Manually set limits 84 | ggtitle("Income effect by model") + ylab("total effect") + xlab("") + 85 | theme_minimal(base_size = 14) + 86 | theme( 87 | plot.title = element_text(color = "#333333", size = 18, face = "bold"), 88 | axis.title.x = element_text(color = "#333333", size = 14, face = "bold"), 89 | axis.title.y = element_text(color = "#333333", size = 14, face = "bold"), 90 | text = element_text(family = "Helvetica"), 91 | legend.position = "none" 92 | ) + 93 | scale_colour_manual(values = ggthemes::colorblind_pal()(7)[-1]) + 94 | scale_fill_manual(values = ggthemes::colorblind_pal()(7)[-1]) 95 | 96 | # Merge and save the plots 97 | gridExtra::grid.arrange(p1, p2, nrow = 2) 98 | ggsave(file = "cigar-contig_te.eps", 99 | plot = gridExtra::arrangeGrob(p1, p2, nrow = 2), width = 9, height = 8) 100 | 101 | # Investigate distribution of total effects --- 102 | d_qq1 <- d_te1 %>% # Focus on price in the SLX, SDM, and SAR models 103 | filter(model %in% c("SLX", "SDM", "SAR"), name == "price") 104 | set.seed(27) # We plot a random subset of all posterior values 105 | d_qq2 <- d_qq1 %>% slice(c(sample(1:25000, 5000), # Assuming there's 25,000 draws 106 | sample(25001:50000, 5000), sample(50001:75000, 5000))) 107 | 108 | # QQ plots for the distribution of total effects --- 109 | pq1 <- d_qq1 %>% 110 | ggplot(mapping = aes(sample = value, col = model, fill = model)) + 111 | facet_grid(. 
~ model, scale = "free") + # A model per column 112 | qqplotr::stat_qq_point(data = d_qq2, pch = 4, size = 0.5, col = "#333333") + 113 | qqplotr::stat_qq_band(alpha = 0.5, conf = 0.999, band = "pointwise") + 114 | qqplotr::stat_qq_line() + 115 | ggtitle("QQ plot of total price effect") + 116 | ylab("sample quantiles") + xlab("theoretical quantiles") + 117 | theme_minimal(base_size = 14) + 118 | theme( 119 | plot.title = element_text(color = "#333333", size = 18, face = "bold"), 120 | axis.title.x = element_text(color = "#333333", size = 14, face = "bold"), 121 | axis.title.y = element_text(color = "#333333", size = 14, face = "bold"), 122 | text = element_text(family = "Helvetica"), 123 | legend.position = "none" 124 | ) + 125 | scale_colour_manual(values = ggthemes::colorblind_pal()(7)[-1:-4]) + 126 | scale_fill_manual(values = ggthemes::colorblind_pal()(7)[-1:-4]) 127 | 128 | # Save the plot 129 | pq1 130 | ggsave(file = "cigar-contig_qq.png", plot = pq1, width = 9, height = 4) 131 | 132 | # Build a table of coefficients --- 133 | d_tab <- rbind( 134 | as_tibble(out_blm$draws) %>% transmute(model = "LM", 135 | price = beta2, price_ind = NA, 136 | income = beta3, income_ind = NA, lambda = NA), 137 | as_tibble(out_bslx$draws) %>% transmute(model = "SLX", 138 | price = beta2, price_ind = beta78, 139 | income = beta3, income_ind = beta79, lambda = NA), 140 | as_tibble(out_bsar$draws) %>% transmute(model = "SAR", 141 | price = beta2, price_ind = NA, 142 | income = beta3, income_ind = NA, lambda = lambda_SAR), 143 | as_tibble(out_bsem$draws) %>% transmute(model = "SEM", 144 | price = beta2, price_ind = NA, 145 | income = beta3, income_ind = NA, lambda = lambda_SEM), 146 | as_tibble(out_bsdm$draws) %>% transmute(model = "SDM", 147 | price = beta2, price_ind = beta78, 148 | income = beta3, income_ind = beta79, lambda = lambda_SAR), 149 | as_tibble(out_bsdem$draws) %>% transmute(model = "SDEM", 150 | price = beta2, price_ind = beta78, 151 | income = beta3, income_ind = beta79, lambda = lambda_SEM) 152 | ) %>% tidyr::pivot_longer(cols = 2:6) 153 | 154 | # Prepare the table 155 | tbl <- d_tab %>% group_by(model, name) %>% 156 | summarise_all(list(mean = mean, sd = sd)) %>% # Report mean and sd 157 | tidyr::pivot_longer(cols = 3:4, names_to = "measure") %>% 158 | mutate(value = round(value, 3)) %>% # Three digits 159 | mutate(value = ifelse(measure == "sd", 160 | gsub("(.*)", "(\\1)", value), value)) %>% # Brackets around sd 161 | tidyr::pivot_wider(names_from = model) 162 | 163 | # Fix order 164 | tbl <- tbl[c(7, 8, 1, 2, 9, 10, 3, 4, 5, 6), 165 | c("name", "LM", "SEM", "SDEM", "SLX", "SDM", "SAR")] 166 | # Remove NAs 167 | tbl[is.na(tbl)] <- "" 168 | 169 | # Export to LaTeX 170 | tbl %>% as.data.frame() %>% memisc:::toLatex.data.frame() 171 | -------------------------------------------------------------------------------- /R/10_lm.R: -------------------------------------------------------------------------------- 1 | 2 | #' Base class with common functionality 3 | #' 4 | #' This class provides basic functionality to build a hierarchical Bayesian model. 5 | #' The three public functions are (1) 'initialize' to provide settings at construction time, (2) 'setup' to provide 6 | #' additional settings and data, (3) 'finalize' to update the status and meta information after an iteration. The first 7 | #' two functions also call 'initialize' and 'setup' methods of descendants as well as the 'starting' methods to further 8 | #' allow layering setup processes. 
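#'
#' A hedged illustration of that layering convention (hypothetical class, not part of the package): a descendant only
#' defines methods matching 'initialize_*' or 'setup_*' and they are picked up automatically; the digit after 'setup_'
#' orders execution, since matches are collected alphabetically via ls().
#'   Child <- R6::R6Class("Child", inherit = Base, public = list(
#'     initialize_Child = function(...) message("child settings"),
#'     setup_1Child = function(...) message("child data preparation")
#'   ))
#'   obj <- Child$new()                              # prints "child settings"
#'   obj$setup(y = rnorm(5L), X = matrix(1, 5L, 1L)) # prints "child data preparation"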
9 | #' 10 | #' @docType class 11 | #' 12 | #' @importFrom R6 R6Class 13 | #' 14 | #' @noRd 15 | Base <- R6Class("Base", 16 | 17 | public = list( 18 | 19 | initialize = function(...) { 20 | 21 | # Meta --- 22 | private$meta <- list("iterations" = 0L) 23 | 24 | # Initialize children 25 | lapply(ls(self, pattern = "^initialize_[0-9a-zA-Z]+$"), function(n) self[[n]](...)) 26 | 27 | invisible(self) 28 | }, 29 | 30 | setup = function(y, X, ...) { 31 | 32 | # Data --- 33 | private$data <- list("y" = y, "X" = X) 34 | 35 | # Cache --- 36 | private$cache <- list("N" = NROW(X), "M" = NCOL(X), "XX" = crossprod(X), "Xy" = crossprod(X, y)) 37 | 38 | # Set up children --- 39 | lapply(ls(self, pattern = "^setup_[0-9a-zA-Z]+$"), function(n) self[[n]](...)) 40 | lapply(ls(self, pattern = "^starting_[0-9a-zA-Z]+$"), function(n) self[[n]](...)) 41 | 42 | # Done --- 43 | invisible(self) 44 | }, 45 | 46 | sample = function(...) { # To-do: build a list of sampling functions once at initialization 47 | 48 | # Sample expected quantities --- 49 | self$sample_sigma() 50 | self$sample_beta() 51 | self$sample_shrinkage() # Normal-Gamma or Horseshoe 52 | self$sample_latent() # Spatial autoregressive or limited dependent 53 | self$sample_volatility() # Spatial error or stochastic volatility 54 | # Sample potential extra quantities 55 | self$sample_extra1() 56 | self$sample_extra2() 57 | self$sample_extra3() 58 | 59 | # Update status --- 60 | self$finalize() 61 | 62 | return(invisible(NULL)) 63 | }, 64 | sample_beta = function() {NULL}, 65 | sample_sigma = function() {NULL}, 66 | sample_shrinkage = function() {NULL}, 67 | sample_latent = function() {NULL}, 68 | sample_volatility = function() {NULL}, 69 | sample_extra1 = function() {NULL}, 70 | sample_extra2 = function() {NULL}, 71 | sample_extra3 = function() {NULL}, 72 | 73 | finalize = function() { 74 | 75 | private$meta$iterations <- private$meta$iterations + 1L 76 | 77 | }, 78 | 79 | # Slot for Metropolis-Hastings steps 80 | MH = list() 81 | ), 82 | active = list( 83 | 84 | # Access functions --- 85 | y = function() {private$data$y}, 86 | X = function() {private$data$X}, 87 | XX = function() {private$cache$XX}, 88 | Xy = function() {private$cache$Xy}, 89 | 90 | get_meta = function() {c(iterations = private$meta$iterations)} 91 | ), 92 | 93 | private = list( 94 | data = NULL, cache = NULL, meta = NULL 95 | ) 96 | 97 | ) 98 | 99 | 100 | 101 | #' Bayesian model with independent Normal-Gamma prior 102 | #' 103 | #' This class serves as the base for most practical models. It extends the proper 'Base' class with basic functionality 104 | #' to estimate a linear model. A print function is available, a slot for Metropolis-Hastings objects is provided, and 105 | #' several functions to access parts of the model are made available. 
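#'
#' A hedged plain-R sketch of the two conditional updates implemented below, using simulated data (the class itself
#' draws via the internal helpers rmvn() and sq_sum(), so this is illustration only, not the package's code path):
#'   X <- cbind(1, rnorm(100)); y <- X %*% c(1, 2) + rnorm(100)
#'   mu0 <- rep(0, 2); prec0 <- diag(1e-8, 2); sigma <- 1
#'   prec1 <- crossprod(X) / sigma + prec0                         # posterior precision of beta
#'   mu1 <- solve(prec1, crossprod(X, y) / sigma + prec0 %*% mu0)  # posterior mean of beta
#'   beta <- mu1 + backsolve(chol(prec1), rnorm(2))                # draw beta | sigma, y
#'   sigma <- 1 / rgamma(1, 0.01 + 100 / 2, 0.01 + sum((y - X %*% beta)^2) / 2)  # draw sigma | beta, y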
106 | #' 107 | #' @docType class 108 | #' 109 | #' @importFrom R6 R6Class 110 | #' 111 | #' @noRd 112 | NormalGamma <- R6Class("NormalGamma", inherit = Base, 113 | 114 | public = list( 115 | 116 | print = function() { 117 | 118 | cat("Bayesian model with a", private$meta$priortype, "prior setup.\n") 119 | cat(private$meta$iterations, "total samples have been drawn so far.\n") 120 | cat("The sampler contains", length(self$MH), "Metropolis-Hastings steps.\n") 121 | for(i in seq_along(self$MH)) { 122 | cat("\t", names(self$MH)[i], " at ", self$MH[[i]]$get_accepted, " accepted draws out of ", 123 | self$MH[[i]]$get_proposed, " proposals (rate ", round(self$MH[[i]]$get_acceptance, 2), ").\n", sep = "") 124 | } 125 | 126 | return(invisible(self)) 127 | }, 128 | 129 | initialize_NG = function(priors, ...) { 130 | 131 | # Store prior settings --- 132 | 133 | if(missing(priors) || is.null(priors$NG)) {priors <- list(NG = set_NG())} 134 | private$NG$priors <- priors$NG 135 | 136 | # Update meta info --- 137 | 138 | private$meta$priortype <- "Normal-Gamma" 139 | }, 140 | 141 | # Priority 4 since it relies on 'M' 142 | setup_4NG = function(...) { 143 | 144 | # Update prior settings to fit the data --- 145 | 146 | self$prior_mean <- matrix(private$NG$priors$mu, nrow = private$cache$M) 147 | self$prior_precision <- diag(private$NG$priors$precision, nrow = private$cache$M) 148 | private$NG$shape0 <- private$NG$priors$shape 149 | private$NG$rate0 <- private$NG$priors$rate 150 | 151 | # Calculate known posterior quantities 152 | private$NG$shape1 <- private$NG$shape0 + private$cache$N / 2 153 | }, 154 | 155 | starting_NG = function(...) { 156 | 157 | # Set sensible values of beta and sigma using LS 158 | self$beta <- if(is.null(private$NG$priors$beta)) { 159 | qr.solve(self$XX, self$Xy) 160 | } else {matrix(private$NG$priors$beta, nrow = private$cache$M)} 161 | self$sigma <- if(is.null(private$NG$priors$sigma)) { 162 | sq_sum(self$residuals) / (private$cache$N - private$cache$M) 163 | } else {private$NG$priors$sigma} 164 | }, 165 | 166 | sample_beta = function() { 167 | 168 | private$NG$prec1 <- self$XX / self$sigma + self$prior_precision 169 | private$NG$mu1 <- solve(private$NG$prec1, (self$Xy / self$sigma + self$prior_precision %*% private$NG$mu0)) 170 | # Draw from multivariate Normal 171 | self$beta <- t(rmvn(1L, mu = private$NG$mu1, precision = private$NG$prec1)) 172 | }, 173 | 174 | sample_sigma = function() { 175 | 176 | private$NG$rate1 <- private$NG$rate0 + sq_sum(self$residuals) / 2 177 | # Draw from inverse Gamma 178 | self$sigma <- 1 / rgamma(1L, shape = private$NG$shape1, rate = private$NG$rate1) 179 | } 180 | ), 181 | 182 | active = list( 183 | 184 | # We reserve beta and sigma 185 | beta = function(value) {if(missing(value)) {private$NG$beta} else {private$NG$beta <- value}}, 186 | sigma = function(value) {if(missing(value)) {private$NG$sigma} else {private$NG$sigma <- value}}, 187 | # Shrinkage priors may change the prior precision 188 | prior_precision = function(value) {if(missing(value)) {private$NG$prec0} else {private$NG$prec0 <- value}}, 189 | prior_mean = function(value) {if(missing(value)) {private$NG$mu0} else {private$NG$mu0 <- value}}, 190 | 191 | # Access functions --- 192 | residuals = function() {self$y - self$X %*% self$beta}, 193 | get_parameters = function() {list("beta" = self$beta, "sigma" = sqrt(self$sigma))}, 194 | get_NG = function() {private$NG} 195 | ), 196 | 197 | private = list( 198 | NG = NULL 199 | ) 200 | 201 | ) 202 | 203 | 204 | #' Bayesian model with a 
conjugate Normal-Gamma prior 205 | #' 206 | #' The conjugate Normal-Gamma prior is a simple adaptation of the independent Normal-Gamma prior. An additional 207 | #' 'starting' method is provided to compute all known posterior quantities. Sampling steps for 'beta' and 'sigma' 208 | #' are adapted to use the known posteriors. 209 | #' 210 | #' @docType class 211 | #' 212 | #' @importFrom R6 R6Class 213 | #' 214 | #' @noRd 215 | ConjugateNormalGamma <- R6Class("ConjugateNormalGamma", inherit = NormalGamma, 216 | 217 | public = list( 218 | starting_NG = function(...) { # No need for LS estimates to initialise 219 | self$sample_beta() 220 | self$sample_sigma() 221 | }, 222 | sample_beta = function() { 223 | private$NG$prec1 <- self$XX + self$prior_precision 224 | private$NG$mu1 <- solve(private$NG$prec1, (self$Xy + self$prior_precision %*% private$NG$mu0)) 225 | self$beta <- t(rmvn(1L, mu = private$NG$mu1, precision = private$NG$prec1)) 226 | }, 227 | sample_sigma = function() { 228 | residuals <- self$y - self$X %*% private$NG$mu1 229 | private$NG$rate1 <- private$NG$rate0 + sq_sum(residuals) / 2 230 | self$sigma <- 1 / rgamma(1L, shape = private$NG$shape1, rate = private$NG$rate1) 231 | } 232 | ) 233 | 234 | ) 235 | -------------------------------------------------------------------------------- /R/20_mh.R: -------------------------------------------------------------------------------- 1 | 2 | #' Metropolis-Hastings step 3 | #' 4 | #' @docType class 5 | #' 6 | #' @importFrom R6 R6Class 7 | #' 8 | #' @noRd 9 | MetropolisHastings <- R6Class("MetropolisHastings", 10 | 11 | public = list( 12 | 13 | print = function() { 14 | 15 | cat("Metropolis-Hastings object for ", private$name ,".\n", sep = "") 16 | cat(private$accepted, " / ", private$proposed, " accepted / proposed values (rate ", 17 | round(self$get_acceptance, 2), ").\n", private$accepted_tune, " / ", private$proposed_tune, 18 | " total accepted / proposed values.\n", sep = "") 19 | cat("Current value / scale: ", private$value, " / ", private$scale, ".\n", sep = "") 20 | 21 | return(invisible(self)) 22 | }, 23 | 24 | initialize = function(value, scale = 0.1, ...) { 25 | 26 | private$value <- value 27 | private$scale <- scale 28 | 29 | # Keep track of totals and since scale adjustment 30 | private$proposed <- private$accepted <- private$proposed_tune <- private$accepted_tune <- 0L 31 | 32 | # Initialize children 33 | for(l in ls(self)) {if(grepl("^initialize_[0-9a-zA-Z]+$", l)) {self[[l]](...)}} 34 | 35 | return(NULL) 36 | }, 37 | 38 | setup = function(...) { 39 | 40 | # Update children 41 | for(l in ls(self)) {if(grepl("^setup_[0-9a-zA-Z]+$", l)) {self[[l]](...)}} 42 | 43 | return(NULL) 44 | }, 45 | 46 | sample = function(...) { 47 | self$propose(...) 48 | self$acceptance(...) 49 | self$finalize(...) 50 | }, 51 | 52 | propose = function(location = private$value, scale = private$scale, ...) { 53 | private$proposal <- rnorm(1L, location, scale) 54 | }, 55 | 56 | acceptance = function(proposal = private$proposal, current = private$value, ...) { 57 | private$probability <- exp(self$posterior(proposal, ...) - 58 | self$posterior(current, ...) + self$adjustment(proposal, ...)) 59 | }, 60 | 61 | adjustment = function(proposal = private$proposal, current = private$value, ...) { 62 | 0 63 | }, 64 | 65 | posterior = function(value = private$value, ...) { 66 | 0 + prior(value, ...) 67 | }, 68 | 69 | prior = function(value = private$value, ...) { 70 | 0 71 | }, 72 | 73 | finalize = function(...) 
{ 74 | if(isTRUE(runif(1L) < private$probability)) { 75 | private$value <- private$proposal 76 | private$accepted <- private$accepted + 1L 77 | private$accepted_tune <- private$accepted_tune + 1L 78 | } 79 | private$proposed <- private$proposed + 1L 80 | private$proposed_tune <- private$proposed_tune + 1L 81 | } 82 | 83 | ), 84 | 85 | active = list( 86 | get_value = function() {private$value}, 87 | get_scale = function() {private$scale}, 88 | set_scale = function(value) { 89 | if(missing(value)) return(private$scale) 90 | private$scale <- value 91 | private$accepted <- 0L 92 | private$proposed <- 0L 93 | }, 94 | get_accepted = function() {private$accepted}, 95 | get_proposed = function() {private$proposed}, 96 | get_acceptance = function(value) {private$accepted / max(private$proposed, 1)}, 97 | get_tuning = function(value) {private$accepted_tune / max(private$proposed_tune, 1)} 98 | ), 99 | 100 | private = list( 101 | value = NULL, proposal = NULL, scale = NULL, 102 | probability = NULL, 103 | proposed = NULL, accepted = NULL, proposed_tune = NULL, accepted_tune = NULL, 104 | name = NULL 105 | ) 106 | ) 107 | 108 | 109 | #' Metropolis-Hastings step for theta in the Normal-Gamma shrinkage setup 110 | #' 111 | #' @docType class 112 | #' 113 | #' @importFrom R6 R6Class 114 | #' 115 | #' @noRd 116 | MH_SNG_theta <- R6Class("MH_SNG_theta", inherit = MetropolisHastings, 117 | 118 | public = list( 119 | 120 | initialize_theta = function(rate = 1, ...) { 121 | private$rate <- rate 122 | private$name <- "Normal-Gamma theta" 123 | }, 124 | 125 | propose = function(location = private$value, scale = private$scale, ...) { 126 | private$proposal <- exp(rnorm(1L, 0, scale)) * location 127 | }, 128 | 129 | adjustment = function(proposal = private$proposal, current = private$value, ...) { 130 | log(proposal) - log(current) 131 | }, 132 | 133 | posterior = function(value = private$value, ...) { 134 | dots <- list(...) 135 | sum(dgamma(dots$tau, value, (value * dots$lambda / 2), log = TRUE)) + self$prior(value) 136 | }, 137 | 138 | prior = function(value = private$value, ...) { 139 | dexp(value, rate = private$rate, log = TRUE) 140 | } 141 | ), 142 | 143 | private = list( 144 | rate = NULL 145 | ) 146 | ) 147 | 148 | 149 | #' Metropolis-Hastings step for lambda in the spatial autoregressive model 150 | #' 151 | #' @docType class 152 | #' 153 | #' @importFrom R6 R6Class 154 | #' 155 | #' @noRd 156 | MH_SAR_lambda <- R6Class("MH_SAR_lambda", inherit = MetropolisHastings, 157 | 158 | public = list( 159 | 160 | initialize_lambda = function(shape_a = 1.01, shape_b = 1.01, lower = -Inf, upper = Inf, ...) { # To-do: priors 161 | private$shape_a <- shape_a 162 | private$shape_b <- shape_b 163 | private$lower <- lower 164 | private$upper <- upper 165 | private$name <- "Spatial lambda" 166 | }, 167 | 168 | setup_lambda = function(N, M) { 169 | private$N <- N 170 | private$M <- M 171 | }, 172 | 173 | propose = function(location = private$value, scale = private$scale, ...) { # To-do: interweaving 174 | while(TRUE) { 175 | private$proposal <- rnorm(1L, location, scale) 176 | if(private$proposal < private$upper && private$proposal > private$lower) {break} 177 | } 178 | # private$proposal2 <- rnorm(1L, private$value2, private$scale) 179 | # private$proposal <- self$untransform(private$proposal2) 180 | }, 181 | 182 | acceptance = function(proposal = private$proposal, current = private$value, ...) { 183 | private$probability <- exp(self$posterior(proposal, ...) - 184 | self$posterior(current, ...) 
+ self$adjustment(proposal, current)) 185 | }, 186 | 187 | adjustment = function(proposal = private$proposal, current = private$value) { 188 | 0 189 | # (1 - proposal^2) - (1 - current^2) # Proposed z transformed variable 190 | }, 191 | 192 | posterior = function(value = private$value, ...) { 193 | dots <- list(...) 194 | dots$get_ldet(value) - (private$N - private$M) / 2 * log(dots$get_rss(value)) + self$prior(value) 195 | }, 196 | 197 | prior = function(value = private$value, ...) { 198 | dbeta((value + 1) / 2, shape1 = private$shape_a, shape2 = private$shape_b, log = TRUE) - log(2) 199 | } 200 | ), 201 | 202 | private = list( 203 | shape_a = NULL, shape_b = NULL, 204 | lower = NULL, upper = NULL, 205 | N = NULL, M = NULL 206 | ) 207 | ) 208 | 209 | 210 | 211 | #' Metropolis-Hastings step for lambda in the spatial error model 212 | #' 213 | #' @docType class 214 | #' 215 | #' @importFrom R6 R6Class 216 | #' 217 | #' @noRd 218 | MH_SEM_lambda <- R6Class("MH_SEM_lambda", inherit = MH_SAR_lambda) 219 | 220 | 221 | 222 | #' Metropolis-Hastings step for delta in the spatially lagged explanatories model 223 | #' 224 | #' @docType class 225 | #' 226 | #' @importFrom R6 R6Class 227 | #' 228 | #' @noRd 229 | MH_SLX_delta <- R6Class("MH_SLX_delta", inherit = MetropolisHastings, 230 | 231 | public = list( 232 | 233 | initialize_delta = function(shape_a = 2, shape_b = 1, lower = 1e-12, upper = Inf, ...) { 234 | private$shape_a <- shape_a 235 | private$shape_b <- shape_b 236 | private$lower <- lower 237 | private$upper <- upper 238 | private$name <- "Connectivity delta" 239 | }, 240 | 241 | setup_delta = function(N, M) { 242 | private$N <- N 243 | private$M <- M 244 | }, 245 | 246 | propose = function(location = private$value, scale = private$scale, ...) { 247 | while(TRUE) { 248 | private$proposal <- rnorm(1L, location, scale) 249 | if(private$proposal < private$upper && private$proposal > private$lower) {break} 250 | } 251 | }, 252 | 253 | acceptance = function(proposal = private$proposal, 254 | current = private$value, ...) { 255 | private$probability <- exp(self$posterior(proposal, ...) - 256 | self$posterior(current, ...) + self$adjustment(proposal, current)) 257 | }, 258 | 259 | adjustment = function(proposal = private$proposal, current = private$value) { 260 | 0 261 | }, 262 | 263 | posterior = function(value = private$value, ...) { 264 | dots <- list(...) 265 | -(private$N - private$M) / 2 * log(dots$get_rss(value)) + self$prior(value) 266 | }, 267 | 268 | prior = function(value = private$value, ...) { 269 | dgamma(1 / value, shape = private$shape_a, rate = private$shape_b, log = TRUE) 270 | } 271 | ), 272 | 273 | private = list( 274 | shape_a = NULL, shape_b = NULL, 275 | lower = NULL, upper = NULL, 276 | N = NULL, M = NULL 277 | ) 278 | ) 279 | -------------------------------------------------------------------------------- /R/41_set_options.R: -------------------------------------------------------------------------------- 1 | 2 | #' Set up Bayesian model priors and settings 3 | #' 4 | #' @param type Character scalar with the prior type for the nested linear model. 5 | #' @param NG Settings for the Normal-Gamma prior (independent or conjugate). See \code{\link{set_NG}}. 6 | #' @param SNG Settings for the Normal-Gamma shrinkage prior (Polson and Scott, 2010). See \code{\link{set_NG}}. 7 | #' @param HS Settings for the Horseshoe shrinkage prior (Makalic and Schmidt, 2015). See \code{\link{set_NG}}. 8 | #' @param SAR Settings for the spatial autoregressive setup. See \code{\link{set_SAR}}. 
9 | #' @param SLX Settings for the spatially lagged explanatory setup. See \code{\link{set_SAR}}. Note that settings for 10 | #' the spatial term 'theta' are provided to \emph{NG} instead. 11 | #' @param SEM Settings for the spatial error setup. See \code{\link{set_SAR}}. 12 | #' @param SV Settings for the stochastic volatility setup. See \code{\link{set_SV}}. 13 | #' @param ... Used to provide custom prior elements. 14 | #' 15 | #' @return Returns a list with priors and settings for a Bayesian model. 16 | #' @export 17 | #' 18 | #' @examples 19 | #' set_options("Shrinkage", SNG = set_SNG(lambda_a = 1, lambda_b = 1)) 20 | set_options <- function( 21 | type = c("Independent", "Conjugate", "Shrinkage", "Horseshoe"), 22 | NG = set_NG(), SNG = set_SNG(), HS = set_HS(), 23 | SAR = set_SAR(), SLX = set_SLX(), SEM = set_SEM(), 24 | SV = set_SV(), ... 25 | ) { 26 | 27 | type <- match.arg(type) 28 | 29 | priors <- list("NG" = NG, # Normal-Gamma (independent or conjugate) 30 | "SNG" = SNG, "HS" = HS, # Shrinkage Normal-Gamma or Horseshoe 31 | "SAR" = SAR, "SLX" = SLX, "SEM" = SEM, "SV" = SV, ...) 32 | 33 | structure(list( 34 | "type" = type, "priors" = priors 35 | ), class = "priors") 36 | } 37 | 38 | 39 | #' Set up a Normal-Gamma prior 40 | #' 41 | #' @param mu Numeric scalar or vector with the prior mean of 'beta'. 42 | #' @param precision Numeric scalar or matrix with the prior precision of 'beta'. Not used for shrinkage priors. 43 | #' @param shape,rate Numeric scalars with the prior shape and rate of 'sigma'. 44 | #' @param lambda_a,lambda_b Numeric scalars with the prior shape and rate of 'lambda'. 45 | #' @param theta_scale Numeric scalar with the proposal scale of 'theta'. Defaults to zero for a fixed value. 46 | #' @param theta_a Numeric scalar with the prior rate of 'theta'. 47 | #' @param lambda,tau,theta,zeta,nu,beta,sigma Numerics with starting values for the respective parameter. 48 | #' 49 | #' @return Returns a list with priors and settings. 
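#'
#' @examples
#' # Illustrative values (not package defaults): a mildly informative prior on the coefficients
#' set_NG(mu = 0, precision = 1e-2)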
50 | #' @export 51 | set_NG <- function(mu = 0, precision = 1e-8, shape = 0.01, rate = 0.01, beta = NULL, sigma = NULL) { 52 | 53 | structure(list( 54 | mu = vapply(mu, num_check, numeric(1L), min = -Inf, max = Inf, 55 | msg = "Please provide a valid value for the prior mean via 'mu'."), 56 | precision = vapply(precision, num_check, numeric(1L), min = 0, max = Inf, 57 | msg = "Please provide a valid prior variance via 'precision'."), 58 | shape = num_check(shape, min = 1e-12, max = Inf, msg = "Please provide a valid prior shape via 'shape'."), 59 | rate = num_check(rate, min = 1e-12, max = Inf, msg = "Please provide a valid prior rate via 'rate'."), 60 | beta = beta, sigma = sigma 61 | ), class = "prior_NG") 62 | } 63 | 64 | #' @export 65 | #' @rdname set_NG 66 | set_SNG <- function(lambda_a = 0.01, lambda_b = 0.01, theta_scale = 0, theta_a = 1, 67 | lambda = 1, tau = 10, theta = 0.1) { 68 | 69 | structure(list( 70 | lambda_a = num_check(lambda_a, min = 1e-12, max = Inf, 71 | msg = "Please provide a valid shape for lambda (Normal-Gamma) via 'lambda_a'."), 72 | lambda_b = num_check(lambda_b, min = 1e-12, max = Inf, 73 | msg = "Please provide a valid rate for lambda (Normal-Gamma) via 'lambda_b'."), 74 | theta_scale = num_check(theta_scale, min = 0, max = Inf, 75 | msg = "Please provide a valid proposal scale for theta (Normal-Gamma) via 'theta_scale'."), 76 | theta_a = num_check(theta_a, min = 0, max = Inf, 77 | msg = "Please provide a valid rate for theta (Normal-Gamma) via 'theta_a'."), 78 | lambda = num_check(lambda, min = 1e-12, max = Inf, 79 | msg = "Please provide a valid starting value for 'lambda' (Normal-Gamma)."), 80 | tau = vapply(tau, num_check, numeric(1L), min = 1e-12, max = Inf, 81 | msg = "Please provide a valid starting value for 'tau' (Normal-Gamma)."), 82 | theta = num_check(theta, min = 1e-12, max = Inf, 83 | msg = "Please provide a valid starting value for 'theta' (Normal-Gamma).") 84 | ), class = "prior_SNG") 85 | } 86 | 87 | #' @export 88 | #' @rdname set_NG 89 | set_HS <- function(lambda = 1, tau = 1, zeta = 1, nu = 1) { 90 | 91 | structure(list( 92 | lambda = vapply(lambda, num_check, numeric(1L), min = 1e-12, max = Inf, 93 | msg = "Please provide a valid starting value for 'lambda' (Horseshoe)."), 94 | tau = num_check(tau, min = 1e-12, max = Inf, 95 | msg = "Please provide a valid starting value for 'tau' (Horseshoe)."), 96 | zeta = num_check(zeta, min = 1e-12, max = Inf, 97 | msg = "Please provide a valid starting value for 'zeta' (Horseshoe)."), 98 | nu = vapply(nu, num_check, numeric(1L), min = 1e-12, max = Inf, 99 | msg = "Please provide a valid starting value for 'nu' (Horseshoe).") 100 | ), class = "prior_HS") 101 | } 102 | 103 | 104 | #' Set up a spatial prior 105 | #' 106 | #' @param lambda_a,lambda_b Numeric scalars with the prior shapes of the connectivity strength 'lambda'. 107 | #' @param lambda_scale Numeric scalar with the proposal scale of 'lambda'. 108 | #' @param lambda_min,lambda_max Numeric scalars with upper and lower bounds for 'lambda'. 109 | #' @param delta_a,delta_b Numeric scalars with the prior shapes of the connectivity parameter 'delta'. 110 | #' @param delta_scale Numeric scalar with the proposal scale of 'delta'. Defaults to zero for a fixed value. 111 | #' @param delta_min,delta_max Numeric scalars with upper and lower bounds for 'delta'. 112 | #' @param lambda,delta Numerics with starting values for the respective parameter. 113 | #' 114 | #' @return Returns a list with priors and settings. 
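#'
#' @examples
#' # Illustrative values (not package defaults): a tighter proposal scale for lambda,
#' # and an estimated connectivity parameter delta (delta_scale = 0 keeps delta fixed)
#' set_SAR(lambda = 0, lambda_scale = 0.05)
#' set_SLX(delta = 2, delta_scale = 0.1, delta_a = 2, delta_b = 2)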
115 | #' @export 116 | set_SAR <- function( 117 | lambda_a = 1.01, lambda_b = 1.01, lambda = 0, lambda_scale = 0.1, lambda_min = -1, lambda_max = 1 - 1e-12, 118 | delta_a = 1.01, delta_b = 1.01, delta = 1, delta_scale = 0, delta_min = 1e-12, delta_max = Inf) { 119 | 120 | structure(list( 121 | lambda_a = num_check(lambda_a, min = 1e-12, max = Inf, 122 | msg = "Please provide a valid shape for lambda (spatial) via 'lambda_a'."), 123 | lambda_b = num_check(lambda_b, min = 1e-12, max = Inf, 124 | msg = "Please provide a valid rate for lambda (spatial) via 'lambda_b'."), 125 | lambda_min = num_check(lambda_min, min = -Inf, max = Inf, 126 | msg = "Please provide a valid lower bound for lambda (spatial) via 'lambda_min'."), 127 | lambda_max = num_check(lambda_max, min = -Inf, max = Inf, 128 | msg = "Please provide a valid upper bound for lambda (spatial) via 'lambda_max'."), 129 | lambda = num_check(lambda, min = lambda_min, max = lambda_max, 130 | msg = "Please provide a valid starting value for lambda (spatial) via 'lambda'."), 131 | lambda_scale = num_check(lambda_scale, min = 1e-12, max = Inf, 132 | msg = "Please provide a valid proposal scale for lambda (spatial) via 'lambda_scale'."), 133 | delta_a = num_check(delta_a, min = 1e-12, max = Inf, 134 | msg = "Please provide a valid shape for delta (spatial) via 'delta_a'."), 135 | delta_b = num_check(delta_b, min = 1e-12, max = Inf, 136 | msg = "Please provide a valid rate for delta (spatial) via 'delta_b'."), 137 | delta_min = num_check(delta_min, min = -Inf, max = Inf, 138 | msg = "Please provide a valid lower bound for delta (spatial) via 'delta_min'."), 139 | delta_max = num_check(delta_max, min = -Inf, max = Inf, 140 | msg = "Please provide a valid upper bound for delta (spatial) via 'delta_max'."), 141 | delta = num_check(delta, min = delta_min, max = delta_max, 142 | msg = "Please provide a valid starting value for delta (spatial) via 'delta'."), 143 | delta_scale = num_check(delta_scale, min = 0, max = Inf, 144 | msg = "Please provide a valid proposal scale for delta (spatial) via 'delta_scale'.") 145 | ), class = "prior_SAR") 146 | } 147 | #' @export 148 | #' @rdname set_SAR 149 | set_SLX <- set_SAR 150 | #' @export 151 | #' @rdname set_SAR 152 | set_SEM <- set_SAR 153 | 154 | 155 | #' Set up a volatility prior 156 | #' 157 | #' @param priors Prior settings from \code{\link[stochvol]{specify_priors}}. 158 | #' @param mu,phi,sigma,nu,rho,beta,latent0 Numerics with starting values for the respective parameter. 159 | #' 160 | #' @return Returns a list with priors and settings. 
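#'
#' @examples
#' # Illustrative values for persistent log-volatility; the default priors
#' # require the 'stochvol' package
#' if (requireNamespace("stochvol", quietly = TRUE)) {
#'   set_SV(phi = 0.9, sigma = 0.2)
#' }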
161 | #' @export 162 | set_SV <- function( 163 | priors, mu = 0, phi = 0.5, sigma = 1, nu = Inf, rho = 0, beta = 0, latent0 = 0) { 164 | 165 | if(missing(priors)) { 166 | has_package("stochvol") 167 | priors <- stochvol::specify_priors() 168 | } 169 | 170 | structure(list( 171 | priors = priors, parameters = list( 172 | mu = num_check(mu, min = -Inf, max = Inf, msg = "Please provide a valid starting value for 'mu' (SV)."), 173 | phi = num_check(phi, min = -1, max = 1, msg = "Please provide a valid starting value for 'phi' (SV)."), 174 | sigma = num_check(sigma, min = 0, max = Inf, msg = "Please provide a valid starting value for 'sigma' (SV)."), 175 | nu = num_check(nu, min = 2, max = Inf, msg = "Please provide a valid starting value for 'nu' (SV)."), 176 | rho = num_check(rho, min = -1, max = 1, msg = "Please provide a valid starting value for 'rho' (SV)."), 177 | beta = vapply(beta, num_check, numeric(1L), min = -Inf, max = Inf, 178 | msg = "Please provide a valid starting value for 'beta' (SV)."), 179 | latent0 = num_check(latent0, min = -Inf, max = Inf, 180 | msg = "Please provide a valid starting value for the latent variable (SV) via 'latent0'.") 181 | )), class = "prior_SV") 182 | } 183 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | R Package bsreg: Bayesian Spatial Regression Models 2 | GPL (>= 3) 3 | Copyright (C) 2022 Nikolas Kuschnig 4 | 5 | United States Historical States Database 6 | CC BY-NC-SA 2.5 7 | Copyright (C) 2022 Atlas of Historical County Boundaries 8 | 9 | =========================================================================== 10 | 11 | 12 | GNU GENERAL PUBLIC LICENSE 13 | Version 3, 29 June 2007 14 | 15 | Copyright (C) 2007 Free Software Foundation, Inc. 16 | Everyone is permitted to copy and distribute verbatim copies 17 | of this license document, but changing it is not allowed. 18 | 19 | Preamble 20 | 21 | The GNU General Public License is a free, copyleft license for 22 | software and other kinds of works. 23 | 24 | The licenses for most software and other practical works are designed 25 | to take away your freedom to share and change the works. By contrast, 26 | the GNU General Public License is intended to guarantee your freedom to 27 | share and change all versions of a program--to make sure it remains free 28 | software for all its users. We, the Free Software Foundation, use the 29 | GNU General Public License for most of our software; it applies also to 30 | any other work released this way by its authors. You can apply it to 31 | your programs, too. 32 | 33 | When we speak of free software, we are referring to freedom, not 34 | price. Our General Public Licenses are designed to make sure that you 35 | have the freedom to distribute copies of free software (and charge for 36 | them if you wish), that you receive source code or can get it if you 37 | want it, that you can change the software or use pieces of it in new 38 | free programs, and that you know you can do these things. 39 | 40 | To protect your rights, we need to prevent others from denying you 41 | these rights or asking you to surrender the rights. Therefore, you have 42 | certain responsibilities if you distribute copies of the software, or if 43 | you modify it: responsibilities to respect the freedom of others. 
44 | 45 | For example, if you distribute copies of such a program, whether 46 | gratis or for a fee, you must pass on to the recipients the same 47 | freedoms that you received. You must make sure that they, too, receive 48 | or can get the source code. And you must show them these terms so they 49 | know their rights. 50 | 51 | Developers that use the GNU GPL protect your rights with two steps: 52 | (1) assert copyright on the software, and (2) offer you this License 53 | giving you legal permission to copy, distribute and/or modify it. 54 | 55 | For the developers' and authors' protection, the GPL clearly explains 56 | that there is no warranty for this free software. For both users' and 57 | authors' sake, the GPL requires that modified versions be marked as 58 | changed, so that their problems will not be attributed erroneously to 59 | authors of previous versions. 60 | 61 | Some devices are designed to deny users access to install or run 62 | modified versions of the software inside them, although the manufacturer 63 | can do so. This is fundamentally incompatible with the aim of 64 | protecting users' freedom to change the software. The systematic 65 | pattern of such abuse occurs in the area of products for individuals to 66 | use, which is precisely where it is most unacceptable. Therefore, we 67 | have designed this version of the GPL to prohibit the practice for those 68 | products. If such problems arise substantially in other domains, we 69 | stand ready to extend this provision to those domains in future versions 70 | of the GPL, as needed to protect the freedom of users. 71 | 72 | Finally, every program is threatened constantly by software patents. 73 | States should not allow patents to restrict development and use of 74 | software on general-purpose computers, but in those that do, we wish to 75 | avoid the special danger that patents applied to a free program could 76 | make it effectively proprietary. To prevent this, the GPL assures that 77 | patents cannot be used to render the program non-free. 78 | 79 | The precise terms and conditions for copying, distribution and 80 | modification follow. 81 | 82 | TERMS AND CONDITIONS 83 | 84 | 1. Definitions. 85 | 86 | "This License" refers to version 3 of the GNU General Public License. 87 | 88 | "Copyright" also means copyright-like laws that apply to other kinds of 89 | works, such as semiconductor masks. 90 | 91 | "The Program" refers to any copyrightable work licensed under this 92 | License. Each licensee is addressed as "you". "Licensees" and 93 | "recipients" may be individuals or organizations. 94 | 95 | To "modify" a work means to copy from or adapt all or part of the work 96 | in a fashion requiring copyright permission, other than the making of an 97 | exact copy. The resulting work is called a "modified version" of the 98 | earlier work or a work "based on" the earlier work. 99 | 100 | A "covered work" means either the unmodified Program or a work based 101 | on the Program. 102 | 103 | To "propagate" a work means to do anything with it that, without 104 | permission, would make you directly or secondarily liable for 105 | infringement under applicable copyright law, except executing it on a 106 | computer or modifying a private copy. Propagation includes copying, 107 | distribution (with or without modification), making available to the 108 | public, and in some countries other activities as well. 109 | 110 | To "convey" a work means any kind of propagation that enables other 111 | parties to make or receive copies. 
Mere interaction with a user through 112 | a computer network, with no transfer of a copy, is not conveying. 113 | 114 | An interactive user interface displays "Appropriate Legal Notices" 115 | to the extent that it includes a convenient and prominently visible 116 | feature that (1) displays an appropriate copyright notice, and (2) 117 | tells the user that there is no warranty for the work (except to the 118 | extent that warranties are provided), that licensees may convey the 119 | work under this License, and how to view a copy of this License. If 120 | the interface presents a list of user commands or options, such as a 121 | menu, a prominent item in the list meets this criterion. 122 | 123 | 1. Source Code. 124 | 125 | The "source code" for a work means the preferred form of the work 126 | for making modifications to it. "Object code" means any non-source 127 | form of a work. 128 | 129 | A "Standard Interface" means an interface that either is an official 130 | standard defined by a recognized standards body, or, in the case of 131 | interfaces specified for a particular programming language, one that 132 | is widely used among developers working in that language. 133 | 134 | The "System Libraries" of an executable work include anything, other 135 | than the work as a whole, that (a) is included in the normal form of 136 | packaging a Major Component, but which is not part of that Major 137 | Component, and (b) serves only to enable use of the work with that 138 | Major Component, or to implement a Standard Interface for which an 139 | implementation is available to the public in source code form. A 140 | "Major Component", in this context, means a major essential component 141 | (kernel, window system, and so on) of the specific operating system 142 | (if any) on which the executable work runs, or a compiler used to 143 | produce the work, or an object code interpreter used to run it. 144 | 145 | The "Corresponding Source" for a work in object code form means all 146 | the source code needed to generate, install, and (for an executable 147 | work) run the object code and to modify the work, including scripts to 148 | control those activities. However, it does not include the work's 149 | System Libraries, or general-purpose tools or generally available free 150 | programs which are used unmodified in performing those activities but 151 | which are not part of the work. For example, Corresponding Source 152 | includes interface definition files associated with source files for 153 | the work, and the source code for shared libraries and dynamically 154 | linked subprograms that the work is specifically designed to require, 155 | such as by intimate data communication or control flow between those 156 | subprograms and other parts of the work. 157 | 158 | The Corresponding Source need not include anything that users 159 | can regenerate automatically from other parts of the Corresponding 160 | Source. 161 | 162 | The Corresponding Source for a work in source code form is that 163 | same work. 164 | 165 | 2. Basic Permissions. 166 | 167 | All rights granted under this License are granted for the term of 168 | copyright on the Program, and are irrevocable provided the stated 169 | conditions are met. This License explicitly affirms your unlimited 170 | permission to run the unmodified Program. The output from running a 171 | covered work is covered by this License only if the output, given its 172 | content, constitutes a covered work. 
This License acknowledges your 173 | rights of fair use or other equivalent, as provided by copyright law. 174 | 175 | You may make, run and propagate covered works that you do not 176 | convey, without conditions so long as your license otherwise remains 177 | in force. You may convey covered works to others for the sole purpose 178 | of having them make modifications exclusively for you, or provide you 179 | with facilities for running those works, provided that you comply with 180 | the terms of this License in conveying all material for which you do 181 | not control copyright. Those thus making or running the covered works 182 | for you must do so exclusively on your behalf, under your direction 183 | and control, on terms that prohibit them from making any copies of 184 | your copyrighted material outside their relationship with you. 185 | 186 | Conveying under any other circumstances is permitted solely under 187 | the conditions stated below. Sublicensing is not allowed; section 10 188 | makes it unnecessary. 189 | 190 | 3. Protecting Users' Legal Rights From Anti-Circumvention Law. 191 | 192 | No covered work shall be deemed part of an effective technological 193 | measure under any applicable law fulfilling obligations under article 194 | 11 of the WIPO copyright treaty adopted on 20 December 1996, or 195 | similar laws prohibiting or restricting circumvention of such 196 | measures. 197 | 198 | When you convey a covered work, you waive any legal power to forbid 199 | circumvention of technological measures to the extent such circumvention 200 | is effected by exercising rights under this License with respect to 201 | the covered work, and you disclaim any intention to limit operation or 202 | modification of the work as a means of enforcing, against the work's 203 | users, your or third parties' legal rights to forbid circumvention of 204 | technological measures. 205 | 206 | 4. Conveying Verbatim Copies. 207 | 208 | You may convey verbatim copies of the Program's source code as you 209 | receive it, in any medium, provided that you conspicuously and 210 | appropriately publish on each copy an appropriate copyright notice; 211 | keep intact all notices stating that this License and any 212 | non-permissive terms added in accord with section 7 apply to the code; 213 | keep intact all notices of the absence of any warranty; and give all 214 | recipients a copy of this License along with the Program. 215 | 216 | You may charge any price or no price for each copy that you convey, 217 | and you may offer support or warranty protection for a fee. 218 | 219 | 5. Conveying Modified Source Versions. 220 | 221 | You may convey a work based on the Program, or the modifications to 222 | produce it from the Program, in the form of source code under the 223 | terms of section 4, provided that you also meet all of these conditions: 224 | 225 | a) The work must carry prominent notices stating that you modified 226 | it, and giving a relevant date. 227 | 228 | b) The work must carry prominent notices stating that it is 229 | released under this License and any conditions added under section 230 | 7. This requirement modifies the requirement in section 4 to 231 | "keep intact all notices". 232 | 233 | c) You must license the entire work, as a whole, under this 234 | License to anyone who comes into possession of a copy. 
This 235 | License will therefore apply, along with any applicable section 7 236 | additional terms, to the whole of the work, and all its parts, 237 | regardless of how they are packaged. This License gives no 238 | permission to license the work in any other way, but it does not 239 | invalidate such permission if you have separately received it. 240 | 241 | d) If the work has interactive user interfaces, each must display 242 | Appropriate Legal Notices; however, if the Program has interactive 243 | interfaces that do not display Appropriate Legal Notices, your 244 | work need not make them do so. 245 | 246 | A compilation of a covered work with other separate and independent 247 | works, which are not by their nature extensions of the covered work, 248 | and which are not combined with it such as to form a larger program, 249 | in or on a volume of a storage or distribution medium, is called an 250 | "aggregate" if the compilation and its resulting copyright are not 251 | used to limit the access or legal rights of the compilation's users 252 | beyond what the individual works permit. Inclusion of a covered work 253 | in an aggregate does not cause this License to apply to the other 254 | parts of the aggregate. 255 | 256 | 6. Conveying Non-Source Forms. 257 | 258 | You may convey a covered work in object code form under the terms 259 | of sections 4 and 5, provided that you also convey the 260 | machine-readable Corresponding Source under the terms of this License, 261 | in one of these ways: 262 | 263 | a) Convey the object code in, or embodied in, a physical product 264 | (including a physical distribution medium), accompanied by the 265 | Corresponding Source fixed on a durable physical medium 266 | customarily used for software interchange. 267 | 268 | b) Convey the object code in, or embodied in, a physical product 269 | (including a physical distribution medium), accompanied by a 270 | written offer, valid for at least three years and valid for as 271 | long as you offer spare parts or customer support for that product 272 | model, to give anyone who possesses the object code either (1) a 273 | copy of the Corresponding Source for all the software in the 274 | product that is covered by this License, on a durable physical 275 | medium customarily used for software interchange, for a price no 276 | more than your reasonable cost of physically performing this 277 | conveying of source, or (2) access to copy the 278 | Corresponding Source from a network server at no charge. 279 | 280 | c) Convey individual copies of the object code with a copy of the 281 | written offer to provide the Corresponding Source. This 282 | alternative is allowed only occasionally and noncommercially, and 283 | only if you received the object code with such an offer, in accord 284 | with subsection 6b. 285 | 286 | d) Convey the object code by offering access from a designated 287 | place (gratis or for a charge), and offer equivalent access to the 288 | Corresponding Source in the same way through the same place at no 289 | further charge. You need not require recipients to copy the 290 | Corresponding Source along with the object code. If the place to 291 | copy the object code is a network server, the Corresponding Source 292 | may be on a different server (operated by you or a third party) 293 | that supports equivalent copying facilities, provided you maintain 294 | clear directions next to the object code saying where to find the 295 | Corresponding Source. 
Regardless of what server hosts the 296 | Corresponding Source, you remain obligated to ensure that it is 297 | available for as long as needed to satisfy these requirements. 298 | 299 | e) Convey the object code using peer-to-peer transmission, provided 300 | you inform other peers where the object code and Corresponding 301 | Source of the work are being offered to the general public at no 302 | charge under subsection 6d. 303 | 304 | A separable portion of the object code, whose source code is excluded 305 | from the Corresponding Source as a System Library, need not be 306 | included in conveying the object code work. 307 | 308 | A "User Product" is either (1) a "consumer product", which means any 309 | tangible personal property which is normally used for personal, family, 310 | or household purposes, or (2) anything designed or sold for incorporation 311 | into a dwelling. In determining whether a product is a consumer product, 312 | doubtful cases shall be resolved in favor of coverage. For a particular 313 | product received by a particular user, "normally used" refers to a 314 | typical or common use of that class of product, regardless of the status 315 | of the particular user or of the way in which the particular user 316 | actually uses, or expects or is expected to use, the product. A product 317 | is a consumer product regardless of whether the product has substantial 318 | commercial, industrial or non-consumer uses, unless such uses represent 319 | the only significant mode of use of the product. 320 | 321 | "Installation Information" for a User Product means any methods, 322 | procedures, authorization keys, or other information required to install 323 | and execute modified versions of a covered work in that User Product from 324 | a modified version of its Corresponding Source. The information must 325 | suffice to ensure that the continued functioning of the modified object 326 | code is in no case prevented or interfered with solely because 327 | modification has been made. 328 | 329 | If you convey an object code work under this section in, or with, or 330 | specifically for use in, a User Product, and the conveying occurs as 331 | part of a transaction in which the right of possession and use of the 332 | User Product is transferred to the recipient in perpetuity or for a 333 | fixed term (regardless of how the transaction is characterized), the 334 | Corresponding Source conveyed under this section must be accompanied 335 | by the Installation Information. But this requirement does not apply 336 | if neither you nor any third party retains the ability to install 337 | modified object code on the User Product (for example, the work has 338 | been installed in ROM). 339 | 340 | The requirement to provide Installation Information does not include a 341 | requirement to continue to provide support service, warranty, or updates 342 | for a work that has been modified or installed by the recipient, or for 343 | the User Product in which it has been modified or installed. Access to a 344 | network may be denied when the modification itself materially and 345 | adversely affects the operation of the network or violates the rules and 346 | protocols for communication across the network. 
347 | 348 | Corresponding Source conveyed, and Installation Information provided, 349 | in accord with this section must be in a format that is publicly 350 | documented (and with an implementation available to the public in 351 | source code form), and must require no special password or key for 352 | unpacking, reading or copying. 353 | 354 | 7. Additional Terms. 355 | 356 | "Additional permissions" are terms that supplement the terms of this 357 | License by making exceptions from one or more of its conditions. 358 | Additional permissions that are applicable to the entire Program shall 359 | be treated as though they were included in this License, to the extent 360 | that they are valid under applicable law. If additional permissions 361 | apply only to part of the Program, that part may be used separately 362 | under those permissions, but the entire Program remains governed by 363 | this License without regard to the additional permissions. 364 | 365 | When you convey a copy of a covered work, you may at your option 366 | remove any additional permissions from that copy, or from any part of 367 | it. (Additional permissions may be written to require their own 368 | removal in certain cases when you modify the work.) You may place 369 | additional permissions on material, added by you to a covered work, 370 | for which you have or can give appropriate copyright permission. 371 | 372 | Notwithstanding any other provision of this License, for material you 373 | add to a covered work, you may (if authorized by the copyright holders of 374 | that material) supplement the terms of this License with terms: 375 | 376 | a) Disclaiming warranty or limiting liability differently from the 377 | terms of sections 15 and 16 of this License; or 378 | 379 | b) Requiring preservation of specified reasonable legal notices or 380 | author attributions in that material or in the Appropriate Legal 381 | Notices displayed by works containing it; or 382 | 383 | c) Prohibiting misrepresentation of the origin of that material, or 384 | requiring that modified versions of such material be marked in 385 | reasonable ways as different from the original version; or 386 | 387 | d) Limiting the use for publicity purposes of names of licensors or 388 | authors of the material; or 389 | 390 | e) Declining to grant rights under trademark law for use of some 391 | trade names, trademarks, or service marks; or 392 | 393 | f) Requiring indemnification of licensors and authors of that 394 | material by anyone who conveys the material (or modified versions of 395 | it) with contractual assumptions of liability to the recipient, for 396 | any liability that these contractual assumptions directly impose on 397 | those licensors and authors. 398 | 399 | All other non-permissive additional terms are considered "further 400 | restrictions" within the meaning of section 10. If the Program as you 401 | received it, or any part of it, contains a notice stating that it is 402 | governed by this License along with a term that is a further 403 | restriction, you may remove that term. If a license document contains 404 | a further restriction but permits relicensing or conveying under this 405 | License, you may add to a covered work material governed by the terms 406 | of that license document, provided that the further restriction does 407 | not survive such relicensing or conveying. 
408 | 409 | If you add terms to a covered work in accord with this section, you 410 | must place, in the relevant source files, a statement of the 411 | additional terms that apply to those files, or a notice indicating 412 | where to find the applicable terms. 413 | 414 | Additional terms, permissive or non-permissive, may be stated in the 415 | form of a separately written license, or stated as exceptions; 416 | the above requirements apply either way. 417 | 418 | 8. Termination. 419 | 420 | You may not propagate or modify a covered work except as expressly 421 | provided under this License. Any attempt otherwise to propagate or 422 | modify it is void, and will automatically terminate your rights under 423 | this License (including any patent licenses granted under the third 424 | paragraph of section 11). 425 | 426 | However, if you cease all violation of this License, then your 427 | license from a particular copyright holder is reinstated (a) 428 | provisionally, unless and until the copyright holder explicitly and 429 | finally terminates your license, and (b) permanently, if the copyright 430 | holder fails to notify you of the violation by some reasonable means 431 | prior to 60 days after the cessation. 432 | 433 | Moreover, your license from a particular copyright holder is 434 | reinstated permanently if the copyright holder notifies you of the 435 | violation by some reasonable means, this is the first time you have 436 | received notice of violation of this License (for any work) from that 437 | copyright holder, and you cure the violation prior to 30 days after 438 | your receipt of the notice. 439 | 440 | Termination of your rights under this section does not terminate the 441 | licenses of parties who have received copies or rights from you under 442 | this License. If your rights have been terminated and not permanently 443 | reinstated, you do not qualify to receive new licenses for the same 444 | material under section 10. 445 | 446 | 9. Acceptance Not Required for Having Copies. 447 | 448 | You are not required to accept this License in order to receive or 449 | run a copy of the Program. Ancillary propagation of a covered work 450 | occurring solely as a consequence of using peer-to-peer transmission 451 | to receive a copy likewise does not require acceptance. However, 452 | nothing other than this License grants you permission to propagate or 453 | modify any covered work. These actions infringe copyright if you do 454 | not accept this License. Therefore, by modifying or propagating a 455 | covered work, you indicate your acceptance of this License to do so. 456 | 457 | 10. Automatic Licensing of Downstream Recipients. 458 | 459 | Each time you convey a covered work, the recipient automatically 460 | receives a license from the original licensors, to run, modify and 461 | propagate that work, subject to this License. You are not responsible 462 | for enforcing compliance by third parties with this License. 463 | 464 | An "entity transaction" is a transaction transferring control of an 465 | organization, or substantially all assets of one, or subdividing an 466 | organization, or merging organizations. 
If propagation of a covered 467 | work results from an entity transaction, each party to that 468 | transaction who receives a copy of the work also receives whatever 469 | licenses to the work the party's predecessor in interest had or could 470 | give under the previous paragraph, plus a right to possession of the 471 | Corresponding Source of the work from the predecessor in interest, if 472 | the predecessor has it or can get it with reasonable efforts. 473 | 474 | You may not impose any further restrictions on the exercise of the 475 | rights granted or affirmed under this License. For example, you may 476 | not impose a license fee, royalty, or other charge for exercise of 477 | rights granted under this License, and you may not initiate litigation 478 | (including a cross-claim or counterclaim in a lawsuit) alleging that 479 | any patent claim is infringed by making, using, selling, offering for 480 | sale, or importing the Program or any portion of it. 481 | 482 | 11. Patents. 483 | 484 | A "contributor" is a copyright holder who authorizes use under this 485 | License of the Program or a work on which the Program is based. The 486 | work thus licensed is called the contributor's "contributor version". 487 | 488 | A contributor's "essential patent claims" are all patent claims 489 | owned or controlled by the contributor, whether already acquired or 490 | hereafter acquired, that would be infringed by some manner, permitted 491 | by this License, of making, using, or selling its contributor version, 492 | but do not include claims that would be infringed only as a 493 | consequence of further modification of the contributor version. For 494 | purposes of this definition, "control" includes the right to grant 495 | patent sublicenses in a manner consistent with the requirements of 496 | this License. 497 | 498 | Each contributor grants you a non-exclusive, worldwide, royalty-free 499 | patent license under the contributor's essential patent claims, to 500 | make, use, sell, offer for sale, import and otherwise run, modify and 501 | propagate the contents of its contributor version. 502 | 503 | In the following three paragraphs, a "patent license" is any express 504 | agreement or commitment, however denominated, not to enforce a patent 505 | (such as an express permission to practice a patent or covenant not to 506 | sue for patent infringement). To "grant" such a patent license to a 507 | party means to make such an agreement or commitment not to enforce a 508 | patent against the party. 509 | 510 | If you convey a covered work, knowingly relying on a patent license, 511 | and the Corresponding Source of the work is not available for anyone 512 | to copy, free of charge and under the terms of this License, through a 513 | publicly available network server or other readily accessible means, 514 | then you must either (1) cause the Corresponding Source to be so 515 | available, or (2) arrange to deprive yourself of the benefit of the 516 | patent license for this particular work, or (3) arrange, in a manner 517 | consistent with the requirements of this License, to extend the patent 518 | license to downstream recipients. "Knowingly relying" means you have 519 | actual knowledge that, but for the patent license, your conveying the 520 | covered work in a country, or your recipient's use of the covered work 521 | in a country, would infringe one or more identifiable patents in that 522 | country that you have reason to believe are valid. 
523 | 524 | If, pursuant to or in connection with a single transaction or 525 | arrangement, you convey, or propagate by procuring conveyance of, a 526 | covered work, and grant a patent license to some of the parties 527 | receiving the covered work authorizing them to use, propagate, modify 528 | or convey a specific copy of the covered work, then the patent license 529 | you grant is automatically extended to all recipients of the covered 530 | work and works based on it. 531 | 532 | A patent license is "discriminatory" if it does not include within 533 | the scope of its coverage, prohibits the exercise of, or is 534 | conditioned on the non-exercise of one or more of the rights that are 535 | specifically granted under this License. You may not convey a covered 536 | work if you are a party to an arrangement with a third party that is 537 | in the business of distributing software, under which you make payment 538 | to the third party based on the extent of your activity of conveying 539 | the work, and under which the third party grants, to any of the 540 | parties who would receive the covered work from you, a discriminatory 541 | patent license (a) in connection with copies of the covered work 542 | conveyed by you (or copies made from those copies), or (b) primarily 543 | for and in connection with specific products or compilations that 544 | contain the covered work, unless you entered into that arrangement, 545 | or that patent license was granted, prior to 28 March 2007. 546 | 547 | Nothing in this License shall be construed as excluding or limiting 548 | any implied license or other defenses to infringement that may 549 | otherwise be available to you under applicable patent law. 550 | 551 | 12. No Surrender of Others' Freedom. 552 | 553 | If conditions are imposed on you (whether by court order, agreement or 554 | otherwise) that contradict the conditions of this License, they do not 555 | excuse you from the conditions of this License. If you cannot convey a 556 | covered work so as to satisfy simultaneously your obligations under this 557 | License and any other pertinent obligations, then as a consequence you may 558 | not convey it at all. For example, if you agree to terms that obligate you 559 | to collect a royalty for further conveying from those to whom you convey 560 | the Program, the only way you could satisfy both those terms and this 561 | License would be to refrain entirely from conveying the Program. 562 | 563 | 13. Use with the GNU Affero General Public License. 564 | 565 | Notwithstanding any other provision of this License, you have 566 | permission to link or combine any covered work with a work licensed 567 | under version 3 of the GNU Affero General Public License into a single 568 | combined work, and to convey the resulting work. The terms of this 569 | License will continue to apply to the part which is the covered work, 570 | but the special requirements of the GNU Affero General Public License, 571 | section 13, concerning interaction through a network will apply to the 572 | combination as such. 573 | 574 | 14. Revised Versions of this License. 575 | 576 | The Free Software Foundation may publish revised and/or new versions of 577 | the GNU General Public License from time to time. Such new versions will 578 | be similar in spirit to the present version, but may differ in detail to 579 | address new problems or concerns. 580 | 581 | Each version is given a distinguishing version number. 
If the 582 | Program specifies that a certain numbered version of the GNU General 583 | Public License "or any later version" applies to it, you have the 584 | option of following the terms and conditions either of that numbered 585 | version or of any later version published by the Free Software 586 | Foundation. If the Program does not specify a version number of the 587 | GNU General Public License, you may choose any version ever published 588 | by the Free Software Foundation. 589 | 590 | If the Program specifies that a proxy can decide which future 591 | versions of the GNU General Public License can be used, that proxy's 592 | public statement of acceptance of a version permanently authorizes you 593 | to choose that version for the Program. 594 | 595 | Later license versions may give you additional or different 596 | permissions. However, no additional obligations are imposed on any 597 | author or copyright holder as a result of your choosing to follow a 598 | later version. 599 | 600 | 15. Disclaimer of Warranty. 601 | 602 | THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY 603 | APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT 604 | HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY 605 | OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, 606 | THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR 607 | PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM 608 | IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF 609 | ALL NECESSARY SERVICING, REPAIR OR CORRECTION. 610 | 611 | 16. Limitation of Liability. 612 | 613 | IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING 614 | WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS 615 | THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY 616 | GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE 617 | USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF 618 | DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD 619 | PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), 620 | EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF 621 | SUCH DAMAGES. 622 | 623 | 17. Interpretation of Sections 15 and 16. 624 | 625 | If the disclaimer of warranty and limitation of liability provided 626 | above cannot be given local legal effect according to their terms, 627 | reviewing courts shall apply local law that most closely approximates 628 | an absolute waiver of all civil liability in connection with the 629 | Program, unless a warranty or assumption of liability accompanies a 630 | copy of the Program in return for a fee. 631 | 632 | END OF TERMS AND CONDITIONS 633 | 634 | How to Apply These Terms to Your New Programs 635 | 636 | If you develop a new program, and you want it to be of the greatest 637 | possible use to the public, the best way to achieve this is to make it 638 | free software which everyone can redistribute and change under these terms. 639 | 640 | To do so, attach the following notices to the program. It is safest 641 | to attach them to the start of each source file to most effectively 642 | state the exclusion of warranty; and each file should have at least 643 | the "copyright" line and a pointer to where the full notice is found. 
644 | 645 | <one line to give the program's name and a brief idea of what it does.> 646 | Copyright (C) <year> <name of author> 647 | 648 | This program is free software: you can redistribute it and/or modify 649 | it under the terms of the GNU General Public License as published by 650 | the Free Software Foundation, either version 3 of the License, or 651 | (at your option) any later version. 652 | 653 | This program is distributed in the hope that it will be useful, 654 | but WITHOUT ANY WARRANTY; without even the implied warranty of 655 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 656 | GNU General Public License for more details. 657 | 658 | You should have received a copy of the GNU General Public License 659 | along with this program. If not, see <https://www.gnu.org/licenses/>. 660 | 661 | Also add information on how to contact you by electronic and paper mail. 662 | 663 | If the program does terminal interaction, make it output a short 664 | notice like this when it starts in an interactive mode: 665 | 666 | <program> Copyright (C) <year> <name of author> 667 | This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'. 668 | This is free software, and you are welcome to redistribute it 669 | under certain conditions; type `show c' for details. 670 | 671 | The hypothetical commands `show w' and `show c' should show the appropriate 672 | parts of the General Public License. Of course, your program's commands 673 | might be different; for a GUI interface, you would use an "about box". 674 | 675 | You should also get your employer (if you work as a programmer) or school, 676 | if any, to sign a "copyright disclaimer" for the program, if necessary. 677 | For more information on this, and how to apply and follow the GNU GPL, see 678 | <https://www.gnu.org/licenses/>. 679 | 680 | The GNU General Public License does not permit incorporating your program 681 | into proprietary programs. If your program is a subroutine library, you 682 | may consider it more useful to permit linking proprietary applications with 683 | the library. If this is what you want to do, use the GNU Lesser General 684 | Public License instead of this License. But first, please read 685 | <https://www.gnu.org/licenses/why-not-lgpl.html>. 686 | --------------------------------------------------------------------------------