├── .Rbuildignore ├── .github ├── .gitignore └── workflows │ └── R-CMD-check.yaml ├── DESCRIPTION ├── NAMESPACE ├── NEWS ├── R ├── Smetric.R ├── bootInclude.R ├── bootTable.R ├── bootThreshold.R ├── bootnet.R ├── bootnetResultsMethods.R ├── centralityCompare.R ├── checkInput.R ├── comparePlot.R ├── defaultFunctions.R ├── estimateNetwork.R ├── multiverse.R ├── netSimulator.R ├── netSimulator_methods.R ├── parSim.R ├── plotMethod.R ├── printMethod.R ├── replicationSimulator.R ├── replicationSimulator_methods.R ├── simGraph.R ├── summaryMethod.R ├── transformations.R └── zzz.R ├── README.md ├── inst └── CITATION └── man ├── IsingGenerator.Rd ├── binarize.Rd ├── bootInclude.Rd ├── bootThreshold.Rd ├── bootnet-package.Rd ├── bootnet.Rd ├── corStability.Rd ├── differenceTest.Rd ├── estimateNetwork.Rd ├── genGGM.Rd ├── ggmGenerator.Rd ├── multiverse.Rd ├── netSimulator.Rd ├── null.Rd ├── plot.bootnet.Rd ├── plot.netSimulator.Rd ├── plotBootnetResult.Rd ├── print.bootnet.Rd ├── summary.bootnet.Rd └── transformation.Rd /.Rbuildignore: -------------------------------------------------------------------------------- 1 | ^.*\.Rproj$ 2 | ^\.Rproj\.user$ 3 | ^\.github$ 4 | -------------------------------------------------------------------------------- /.github/.gitignore: -------------------------------------------------------------------------------- 1 | *.html 2 | -------------------------------------------------------------------------------- /.github/workflows/R-CMD-check.yaml: -------------------------------------------------------------------------------- 1 | # R CMD check workflow. 2 | name: R-CMD-check 3 | 4 | # Set workflow permissions. 5 | permissions: read-all 6 | 7 | # Run on pushes and pull requests to the `master` branch. 8 | on: 9 | push: 10 | branches: 11 | - master 12 | pull_request: 13 | branches: 14 | - master 15 | # Trigger manually via the GitHub UI. 16 | workflow_dispatch: 17 | 18 | # Jobs to run. 
19 | jobs: 20 | 21 | # CRAN check job 22 | R-CMD-check: 23 | name: ${{ matrix.config.os }} (${{ matrix.config.r }}) 24 | runs-on: ${{ matrix.config.os }} 25 | strategy: 26 | fail-fast: false 27 | matrix: 28 | config: 29 | - {os: macos-latest, r: 'release'} 30 | - {os: windows-latest, r: 'release'} 31 | - {os: ubuntu-latest, r: 'devel', http-user-agent: 'release'} 32 | - {os: ubuntu-latest, r: 'release'} 33 | - {os: ubuntu-latest, r: 'oldrel-1'} 34 | env: 35 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 36 | R_KEEP_PKG_SOURCE: yes 37 | 38 | # Actions. 39 | steps: 40 | # Checkout the repository. 41 | - name: Checkout repository 42 | uses: actions/checkout@v4 43 | 44 | # Setup `pandoc`. 45 | - name: Setup pandoc 46 | uses: r-lib/actions/setup-pandoc@v2 47 | 48 | # Setup `R`. 49 | - name: Setup R 50 | uses: r-lib/actions/setup-r@v2 51 | with: 52 | r-version: ${{ matrix.config.r }} 53 | http-user-agent: ${{ matrix.config.http-user-agent }} 54 | use-public-rspm: true 55 | 56 | # Setup package dependencies and extras. 57 | - name: Install dependencies 58 | uses: r-lib/actions/setup-r-dependencies@v2 59 | with: 60 | extra-packages: any::rcmdcheck 61 | needs: check 62 | 63 | # Perform the CRAN check. 
64 | - name: Check package 65 | uses: r-lib/actions/check-r-package@v2 66 | with: 67 | upload-snapshots: true 68 | build_args: 'c("--no-manual","--compact-vignettes=gs+qpdf")' 69 | -------------------------------------------------------------------------------- /DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: bootnet 2 | Type: Package 3 | Title: Bootstrap Methods for Various Network Estimation Routines 4 | Version: 1.6 5 | Authors@R: c( 6 | person("Sacha", "Epskamp", email = "mail@sachaepskamp.com",role = c("aut", "cre")), 7 | person("Eiko I.", "Fried", role = c("ctb")) 8 | ) 9 | Maintainer: Sacha Epskamp 10 | Depends: ggplot2, R (>= 3.0.0) 11 | Imports: methods, igraph, IsingFit (>= 0.4), qgraph, dplyr (>= 0.3.0.2), tidyr, 12 | gtools, corpcor, IsingSampler (>= 0.2.3), mvtnorm, abind, Matrix, snow, 13 | mgm (>= 1.2), NetworkToolbox (>= 1.1.0), 14 | pbapply, networktools, rlang, tibble, tidyselect 15 | Suggests: glasso, GGMncv, BDgraph, graphicalVAR, relaimpo, lavaan, psychTools, huge 16 | Description: Bootstrap methods to assess accuracy and stability of estimated network structures 17 | and centrality indices . Allows for flexible 18 | specification of any undirected network estimation procedure in R, and offers 19 | default sets for various estimation routines. 
20 | License: GPL-2 21 | BugReports: https://github.com/SachaEpskamp/bootnet/issues 22 | URL: https://github.com/SachaEpskamp/bootnet 23 | -------------------------------------------------------------------------------- /NAMESPACE: -------------------------------------------------------------------------------- 1 | 2 | importFrom(rlang,.data) 3 | import(tibble) 4 | import(tidyselect) 5 | 6 | importFrom("stats", "var") 7 | 8 | #importFrom("GGMncv","ggmncv") 9 | 10 | importFrom("snow","setDefaultClusterOptions") 11 | 12 | # Transformations: 13 | export(quantile_transformation) 14 | export(rank_transformation) 15 | 16 | export(multiverse) 17 | 18 | import(pbapply) 19 | # import(lavaan) 20 | import(networktools) 21 | importFrom(NetworkToolbox,"TMFG") 22 | # import(glasso) 23 | 24 | # Default functions: 25 | export(bootnet_SVAR_lavaan) 26 | export(bootInclude) 27 | export(bootnet_IsingFit) 28 | export(bootnet_EBICglasso) 29 | export(bootnet_ggmModSelect) 30 | export(bootnet_pcor) 31 | export(bootnet_cor) 32 | export(bootnet_IsingSampler) 33 | export(bootnet_adalasso) 34 | export(bootnet_huge) 35 | export(bootnet_mgm) 36 | export(bootnet_relimp) 37 | export(bootnet_TMFG) 38 | export(bootnet_LoGo) 39 | export(bootnet_ggmModSelect) 40 | export(bootnet_graphicalVAR) 41 | export(bootnet_piecewiseIsing) 42 | export(bootThreshold) 43 | 44 | 45 | 46 | # Defaults: 47 | importFrom("stats", "qnorm") 48 | # importFrom(huge,"huge","huge.npn","huge.select") 49 | # importFrom(parcor,"adalasso.net") 50 | import(mgm) 51 | #importFrom(relaimpo, "calc.relimp") 52 | 53 | # exportPattern("^[[:alpha:]]+") 54 | # Exports: 55 | export(bootnet) 56 | export(null) 57 | export(binarize) 58 | export(estimateNetwork) 59 | export(differenceTest) 60 | export(corStability) 61 | 62 | 63 | # Methods: 64 | S3method(summary, bootnet) 65 | S3method(plot, bootnet) 66 | S3method(print, bootnet) 67 | S3method(summary, bootnetResult) 68 | S3method(plot, bootnetResult) 69 | S3method(print, bootnetResult) 70 | 71 
| S3method(summary, netSimulator) 72 | S3method(plot, netSimulator) 73 | S3method(print, netSimulator) 74 | 75 | S3method(summary, replicationSimulator) 76 | S3method(plot, replicationSimulator) 77 | S3method(print, replicationSimulator) 78 | 79 | export(replicationSimulator) 80 | export(netSimulator) 81 | export(genGGM) 82 | export(ggmGenerator) 83 | export(IsingGenerator) 84 | 85 | # Imports: 86 | #import(dplyr) 87 | import(snow) 88 | importFrom(IsingFit,"IsingFit") 89 | import(qgraph) 90 | import(ggplot2) 91 | importFrom("dplyr","anti_join","arrange","arrange_","bind_rows","filter","filter_","funs","group_by", 92 | "group_by_","id","left_join","mutate","mutate_","rename","select","select_","summarize","summarize_", 93 | "tally","ungroup") 94 | importFrom("igraph", "get.adjacency","watts.strogatz.game") 95 | import(tidyr) 96 | importFrom(gtools, "mixedorder") 97 | importFrom(corpcor, "cor2pcor") 98 | importFrom(IsingSampler, "IsingSampler") 99 | importFrom(corpcor, "pseudoinverse") 100 | importFrom(mvtnorm, "rmvnorm") 101 | importFrom(IsingSampler, "EstimateIsing") 102 | importFrom(abind,"abind") 103 | importFrom(Matrix,"forceSymmetric") 104 | # importFrom(BDgraph, "bdgraph.sim") 105 | 106 | #export(centrality) 107 | 108 | importFrom("methods", "is") 109 | importFrom("stats", "cor") 110 | importFrom("utils", "setTxtProgressBar", "txtProgressBar") 111 | importFrom("stats", "na.omit", "quantile", "runif", "sd") 112 | importFrom("utils", "data") 113 | importFrom("utils", "capture.output") 114 | importFrom("stats", "cov2cor") 115 | importFrom("utils", "write.table") 116 | importFrom("stats", "as.formula") 117 | importFrom("utils", "packageVersion") 118 | importFrom("stats", "lm") 119 | importFrom("stats", "weighted.mean") 120 | importFrom("utils", "combn") 121 | importFrom("graphics", "par") -------------------------------------------------------------------------------- /NEWS: -------------------------------------------------------------------------------- 1 | 
Changes in Version 1.6 2 | - Changed the way the pairwise, minimum and maximum sample sizes are computed for estimators using a correlation matrix as input with missing data. Thanks to Carl Falk. This might lead to different results compared to older bootnet versions! To recover old results, use the "pairwise_average_v1.5", "pairwise_minimum_v1.5", and "pairwise_maximum_v1.5" arguments in the sampleSize argument of the default functions. 3 | 4 | Changes in Version 1.5.6 5 | - Included min_sum argument in the IsingFit default 6 | 7 | Changes in Version 1.5.5 8 | - Small help file fix 9 | 10 | Changes in Version 1.5.4 11 | - IsingSampler default set now always uses method = "uni" by default 12 | - Added min_sum and threshold options to default = "IsingSampler" 13 | - Fixed a small problem in the bootnet package help file 14 | 15 | Changes in Version 1.5.3 16 | - Improved support for estimation functions with multiple networks of multiple types 17 | 18 | Changes in Version 1.5.2 19 | - Fixed links in README.md 20 | 21 | Changes in Version 1.5.1 22 | - Changed CRAN NOTE regarding class check 23 | - Fixed remaining deprecated dplyr functions 24 | - Fixed a bug with netSimulator and replicationSimulator print method 25 | 26 | Changes in Version 1.5 27 | - Fixed a bug with bridge centrality indices being incorrectly labeled 28 | - Fixed a bug that occurred when using order = "sample" together with the subset argument 29 | - Removed calls to deprecated dplyr functions 30 | 31 | Changes in Version 1.4.7 32 | - 'binarize' now makes values above the split 1 instead of below the split 33 | - The bootnet plot method now shows values for all centrality metrics when plotting difference tests. 
34 | - the mgm default for tuning now defaults to 0.25 35 | - Fixed a bug with bridge centrality indices being incorrectly labeled 36 | 37 | Changes in Version 1.4.6 38 | - bootnet() now no longer re-estimates the network if the input is a bootnetResult 39 | - Several packages are now listed under Suggests instead of Imports 40 | - Included support for the GGMncv package 41 | 42 | Changes in Version 1.4.4 43 | - Fixed a critical error when using multiple cores in R 4.0 on Mac 44 | 45 | Changes in Version 1.4 46 | - Added the transformation functions 'quantile_transformation' and 'rank_transformation' 47 | - Several default sets now use corMethod = "cor" rather than corMethod = "cor_auto" by default! 48 | - Fixed a bug in case-drop bootstrap plotting method 49 | - "Sampled people" is now "Sampled cases" 50 | - Removed adalasso dependency due to CRAN removal 51 | - Added 'multiverse' function for multiverse-plots of bootstrap results 52 | 53 | Changes in Version 1.3 54 | - Added the options pairwise_maximum, pairwise_minimum and pairwise_average to the sampleSize argument of several default functions 55 | - Old functionality in estimateNetwork and bootnet has been removed! This includes graphFun, estFun etcetera. 56 | - Some changes that lead to NEW DEFAULT BEHAVIOR: 57 | - Added the argument 'nonPositiveDefinite' to several default estimators, which will stop with an error when the input is different. 58 | - The sampleSize argument now defaults to "pairwise_average"! 
59 | - All default functions are now by default locked unless used from within estimateNetwork 60 | - The plot method now better scales a margin on the top and bottom of the plot 61 | - ggmGenerator now generates thresholds more consistently, leading to uniform data by default or skewed data if the 'skewFactor' argument is used 62 | - ggmGenerator can now simulate missing data using the 'missing' argument 63 | - corMethod = "spearman" is now supported in several default sets 64 | - Fixed a bug with BDgraph change in genGGM function 65 | 66 | Changes in Version 1.2.4 67 | - Resolved a bug with expected influence crashing bootnet when empty networks were estimated 68 | 69 | Changes in Version 1.2.3 70 | - More informative error when statistics are not included in the plot. 71 | - Updated psych to psychTools 72 | 73 | Changes in Version 1.2.2 74 | - Added 'adjacency' argument to pcor default set, which allows for estimating a network with a fixed structure. 75 | - Fixed parametric bootstrapping 76 | 77 | Changes in Version 1.2.1 78 | - corStability should now return the non-finite values warning less often 79 | 80 | Changes in Version 1.2 81 | o New features: 82 | - Added support for statistics "bridgeStrength", "bridgeCloseness", "bridgeBetweenness", and "bridgeExpectedInfluence". Thanks to Payton Jones! 83 | - The statistics argument in bootnet can now be "all" 84 | - Added bootThreshold function to threshold a network based on bootstraps (e.g., bootstrapped interval includes 0) 85 | - Added bootInclude function to obtain a network of bootstrap inclusion probabilities 86 | - the 'statistics' argument in bootnet now defaults to c("edge","strength","outStrength","inStrength"). This means that closeness and betweenness are no longer stored by default! 
87 | - corStability will now use all tested statistics by default 88 | o Updates: 89 | - The corStability function now accepts statistics written with an upper case first letter 90 | - Fixed a bug using default = "mgm" with only one binary variable 91 | - IsingFit and IsingSampler defaults now transform -1, 1 data to 0, 1 when computing network, then back when returning results 92 | - Included the 'includeDiagonal' argument to bootnet to include storing diagonal entries (self-loops) for directed networks only 93 | - Bootnet now copies the library used by the user to the clusters when using nCores > 1. This is important for checkpoint and packrat compatibility 94 | - corStability now returns NA for incomputable correlations (e.g., due to infinite values) 95 | o Experimental: 96 | - Added default set "piecewiseIsing" for estimating Ising models while selecting participants on a sum-score (very experimental) 97 | - Added default set "SVAR_lavaan" for step-up structural VAR model selection using Lavaan (experimental) 98 | 99 | 100 | Changes in Version 1.1 101 | - New supported default sets: 102 | - "cor" - Correlation networks 103 | - "TMFG" - Triangulated Maximally Filtered Graph 104 | - "LoGo"- Local/Global Sparse Inverse Covariance Matrix 105 | - "ggmModSelect" - Unregularized stepwise GGM model selection 106 | - "graphicalVAR" - LASSO regularized graphical VAR models 107 | - Some changes to mgm default: 108 | - mgm version >= 1.2 is now required 109 | - Renamed lev to level 110 | - Renamed degree to order, now defaults to 2 instead of 3 111 | - Added binarySign argument. Now chosen by default. 
112 | - Added the 'replicationSimulator' function, which can be used to assess expected replicability of networks 113 | - Many default sets now support the 'principalDirection' argument, which can be used to multiply variables with the sign of the first principal component 114 | - plot method now supports split0 = TRUE, will show how often an edge was 0 and only show CIs of non-zero estimates (faded relative to proportion of times edge was zero). 115 | - Updated the 'genGGM' function to support various different network structures, with thanks to Mark Brandt! 116 | - Added RSPBC and Hybrid centrality, thanks to Alex Christensen 117 | - Added the 'alpha' argument to default set "pcor" 118 | - Added functionality for functions returning multiple graphs 119 | - Added outStrength, inStrength, outExpectedInfluence and inExpectedInfluence 120 | - Fixed a bug reporting the number of non-zero edges in the print methods 121 | - Added 'args' argument to netSimulator 122 | - Fixed a bug in which fun is not usable in bootnet() 123 | - Added lambda.min.ratio argument to some estimators. 
Now defaults to 0.01 for default = "huge" 124 | - bootnet and netSimulator now show a progress bar (thanks to pbapply package) 125 | - plot method now shows bootstrapped mean in addition to sample value 126 | - The 'statistics' argument in bootnet and plot method now accept statistics with an upper case first letter, to be consistent with qgraph 127 | - CIstyle argument can now only be one value, and always defaults to quantiles 128 | 129 | 130 | 131 | Changes in Version 1.0.1 132 | - missing = "fiml" is now supported for EBICglasso and pcor default sets 133 | - Relative importance networks now do not crash when the number of predictors is 0 or 1 134 | - plotting bootnetResults now supports the labels argument 135 | - mgm default now uses matrices to resolve an error with the latest version of mgm 136 | - The plot method of networks estimated using 'estimateNetwork' now uses different defaults than qgraph! 137 | - cut defaults to NULL 138 | - theme defaults to "colorblind" 139 | - parallelEdge defaults to TRUE 140 | - layout always defaults to "spring" (rather than "circle" for undirected networks) 141 | 142 | Changes in Version 1.0.0: 143 | - Implemented the netSimulator function that allows for researchers to investigate sample size requirements and input arguments to estimateNetwork 144 | - Added genGGM, ggmGenerator, and IsingGenerator functions to be used in netSimulator 145 | - bootnet now stores less results and should have better memory usage! Thanks to Giulio Costantini! 
146 | - Fixed some bugs related to manual parametric bootstrap 147 | - EstimateNetwork now references packages used in a message 148 | - pcor default set now supports the argument 'threshold' 149 | - Fixed a bug where rule argument was not passed in bootnet default set 150 | - Bootnet now supports directed networks 151 | - Relative importance networks now implemented using default = "relimp" 152 | - Updated compatibility with MGM version 1.2.0 153 | 154 | Changes in version 0.4: 155 | - estimateNetwork now accepts a custom estimation function using the argument 'fun' 156 | - Reworked default sets as functions! 157 | - This makes it easier to change common arguments, such as the EBIC tuning parameter 158 | - See the following functions for details: 159 | - bootnet_EBICglasso 160 | - bootnet_IsingFit 161 | - The corStability function now has a greatly improved output 162 | - Default set "IsingLL" has been renamed to "IsingSampler" 163 | - Default set "mgm" is now supported 164 | - labels argument is now supported in difference plots 165 | - Quantile now uses type = 6, this makes CIs slightly wider and therefore the difference test slightly more conservative 166 | 167 | 168 | Changes in version 0.3: 169 | - Eiko Fried joined the author list 170 | - Added 'estimateNetwork' function, allowing one to estimate the network structure 171 | from within bootnet 172 | - The plot method will run qgraph on the estimated network structure 173 | - The qgraph function getWmat can now be applied to networks estimated in bootnet. 174 | Allowing one to use, e.g., centralityPlot on a network estimated with 175 | 'estimateNetwork' 176 | - Added 'differenceTest' function to test for significant differences between edge 177 | weights and centrality indices 178 | - Added 'corStability' to compute the CS-coefficient as described in our paper: 179 | - Epskamp, S., Borsboom, D., & Fried, E. I. (2016). Estimating psychological 180 | networks and their accuracy: a tutorial paper. 
arXiv preprint, 181 | arXiv:1604.08462. 182 | - The plot method now supports 'plot = "difference"', to make plots of significant 183 | differences between edge-weights and centralities 184 | - New default sets: 185 | - "huge" 186 | - "adalasso" 187 | - 'nCores' argument added to bootnet to use parallel computing 188 | - bootnet print methods now print a list of relevant references on the network 189 | estimation procedure used 190 | - When EBICglasso is used as default set, variables that are made ordinal are now 191 | printed only when estimating the first network 192 | - Updated CITATION such that citation("bootnet") now references the pre-print 193 | - Bootnet now gives a message on loading that it is BETA software 194 | - Added 'statistics' argument to bootnet. Now, distance and length are not stored by 195 | default 196 | - Several minor bugfixes 197 | 198 | 199 | 200 | 201 | -------------------------------------------------------------------------------- /R/Smetric.R: -------------------------------------------------------------------------------- 1 | cor0 <- function(x,y,...){ 2 | if (any(!is.finite(na.omit(x))) || any(!is.finite(na.omit(y))) || sum(!is.na(x)) < 2 || sum(!is.na(y)) < 2){ 3 | return(NA) 4 | } else if (sd(x,na.rm=TRUE)==0 | sd(y,na.rm=TRUE) == 0){ 5 | return(0) 6 | } else { 7 | return(cor(x,y,...)) 8 | } 9 | } 10 | 11 | # Smetric 12 | corStability <- function(x,cor=0.7, statistics = "all", 13 | verbose = TRUE){ 14 | 15 | # If statistics is missing, return all estimated statistics: 16 | if (!x$type %in% c("node","person")){ 17 | stop("CS-coefficient only available for person or node drop bootstrap") 18 | } 19 | if (any(statistics=="all")){ 20 | statistics <- sort(unique(x$bootTable$type)) 21 | } 22 | statistics <- statistics[statistics%in%unique(x$bootTable$type)] 23 | 24 | if (x$type == "node"){ 25 | x$bootTable$prop <- 1 - (x$bootTable$nNode / x$sample$nNodes) 26 | } else { 27 | x$bootTable$prop <- 1 - (x$bootTable$nPerson / x$sample$nPerson) 
28 | } 29 | 30 | # Change first letter of statistics to lowercase: 31 | substr(statistics,0,1) <- tolower(substr(statistics,0,1)) 32 | 33 | sample <- x$sampleTable %>% 34 | filter(type %in% statistics) %>% 35 | select(node1,node2,type,original = value) 36 | 37 | 38 | 39 | max0 <- function(x){ 40 | if (length(x)==0 || all(is.na(x)))return(0) else return(max(x,na.rm=TRUE)) 41 | } 42 | 43 | 44 | S <- x$bootTable %>% 45 | filter(type %in% statistics) %>% 46 | left_join(sample,by=c("node1","node2","type")) %>% 47 | group_by(name,type,prop) %>% 48 | summarize(stability = cor0(value,original)) %>% 49 | group_by(prop,type) %>% 50 | summarize(P = mean(stability > cor,na.rm=TRUE)) %>% 51 | group_by(type) %>% 52 | summarize(Smetric = max0(prop[P > 0.95])) 53 | 54 | # all unique sampling levels: 55 | samplingLevels <- sort(unique(x$bootTable$prop)) 56 | 57 | Smetric <- S$Smetric 58 | names(Smetric) <- S$type 59 | 60 | # Print information per sampling level: 61 | if (verbose){ 62 | # Get counts: 63 | counts <- x$bootTable %>% 64 | group_by(name) %>% summarize(nPerson = unique(nPerson)) %>% 65 | group_by(nPerson) %>% 66 | tally %>% arrange(nPerson) %>% 67 | as.data.frame 68 | 69 | counts[['Drop%']] <- round(100 * (1 - counts$nPerson / x$sample$nPerson),1) 70 | rownames(counts) <- NULL 71 | counts <- counts[,c("nPerson","Drop%","n")] 72 | 73 | cat("=== Correlation Stability Analysis ===", 74 | "\n\nSampling levels tested:\n") 75 | print(counts) 76 | cat(paste0("\nMaximum drop proportions to retain correlation of ",cor," in at least 95% of the samples:\n\n")) 77 | 78 | samplingLevels <- c(0,samplingLevels,1) 79 | 80 | 81 | for (i in seq_along(Smetric)){ 82 | if (is.na(Smetric[i])){ 83 | cat(paste0(names(Smetric)[i],": Could not be computed (likely due to non-finite values)"), 84 | "\n\n" 85 | ) 86 | } else { 87 | 88 | if (any(samplingLevels < Smetric[i])){ 89 | lower <- max(which(samplingLevels < Smetric[i])) 90 | } else { 91 | lower <- 1 92 | } 93 | 94 | if (any(samplingLevels > 
Smetric[i])){ 95 | upper <- min(which(samplingLevels > Smetric[i])) 96 | } else { 97 | upper <- length(samplingLevels) 98 | } 99 | 100 | # Note for lower or upper bound: 101 | if (Smetric[i] == samplingLevels[2]){ 102 | note <- "(CS-coefficient is lowest level tested)" 103 | } else if (Smetric[i] == samplingLevels[length(samplingLevels)-1]){ 104 | note <- "(CS-coefficient is highest level tested)" 105 | } else { 106 | note <- "" 107 | } 108 | 109 | 110 | cat(paste0(names(Smetric)[i],": ",round(Smetric[i],3)," ",note, 111 | "\n - For more accuracy, run bootnet(..., caseMin = ",round(samplingLevels,3)[lower],", caseMax = ",round(samplingLevels,3)[upper],")"), 112 | "\n\n" 113 | ) 114 | } 115 | 116 | } 117 | cat("Accuracy can also be increased by increasing both 'nBoots' and 'caseN'.") 118 | 119 | } 120 | 121 | 122 | invisible(Smetric) 123 | } -------------------------------------------------------------------------------- /R/bootInclude.R: -------------------------------------------------------------------------------- 1 | # Function to create include probability network 2 | bootInclude <- function(bootobject,verbose=TRUE){ 3 | # Check if object is bootnet object: 4 | if (!is(bootobject,"bootnet")){ 5 | stop("'bootobject' must be an object of class 'bootnet'") 6 | } 7 | 8 | # Check type: 9 | if (bootobject$type != "nonparametric" & bootobject$type != "parametric"){ 10 | stop("Bootstrap type must be 'nonparametric' or 'parametric'") 11 | } 12 | 13 | # Extract the network object: 14 | Network <- bootobject$sample 15 | # Dummy for multiple graphs: 16 | if (!is.list(Network$graph)){ 17 | Graphs <- list(Network$graph) 18 | Directed <- list(Network$directed) 19 | Intercepts <- list(Network$intercepts) 20 | names(Graphs) <- names(Directed) <- names(Intercepts) <- 21 | unique(bootobject$bootTable$graph) 22 | } else { 23 | Graphs <- Network$graph 24 | Directed <- Network$directed 25 | Intercepts <- Network$intercepts 26 | } 27 | 28 | # For every graph: 29 | for (g in 
seq_along(Graphs)){ 30 | graphName <- names(Graphs)[g] 31 | 32 | # Summary table of edge weights: 33 | bootSummary <- bootobject$bootTable %>% 34 | dplyr::filter(.data[['type']] == "edge", .data[['graph']] == graphName) %>% 35 | dplyr::group_by(.data[['node1']],.data[['node2']]) %>% 36 | dplyr::summarize( 37 | propNonZero=mean(value != 0) 38 | ) 39 | 40 | # Reweight network: 41 | # if (nrow(bootSummary) > 0){ 42 | Graphs[[graphName]][] <- 0 43 | 44 | 45 | for (i in 1:nrow(bootSummary)){ 46 | Graphs[[graphName]][Network$labels == bootSummary$node1[i],Network$labels == bootSummary$node2[i]] <- bootSummary$propNonZero[i] 47 | if (!Directed[[graphName]]){ 48 | Graphs[[graphName]][Network$labels == bootSummary$node2[i],Network$labels == bootSummary$node1[i]] <- bootSummary$propNonZero[i] 49 | } 50 | } 51 | } 52 | 53 | # Return to network object: 54 | if (length(Graphs) == 1){ 55 | Network$graph <- Graphs[[1]] 56 | Network$intercepts <- NULL 57 | } else { 58 | Network$graph <- Graphs 59 | Network$intercepts <- NULL 60 | } 61 | 62 | # Add indicator network is about include proportions: 63 | Network$bootInclude <- TRUE 64 | 65 | # Return network: 66 | return(Network) 67 | } 68 | -------------------------------------------------------------------------------- /R/bootTable.R: -------------------------------------------------------------------------------- 1 | # Compute tidy table from bootnetResult object: 2 | # Result in data frame with entries: 3 | # original (logical) 4 | # name 5 | # type 6 | # node1 7 | # node2 8 | # value 9 | 10 | statTable <- function(x, 11 | name, 12 | alpha = 1, 13 | computeCentrality = TRUE, 14 | statistics = c("edge","strength","closeness","betweenness"), 15 | directed = FALSE, 16 | bridgeArgs = list(), 17 | includeDiagonal = FALSE,...){ 18 | 19 | # If list, table for every graph! 
20 | if (is.list(x$graph)){ 21 | Tables <- list() 22 | for (i in seq_len(length(x$graph))){ 23 | dummyobject <- x 24 | dummyobject$graph <- x$graph[[i]] 25 | dummyobject$directed <- x$directed[[i]] 26 | Tables[[i]] <- statTable(dummyobject,name=name,alpha=alpha,computeCentrality = computeCentrality,statistics=statistics,directed=dummyobject$directed,includeDiagonal=includeDiagonal) 27 | Tables[[i]]$graph <- names(x$graph)[[i]] 28 | } 29 | return(dplyr::bind_rows(Tables)) 30 | } 31 | 32 | bridgeCentralityNames <- x[['labels']] 33 | 34 | # Statistics can be: 35 | # Change first letter of statistics to lowercase: 36 | substr(statistics,0,1) <- tolower(substr(statistics,0,1)) 37 | validStatistics <- c("intercept","edge","length","distance","closeness","betweenness","strength","expectedInfluence", 38 | "outStrength","outExpectedInfluence","inStrength","inExpectedInfluence","rspbc","hybrid", "eigenvector", 39 | "bridgeStrength", "bridgeCloseness", "bridgeBetweenness","bridgeInDegree","bridgeOutDegree", 40 | "bridgeExpectedInfluence") 41 | if (!all(statistics %in% validStatistics)){ 42 | stop(paste0("'statistics' must be one of: ",paste0("'",validStatistics,"'",collapse=", "))) 43 | } 44 | 45 | 46 | type <- NULL 47 | value <- NULL 48 | 49 | stopifnot(is(x, "bootnetResult")) 50 | tables <- list() 51 | if (is.null(x[['labels']])){ 52 | x[['labels']] <- seq_len(ncol(x[['graph']])) 53 | } 54 | 55 | # edges: 56 | if (!directed){ 57 | index <- upper.tri(x[['graph']], diag=FALSE) 58 | ind <- which(index, arr.ind=TRUE) 59 | } else { 60 | if (!includeDiagonal){ 61 | index <- diag(ncol(x[['graph']]))!=1 62 | } else { 63 | index <- matrix(TRUE,ncol(x[['graph']]),ncol(x[['graph']])) 64 | } 65 | 66 | ind <- which(index, arr.ind=TRUE) 67 | } 68 | 69 | # Weights matrix: 70 | Wmat <- qgraph::getWmat(x) 71 | 72 | if ("edge" %in% statistics){ 73 | tables$edges <- tibble::as_tibble(data.frame( 74 | name = name, 75 | type = "edge", 76 | node1 = x[['labels']][ind[,1]], 77 | node2 = 
x[['labels']][ind[,2]], 78 | value = Wmat[index], 79 | stringsAsFactors = FALSE 80 | )) 81 | } 82 | 83 | 84 | if ("length" %in% statistics){ 85 | tables$length <- tibble::as_tibble(data.frame( 86 | name = name, 87 | type = "length", 88 | node1 = x[['labels']][ind[,1]], 89 | node2 = x[['labels']][ind[,2]], 90 | value = abs(1/abs(Wmat[index])), 91 | stringsAsFactors = FALSE 92 | )) 93 | } 94 | 95 | # Intercepts: 96 | if (!is.null(x[['intercepts']])){ 97 | tables$intercepts <- tibble::as_tibble(data.frame( 98 | name = name, 99 | type = "intercept", 100 | node1 = x[['labels']], 101 | node2 = '', 102 | value = x[['intercepts']], 103 | stringsAsFactors = FALSE 104 | )) 105 | } 106 | 107 | if (computeCentrality){ 108 | # Centrality analysis: 109 | if (all(x[['graph']]==0)){ 110 | cent <- list( 111 | OutDegree = rep(0,ncol(x[['graph']])), 112 | InDegree = rep(0,ncol(x[['graph']])), 113 | Closeness = rep(0,ncol(x[['graph']])), 114 | Betweenness = rep(0,ncol(x[['graph']])), 115 | ShortestPathLengths = matrix(Inf,ncol(x[['graph']]),ncol(x[['graph']])), 116 | RSPBC = rep(0,ncol(x[['graph']])), 117 | Hybrid = rep(0,ncol(x[['graph']])), 118 | step1 = rep(0,ncol(x[['graph']])), 119 | expectedInfluence = rep(0,ncol(x[['graph']])), 120 | OutExpectedInfluence = rep(0,ncol(x[['graph']])), 121 | InExpectedInfluence = rep(0,ncol(x[['graph']])), 122 | bridgeStrength= rep(0,ncol(x[['graph']])), 123 | bridgeCloseness= rep(0,ncol(x[['graph']])), 124 | bridgeBetweenness= rep(0,ncol(x[['graph']])), 125 | bridgeExpectedInfluence= rep(0,ncol(x[['graph']])) 126 | ) 127 | bridgeCentralityNames <- x[['labels']] 128 | } else { 129 | cent <- qgraph::centrality(Wmat, alpha = alpha, all.shortest.paths = FALSE) 130 | # EI <- expectedInf(Wmat, step="1") 131 | # names(EI) <- "expectedInfluence" 132 | bridgecen <- c("bridgeInDegree","bridgeOutDegree","bridgeStrength", "bridgeBetweenness", "bridgeCloseness", "bridgeExpectedInfluence") 133 | if(any(bridgecen %in% statistics)){ 134 | bridgeArgs <- 
c(list(network=Wmat), bridgeArgs) 135 | if(is.null(bridgeArgs$communities)){ 136 | warning("If bridge statistics are to be bootstrapped, the communities argument should be provided") 137 | } 138 | 139 | b <- do.call(networktools::bridge, args=bridgeArgs) 140 | 141 | # Rename: 142 | rename <- function(x,from,to){ 143 | if (from %in% x){ 144 | x[x==from] <- to 145 | } 146 | x 147 | } 148 | names(b) <- rename(names(b), "Bridge Indegree", "bridgeInDegree") 149 | names(b) <- rename(names(b), "Bridge Outdegree", "bridgeOutDegree") 150 | names(b) <- rename(names(b), "Bridge Strength", "bridgeStrength") 151 | names(b) <- rename(names(b), "Bridge Betweenness", "bridgeBetweenness") 152 | names(b) <- rename(names(b), "Bridge Closeness", "bridgeCloseness") 153 | names(b) <- rename(names(b), "Bridge Expected Influence (1-step)", "bridgeExpectedInfluence") 154 | names(b) <- rename(names(b), "Bridge Expected Influence (2-step)", "bridgeExpectedInfluence2step") 155 | 156 | b$communities <- NULL 157 | } else { 158 | b <- NULL 159 | } 160 | if(!is.null(bridgeArgs$useCommunities) && bridgeArgs$useCommunities[1] != "all"){ 161 | b <- lapply(b, function(cen){cen[bridgeArgs$communities %in% bridgeArgs$useCommunities]}) 162 | bridgeCentralityNames <- x[['labels']][bridgeArgs$communities %in% bridgeArgs$useCommunities] 163 | } else { 164 | bridgeCentralityNames <- x[['labels']] 165 | } 166 | cent <- c(cent, b) 167 | } 168 | 169 | # strength: 170 | if ("strength" %in% statistics & !directed){ 171 | tables$strength <- tibble::as_tibble(data.frame( 172 | name = name, 173 | type = "strength", 174 | node1 = x[['labels']], 175 | node2 = '', 176 | value = cent[['OutDegree']], 177 | stringsAsFactors = FALSE 178 | )) 179 | } 180 | 181 | if ("outStrength" %in% statistics && directed){ 182 | tables$outStrength <- tibble::as_tibble(data.frame( 183 | name = name, 184 | type = "outStrength", 185 | node1 = x[['labels']], 186 | node2 = '', 187 | value = cent[['OutDegree']], 188 | stringsAsFactors = FALSE 
189 | )) 190 | } 191 | if ("inStrength" %in% statistics && directed){ 192 | 193 | tables$inStrength <- tibble::as_tibble(data.frame( 194 | name = name, 195 | type = "inStrength", 196 | node1 = x[['labels']], 197 | node2 = '', 198 | value = cent[['InDegree']], 199 | stringsAsFactors = FALSE 200 | )) 201 | } 202 | 203 | # closeness: 204 | if ("closeness" %in% statistics){ 205 | tables$closeness <- tibble::as_tibble(data.frame( 206 | name = name, 207 | type = "closeness", 208 | node1 = x[['labels']], 209 | node2 = '', 210 | value = cent[['Closeness']], 211 | stringsAsFactors = FALSE 212 | )) 213 | } 214 | 215 | 216 | # betweenness: 217 | if ("betweenness" %in% statistics){ 218 | tables$betweenness <- tibble::as_tibble(data.frame( 219 | name = name, 220 | type = "betweenness", 221 | node1 = x[['labels']], 222 | node2 = '', 223 | value = cent[['Betweenness']], 224 | stringsAsFactors = FALSE 225 | )) 226 | } 227 | 228 | if ("distance" %in% statistics){ 229 | tables$sp <- tibble::as_tibble(data.frame( 230 | name = name, 231 | type = "distance", 232 | node1 = x[['labels']][ind[,1]], 233 | node2 = x[['labels']][ind[,2]], 234 | value = cent[['ShortestPathLengths']][index], 235 | stringsAsFactors = FALSE 236 | )) 237 | } 238 | 239 | if ("expectedInfluence" %in% statistics && !directed){ 240 | tables$expectedInfluence <- tibble::as_tibble(data.frame( 241 | name = name, 242 | type = "expectedInfluence", 243 | node1 = x[['labels']], 244 | node2 = '', 245 | value = cent[['OutExpectedInfluence']], 246 | stringsAsFactors = FALSE 247 | )) 248 | } 249 | 250 | # randomized shortest paths betweenness centrality: 251 | if ("rspbc" %in% statistics){ 252 | tryrspbc <- try({ 253 | tables$rspbc <- tibble::as_tibble(data.frame( 254 | name = name, 255 | type = "rspbc", 256 | node1 = x[['labels']], 257 | node2 = '', 258 | value = as.vector(NetworkToolbox::rspbc(abs(Wmat))), 259 | stringsAsFactors = FALSE 260 | )) 261 | }) 262 | 263 | if (is(tryrspbc,"try-error")){ 264 | tables$rspbc <- 
tibble::as_tibble(data.frame( 265 | name = name, 266 | type = "rspbc", 267 | node1 = x[['labels']], 268 | node2 = '', 269 | value = NA, 270 | stringsAsFactors = FALSE 271 | )) 272 | } 273 | } 274 | 275 | # hybrid: 276 | if ("hybrid" %in% statistics){ 277 | 278 | tryhybrid <- try({ 279 | tables$hybrid <- tibble::as_tibble(data.frame( 280 | name = name, 281 | type = "hybrid", 282 | node1 = x[['labels']], 283 | node2 = '', 284 | value = as.vector(NetworkToolbox::hybrid(abs(Wmat), BC = "random")), 285 | stringsAsFactors = FALSE 286 | )) 287 | }) 288 | 289 | if (is(tryhybrid,"try-error")){ 290 | tables$rspbc <- tibble::as_tibble(data.frame( 291 | name = name, 292 | type = "hybrid", 293 | node1 = x[['labels']], 294 | node2 = '', 295 | value = NA, 296 | stringsAsFactors = FALSE 297 | )) 298 | } 299 | 300 | } 301 | 302 | # eigenvector: 303 | if ("eigenvector" %in% statistics){ 304 | 305 | tryeigenvector <- try({ 306 | tables$eigenvector <- tibble::as_tibble(data.frame( 307 | name = name, 308 | type = "eigenvector", 309 | node1 = x[['labels']], 310 | node2 = '', 311 | value = as.vector(NetworkToolbox::eigenvector(Wmat)), 312 | stringsAsFactors = FALSE 313 | )) 314 | }) 315 | 316 | if (is(tryeigenvector,"try-error")){ 317 | tables$eigenvector <- tibble::as_tibble(data.frame( 318 | name = name, 319 | type = "eigenvector", 320 | node1 = x[['labels']], 321 | node2 = '', 322 | value = NA, 323 | stringsAsFactors = FALSE 324 | )) 325 | } 326 | 327 | } 328 | 329 | if ("outExpectedInfluence" %in% statistics && directed){ 330 | 331 | tables$outExpectedInfluence <- tibble::as_tibble(data.frame( 332 | name = name, 333 | type = "outExpectedInfluence", 334 | node1 = x[['labels']], 335 | node2 = '', 336 | value = cent[['OutExpectedInfluence']], 337 | stringsAsFactors = FALSE 338 | )) 339 | } 340 | if ("inExpectedInfluence" %in% statistics && directed){ 341 | 342 | tables$inExpectedInfluence <- tibble::as_tibble(data.frame( 343 | name = name, 344 | type = "inExpectedInfluence", 345 | node1 
= x[['labels']], 346 | node2 = '', 347 | value = cent[['InExpectedInfluence']], 348 | stringsAsFactors = FALSE 349 | )) 350 | } 351 | 352 | 353 | # bridgeStrength: 354 | if ("bridgeStrength" %in% statistics){ 355 | tables$bridgeStrength <- tibble::as_tibble(data.frame( 356 | name = name, 357 | type = "bridgeStrength", 358 | node1 = bridgeCentralityNames, 359 | node2 = '', 360 | value = cent[['bridgeStrength']], 361 | stringsAsFactors = FALSE 362 | )) 363 | } 364 | 365 | # bridgeCloseness: 366 | if ("bridgeCloseness" %in% statistics){ 367 | tables$bridgeCloseness <- tibble::as_tibble(data.frame( 368 | name = name, 369 | type = "bridgeCloseness", 370 | node1 = bridgeCentralityNames, 371 | node2 = '', 372 | value = cent[['bridgeCloseness']], 373 | stringsAsFactors = FALSE 374 | )) 375 | } 376 | 377 | # bridgeBetweenness: 378 | if ("bridgeBetweenness" %in% statistics){ 379 | tables$bridgeBetweenness <- tibble::as_tibble(data.frame( 380 | name = name, 381 | type = "bridgeBetweenness", 382 | node1 = bridgeCentralityNames, 383 | node2 = '', 384 | value = cent[['bridgeBetweenness']], 385 | stringsAsFactors = FALSE 386 | )) 387 | } 388 | 389 | # bridgeExpectedInfluence: 390 | if ("bridgeExpectedInfluence" %in% statistics){ 391 | tables$bridgeExpectedInfluence <- tibble::as_tibble(data.frame( 392 | name = name, 393 | type = "bridgeExpectedInfluence", 394 | node1 = bridgeCentralityNames, 395 | node2 = '', 396 | value = cent[['bridgeExpectedInfluence']], 397 | stringsAsFactors = FALSE 398 | )) 399 | } 400 | 401 | } 402 | # for (i in seq_along(tables)){ 403 | # tables[[i]]$id <- ifelse(tables[[i]]$node2=='',paste0("N: ",tables[[i]]$node1),paste0("E: ",tables[[i]]$node1, "--", tables[[i]]$node2)) 404 | # } 405 | 406 | for (i in seq_along(tables)){ 407 | tables[[i]]$id <- ifelse(tables[[i]]$node2=='',tables[[i]]$node1,paste0(tables[[i]]$node1, ifelse(directed,"->","--"), tables[[i]]$node2)) 408 | } 409 | 410 | tab <- dplyr::bind_rows(tables) 411 | tab$nNode <- x$nNode 412 | 
# Function to threshold network based on bootstrap samples:
#
# bootThreshold() sets to zero every edge whose bootstrapped two-sided
# (1 - alpha) quantile interval contains zero, and (optionally) does the same
# for intercepts. It returns the network object stored in bootobject$sample,
# with its graph(s)/intercepts modified and $thresholded set to TRUE.
#
# Arguments:
#   bootobject          - object of class 'bootnet'; only nonparametric or
#                         parametric bootstraps are supported
#   alpha               - significance level for the bootstrap interval
#   verbose             - if TRUE, message the significance level the
#                         bootstrap can be expected to attain (via expAlpha)
#   thresholdIntercepts - if TRUE, also threshold intercepts
bootThreshold <- function(bootobject, alpha = 0.05,verbose=TRUE, thresholdIntercepts = FALSE){
  # Check if object is bootnet object:
  if (!is(bootobject,"bootnet")){
    stop("'bootobject' must be an object of class 'bootnet'")
  }
  
  # Check type: only bootstraps of the full data support thresholding:
  if (bootobject$type != "nonparametric" & bootobject$type != "parametric"){
    stop("Bootstrap type must be 'nonparametric' or 'parametric'")
  }
  
  # Check alpha: report the significance level actually attainable given the
  # number of bootstrap samples (simulated by expAlpha):
  if (verbose){
    exp <- expAlpha(alpha,length(bootobject$boots))
    message(paste0("Expected significance level given number of bootstrap samples is approximately: ",format(signif(exp,2),scientific = FALSE)))
  }
  
  
  # Extract the network object:
  Network <- bootobject$sample
  # Dummy for multiple graphs: wrap a single graph in a one-element named list
  # so the loop below can treat single- and multi-graph objects uniformly:
  if (!is.list(Network$graph)){
    Graphs <- list(Network$graph)
    Directed <- list(Network$directed)
    Intercepts <- list(Network$intercepts)
    names(Graphs) <- names(Directed) <- names(Intercepts) <- 
      unique(bootobject$bootTable$graph)
  } else {
    Graphs <- Network$graph
    Directed <- Network$directed
    Intercepts <- Network$intercepts
  }
  
  # For every graph:
  for (g in seq_along(Graphs)){
    graphName <- names(Graphs)[g]
    
    # Summary table of edge weights: per edge, the alpha/2 and 1 - alpha/2
    # bootstrap quantiles; edges whose interval excludes zero are 'sig', and
    # only the NON-significant ones are kept (these are the edges to remove).
    # NOTE(review): quantile() uses its default type (7) here, while
    # expAlpha() and differenceTest() use type = 6 — confirm this mismatch
    # is intended.
    bootSummary <- bootobject$bootTable %>% 
      dplyr::filter(.data[['type']] == "edge", .data[['graph']] == graphName) %>%
      dplyr::group_by(.data[['node1']],.data[['node2']]) %>%
      dplyr::summarize(
        lower = quantile(value, alpha/2),
        upper = quantile(value, 1 - alpha/2)
      ) %>% 
      dplyr::mutate(sig = upper < 0 | lower > 0) %>%
      filter(!.data[['sig']])
    
    # Threshold network: zero out each non-significant edge; for undirected
    # graphs the symmetric entry is zeroed as well:
    if (nrow(bootSummary) > 0){
      for (i in 1:nrow(bootSummary)){
        Graphs[[graphName]][Network$labels == bootSummary$node1[i],Network$labels == bootSummary$node2[i]] <- 0
        if (!Directed[[graphName]]){
          Graphs[[graphName]][Network$labels == bootSummary$node2[i],Network$labels == bootSummary$node1[i]] <- 0
        }
      }
    } else {
      message("All edges indicated to be nonzero")
    }
    
    # Threshold intercepts
    if (thresholdIntercepts){
      
      # Summary table of intercepts (same quantile logic as for edges):
      bootSummary <- bootobject$bootTable %>% 
        dplyr::filter(.data[['type']] == "intercept", .data[['graph']] == graphName) %>%
        dplyr::group_by(.data[['node1']]) %>%
        dplyr::summarize(
          lower = quantile(value, alpha/2),
          upper = quantile(value, 1 - alpha/2)
        ) %>% 
        dplyr::mutate(sig = upper < 0 | lower > 0) %>%
        filter(!.data[['sig']])
      
      # Threshold network: zero out each non-significant intercept:
      if (nrow(bootSummary) > 0){
        for (i in 1:nrow(bootSummary)){
          Intercepts[[graphName]][Network$labels == bootSummary$node1[i]] <- 0
        }
      } else {
        message("All intercepts indicated to be nonzero")
      }
    }
  }
  
  # Return to network object: unwrap the single-graph case again:
  if (length(Graphs) == 1){
    Network$graph <- Graphs[[1]]
    Network$intercepts <- Intercepts[[1]]
  } else {
    Network$graph <- Graphs
    Network$intercepts <- Intercepts
  }
  
  # Add indicator network is thresholded:
  Network$thresholded <- TRUE
  
  # Return network:
  return(Network)
}
# bootnetResult methods:

# summary method for 'bootnetResult' objects: simply defers to the print
# method, forwarding any extra arguments.
summary.bootnetResult <- function(object, ...){
  print(object, ...)
}

# plot method for 'bootnetResult' objects: draws the estimated network with
# qgraph::qgraph.
#
# Arguments:
#   x                    - 'bootnetResult' object
#   graph                - which network to plot when the object stores a
#                          list of networks (e.g. temporal/contemporaneous);
#                          required in that case
#   weighted, signed     - default to the values stored on the object; when
#                          FALSE the weights matrix is reduced via sign()
#                          and/or abs() respectively
#   directed             - defaults to the (per-graph) value stored on x
#   labels               - node labels; default x$labels
#   layout, parallelEdge, cut, theme - passed to qgraph
#   bootIncludeOverwrite - if TRUE and x was produced by bootInclude(), plot
#                          with black edges and maximum = 1 (edge weights are
#                          inclusion proportions in [0, 1])
#   ...                  - further arguments passed to qgraph
plot.bootnetResult <- function(x, graph, 
                               weighted, signed, directed, labels,
                               layout = "spring",
                               parallelEdge = TRUE, cut = 0,
                               theme = "colorblind",
                               bootIncludeOverwrite = TRUE, ...){
  
  # Default weighted/signed to what is stored on the object:
  if (missing(weighted)){
    weighted <- x$weighted
  }
  if (missing(signed)){
    signed <- x$signed
  }
  
  # Objects may contain several networks; 'graph' selects one:
  if (is.list(x$graph)){
    if (missing(graph)){
      stop("Object contains multiple networks; 'graph' may not be missing.")
    }
    wMat <- x[['graph']][[graph]]
    if (missing(directed)){
      directed <- x$directed[[graph]]
    }
  } else {
    wMat <- x[['graph']]
    if (missing(directed)){
      directed <- x$directed
    }
  }
  
  # Reduce to unweighted and/or unsigned representation if requested:
  if (!isTRUE(weighted)){
    wMat <- sign(wMat)
  }
  if (!isTRUE(signed)){
    wMat <- abs(wMat)
  }
  
  if (missing(labels)){
    labels <- x[['labels']]
  }
  
  # bootInclude() networks contain inclusion proportions; plot those with
  # black edges, a fixed maximum of 1 and the diagonal shown:
  if (bootIncludeOverwrite && isTRUE(x$bootInclude)){
    qgraph::qgraph(wMat,labels=labels,directed=directed,
                   parallelEdge = parallelEdge,
                   theme = theme,
                   cut = cut, layout = layout, 
                   edge.color = "black", maximum = 1,
                   diag = TRUE,...)
  } else {
    qgraph::qgraph(wMat,labels=labels,directed=directed,
                   parallelEdge = parallelEdge,
                   theme = theme,
                   cut = cut, layout = layout, ...)
  }
  
}

# Simulate the significance level a two-sided bootstrap quantile interval
# (quantile type 6) can be expected to attain given 'nBoots' samples.
# Vectorized over both 'alpha' and 'nBoots'; returns a plain vector with one
# entry per (alpha, nBoots) combination.
expAlpha <- function(alpha, nBoots, reps = 1000) {
  c(sapply(alpha,function(a){
    sapply(nBoots,function(nb){
      mean(replicate(reps,quantile(runif(nb),a/2, type = 6))) + 
        (1 - mean(replicate(reps,quantile(runif(nb),1-a/2, type = 6)))) 
    })
  }))
}

# Bootstrapped difference test: tests whether the bootstrap interval of the
# difference between two statistics (centrality indices, edge weights or
# distances) contains zero.
#
# Arguments:
#   bootobject - 'bootnet' object (nonparametric or parametric bootstrap)
#   x, y       - IDs (or, for centrality, numeric node indices) of the two
#                statistics to compare
#   measure    - statistic type(s); centrality indices cannot be mixed with
#                "edge"/"distance" in one call
#   alpha      - significance level for the bootstrap interval
#   x2, y2     - for edges/distances: second node name, so that x/x2 (y/y2)
#                together identify the edge "node1--node2"
#   verbose    - if TRUE, message the expected attainable significance level
#
# Returns a data frame with id1, id2, measure, lower, upper, significant.
differenceTest <- function(bootobject,x,y,measure = c("strength","closeness","betweenness"),alpha = 0.05,x2,y2, verbose=TRUE){
  
  if (!bootobject$type %in% c("nonparametric","parametric")){
    stop("Difference test requires type = 'nonparametric' or type = 'parametric'.")
  }
  
  # FIX: class(bootobject) == "bootnet" yields a logical vector (and fails)
  # when the object carries more than one class; inherits() is the correct
  # S3 class test:
  stopifnot(inherits(bootobject, "bootnet"))
  
  if (verbose){
    exp <- expAlpha(alpha,length(bootobject$boots))
    message(paste0("Expected significance level given number of bootstrap samples is approximately: ",format(signif(exp,2),scientific = FALSE)))
  }
  
  if (any(measure %in% c("strength","betweenness","closeness")) & any(measure %in% c("edge","distance"))){
    stop("Difference test can not be made for centrality index and edge weights/distances at the same time.")
  }
  
  # For edges/distances, combine the two node names into the "a--b" id,
  # keeping whichever orientation occurs in the sample table:
  if (!missing(x2)){
    if (any(measure %in% c("edge","distance"))){
      opts <- paste0(c(x,x2),"--",c(x2,x))
      x <- opts[opts%in%bootobject$sampleTable$id][1]
    } else {
      warning("'x2' ignored")
    }
  }
  if (!missing(y2)){
    if (any(measure %in% c("edge","distance"))){
      opts <- paste0(c(y,y2),"--",c(y2,y))
      y <- opts[opts%in%bootobject$sampleTable$id][1]
    } else {
      warning("'y2' ignored")
    }
  }
  
  # Numeric node indices are translated to labels (centrality only):
  if (is.numeric(x)){
    if (any(measure %in% c("strength","betweenness","closeness"))){
      x <- bootobject$sample$labels[x]
    } else {
      stop("Numeric assignment not possible for edge or distance difference test")
    }
  }
  
  if (is.numeric(y)){
    if (any(measure %in% c("strength","betweenness","closeness"))){
      y <- bootobject$sample$labels[y]
    } else {
      stop("Numeric assignment not possible for edge or distance difference test")
    }
  }
  
  
  cent <- bootobject$bootTable %>% filter(type %in% measure) %>% dplyr::select(name,id1=id,value,type)
  
  if (!all(x %in% cent$id1)){
    stop("'x' is not a valid ID")
  }
  if (!all(y %in% cent$id1)){
    stop("'y' is not a valid ID")
  }
  
  fullTable <- expand.grid(name = unique(cent$name),id1=x,id2=y,type = measure,
                           stringsAsFactors = FALSE)
  
  # Join the two statistics per bootstrap sample and compute the bootstrap
  # quantile interval of their difference (quantile type 6, matching
  # expAlpha); 'significant' means the interval excludes zero:
  Quantiles <- fullTable %>% 
    dplyr::left_join(dplyr::select(cent,name,id1=id1,value1=value,type),by=c("name","id1","type")) %>%
    dplyr::left_join(dplyr::select(cent,name,id2=id1,value2=value,type),by=c("name","id2","type")) %>%
    dplyr::group_by(id1,id2,type) %>% 
    dplyr::summarize(lower = quantile(value2-value1,alpha/2, type = 6),
                     upper = quantile(value2-value1,1-alpha/2, type = 6)) %>%
    dplyr::mutate(contain0 = 0 >= lower & 0 <= upper) %>%
    dplyr::mutate(significant = !contain0) %>%
    dplyr::select(.data[["id1"]],.data[["id2"]],.data[["type"]],.data[["lower"]],.data[["upper"]],.data[["significant"]]) %>%
    dplyr::rename(measure = type) %>%
    as.data.frame
  
  rownames(Quantiles) <- NULL
  
  
  return(Quantiles)
}
measure <- match.arg(measure) 105 | # 106 | # cent <- x$bootTable %>% filter(type %in% measure) %>% dplyr::select(name,node1,value,type) 107 | # fullTable <- expand.grid(name = unique(cent$name),node1=unique(cent$node1),node2=unique(cent$node1),type = unique(cent$type), 108 | # stringsAsFactors = FALSE) 109 | # 110 | # Quantiles <- fullTable %>% 111 | # left_join(dplyr::select(cent,name,node1,value1=value,type),by=c("name","node1","type")) %>% 112 | # left_join(dplyr::select(cent,name,node2=node1,value2=value,type),by=c("name","node2","type")) %>% 113 | # group_by(node1,node2,type) %>% 114 | # summarize(lower = quantile(value2-value1,0.025),upper = quantile(value2-value1,0.975)) %>% 115 | # mutate(contain0 = 0 >= lower & 0 <= upper) 116 | # 117 | # #bootmean: 118 | # bootMeans <- x$bootTable %>% filter(type %in% measure) %>% 119 | # group_by(node1,type) %>% summarize(mean = mean(value,na.rm=TRUE)) 120 | # 121 | # sample <- x$sampleTable %>% filter(type %in% measure) %>% dplyr::select(node1,value,type) %>% 122 | # left_join(bootMeans,by=c("node1","type")) 123 | # 124 | # # Now for every node: minimal node equal to.... 
125 | # DF <- Quantiles %>% left_join(dplyr::select(sample,node2=node1,value,type), by = c("node2","type")) %>% 126 | # group_by(node1,type) %>% 127 | # summarize( 128 | # minNode = node2[contain0][which.min(value[contain0])], 129 | # maxNode = node2[contain0][which.max(value[contain0])] 130 | # ) %>% left_join(sample,by=c("node1","type")) %>% ungroup %>% 131 | # mutate(valueMin = value[match(minNode,node1)], 132 | # valueMax = value[match(maxNode,node1)], 133 | # rank = order(order(value,mean))) %>% arrange(rank) 134 | # 135 | # if (order == "value"){ 136 | # levels <- DF$node1[order(DF$value)] 137 | # } else if (order == "order"){ 138 | # levels <- x$sample$labels 139 | # } 140 | # 141 | # Quantiles$node1 <- factor(Quantiles$node1,levels=levels) 142 | # Quantiles$node2 <- factor(Quantiles$node2,levels=levels) 143 | # Quantiles$fill <- ifelse(Quantiles$node1 == Quantiles$node2, "same", 144 | # ifelse(Quantiles$contain0,"nonsig","sig")) 145 | # DF$node2 <- DF$node1 146 | # DF$node1 <- factor(DF$node1,levels=levels) 147 | # DF$node2 <- factor(DF$node2,levels=levels) 148 | # DF$label <- as.character(round(DF$value,2)) 149 | # DF$fill <- "same" 150 | # 151 | # lab <- measure 152 | # substr(lab,1,1) <- toupper(substr(lab,1,1)) 153 | # 154 | # g <- ggplot(Quantiles,aes(x=node1,y=node2,fill=fill)) + 155 | # geom_tile(colour = 'white') + xlab("") + ylab("") + 156 | # scale_fill_manual(values = c("same" = "white","nonsig" = "lightgray","sig" = "black")) + 157 | # geom_text(data=DF,aes(label = label))+ theme(legend.position="none") + 158 | # ggtitle(lab) 159 | # 160 | # base_size <- 9 161 | # g <- g + theme_grey(base_size = base_size) + labs(x = "", 162 | # y = "") + scale_x_discrete(expand = c(0, 0)) + 163 | # scale_y_discrete(expand = c(0, 0)) + theme(legend.position = "none", 164 | # axis.ticks = element_blank(), axis.text.x = element_text(size = base_size * 165 | # 0.8, angle = 330, hjust = 0, colour = "grey50")) 166 | # 167 | # return(g) 168 | # # 169 | # # 
# Function that checks input and returns the estimator function plus its
# arguments.
#
# Arguments:
#   default  - name of a default estimator set; "none" means the caller
#              supplies 'fun' directly ("glasso" is accepted as a historical
#              alias for "EBICglasso")
#   fun      - estimator function, used only when default == "none"
#   verbose  - kept for interface compatibility (passed by callers)
#   .dots    - list of extra estimator arguments
#   ...      - further estimator arguments, combined with .dots
#
# Returns a list with elements 'data', 'default', 'estimator' (the selected
# function) and 'arguments' (the combined argument list).
#
# Cleanups relative to the previous version: removed the duplicated
# "ggmModSelect" entry in the match.arg choices, the no-op
# 'if (default[[1]]=="IsingSampler") default <- "IsingSampler"' line, the
# unused 'argNames'/'construct' locals, the always-true
# 'construct == "function"' wrapper, and the large blocks of commented-out
# legacy construct = "arguments" code (available in version control history).
checkInput <- function(
  default = c("none", "EBICglasso","ggmModSelect", "pcor","IsingFit","IsingSampler", "huge","adalasso","mgm","relimp",
              "cor","TMFG","LoGo","graphicalVAR","piecewiseIsing","SVAR_lavaan",
              "GGMncv"),
  fun, # Estimator function
  verbose=TRUE,
  .dots = list(),
  ... # Arguments to the estimator function
){
  # Historical alias:
  if (default[[1]]=="glasso") default <- "EBICglasso"
  default <- match.arg(default)
  
  ### DEFAULT OPTIONS ###
  if (missing(fun)){
    fun <- NULL
  }
  
  # Combine estimator arguments from .dots and ...:
  Args <- c(.dots,list(...))
  
  # Map the default set to its estimator function. switch() evaluates only
  # the matched branch, so the bootnet_* defaults are resolved lazily; the
  # unnamed final expression is the fall-through for unsupported values:
  Function <- switch(default,
                     none = fun,
                     EBICglasso = bootnet_EBICglasso,
                     ggmModSelect = bootnet_ggmModSelect,
                     IsingFit = bootnet_IsingFit,
                     IsingSampler = bootnet_IsingSampler,
                     pcor = bootnet_pcor,
                     cor = bootnet_cor,
                     adalasso = bootnet_adalasso,
                     huge = bootnet_huge,
                     mgm = bootnet_mgm,
                     relimp = bootnet_relimp,
                     TMFG = bootnet_TMFG,
                     LoGo = bootnet_LoGo,
                     graphicalVAR = bootnet_graphicalVAR,
                     piecewiseIsing = bootnet_piecewiseIsing,
                     SVAR_lavaan = bootnet_SVAR_lavaan,
                     GGMncv = bootnet_GGMncv,
                     stop("Currently not supported."))
  
  # Output:
  # NOTE(review): 'data' is not an argument of this function and is not
  # defined locally — lexical lookup resolves it outside this scope (in a
  # plain session, to the utils::data function). Kept for interface
  # compatibility; confirm against callers whether this element is ever used.
  Output <- list(
    data = data,
    default = default,
    estimator = Function,
    arguments = Args
  )
  
  return(Output)
}
It is used inside bootnet: 2 | estimateNetwork <- function( 3 | data, 4 | default = c("none", "EBICglasso", "pcor","IsingFit","IsingSampler", "huge","adalasso","mgm","relimp", "cor","TMFG", 5 | "ggmModSelect", "LoGo","graphicalVAR", "piecewiseIsing","SVAR_lavaan", 6 | "GGMncv"), 7 | fun, # A function that takes data and returns a network or list entitled "graph" and "thresholds". optional. 8 | # prepFun, # Fun to produce the correlation or covariance matrix 9 | # prepArgs, # list with arguments for the correlation function 10 | # estFun, # function that results in a network 11 | # estArgs, # arguments sent to the graph estimation function (if missing automatically sample size is included) 12 | # graphFun, # set to identity if missing 13 | # graphArgs, # Set to null if missing 14 | # intFun, # Set to null if missing 15 | # intArgs, # Set to null if missing 16 | labels, # if missing taken from colnames 17 | verbose = TRUE, # Dummy used in cor_auto and in the future in other functions. Set to FALSE in bootnet 18 | # construct = c("default","function","arguments"), 19 | .dots = list(), 20 | weighted = TRUE, 21 | signed = TRUE, 22 | directed, 23 | datatype, 24 | checkNumeric = FALSE, 25 | # plot = TRUE, # Plot the network? 26 | ..., # Arguments to the 'fun' function 27 | .input, # Skips most of first steps if supplied 28 | memorysaver = FALSE # If set to FALSE data, estimator and results are not stored. 
29 | ){ 30 | construct <- "function" 31 | # Borsboom easter egg: 32 | if (default[1] == "Borsboom") return(42) 33 | 34 | if (default[[1]]=="glasso") default <- "EBICglasso" 35 | default <- match.arg(default) 36 | 37 | # datatype test: 38 | if (missing(datatype)){ 39 | if (is(data,"tsData")){ 40 | datatype <- "graphicalVAR" 41 | } else { 42 | datatype <- "normal" 43 | } 44 | 45 | } 46 | 47 | if (!datatype%in% c("normal","graphicalVAR")){ 48 | stop("Only datatypes 'normal' and 'graphicalVAR' currently supported.") 49 | } 50 | # 51 | # If NAs and default can't handle, stop: 52 | # if (any(is.na(data)) && default %in% c("huge","adalasso")){ 53 | # stop(paste0("Missing data not supported for default set '",default,"'. Try using na.omit(data).")) 54 | # } 55 | 56 | # First test if data is a data frame: 57 | if (datatype == "normal" && !(is.data.frame(data) || is.matrix(data))){ 58 | stop("'data' argument must be a data frame") 59 | } 60 | 61 | # If matrix coerce to data frame: 62 | if (datatype == "normal" && is.matrix(data)){ 63 | data <- as.data.frame(data) 64 | } 65 | 66 | if (missing(directed)){ 67 | if (default == "graphicalVAR"){ 68 | directed <- list(contemporaneous = FALSE, temporal = TRUE) 69 | } else if (default == "SVAR_lavaan"){ 70 | directed <- list(contemporaneous = TRUE, temporal = TRUE) 71 | } else if (!default %in% c("relimp","DAG")){ 72 | directed <- FALSE 73 | } else { 74 | directed <- TRUE 75 | } 76 | } 77 | 78 | if (datatype == "normal"){ 79 | N <- ncol(data) 80 | Np <- nrow(data) 81 | if (missing(labels)){ 82 | labels <- colnames(data) 83 | } 84 | 85 | if (checkNumeric){ 86 | # Check and remove any variable that is not ordered, integer or numeric: 87 | goodColumns <- sapply(data, function(x) is.numeric(x) | is.ordered(x) | is.integer(x)) 88 | 89 | if (!all(goodColumns)){ 90 | if (verbose){ 91 | warning(paste0("Removing non-numeric columns: ",paste(which(!goodColumns),collapse="; "))) 92 | } 93 | data <- data[,goodColumns,drop=FALSE] 94 | } 95 | } 96 
| 97 | 98 | } else if (datatype == "graphicalVAR"){ 99 | N <- length(data$vars) 100 | Np <- nrow(data$data_c) 101 | if (missing(labels)){ 102 | labels <- data$vars 103 | } 104 | } 105 | 106 | 107 | 108 | 109 | 110 | # Compute estimator: 111 | if (missing(.input)){ 112 | .input <- checkInput( 113 | default = default, 114 | fun = fun, 115 | # prepFun = prepFun, # Fun to produce the correlation or covariance matrix 116 | # prepArgs = prepArgs, # list with arguments for the correlation function 117 | # estFun=estFun, # function that results in a network 118 | # estArgs=estArgs, # arguments sent to the graph estimation function (if missing automatically sample size is included) 119 | # graphFun=graphFun, # set to identity if missing 120 | # graphArgs=graphArgs, # Set to null if missing 121 | # intFun=intFun, # Set to null if missing 122 | # intArgs=intArgs, # Set to null if missing 123 | # sampleSize = Np, 124 | # construct=construct, 125 | verbose=verbose, 126 | .dots=.dots, 127 | ... 128 | ) 129 | } 130 | 131 | 132 | # Add verbose: 133 | # Every estimator must have argument verbose: 134 | if ("verbose" %in% names(formals(.input$estimator))){ 135 | .input$arguments$verbose <- verbose 136 | } 137 | 138 | # Unlock function: 139 | # Every estimator must have argument verbose: 140 | if ("unlock" %in% names(formals(.input$estimator))){ 141 | .input$arguments$unlock <- TRUE 142 | } 143 | 144 | # Compute network: 145 | Result <- do.call(.input$estimator, c(list(data),.input$arguments)) 146 | 147 | if (!is.list(Result)){ 148 | sampleGraph <- Result 149 | intercepts <- NULL 150 | output <- Result 151 | nNode <- ncol(Result) 152 | } else if (is.list(Result$graph)){ 153 | sampleGraph <- Result$graph 154 | intercepts <- Result$intercepts 155 | output <- Result$results 156 | nNode <- ncol(Result$graph[[1]]) 157 | } else { 158 | sampleGraph <- Result$graph 159 | intercepts <- Result$intercepts 160 | output <- Result$results 161 | nNode <- ncol(Result$graph) 162 | } 163 | 164 | 165 | 
if (!is.matrix(sampleGraph)){ 166 | if (is.list(sampleGraph)){ 167 | if (!is.matrix(sampleGraph[[1]])){ 168 | stop("Estimated result is not a list of matrices encoding networks.") 169 | } 170 | } else { 171 | stop("Estimated result is not a matrix encoding a network.") 172 | } 173 | } 174 | 175 | # Special data? 176 | if (!is.list(Result) || is.null(Result$specialData)){ 177 | outdata <- data 178 | datatype <- "normal" 179 | } else { 180 | outdata <- Result$specialData$data 181 | datatype <- Result$specialData$type 182 | } 183 | 184 | 185 | sampleResult <- list( 186 | graph = sampleGraph, 187 | intercepts = intercepts, 188 | results = output, 189 | labels = labels, 190 | nNode = nNode, 191 | nPerson = Np, 192 | estimator = .input$estimator, 193 | arguments = .input$arguments, 194 | data = outdata, 195 | datatype = datatype, 196 | default = default, 197 | weighted = weighted, 198 | signed = signed, 199 | directed=directed, 200 | .input = .input, 201 | thresholded = FALSE 202 | ) 203 | class(sampleResult) <- c("bootnetResult", "list") 204 | 205 | if (default == "graphicalVAR"){ 206 | sampleResult$labels <- output$data$vars 207 | } 208 | if (default == "SVAR_lavaan"){ 209 | sampleResult$labels <- outdata$vars 210 | } 211 | 212 | # Memory save: 213 | if(memorysaver) 214 | { 215 | sampleResult$results <- NA 216 | sampleResult$estimator <- NA 217 | sampleResult$data <- NA 218 | sampleResult$.input <- NA 219 | } 220 | 221 | # Plot? 222 | # if (plot){ 223 | # plot(sampleResult,labels=labels,layout = "spring", ...) 
224 | # } 225 | 226 | # Return network: 227 | return(sampleResult) 228 | } -------------------------------------------------------------------------------- /R/multiverse.R: -------------------------------------------------------------------------------- 1 | # Multiverse test: 2 | multiverse <- function(x, labels = FALSE){ 3 | 4 | # Obtain all networks: 5 | networks <- lapply(x$boots,'[[','graph') 6 | directed <- sapply(x$boots,'[[','directed') 7 | labs <- lapply(x$boots,'[[','labels') 8 | 9 | nFull <- x$sampleSize 10 | Ns <- sapply(x$boots,'[[','nPerson') 11 | 12 | # Resorder: 13 | networks <- networks[order(Ns,decreasing = TRUE)] 14 | directed <- directed[order(Ns,decreasing = TRUE)] 15 | labs <- labs[order(Ns,decreasing = TRUE)] 16 | Ns <- Ns[order(Ns,decreasing = TRUE)] 17 | 18 | # Make dfs: 19 | DFs <- lapply(seq_along(networks),function(i){ 20 | if (directed[i]){ 21 | graph <- networks[[i]] 22 | ind <- matrix(TRUE,nrow(graph),ncol(graph)) 23 | weights <- graph[ind] 24 | edges <- paste0(labs[[i]][row(graph)[ind]]," -> ",labs[[i]][col(graph)[ind]]) 25 | } else { 26 | graph <- networks[[i]] 27 | ind <- lower.tri(graph,diag=FALSE) 28 | weights <- graph[ind] 29 | edges <- paste0(labs[[i]][row(graph)[ind]]," -- ",labs[[i]][col(graph)[ind]]) 30 | } 31 | 32 | df <- data.frame( 33 | id = i, 34 | weight=weights, 35 | edge = edges, 36 | n = Ns[i] 37 | ) 38 | 39 | return(df) 40 | }) 41 | 42 | # Make a big DF: 43 | bigDF <- do.call(rbind, DFs) 44 | 45 | # Proportion of data: 46 | bigDF$prop <- round(100 * bigDF$n / nFull,1) 47 | bigDF$prop <- factor(bigDF$prop, levels = sort(unique(bigDF$prop),decreasing = TRUE), 48 | labels = paste0(sort(unique(bigDF$prop),decreasing = TRUE),"%")) 49 | 50 | # Max value: 51 | max <- max(abs(bigDF$weight),na.rm=TRUE) 52 | 53 | # Make the plot: 54 | p <- ggplot(bigDF, 55 | aes_string(x="id", 56 | y="edge", 57 | fill="weight")) + 58 | geom_tile() + 59 | # geom_text(col = rgb(0.3,0.3,0.3), size = 1) + 60 | scale_fill_gradient2("", 61 | low = 
"#BF0000", 62 | high = "#0000D5", 63 | mid="white",limits = c(-max,max)) + 64 | theme_bw(base_size = 12) + 65 | labs(x = "",y = "") + 66 | scale_x_discrete(expand = c(0, 0)) + 67 | scale_y_discrete(expand = c(0, 0)) + 68 | theme(legend.position = "top", axis.ticks = element_blank(), 69 | axis.text.y = element_blank(), 70 | legend.key.size = unit(0.01, "npc"), 71 | legend.key.width = unit(0.1,"npc") ) 72 | 73 | if (x$type == "person"){ 74 | p <- p + facet_grid( ~ prop, scales = "free") 75 | } 76 | 77 | if (labels){ 78 | p <- p + theme(axis.text.x = element_text(size = 3)) 79 | } 80 | 81 | return(p) 82 | } -------------------------------------------------------------------------------- /R/netSimulator.R: -------------------------------------------------------------------------------- 1 | # Function to run a standard network simulation study: 2 | # Two methods: dataGenerator function, or using graph, intercepts and model for GGM/Ising. 3 | 4 | # Generator functions: 5 | ggmGenerator <- function( 6 | ordinal = FALSE, 7 | nLevels = 4, 8 | skewFactor = 1, 9 | type = c("uniform", "random"), 10 | missing = 0 11 | ){ 12 | ORDINALDUMMY <- NULL 13 | # Generate a function with nCase as first and input as second argument 14 | type <- match.arg(type) 15 | # Wrapper: 16 | Estimator <- function(n, input){ 17 | if (is.list(input)){ 18 | if ("graph" %in% names(input)){ 19 | graph <- input$graph 20 | } else stop("'graph' not in input list.") 21 | 22 | if ("intercepts" %in% names(input)){ 23 | intercepts <- input$intercepts 24 | } else { 25 | intercepts <- rep(0,ncol(graph)) 26 | } 27 | } else { 28 | if (!is.matrix(input)){ 29 | stop("'input' is not a matrix or list.") 30 | } 31 | 32 | graph <- input 33 | intercepts <- rep(0,ncol(graph)) 34 | } 35 | # standardize: 36 | if (!all(diag(graph) == 0 | diag(graph) == 1)){ 37 | graph <- cov2cor(graph) 38 | } 39 | 40 | # Remove diag: 41 | diag(graph) <- 0 42 | 43 | # Generate data: 44 | # True sigma: 45 | if (any(eigen(diag(ncol(graph)) - 
graph)$values < 0)){ 46 | stop("Precision matrix is not positive semi-definite") 47 | } 48 | Sigma <- cov2cor(solve(diag(ncol(graph)) - graph)) 49 | 50 | 51 | # Generate data: 52 | Data <- mvtnorm::rmvnorm(n, sigma = Sigma) 53 | 54 | ORDINALDUMMY 55 | 56 | # Add missing: 57 | if (missing > 0){ 58 | for (i in 1:ncol(Data)){ 59 | Data[runif(nrow(Data)) < missing,i] <- NA 60 | } 61 | } 62 | 63 | return(Data) 64 | } 65 | 66 | # Deparse: 67 | deparsedEstimator <- deparse(Estimator) 68 | l <- grep("ORDINALDUMMY",deparsedEstimator) 69 | 70 | if (!ordinal){ 71 | deparsedEstimator <- deparsedEstimator[-l] 72 | } else { 73 | deparsedEstimator[l] <- sprintf(' 74 | for (i in 1:ncol(Data)){ 75 | if (is.list(input) && !is.null(input$thresholds)) { 76 | Data[,i] <- as.numeric(cut(Data[,i],sort(c(-Inf,input$thresholds[[i]],Inf)))) 77 | } else { 78 | 79 | # Generate thresholds: 80 | nLevels <- %f 81 | if (type == "random"){ 82 | thresholds <- sort(rnorm(nLevels-1)) 83 | } else { 84 | thresholds <- qnorm(seq(0,1,length=nLevels + 1)[-c(1,nLevels+1)]^(1/skewFactor)) 85 | } 86 | 87 | Data[,i] <- as.numeric(cut(Data[,i],sort(c(-Inf,thresholds,Inf)))) 88 | 89 | 90 | } 91 | 92 | }',nLevels) 93 | } 94 | 95 | # Parse again: 96 | Estimator <- eval(parse(text=deparsedEstimator)) 97 | return(Estimator) 98 | } 99 | 100 | ### Ising: 101 | IsingGenerator <- function( 102 | ... 
# Arguments used in IsingSampler 103 | ){ 104 | ARGDUMMY <- NULL 105 | # Generate a function with nCase as first and input as second argument 106 | 107 | # Wrapper: 108 | Gen <- function(n, input){ 109 | if (is.list(input)){ 110 | if ("graph" %in% names(input)){ 111 | graph <- input$graph 112 | } else stop("Input must be a list containing elements 'graph' and 'intercepts'") 113 | 114 | if ("intercepts" %in% names(input)){ 115 | intercepts <- input$intercepts 116 | } else { 117 | stop("Input must be a list containing elements 'graph' and 'intercepts'") 118 | } 119 | } else { 120 | stop("Input must be a list containing elements 'graph' and 'intercepts'") 121 | } 122 | 123 | Data <- IsingSampler::IsingSampler(n, 124 | input$graph, 125 | ARGDUMMY, 126 | input$intercepts 127 | 128 | ) 129 | 130 | return(Data) 131 | } 132 | 133 | # Deparse: 134 | deparsedEstimator <- deparse(Gen) 135 | l <- grep("ARGDUMMY,",deparsedEstimator) 136 | 137 | dots <-list(...) 138 | 139 | 140 | 141 | if (length(dots) == 0){ 142 | deparsedEstimator[l] <- gsub("ARGDUMMY,","",deparsedEstimator[l]) 143 | } else { 144 | # Construct arguments: 145 | txt <- sapply(dots, function(x)paste(deparse(dput(x)),collapse="\n")) 146 | deparsedEstimator[l] <- gsub("ARGDUMMY",paste(names(dots), "=", txt, collapse = ", "),deparsedEstimator[l]) 147 | } 148 | 149 | # Parse again: 150 | Estimator <- eval(parse(text=deparsedEstimator)) 151 | return(Estimator) 152 | } 153 | 154 | 155 | netSimulator <- function( 156 | input = genGGM(Nvar = 10), # A matrix, or a list with graph and intercepts elements. 
Or a generating function 157 | nCases = c(50,100,250,500,1000,2500), # Number of cases 158 | nReps = 100, # Number of repititions per condition 159 | nCores = 1, # Number of computer cores used 160 | default, 161 | dataGenerator, 162 | ..., # estimateNetwork arguments (if none specified, will default to default = "EBICglasso) 163 | moreArgs = list(), # List of extra args not intended to be varied as conditions 164 | moreOutput = list() # List with functions that take two weights matrices and produce some value 165 | ){ 166 | # Dots list: 167 | .dots <- list(...) 168 | # default <- match.arg(default) 169 | 170 | # Check default and dataGenerator: 171 | if (missing(default) & missing(dataGenerator)){ 172 | message("'default' and 'dataGenerator' are missing. Setting default = 'EBICglasso'") 173 | default <- "EBICglasso" 174 | } 175 | 176 | if (missing(default) & length(.dots) == 0){ 177 | message("No estimator specified. Setting default = 'EBICglasso'") 178 | default <- "EBICglasso" 179 | } 180 | 181 | # Data generator: 182 | if (!missing(default) & missing(dataGenerator)){ 183 | 184 | if (default == "EBICglasso" || default == "glasso" || default == "pcor" || default == "adalasso" || default == "huge"|| default == "ggmModSelect" || default == "LoGo"){ 185 | message("Setting 'dataGenerator = ggmGenerator(ordinal = FALSE)'") 186 | dataGenerator <- ggmGenerator(ordinal = FALSE) 187 | } else if (default == "IsingFit" || default == "IsingSampler"){ 188 | message("Setting 'dataGenerator = IsingGenerator()'") 189 | dataGenerator <- IsingGenerator() 190 | } else { 191 | stop(paste0("Default set '",default, "' not yet supported. 
Please manually specify 'dataGenerator'")) 192 | } 193 | 194 | 195 | } 196 | 197 | 198 | # Else none: 199 | if (missing(default)) default <- "none" 200 | 201 | # parSim arguments: 202 | Args <- c( 203 | list( 204 | # Conditions: 205 | nCases = nCases, 206 | default = default, 207 | 208 | # Setup:, 209 | write=FALSE, 210 | nCores = nCores, 211 | reps = nReps, 212 | debug=FALSE, 213 | export = c("input","dataGenerator",".dots","moreArgs","moreOutput"), 214 | 215 | expression = expression({ 216 | cor0 <- function(x,y){ 217 | if (all(is.na(x)) || all(is.na(y))){ 218 | return(NA) 219 | } 220 | 221 | if (sd(x,na.rm=TRUE)==0 | sd(y,na.rm=TRUE) == 0){ 222 | return(0) 223 | } 224 | 225 | return(cor(x,y,use="pairwise.complete.obs")) 226 | } 227 | 228 | # Generate the input: 229 | if (is.function(input)){ 230 | inputResults <- input() 231 | } else { 232 | inputResults <- input 233 | } 234 | 235 | # True network: 236 | if (is.list(inputResults)){ 237 | trueNet <- inputResults$graph 238 | } else { 239 | trueNet <- inputResults 240 | } 241 | 242 | # Generate the data: 243 | Data <- dataGenerator(nCases, inputResults) 244 | 245 | # Compute the network: 246 | args <- list() 247 | args$data <- Data 248 | args$verbose <- FALSE 249 | args$default <- default 250 | args <- c(args,moreArgs) 251 | 252 | for (i in seq_along(.dots)){ 253 | args[[names(.dots)[i]]] <- get(names(.dots)[i]) 254 | } 255 | suppressWarnings(netResults <- do.call(bootnet::estimateNetwork,args)) 256 | estNet <- qgraph::getWmat(netResults) 257 | 258 | # Compute measures: 259 | ### STORE RESULTS ### 260 | SimulationResults <- list() 261 | 262 | # Estimated edges: 263 | est <- estNet[upper.tri(estNet)] 264 | # Real edges: 265 | real <- trueNet[upper.tri(trueNet)] 266 | 267 | # Equal? 
268 | SimulationResults$correctModel <- all((est == 0) == (real == 0)) 269 | 270 | # True positives: 271 | TruePos <- sum(est != 0 & real != 0) 272 | 273 | # False pos: 274 | FalsePos <- sum(est != 0 & real == 0) 275 | 276 | # True Neg: 277 | TrueNeg <- sum(est == 0 & real == 0) 278 | 279 | # False Neg: 280 | FalseNeg <- sum(est == 0 & real != 0) 281 | 282 | ### Sensitivity: 283 | SimulationResults$sensitivity <- TruePos / (TruePos + FalseNeg) 284 | 285 | # Specificity: 286 | SimulationResults$specificity <- TrueNeg / (TrueNeg + FalsePos) 287 | 288 | # Correlation: 289 | SimulationResults$correlation <- cor0(est,real) 290 | 291 | # Centrality: 292 | centTrue <- qgraph::centrality(trueNet) 293 | centEst <- qgraph::centrality(estNet) 294 | 295 | SimulationResults$strength <- cor0(centTrue$OutDegree,centEst$OutDegree) 296 | SimulationResults$closeness <- cor0(centTrue$Closeness,centEst$Closeness) 297 | SimulationResults$betweenness <- cor0(centTrue$Betweenness,centEst$Betweenness) 298 | SimulationResults$ExpectedInfluence <- cor0(centTrue$OutExpectedInfluence,centEst$OutExpectedInfluence) 299 | 300 | # 301 | # ### TEMP: REMOVE: 302 | # SimulationResults$MeanBiasFalsePositives <- mean(abs(est[real==0 & est!=0])) 303 | # # SimulationResults$Q75BiasFalsePositives <- quantile(abs(est[real==0 & est!=0]), 0.75) 304 | # SimulationResults$MaxBiasFalsePositives <- max(abs(est[real==0 & est!=0])) 305 | # SimulationResults$MaxWeight <- max(abs(est)) 306 | # SimulationResults$MeanWeight <- mean(abs(est[est!=0])) 307 | if (any(real==0 & est!=0)){ 308 | SimulationResults$MaxFalseEdgeWidth <- max(abs(est[real==0 & est!=0])) / max(abs(est)) 309 | } else { 310 | SimulationResults$MaxFalseEdgeWidth <- NA 311 | } 312 | 313 | SimulationResults$bias <- mean(abs(est - real)) 314 | # ## 315 | if (length(moreOutput) > 1){ 316 | if (is.null(names(moreOutput))){ 317 | names(moreOutput) <- paste0("moreOutput",seq_along(moreOutput)) 318 | } 319 | 320 | for (out in seq_along(moreOutput)){ 321 | 
# Store each user-supplied extra output under its (or a generated) name,
# rather than overwriting SimulationResults[[1]], [[2]], ... by integer index.
# NOTE: the original indexed moreOutput[[i]] here, but 'i' is a stale loop
# variable from the earlier .dots loop; the intended index is 'out'.
outName <- names(moreOutput)[out]
if (is.null(outName) || is.na(outName) || outName == "") outName <- paste0("moreOutput", out)
SimulationResults[[outName]] <- moreOutput[[out]](estNet, trueNet)
        }
      }

      # The expression evaluates to the per-repetition result list:
      SimulationResults
    })),
    .dots)

  # Run the full simulation grid via parSim:
  Results <- do.call(parSim, Args)

  class(Results) <- c("netSimulator","data.frame")
  return(Results)
}
-------------------------------------------------------------------------------- /R/netSimulator_methods.R: --------------------------------------------------------------------------------
# Print method: simply forwards to the summary method.
print.netSimulator <- function(x, digits = 2, ...) summary(x, digits = digits, ...)

# Summarize netSimulator results: mean (SD) of each performance measure per
# combination of varied condition levels.
summary.netSimulator <- function(object, digits = 2, ...){
  # Name of the supplied object, used in the usage hint printed at the end.
  # (The original called substitute(x), but the argument is named 'object',
  # so the hint always showed the literal "x".)
  name <- deparse(substitute(object))[[1]]
  if (nchar(name) > 10) name <- "object"

  # Check for errors:
  # (The original passed 'collapse' to unique(); it belongs to paste().)
  if (all(object$error)) stop(paste0("All simulations resulted in errors:\n",paste(unique(object$errorMessage), collapse = "\n")))

  # Result/bookkeeping columns; everything else is a varied condition:
  Exclude <- c(
    "rep","id","correctModel","sensitivity","specificity","correlation","strength","closeness","betweenness","error","errorMessage","ExpectedInfluence","MaxFalseEdgeWidth","bias"
  )
  # check number of levels:
  Conditions <- names(object)[!names(object)%in%Exclude]

  .
<- NULL 17 | 18 | fun <- function(x,digits=2){ 19 | paste0(round(mean(x,na.rm=TRUE),digits), " (",round(sd(x,na.rm=TRUE),digits),")") 20 | } 21 | 22 | # Summarize per case: 23 | suppressWarnings({ 24 | df <- object %>% dplyr::select(.data[["sensitivity"]],.data[["specificity"]],.data[["correlation"]],.data[["strength"]],.data[["closeness"]],.data[["betweenness"]],all_of(Conditions)) %>% 25 | dplyr::group_by_at(Conditions) %>% dplyr::summarize_each(funs(fun(.,digits=digits))) %>% 26 | dplyr::arrange(.data[['nCases']]) %>% as.data.frame 27 | }) 28 | 29 | # 30 | 31 | row.names(df) <- NULL 32 | 33 | cat("=== netSimulator Results ===\n\n") 34 | cat("Mean (SD) values per varied levels:\n\n") 35 | print(df) 36 | # cat("Standard deviation per varied levels:\n\n") 37 | # print(dfSD) 38 | 39 | 40 | cat(paste0("\n\nUse plot(",name,") to plot results (nCases only), or as.data.frame(",name,") to see all results.")) 41 | invisible(df) 42 | } 43 | 44 | # Plot method 45 | 46 | plot.netSimulator <- function(x, xvar = "factor(nCases)", 47 | yvar = c("sensitivity", "specificity", "correlation"), 48 | xfacet = "measure", yfacet = ".", color = NULL, 49 | ylim = c(0,1), print = TRUE, xlab = "Number of cases", 50 | ylab, outlier.size = 0.5, boxplot.lwd = 0.5, style = c("fancy","basic"), ...){ 51 | 52 | style <- match.arg(style) 53 | 54 | # Check input: 55 | if (xvar != "factor(nCases)" && xlab == "Number of cases"){ 56 | warning("argument 'xvar' is not 'factor(nCases)' while argument 'xlab' is still 'Number of cases'. 
X-axis label might be wrong.") 57 | } 58 | 59 | # Set y-axis label: 60 | if (missing(ylab)){ 61 | if (xfacet != "measure"){ 62 | ylab <- paste(yvar, collapse = "; ") 63 | } else { 64 | ylab <- "" 65 | } 66 | } 67 | 68 | # Gather: 69 | Gathered <- x %>% 70 | tidyr::gather("measure","value",yvar) 71 | 72 | # AES: 73 | if (!is.null(color)){ 74 | Gathered[[color]] <- as.factor(Gathered[[color]]) 75 | AES <- ggplot2::aes_string(x=xvar,y="value",fill=color) 76 | } else { 77 | AES <- ggplot2::aes_string(x=xvar,y="value") 78 | } 79 | 80 | # Create plot: 81 | g <- ggplot2::ggplot(Gathered, AES) + ggplot2::facet_grid(paste0(yfacet," ~ ",xfacet)) + 82 | ggplot2::geom_boxplot(outlier.size = outlier.size,lwd=boxplot.lwd,fatten=boxplot.lwd,position = position_dodge2(preserve = "total")) 83 | 84 | 85 | 86 | if (style == "fancy"){ 87 | g <- g + ggplot2::theme_bw() +# ggplot2::ylim(ylim[1],ylim[2]) + 88 | ggplot2::scale_y_continuous(limits=ylim,breaks=seq(ylim[1],ylim[2],by=0.1)) + 89 | ggplot2::ylab(ylab) + ggplot2::xlab(xlab) + 90 | theme(axis.text.x = element_text(angle = 90, hjust = 1)) + 91 | theme( panel.grid.major.x = element_blank(),panel.grid.minor.x = element_blank()) + 92 | geom_vline(xintercept=seq(1.5, length(unique(eval( parse(text=xvar),envir = Gathered)))-0.5, 1), 93 | lwd=0.5, colour="black", alpha = 0.25) + 94 | theme(legend.position = "top") 95 | } 96 | 97 | 98 | if (print){ 99 | print(g) 100 | invisible(g) 101 | } else { 102 | return(g) 103 | } 104 | } -------------------------------------------------------------------------------- /R/parSim.R: -------------------------------------------------------------------------------- 1 | library("dplyr") 2 | library("parallel") 3 | 4 | # Opzet: 5 | parSim <- function( 6 | ..., # Simulation conditions 7 | expression, # R expression ending in data.frame of results 8 | reps = 1, 9 | write = FALSE, # if TRUE, results are written instead returned as data frame 10 | name = "parSim", 11 | nCores = 1, 12 | export, # character 
string of global objects to export to the cluster. 13 | exclude, # List with dplyr calls to exclude cases. Written as formula 14 | debug=FALSE, 15 | env = parent.frame() 16 | ){ 17 | # Collect the condiitions: 18 | dots <- list(...) 19 | # Expand all conditions: 20 | AllConditions <- do.call(expand.grid,c(dots,list(rep=seq_len(reps),stringsAsFactors=FALSE))) 21 | 22 | # Exclude cases: 23 | if (!missing(exclude)){ 24 | AllConditions <- AllConditions %>% filter_(.dots = exclude) 25 | } 26 | 27 | 28 | # Randomize: 29 | totCondition <- nrow(AllConditions) 30 | if (totCondition > 1){ 31 | AllConditions <- AllConditions[sample(seq_len(totCondition)),] 32 | } 33 | 34 | # Total conditions: 35 | 36 | AllConditions$id <- seq_len(totCondition) 37 | 38 | # Deparse the expression: 39 | expr <- as.expression(substitute(expression)) 40 | 41 | if (nCores > 1){ 42 | nClust <- nCores - 1 43 | 44 | 45 | ###################### 46 | ## use Socket clusters 47 | if (!debug){ 48 | cl <- snow::makeSOCKcluster(nClust) 49 | } else { 50 | cl <- snow::makeSOCKcluster(nClust, outfile = "clusterLOG.txt") 51 | } 52 | 53 | # # Start clusters: 54 | # cl <- makeCluster(getOption("cl.cores", nCores)) 55 | # 56 | # Export the sim conditions: 57 | clusterExport(cl, c("AllConditions","expr","debug"), envir = environment()) 58 | 59 | # Export global objects: 60 | if (!missing(export)){ 61 | clusterExport(cl, export, envir = env) 62 | } 63 | 64 | # Run the loop: 65 | Results <- pbapply::pblapply(seq_len(totCondition), function(i){ 66 | 67 | if (debug){ 68 | cat("\nRunning iteration:",i," / ",nrow(AllConditions),"\nTime:",as.character(Sys.time()),"\n") 69 | print(AllConditions[i,]) 70 | } 71 | 72 | tryRes <- try(df <- eval(expr, envir = AllConditions[i,])) 73 | if (is(tryRes,"try-error")){ 74 | return(list(error = TRUE, errorMessage = as.character(tryRes), id = AllConditions$id[i])) 75 | } 76 | df <- as.data.frame(df) 77 | df$id <- AllConditions$id[i] 78 | df$error <- FALSE 79 | df$errorMessage <- '' 80 | 
df
    }, cl = cl)

    # Stop the cluster:
    stopCluster(cl)
  } else {
    # Single-core run. Copy exported globals into this function's frame so the
    # evaluated expression can see them:
    if (!missing(export)){
      for (i in seq_along(export)){
        assign(export[i], get(export[i],envir = env))
      }

    }

    # Run the loop. Namespace-qualified for consistency with the parallel
    # branch above: pbapply is never attached via library() in this file, so
    # an unqualified pblapply() would fail in a package context.
    Results <- pbapply::pblapply(seq_len(totCondition), function(i){

      if (debug){
        cat("\nRunning iteration:",i," / ",nrow(AllConditions),"\nTime:",as.character(Sys.time()),"\n")
        print(AllConditions[i,])
      }

      # Evaluate the simulation expression in this condition row; catch errors
      # so one failing condition does not abort the whole simulation:
      tryRes <- try(df <- eval(expr, envir = AllConditions[i,]))
      if (is(tryRes,"try-error")){
        return(list(error = TRUE, errorMessage = as.character(tryRes), id = AllConditions$id[i]))
      }

      df <- as.data.frame(df)
      df$id <- AllConditions$id[i]
      df$error <- FALSE
      df$errorMessage <- ''
      df
    })
  }

  # rbind the list:
  Results <- bind_rows(Results)
  Results$errorMessage <- as.character(Results$errorMessage)

  # Left join the results to the conditions:
  AllResults <- AllConditions %>% left_join(Results, by = "id")

  if (write){
    # Write results to "<name>.txt" instead of returning them:
    txtFile <- paste0(name,".txt")
    write.table(AllResults, file = txtFile, col.names=TRUE,
                row.names = FALSE, append=FALSE)
    return(NULL)
  } else {
    return(AllResults)
  }
}
-------------------------------------------------------------------------------- /R/printMethod.R: --------------------------------------------------------------------------------
# Return the relevant literature references for a given default estimator set:
getRefs <- function(x){
  citation <- switch(
    x,
    "none" = "",
    "EBICglasso" = c("Friedman, J. H., Hastie, T., & Tibshirani, R. (2008). Sparse inverse covariance estimation with the graphical lasso. Biostatistics, 9 (3), 432-441.",
                     "Foygel, R., & Drton, M. (2010). 
Extended Bayesian information criteria for Gaussian graphical models. ", 7 | "Friedman, J. H., Hastie, T., & Tibshirani, R. (2014). glasso: Graphical lasso estimation of gaussian graphical models. Retrieved from https://CRAN.R-project.org/package=glasso", 8 | "Epskamp, S., Cramer, A., Waldorp, L., Schmittmann, V. D., & Borsboom, D. (2012). qgraph: Network visualizations of relationships in psychometric data. Journal of Statistical Software, 48 (1), 1-18." 9 | ), 10 | "glasso" = c("Friedman, J. H., Hastie, T., & Tibshirani, R. (2008). Sparse inverse covariance estimation with the graphical lasso. Biostatistics, 9 (3), 432-441.", 11 | "Foygel, R., & Drton, M. (2010). Extended Bayesian information criteria for Gaussian graphical models. , 23 , 2020-2028.", 12 | "Friedman, J. H., Hastie, T., & Tibshirani, R. (2014). glasso: Graphical lasso estimation of gaussian graphical models. Retrieved from https://CRAN.R-project.org/package=glasso", 13 | "Epskamp, S., Cramer, A., Waldorp, L., Schmittmann, V. D., & Borsboom, D. (2012). qgraph: Network visualizations of relationships in psychometric data. Journal of Statistical Software, 48 (1), 1-18." 14 | ), 15 | "IsingFit" = "van Borkulo, C. D., Borsboom, D., Epskamp, S., Blanken, T. F., Boschloo, L., Schoevers, R. A., & Waldorp, L. J. (2014). A new method for constructing networks from binary data. Scientific reports, 4 (5918), 1-10.", 16 | "IsingSampler" = c("Epskamp, S., Maris, G., Waldorp, L., & Borsboom, D. (in press). Network psychometrics. In P. Irwing, D. Hughes, & T. Booth (Eds.), Handbook of psychometrics. New York, NY, USA: Wiley.", 17 | "Epskamp, S. (2014). IsingSampler: Sampling methods and distribution functions for the Ising model. Retrieved from github.com/SachaEpskamp/IsingSampler"), 18 | "huge" = "Zhao, T., Li, X., Liu, H., Roeder, K., Lafferty, J., & Wasserman, L. (2015). huge: High-dimensional undirected graph estimation. 
Retrieved from https://CRAN.R-project.org/package=huge", 19 | "adalasso" = "Kraeamer, N., Schaeafer, J., & Boulesteix, A.-L. (2009). Regularized estimation of large-scale gene association networks using graphical gaussian models. BMC Bioinformatics, 10 (1), 1-24.", 20 | "mgm" = "Jonas M. B. Haslbeck, Lourens J. Waldorp (2016). mgm: Structure Estimation for Time-Varying Mixed Graphical Models in high-dimensional Data arXiv preprint:1510.06871v2 URL http://arxiv.org/abs/1510.06871v2.", 21 | "TMFG" = c("Christensen, A. P., Kenett, Y. N., Aste, T., Silvia, P. J., & Kwapil, T. R. (2018). Network Structure of the Wisconsin Schizotypy Scales-Short Forms: Examining Psychometric Network Filtering Approaches. Behavorial Research Methods. DOI: 10.3758/s13428-018-1032-9", 22 | "Christensen, A. P. (2018). NetworkToolbox: Methods and Measures for Brain, Cognitive, and Psychometric Network Analysis in R"), 23 | "LoGo" = c("Barfuss, W., Massara, G. P., Di Matteo, T., & Aste, T. (2016). Parsimonious modeling with information filtering networks. Physical Review E, 94(6), 062306.", 24 | "Christensen, A. P. (2018). NetworkToolbox: Methods and Measures for Brain, Cognitive, and Psychometric Network Analysis in R"), 25 | "ggmModSelect" = c("Foygel, R., & Drton, M. (2010). Extended Bayesian information criteria for Gaussian graphical models.", 26 | "http://psychosystems.org/qgraph_1.5"), 27 | "graphicalVAR" = c("Abegaz, F., & Wit, E. (2013). Sparse time series chain graphical models for reconstructing genetic networks. Biostatistics, 14(3), 586???599.", 28 | "Epskamp, S., Waldorp, L. J., Mottus, R., & Borsboom, D. (2018). The Gaussian Graphical Model in Cross-sectional and Time-series Data. Multivariate Behavioral Research", 29 | "Rothman, A. J., Levina, E., & Zhu, J. (2010). Sparse multivariate regression with covariance estimation. 
Journal of Computational and Graphical Statistics, 19(4), 947-962.",
                       "Wild, B., Eichler, M., Friederich, H.-C., Hartmann, M., Zipfel, S., & Herzog, W. (2010). A graphical vector autoregressive modeling approach to the analysis of electronic diary data. BMC Medical Research Methodology, 10(1), 28. doi: 10.1186/1471-2288-10-28."
    ),
    "GGMncv" = c(
      "Williams, D. (2020). Beyond Lasso: A Survey of Nonconvex Regularization in Gaussian Graphical Models. PsyArXiv pre-print. https://doi.org/10.31234/osf.io/ad57p"
    )
  )

  # Always append the bootnet tutorial paper:
  citation <- c(citation,"Epskamp, S., Borsboom, D., & Fried, E. I. (2016). Estimating psychological networks and their accuracy: a tutorial paper. arXiv preprint, arXiv:1604.08462.")

  citation
}

# Print method for bootnet objects (bootstrap results):
print.bootnet <- function(x, ...){
  # Name of the supplied object, used in the usage hints printed below.
  # NOTE: the original computed 'name' only inside the single-graph (else)
  # branch, so the multiple-graphs branch referenced an undefined 'name'.
  name <- deparse(substitute(x))[[1]]
  if (nchar(name) > 10) name <- "object"
  if (is.list(x$sample$graph)){
    cat("=== bootnet Results (multiple graphs)===")
    print(x$sample)
    cat("\n")
    cat("\nNumber of bootstrapped networks:",length(x[['boots']]),
        paste0("\nResults of original samples stored in ",name,"$sample"),
        paste0("\nTable of all statistics from original samples stored in ",name,"$sampleTable"),
        paste0("\nResults of bootstraps stored in ",name,"$boots"),
        paste0("\nTable of all statistics from bootstraps stored in ",name,"$bootTable"),
        "\n",
        paste0("\nUse plot(",name,"$sample, graph = '...') to plot estimated network of original sample"),
        paste0("\nUse summary(",name,", graph = '...') to inspect summarized statistics (see ?summary.bootnet for details)"),
        paste0("\nUse plot(",name,", graph = '...') to plot summarized statistics (see ?plot.bootnet for details)"),
        "\n\nRelevant references:\n\n",paste0("\t",getRefs(x$sample$default),collapse="\n")
    )
  } else {

    directed <- x$sample$directed

    # Directed networks report on the full weights matrix; undirected ones
    # only on the upper triangle:
    if (directed){
      ind <- matrix(TRUE,ncol(x$sample$graph),ncol(x$sample$graph))
    } else {
      ind <- upper.tri(x$sample$graph,diag=FALSE)
    }

name <- deparse(substitute(x))[[1]] 70 | if (nchar(name) > 10) name <- "object" 71 | cat("=== bootnet Results ===") 72 | cat("\nNumber of nodes:",nrow(x$sample[['graph']]), 73 | "\nNumber of non-zero edges in sample:",sum(x$sample[['graph']][ind]!=0),"/",sum(ind), 74 | "\nMean weight of sample:",mean(x$sample[['graph']][ind]) , 75 | "\nNumber of bootstrapped networks:",length(x[['boots']]), 76 | paste0("\nResults of original sample stored in ",name,"$sample"), 77 | paste0("\nTable of all statistics from original sample stored in ",name,"$sampleTable"), 78 | paste0("\nResults of bootstraps stored in ",name,"$boots"), 79 | paste0("\nTable of all statistics from bootstraps stored in ",name,"$bootTable"), 80 | "\n", 81 | paste0("\nUse plot(",name,"$sample) to plot estimated network of original sample"), 82 | paste0("\nUse summary(",name,") to inspect summarized statistics (see ?summary.bootnet for details)"), 83 | paste0("\nUse plot(",name,") to plot summarized statistics (see ?plot.bootnet for details)"), 84 | "\n\nRelevant references:\n\n",paste0("\t",getRefs(x$sample$default),collapse="\n") 85 | ) 86 | } 87 | 88 | } 89 | 90 | print.bootnetResult <- function(x, ...){ 91 | name <- deparse(substitute(x))[[1]] 92 | if (nchar(name) > 10) name <- "object" 93 | 94 | # Trick for printing multiple networks: 95 | if (is.list(x$graph)){ 96 | cat(paste0("\n=== Estimated networks ===")) 97 | cat(paste0("\nDefault set used: ",x$default), 98 | "\n", 99 | paste0("\nUse bootnet(",name,") to bootstrap edge weights and centrality indices"), 100 | "\n\nRelevant references:\n\n",paste0("\t",getRefs(x$default),collapse="\n")) 101 | 102 | for (i in 1:length(x$graph)){ 103 | if (x$directed[[i]]){ 104 | ind <- matrix(TRUE,ncol(x$graph[[i]]),ncol(x$graph[[i]])) 105 | } else { 106 | ind <- upper.tri(x$graph[[i]],diag=FALSE) 107 | } 108 | 109 | cat(paste0("\n\n=== ",names(x$graph)[[i]]," ===")) 110 | cat("\nNumber of nodes:",nrow(x[['graph']][[i]]), 111 | "\nNumber of non-zero 
edges:",sum(x[['graph']][[i]][ind]!=0),"/",sum(ind), 112 | "\nMean weight:",mean(x[['graph']][[i]][ind]) , 113 | paste0("\nNetwork stored in ",name,"$graph$",names(x$graph)[[i]]), 114 | paste0("\nUse plot(",name,", graph = '",names(x$graph)[[i]],"') to plot estimated network") 115 | ) 116 | } 117 | } else { 118 | 119 | 120 | directed <- x$directed 121 | 122 | if (directed){ 123 | ind <- matrix(TRUE,ncol(x$graph),ncol(x$graph)) 124 | } else { 125 | ind <- upper.tri(x$graph,diag=FALSE) 126 | } 127 | 128 | name <- deparse(substitute(x))[[1]] 129 | if (nchar(name) > 10) name <- "object" 130 | cat(paste0("\n=== Estimated network ===")) 131 | if (isTRUE(x$thresholded)){ 132 | cat("\nNote: network has been thresholded using 'bootThreshold'") 133 | } 134 | cat("\nNumber of nodes:",nrow(x[['graph']]), 135 | "\nNumber of non-zero edges:",sum(x[['graph']][ind]!=0),"/",sum(ind), 136 | "\nMean weight:",mean(x[['graph']][ind]) , 137 | paste0("\nNetwork stored in ",name,"$graph"), 138 | "\n", 139 | paste0("\nDefault set used: ",x$default), 140 | "\n", 141 | paste0("\nUse plot(",name,") to plot estimated network"), 142 | paste0("\nUse bootnet(",name,") to bootstrap edge weights and centrality indices"), 143 | "\n\nRelevant references:\n\n",paste0("\t",getRefs(x$default),collapse="\n") 144 | ) 145 | } 146 | 147 | } -------------------------------------------------------------------------------- /R/replicationSimulator.R: -------------------------------------------------------------------------------- 1 | # Function to run a standard network simulation study: 2 | # Two methods: dataGenerator function, or using graph, intercepts and model for GGM/Ising. 3 | 4 | replicationSimulator <- function( 5 | input = genGGM(Nvar = 10), # A matrix, or a list with graph and intercepts elements. 
6 | nCases = c(50,100,250,500,1000,2500), # Number of cases 7 | nReps = 100, # Number of repititions per condition 8 | nCores = 1, # Number of computer cores used 9 | default, 10 | dataGenerator, 11 | ..., # estimateNetwork arguments (if none specified, will default to default = "EBICglasso) 12 | moreArgs = list() # List of extra args not intended to be varied as conditions 13 | ){ 14 | # Dots list: 15 | .dots <- list(...) 16 | # default <- match.arg(default) 17 | 18 | # Check default and dataGenerator: 19 | if (missing(default) & missing(dataGenerator)){ 20 | message("'default' and 'dataGenerator' are missing. Setting default = 'EBICglasso'") 21 | default <- "EBICglasso" 22 | } 23 | 24 | if (missing(default) & length(.dots) == 0){ 25 | message("No estimator specified. Setting default = 'EBICglasso'") 26 | default <- "EBICglasso" 27 | } 28 | 29 | # Data generator: 30 | if (!missing(default) & missing(dataGenerator)){ 31 | 32 | if (default == "EBICglasso" || default == "glasso" || default == "pcor" || default == "adalasso" || default == "huge" || default == "ggmModSelect"){ 33 | message("Setting 'dataGenerator = ggmGenerator(ordinal = FALSE)'") 34 | dataGenerator <- ggmGenerator(ordinal = FALSE) 35 | } else if (default == "IsingFit" || default == "IsingSampler"){ 36 | message("Setting 'dataGenerator = IsingGenerator()'") 37 | dataGenerator <- IsingGenerator() 38 | } else { 39 | stop(paste0("Default set '",default, "' not yet supported. 
Please manually specify 'dataGenerator'")) 40 | } 41 | 42 | 43 | } 44 | 45 | # parSim arguments: 46 | Args <- c( 47 | list( 48 | # Conditions: 49 | nCases = nCases, 50 | default = default, 51 | 52 | # Setup:, 53 | write=FALSE, 54 | nCores = nCores, 55 | reps = nReps, 56 | debug=FALSE, 57 | export = c("input","dataGenerator",".dots","moreArgs"), 58 | 59 | expression = expression({ 60 | cor0 <- function(x,y){ 61 | if (all(is.na(x)) || all(is.na(y))){ 62 | return(NA) 63 | } 64 | 65 | if (sd(x,na.rm=TRUE)==0 | sd(y,na.rm=TRUE) == 0){ 66 | return(0) 67 | } 68 | 69 | return(cor(x,y,use="pairwise.complete.obs")) 70 | } 71 | 72 | # Generate the input: 73 | if (is.function(input)){ 74 | inputResults <- input() 75 | } else { 76 | inputResults <- input 77 | } 78 | 79 | # True network: 80 | if (is.list(inputResults)){ 81 | trueNet <- inputResults$graph 82 | } else { 83 | trueNet <- inputResults 84 | } 85 | 86 | # Generate the datasets: 87 | Data1 <- dataGenerator(nCases, inputResults) 88 | Data2 <- dataGenerator(nCases, inputResults) 89 | 90 | # Compute network 1: 91 | args1 <- list() 92 | args1$data <- Data1 93 | args1$verbose <- FALSE 94 | args1$default <- default 95 | args1 <- c(args1,moreArgs) 96 | 97 | for (i in seq_along(.dots)){ 98 | args1[[names(.dots)[i]]] <- get(names(.dots)[i]) 99 | } 100 | suppressWarnings(netResults1 <- do.call(bootnet::estimateNetwork,args1)) 101 | estNet1 <- qgraph::getWmat(netResults1) 102 | 103 | 104 | # Compute network 2: 105 | args2 <- list() 106 | args2$data <- Data2 107 | args2$verbose <- FALSE 108 | args2$default <- default 109 | args2 <- c(args2,moreArgs) 110 | 111 | for (i in seq_along(.dots)){ 112 | args2[[names(.dots)[i]]] <- get(names(.dots)[i]) 113 | } 114 | suppressWarnings(netResults2 <- do.call(bootnet::estimateNetwork,args2)) 115 | estNet2 <- qgraph::getWmat(netResults2) 116 | 117 | 118 | # Compute measures: 119 | ### STORE RESULTS ### 120 | SimulationResults <- list() 121 | 122 | # Estimated edges: 123 | est1 <- 
estNet1[upper.tri(estNet1)] 124 | est2 <- estNet2[upper.tri(estNet2)] 125 | 126 | # Equal? 127 | SimulationResults$identical <- all((est1 == 0) == (est2 == 0)) 128 | 129 | # Correlation between edge wegights: 130 | SimulationResults$correlation <- cor(est1, est2) 131 | 132 | # Correlation nonzero edge wegights: 133 | SimulationResults$correlationNonZero <- cor(est1[est1!=0 & est2 != 0], est2[est1!=0 & est2 != 0]) 134 | 135 | # Jaccard index: 136 | SimulationResults$jaccard <- sum(est1!=0 & est2!=0) / sum(est1!=0 | est2!=0) 137 | 138 | # Percentage of edges in network 1 replicated in network 2: 139 | SimulationResults$replicatedEdges <- sum(est1!=0 & est2!=0)/sum(est1!=0) 140 | 141 | # Percentage of zeroes in network 1 replicated in network 2: 142 | SimulationResults$replicatedZeroes <- sum(est1==0 & est2==0)/sum(est1==0) 143 | 144 | # Centrality: 145 | centEst1 <- qgraph::centrality(estNet1) 146 | centEst2 <- qgraph::centrality(estNet2) 147 | 148 | SimulationResults$strength <- cor0(centEst1$OutDegree,centEst2$OutDegree) 149 | SimulationResults$closeness <- cor0(centEst1$Closeness,centEst2$Closeness) 150 | SimulationResults$betweenness <- cor0(centEst1$Betweenness,centEst2$Betweenness) 151 | 152 | SimulationResults 153 | })), 154 | .dots) 155 | 156 | Results <- do.call(parSim, Args) 157 | 158 | class(Results) <- c("replicationSimulator","data.frame") 159 | return(Results) 160 | } 161 | 162 | -------------------------------------------------------------------------------- /R/replicationSimulator_methods.R: -------------------------------------------------------------------------------- 1 | print.replicationSimulator <- function(x, digits = 2, ...) summary(x, digits = digits, ...) 

summary.replicationSimulator <- function(object, digits = 2, ...){
  # Name of the object as typed by the user; used in the usage hint below.
  # FIX: the original called deparse(substitute(x)), but the argument is
  # named 'object', so the printed hint always read "plot(x)".
  name <- deparse(substitute(object))[[1]]
  if (nchar(name) > 10) name <- "object"

  # Check for errors:
  # FIX: 'collapse' was passed to unique() (where it is silently ignored)
  # instead of paste(), so multiple error messages were never joined.
  if (all(object$error)) stop(paste0("All simulations resulted in errors:\n",
                                     paste(unique(object$errorMessage), collapse = "\n")))

  # Columns that are outcomes/bookkeeping rather than varied conditions:
  Exclude <- c("rep", "id", "identical", "correlation",
               "correlationNonZero", "jaccard", "replicatedEdges", "replicatedZeroes",
               "strength", "closeness", "betweenness", "error", "errorMessage")

  # Remaining columns are the varied simulation conditions:
  Conditions <- names(object)[!names(object) %in% Exclude]

  . <- NULL

  # Format a statistic as "mean (sd)", rounded to 'digits':
  fun <- function(x, digits = 2){
    paste0(round(mean(x, na.rm = TRUE), digits), " (", round(sd(x, na.rm = TRUE), digits), ")")
  }

  # Summarize per condition level:
  df <- object %>% dplyr::select(.data[["correlation"]],
          .data[["correlationNonZero"]], .data[["jaccard"]], .data[["replicatedEdges"]], .data[["replicatedZeroes"]],
          .data[["strength"]], .data[["closeness"]], .data[["betweenness"]], all_of(Conditions)) %>%
    dplyr::group_by_at(Conditions) %>% dplyr::summarize_each(funs(fun(., digits = digits))) %>%
    dplyr::arrange(.data[['nCases']]) %>% as.data.frame

  row.names(df) <- NULL

  cat("=== replicationSimulator Results ===\n\n")
  cat("Mean (SD) values per varied levels:\n\n")
  print(df)

  cat(paste0("\n\nUse plot(",name,") to plot results (nCases only), or as.data.frame(",name,") to see all results."))
  invisible(df)
}

# Plot method: delegate to the netSimulator plot with replication-specific outcomes.

plot.replicationSimulator <- function(x, yvar = c("correlation","jaccard","replicatedEdges","replicatedZeroes"), ...){
  plot.netSimulator(x, yvar = yvar, ...)
}
--------------------------------------------------------------------------------
/R/simGraph.R:
--------------------------------------------------------------------------------
genGGM <- function(
  Nvar,
  p = 0, # Rewiring probability if graph = "smallworld" or "cluster", or connection probability if graph = "random". If cluster, can add multiple p's for each cluster, e.g., "c(.1, .5)"
  nei = 1,
  parRange = c(0.5,1),
  constant = 1.5,
  propPositive = 0.5,
  clusters = NULL, # number of clusters if graph = "cluster"
  graph = c("smallworld","random", "scalefree", "hub", "cluster")
){
  graph <- match.arg(graph)

  ## Approach from
  # Yin, J., & Li, H. (2011). A sparse conditional gaussian graphical model for analysis of genetical genomics data. The annals of applied statistics, 5(4), 2630.

  # Simulate graph structure:
  if (graph == "smallworld"){
    # Watts Strogatz small-world
    trueKappa <- as.matrix(igraph::get.adjacency(igraph::watts.strogatz.game(1,Nvar,nei,p)))
  } else if (graph == "random"){
    # Random network:
    trueKappa <- as.matrix(igraph::get.adjacency(igraph::erdos.renyi.game(Nvar, p)))
  } else if (graph == "scalefree") {
    if(!requireNamespace("BDgraph")) stop("'BDgraph' package needs to be installed.")

    # NOTE(review): unlike the "hub"/"cluster" branches below, this branch does
    # not force class(trueKappa) <- "matrix" — confirm whether that is needed.
    trueKappa <- BDgraph::bdgraph.sim(p = Nvar, graph = "scale-free")$G
  } else if (graph == "hub") {
    if(!requireNamespace("BDgraph")) stop("'BDgraph' package needs to be installed.")

    trueKappa <- BDgraph::bdgraph.sim(p = Nvar, graph = "hub")$G
    class(trueKappa) <- "matrix"
  } else if (graph == "cluster") {
    if(!requireNamespace("BDgraph")) stop("'BDgraph' package needs to be installed.")

    trueKappa <- BDgraph::bdgraph.sim(p = Nvar, graph = "cluster", prob = p, class = clusters)$G
    class(trueKappa) <- "matrix"
  }

  # Make edges negative and add weights. Note: -1 is drawn with probability
  # 'propPositive' on purpose — qgraph::wi2net() negates off-diagonal elements
  # of the precision matrix, so a negative kappa entry becomes a positive edge.
  trueKappa[upper.tri(trueKappa)] <- trueKappa[upper.tri(trueKappa)] * sample(c(-1,1),sum(upper.tri(trueKappa)),TRUE,prob=c(propPositive,1-propPositive)) *
    runif(sum(upper.tri(trueKappa)), min(parRange ),max(parRange ))

  # Symmetrize:
  trueKappa[lower.tri(trueKappa)] <- t(trueKappa)[lower.tri(trueKappa)]

  # Make pos def (diagonal dominance, then rescale to unit diagonal):
  diag(trueKappa) <- constant * rowSums(abs(trueKappa))
  diag(trueKappa) <- ifelse(diag(trueKappa)==0,1,diag(trueKappa))
  trueKappa <- trueKappa/diag(trueKappa)[row(trueKappa)]
  trueKappa <- (trueKappa + t(trueKappa)) / 2

  return(as.matrix(qgraph::wi2net(trueKappa)))
}

# genGGM <- function(
#   Nvar,
#   p = 0, # Rewiring probability if graph = "smallworld", or connection probability if graph = "random"
#   nei = 1,
#   parRange = c(0.5,1),
#   constant = 1.5,
#   propPositive = 0.5,
#   graph = c("smallworld","random")
# ){
#   graph <- match.arg(graph)
#
#
#   ## Approach from
#   # Yin, J., & Li, H. (2011). A sparse conditional gaussian graphical model for analysis of genetical genomics data. The annals of applied statistics, 5(4), 2630.
#
#
#   # Simulate graph structure:
#   if (graph == "smallworld"){
#     # Watts Strogatz small-world
#     trueKappa <- as.matrix(igraph::get.adjacency(igraph::watts.strogatz.game(1,Nvar,nei,p)))
#   } else if (graph == "random"){
#     # Ranodm network:
#     trueKappa <- as.matrix(igraph::get.adjacency(igraph::erdos.renyi.game(Nvar, p)))
#   }
#
#
#   # Make edges negative and add weights:
#   trueKappa[upper.tri(trueKappa)] <- trueKappa[upper.tri(trueKappa)] * sample(c(-1,1),sum(upper.tri(trueKappa)),TRUE,prob=c(propPositive,1-propPositive)) *
#     runif(sum(upper.tri(trueKappa)), min(parRange ),max(parRange ))
#
#   # Symmetrize:
#   trueKappa[lower.tri(trueKappa)] <- t(trueKappa)[lower.tri(trueKappa)]
#
#   # Make pos def:
#   diag(trueKappa) <- constant * rowSums(abs(trueKappa))
#   diag(trueKappa) <- ifelse(diag(trueKappa)==0,1,diag(trueKappa))
#   trueKappa <- trueKappa/diag(trueKappa)[row(trueKappa)]
#   trueKappa <- (trueKappa + t(trueKappa)) / 2
#
#   return(as.matrix(qgraph::wi2net(trueKappa)))
# }
--------------------------------------------------------------------------------
/R/summaryMethod.R:
--------------------------------------------------------------------------------
# Creates the summary table
summary.bootnet <- function(
  object, # bootnet object
  graph,
  statistics = c("edge", "intercept", "strength", "closeness", "betweenness","distance"), # stats to include in the table
  perNode = FALSE, # Set to true to investigate nodewise stability per node.
  rank = FALSE,
  tol = sqrt(.Machine$double.eps),
  ...
){
  # When multiple graphs were estimated, a specific graph must be selected:
  if (length(unique(object$sampleTable$graph)) > 1 && missing(graph)){
    stop("Argument 'graph' can not be missing when multiple graphs have been estimated.")
  }
  if (!missing(graph)){
    object$sampleTable <- object$sampleTable[object$sampleTable$graph %in% graph,]
    object$bootTable <- object$bootTable[object$bootTable$graph %in% graph,]
  }

  # Quantiles over empty selections return NA; map those to 0:
  naTo0 <- function(x){
    x[is.na(x)] <- 0
    x
  }

  # Use (average/min/max) ranks instead of raw values if requested:
  if (rank){
    object$bootTable$value <- object$bootTable$rank_avg
    object$sampleTable$value <- object$sampleTable$rank_avg

    object$bootTable$value_min <- object$bootTable$rank_min
    # FIX: was 'object$sampleTable$rank_min <- object$sampleTable$rank_min',
    # a self-assignment that left sampleTable without a value_min column:
    object$sampleTable$value_min <- object$sampleTable$rank_min

    object$bootTable$value_max <- object$bootTable$rank_max
    object$sampleTable$value_max <- object$sampleTable$rank_max
  } else {

    object$bootTable$value_min <- object$bootTable$value
    # FIX: was assigned to 'rank_min' instead of 'value_min':
    object$sampleTable$value_min <- object$sampleTable$value

    object$bootTable$value_max <- object$bootTable$value
    object$sampleTable$value_max <- object$sampleTable$value
  }

  # Returns quantiles for type = "observation" and correlations with original for type = "node"
  if (!object$type %in% c("person","node")){

    if (object$type == "jackknife"){

      # Jackknife pseudo-values (PS) with normal-theory CIs:
      N <- object$sampleSize
      tab <- object$bootTable %>%
        dplyr::filter(.data[['type']] %in% statistics) %>%
        dplyr::left_join(object$sampleTable %>% dplyr::select(.data[['type']],.data[['id']],.data[['node1']],.data[['node2']],sample = .data[['value']]), by=c("id","type","node1","node2")) %>%
        dplyr::mutate(PS = N*.data[['sample']] - (N-1)*.data[['value']]) %>%
        dplyr::group_by(.data[['type']], .data[['node1']], .data[['node2']], .data[['id']]) %>%
        dplyr::summarize(
          mean = mean(.data[['value']]),
          sample = mean(.data[['PS']]),
          var = (1/(N-1)) * sum((.data[['PS']] - .data[['value']])^2),
          CIlower = .data[['sample']] - 2 * sqrt(.data[['var']]/N),
          CIupper = .data[['sample']] + 2 * sqrt(.data[['var']]/N)
        ) %>%
        dplyr::select(.data[['type']], .data[['id']], .data[['node1']], .data[['node2']], .data[['sample']], .data[['mean']], .data[['CIlower']], .data[['CIupper']])

    } else {

      # Per-statistic bootstrap quantiles (type-6), both over all bootstraps
      # and over the non-zero bootstraps only ("_non0" columns):
      tab <- object$bootTable %>%
        dplyr::filter(.data[['type']] %in% statistics) %>%
        dplyr::group_by(.data[['type']], .data[['node1']], .data[['node2']], .data[['id']]) %>%
        dplyr::summarize(
          mean = mean(.data[['value']],na.rm=TRUE),
          var = var(.data[['value']],na.rm=TRUE),
          sd = sd(.data[['value']],na.rm=TRUE),
          prop0 = mean(abs(.data[['value']]) < tol) %>% naTo0,
          q2.5 = quantile(.data[['value_min']], 2.5/100, na.rm = TRUE, type = 6) %>% naTo0,
          q97.5 = quantile(.data[['value_max']], 97.5/100, na.rm = TRUE, type = 6) %>% naTo0,
          q2.5_non0 = quantile(.data[['value_min']][!abs(.data[['value_min']]) < tol], 2.5/100, na.rm = TRUE, type = 6) %>% naTo0,
          mean_non0 = mean(.data[['value']][!abs(.data[['value']]) < tol], na.rm = TRUE) %>% naTo0,
          q97.5_non0 = quantile(.data[['value_max']][!abs(.data[['value_max']]) < tol], 97.5/100, na.rm = TRUE, type = 6) %>% naTo0,
          var_non0 = var(.data[['value']][!abs(.data[['value']]) < tol],na.rm=TRUE) %>% naTo0,
          sd_non0 = sd(.data[['value']][!abs(.data[['value']]) < tol],na.rm=TRUE) %>% naTo0
        ) %>%
        dplyr::left_join(object$sampleTable %>% dplyr::select(.data[['type']],.data[['id']],.data[['node1']],.data[['node2']],sample = .data[['value']]), by=c("id","type","node1","node2")) %>%
        dplyr::mutate(
          CIlower = .data[['sample']]-2*.data[['sd']], CIupper = .data[['sample']] + 2*.data[['sd']],
          CIlower_non0 = .data[['mean_non0']] - 2*.data[['sd_non0']], CIupper_non0 = .data[['mean_non0']] + 2*.data[['sd_non0']]) %>%
        dplyr::select(.data[['type']], .data[['id']], .data[['node1']], .data[['node2']], .data[['sample']], .data[['mean']], .data[['sd']], .data[['CIlower']], .data[['CIupper']],
                      .data[['q2.5']], .data[['q97.5']], .data[['q2.5_non0']], .data[['mean_non0']], .data[['q97.5_non0']], .data[['var_non0']], .data[['sd_non0']], .data[['prop0']])
    }

  } else {

    # Nodewise ("node"/"person" dropping bootstraps):
    tab <- object$bootTable %>%
      dplyr::filter(.data[['type']] %in% statistics) %>%
      dplyr::left_join(object$sampleTable %>% dplyr::select(.data[['type']],.data[['id']],.data[['node1']],.data[['node2']],sample = .data[['value']]), by=c("id","type","node1","node2"))

    if (perNode){
      # Quantiles of the bootstrapped values per individual node/edge:
      tab <- tab %>% group_by(.data[['id']], .data[['type']], .data[['nNode']], .data[['nPerson']]) %>%
        dplyr::summarize(
          mean = mean(.data[['value']],na.rm=TRUE),
          var = var(.data[['value']],na.rm=TRUE),
          sd = sd(.data[['value']],na.rm=TRUE),
          q1 = quantile(.data[['value']],1/100, na.rm = TRUE, type = 6) %>% naTo0,
          q2.5 = quantile(.data[['value']], 2.5/100, na.rm = TRUE, type = 6) %>% naTo0,
          q5 = quantile(.data[['value']], 5/100, na.rm = TRUE, type = 6) %>% naTo0,
          q25 = quantile(.data[['value']], 25/100, na.rm = TRUE, type = 6) %>% naTo0,
          q50 = quantile(.data[['value']], 50/100, na.rm = TRUE, type = 6) %>% naTo0,
          q75 = quantile(.data[['value']], 75/100, na.rm = TRUE, type = 6) %>% naTo0,
          q95 = quantile(.data[['value']], 95/100, na.rm = TRUE, type = 6) %>% naTo0,
          q97.5 = quantile(.data[['value']], 97.5/100, na.rm = TRUE, type = 6) %>% naTo0,
          q99 = quantile(.data[['value']], 99/100, na.rm = TRUE, type = 6) %>% naTo0,
          prop0 = mean(abs(.data[['value']]) < tol),
          q2.5_non0 = quantile(.data[['value']][!abs(.data[['value']]) < tol], 2.5/100, na.rm = TRUE, type = 6) %>% naTo0,
          mean_non0 = mean(.data[['value']][!abs(.data[['value']]) < tol], na.rm = TRUE) %>% naTo0,
          q97.5_non0 = quantile(.data[['value']][!abs(.data[['value']]) < tol], 97.5/100, na.rm = TRUE, type = 6) %>% naTo0,
          var_non0 = var(.data[['value']][!abs(.data[['value']]) < tol],na.rm=TRUE) %>% naTo0,
          sd_non0 = sd(.data[['value']][!abs(.data[['value']]) < tol],na.rm=TRUE) %>% naTo0
        ) %>% mutate(
          CIlower = .data[['mean']] - 2*.data[['sd']], CIupper = .data[['mean']] + 2*.data[['sd']],
          CIlower_non0 = .data[['mean_non0']] - 2*.data[['sd_non0']], CIupper_non0 = .data[['mean_non0']] + 2*.data[['sd_non0']]
        ) %>% arrange(.data[['nNode']],.data[['nPerson']])

    } else {

      # Quantiles of the correlation between bootstrapped and original values:
      tab <- tab %>% group_by(.data[['name']], .data[['type']], .data[['nNode']], .data[['nPerson']]) %>%
        summarize(cor = suppressWarnings(cor(.data[['value']],sample, use = "pairwise.complete.obs"))) %>%
        dplyr::group_by(.data[['nNode']], .data[['nPerson']], .data[['type']]) %>%
        dplyr::summarize(
          mean = mean(.data[['cor']],na.rm=TRUE),
          var = var(.data[['cor']],na.rm=TRUE),
          sd = sd(.data[['cor']],na.rm=TRUE),
          q1 = quantile(.data[['cor']],1/100, na.rm = TRUE, type = 6) %>% naTo0,
          q2.5 = quantile(.data[['cor']], 2.5/100, na.rm = TRUE, type = 6) %>% naTo0,
          q5 = quantile(.data[['cor']], 5/100, na.rm = TRUE, type = 6) %>% naTo0,
          q25 = quantile(.data[['cor']], 25/100, na.rm = TRUE, type = 6) %>% naTo0,
          q50 = quantile(.data[['cor']], 50/100, na.rm = TRUE, type = 6) %>% naTo0,
          q75 = quantile(.data[['cor']], 75/100, na.rm = TRUE, type = 6) %>% naTo0,
          q95 = quantile(.data[['cor']], 95/100, na.rm = TRUE, type = 6) %>% naTo0,
          q97.5 = quantile(.data[['cor']], 97.5/100, na.rm = TRUE, type = 6) %>% naTo0,
          q99 = quantile(.data[['cor']], 99/100, na.rm = TRUE, type = 6) %>% naTo0
        ) %>% arrange(.data[['nNode']], .data[['nPerson']])

    }
  }

  return(tab)
}
--------------------------------------------------------------------------------
/R/transformations.R:
--------------------------------------------------------------------------------
# Copy-paste from psychonetrics:
# Map each non-missing value to the normal quantile of its empirical
# cumulative proportion (ties share a rank via ties.method = "min").
quantiletransform <- function(x){
  xNoNA <- x[!is.na(x)]
  ord <- order(xNoNA)
  sorted <- sort(xNoNA)
  nBelow <- rank(sorted, ties.method = "min")
  p <- nBelow / (max(nBelow)+1)
  q <- qnorm(p)
  xTrans <- x
  xTrans[!is.na(xTrans)][ord] <- q
  return(xTrans)
}

# Apply the quantile transformation to every column:
quantile_transformation <- function(x){
  for (i in 1:ncol(x)){
    x[,i] <- quantiletransform(x[,i])
  }
  x
}

# Replace every column by its within-column ranks, keeping NAs in place:
rank_transformation <- function(x,ties.method = c("average", "first", "last", "random", "max", "min")){
  ties.method <- match.arg(ties.method)
  for (i in 1:ncol(x)){
    x[!is.na(x[,i]),i] <- rank(x[!is.na(x[,i]),i], ties.method = ties.method)
  }
  x
}
--------------------------------------------------------------------------------
/R/zzz.R:
--------------------------------------------------------------------------------
# I copied this piece of code from Lavaan mainly:

# Print the package name/version and a pointer to the issue tracker on attach.
.onAttach <- function(libname, pkgname) {
    version <- read.dcf(file=system.file("DESCRIPTION", package=pkgname),
                        fields="Version")
    packageStartupMessage("This is ",paste(pkgname, version))
    # packageStartupMessage(pkgname, " is BETA software! Please report any bugs.")
    packageStartupMessage("For questions and issues, please see github.com/SachaEpskamp/bootnet.")
}

# Please do not look further down this script.















# I am warning you...
28 | 29 | 30 | 31 | 32 | 33 | 34 | 35 | 36 | 37 | 38 | 39 | 40 | 41 | 42 | 43 | 44 | 45 | 46 | # Ok, here goes some stuff to fool the R check rather than changing all the codes... I am sorry.. 47 | P <- contain0 <- data <- fill <- id1 <- id2 <- label <- lower <- na.omit <- name <- node1 <- node2 <- 48 | original <- prop <- quantile <- runif <- sd <- stability <- type <- upper <- value <- value1 <- value2 <- nPerson <- NULL -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 |

bootnet

2 | 3 | 4 |

5 | CRAN version 6 | CRAN checks 7 | R-CMD-check 8 |

9 | 10 | ## Installation 11 | 12 | - to install from CRAN run `install.packages("bootnet")` 13 | - to install the latest version from GitHub run `remotes::install_github("SachaEpskamp/bootnet")` 14 | 15 | --- 16 | 17 | To cite `bootnet` please use: 18 | - Epskamp, S., Borsboom, D., & Fried, E. I. (2018). Estimating psychological networks and their accuracy: A tutorial paper. Behavior Research Methods, 50(1), 195–212. [https://doi.org/10.3758/s13428-017-0862-1](https://doi.org/10.3758/s13428-017-0862-1) 19 | -------------------------------------------------------------------------------- /inst/CITATION: -------------------------------------------------------------------------------- 1 | citHeader("To cite bootnet in publications, please use:") 2 | 3 | bibentry( 4 | bibtype = "Article", 5 | title = "Estimating Psychological Networks and their Accuracy: A Tutorial Paper", 6 | author = c("Sacha Epskamp", 7 | "Denny Borsboom", 8 | "Eiko I. Fried"), 9 | journal = "Behavior Research Methods", 10 | year = "2018", 11 | volume = "50", 12 | pages = "195--212", 13 | textVersion = "Epskamp, S., Borsboom, D., & Fried, E. I. (2018). Estimating psychological networks and their accuracy: A tutorial paper. Behavior research methods, 50, 195-212") 14 | 15 | -------------------------------------------------------------------------------- /man/IsingGenerator.Rd: -------------------------------------------------------------------------------- 1 | \name{IsingGenerator} 2 | \alias{IsingGenerator} 3 | %- Also NEED an '\alias' for EACH other topic documented here. 4 | \title{ 5 | Generates a function that simulates data from the Ising model 6 | } 7 | \description{ 8 | Uses \code{\link[IsingSampler]{IsingSampler}} to generate the data. 9 | } 10 | \usage{ 11 | IsingGenerator(...) 12 | } 13 | %- maybe also 'usage' for other objects documented here. 
14 | \arguments{ 15 | \item{\dots}{ 16 | Arguments passed to \code{\link[IsingSampler]{IsingSampler}} 17 | } 18 | } 19 | \value{ 20 | A function with as first argument the sample size and as second argument a named list, with element \code{graph} encoding a weights matrix and element \code{intercepts} encoding thresholds. 21 | } 22 | \author{ 23 | Sacha Epskamp 24 | } -------------------------------------------------------------------------------- /man/binarize.Rd: -------------------------------------------------------------------------------- 1 | \name{binarize} 2 | \alias{binarize} 3 | %- Also NEED an '\alias' for EACH other topic documented here. 4 | \title{ 5 | Binarizes a dataset 6 | } 7 | \description{ 8 | This function will transform data into binary data (0,1). If the data is already binary, this function does nothing. 9 | } 10 | \usage{ 11 | binarize(x, split = "median", na.rm = TRUE, removeNArows = TRUE, verbose = TRUE) 12 | } 13 | %- maybe also 'usage' for other objects documented here. 14 | \arguments{ 15 | \item{x}{ 16 | A data frame or matrix 17 | } 18 | \item{split}{ 19 | Either a function to split on (as character or as function) or a vector. e.g., \code{split = "mean"} will split every variable on the mean of that variable, \code{split=2} will make every value above 2 a 1 and every value below 2 a 0 and a vector of the same length as each variable in the dataset will use those elements to split. 20 | } 21 | \item{na.rm}{ 22 | The \code{na.rm} argument used in the split function. 23 | } 24 | \item{removeNArows}{ 25 | Logical, should rows with \code{NA} be removed? 
26 | } 27 | \item{verbose}{Output progress to the console?} 28 | } 29 | 30 | \value{ 31 | A binarized data frame 32 | } 33 | \author{ 34 | Sacha Epskamp 35 | } 36 | -------------------------------------------------------------------------------- /man/bootInclude.Rd: -------------------------------------------------------------------------------- 1 | \name{bootInclude} 2 | \alias{bootInclude} 3 | %- Also NEED an '\alias' for EACH other topic documented here. 4 | \title{ 5 | Inclusion proportion graph 6 | } 7 | \description{ 8 | This function takes bootstrap results and returns a inclusion probability network (edge weights indicate how often a certain edge was included in the model). Note that the plotting method automatically uses a black-white color scheme (as edges are not signed and always positive). 9 | } 10 | \usage{ 11 | bootInclude(bootobject, verbose = TRUE) 12 | } 13 | %- maybe also 'usage' for other objects documented here. 14 | \arguments{ 15 | \item{bootobject}{ 16 | Nonparametric bootstrap results from \code{\link{bootnet}} 17 | } 18 | \item{verbose}{ 19 | Logical, should progress be reported to the console? 20 | } 21 | } 22 | 23 | \value{ 24 | A \code{bootnetResult} object with the following elements: 25 | \item{graph}{The weights matrix of the network} 26 | \item{intercepts}{The intercepts} 27 | \item{results}{The results of the estimation procedure} 28 | \item{labels}{A vector with node labels} 29 | \item{nNodes}{Number of nodes in the network} 30 | \item{nPerson}{Number of persons in the network} 31 | \item{input}{Input used, including the result of the default set used} 32 | 33 | %% ~Describe the value returned 34 | %% If it is a LIST, use 35 | %% \item{comp1 }{Description of 'comp1'} 36 | %% \item{comp2 }{Description of 'comp2'} 37 | %% ... 
}
\author{
Sacha Epskamp
}
\seealso{
\code{\link{bootnet}}, \code{\link{estimateNetwork}}
}
\examples{
\dontrun{
# BFI Extraversion data from psychTools package:
library("psychTools")
data(bfi)
# Subset of data:
bfiSub <- bfi[1:250,1:25]

# Estimate ggmModSelect networks (not stepwise to increase speed):
Network <- estimateNetwork(bfiSub, default = "ggmModSelect", corMethod = "cor",
              stepwise = FALSE)

# Bootstrap 100 values, using 8 cores (100 to increase speed, preferably 1000+):
boots <- bootnet(Network, nBoots = 100, nCores = 8)

# Compute inclusion-proportion network:
Network_inclusion <- bootInclude(boots)

# Plot:
plot(Network_inclusion)
}
}
--------------------------------------------------------------------------------
/man/bootThreshold.Rd:
--------------------------------------------------------------------------------
\name{bootThreshold}
\alias{bootThreshold}
%- Also NEED an '\alias' for EACH other topic documented here.
\title{
Threshold network based on bootstrapped intervals
}
\description{
This function takes the output of \code{\link{bootnet}} and returns a network as if it had been estimated using \code{\link{estimateNetwork}}, but with edges removed (set to zero) based on some significance level.
}
\usage{
bootThreshold(bootobject, alpha = 0.05, verbose = TRUE, thresholdIntercepts = FALSE)
}
%- maybe also 'usage' for other objects documented here.
\arguments{
  \item{bootobject}{
Nonparametric bootstrap results from \code{\link{bootnet}}
}
  \item{alpha}{
Significance level
}
  \item{verbose}{
Logical, should progress be reported to the console?
}
  \item{thresholdIntercepts}{
Logical, should intercepts also be thresholded?
26 | } 27 | } 28 | \value{ 29 | A \code{bootnetResult} object with the following elements: 30 | \item{graph}{The weights matrix of the network} 31 | \item{intercepts}{The intercepts} 32 | \item{results}{The results of the estimation procedure} 33 | \item{labels}{A vector with node labels} 34 | \item{nNodes}{Number of nodes in the network} 35 | \item{nPerson}{Number of persons in the network} 36 | \item{input}{Input used, including the result of the default set used} 37 | 38 | %% ~Describe the value returned 39 | %% If it is a LIST, use 40 | %% \item{comp1 }{Description of 'comp1'} 41 | %% \item{comp2 }{Description of 'comp2'} 42 | %% ... 43 | } 44 | \author{ 45 | Sacha Epskamp 46 | } 47 | \seealso{ 48 | \code{\link{bootnet}}, \code{\link{estimateNetwork}} 49 | } 50 | \examples{ 51 | \dontrun{ 52 | # BFI Extraversion data from psychTools package: 53 | library("psychTools") 54 | data(bfi) 55 | bfiSub <- bfi[,1:25] 56 | 57 | # Estimate unregularized network: 58 | Network <- estimateNetwork(bfiSub, default = "pcor", corMethod = "cor") 59 | 60 | # Bootstrap 1000 values, using 8 cores: 61 | boots <- bootnet(Network, nBoots = 1000, nCores = 8) 62 | 63 | # Threshold network: 64 | Network_thresholded <- bootThreshold(boots) 65 | 66 | # Plot: 67 | plot(Network_thresholded) 68 | } 69 | } 70 | 71 | -------------------------------------------------------------------------------- /man/bootnet-package.Rd: -------------------------------------------------------------------------------- 1 | \name{bootnet-package} 2 | \alias{bootnet-package} 3 | \docType{package} 4 | \title{ 5 | Bootstrap Methods for Various Network Estimation Routines 6 | } 7 | \description{ 8 | Bootstrap standard errors on various network estimation routines, such as EBICglasso from the qgraph package and IsingFit from the IsingFit package. 
See \code{\link{bootnet}} 9 | } 10 | 11 | \author{ 12 | Sacha Epskamp 13 | 14 | Maintainer: Sacha Epskamp 15 | } 16 | \seealso{ 17 | \code{\link{bootnet}} 18 | } 19 | -------------------------------------------------------------------------------- /man/bootnet.Rd: -------------------------------------------------------------------------------- 1 | \name{bootnet} 2 | \alias{bootnet} 3 | %- Also NEED an '\alias' for EACH other topic documented here. 4 | \title{ 5 | Bootstrapped network estimation 6 | } 7 | \description{ 8 | This function can be used to bootstrap network estimation methods so that the spread of parameter and centrality estimates can be assessed. Most important methods are \code{type = 'nonparametric'} for the non-parametric bootstrap and \code{type = 'case'} for the case-dropping bootstrap. See also Epskamp, Borsboom and Fried (2016) for more details. 9 | } 10 | \usage{ 11 | bootnet(data, nBoots = 1000, default = c("none", "EBICglasso", 12 | "ggmModSelect", "pcor", "IsingFit", "IsingSampler", 13 | "huge", "adalasso", "mgm", "relimp", "cor", "TMFG", 14 | "ggmModSelect", "LoGo", "SVAR_lavaan", "GGMncv"), type 15 | = c("nonparametric", "parametric", "node", "person", 16 | "jackknife", "case"), nCores = 1, statistics = 17 | c("edge", "strength", "outStrength", "inStrength"), 18 | model = c("detect", "GGM", "Ising", "graphicalVAR"), 19 | fun, verbose = TRUE, labels, alpha = 1, caseMin = 20 | 0.05, caseMax = 0.75, caseN = 10, subNodes, subCases, 21 | computeCentrality = TRUE, propBoot = 1, replacement = 22 | TRUE, graph, sampleSize, intercepts, weighted, signed, 23 | directed, includeDiagonal = FALSE, communities, 24 | useCommunities, bridgeArgs = list(), library = 25 | .libPaths(), memorysaver = TRUE, ...) 26 | 27 | } 28 | %- maybe also 'usage' for other objects documented here. 29 | \arguments{ 30 | \item{data}{ 31 | A data frame or matrix containing the raw data. Must be numeric, integer or ordered factors. 
32 | } 33 | \item{nBoots}{ 34 | Number of bootstraps 35 | } 36 | \item{default}{ 37 | A string indicating the method to use. See documentation at \code{\link[bootnet]{estimateNetwork}}. 38 | } 39 | \item{type}{ 40 | The kind of bootstrap method to use. 41 | } 42 | \item{nCores}{ 43 | Number of cores to use in computing results. Set to 1 to not use parallel computing. 44 | } 45 | \item{statistics}{ 46 | Vector indicating which statistics to store. Options are: 47 | \describe{ 48 | \item{\code{"edge"}}{Edge-weight} 49 | \item{\code{"strength"}}{Degree or node-strength} 50 | \item{\code{"outStrength"}}{Out-degree or Out-strength} 51 | \item{\code{"inStrength"}}{In-degree or In-strength} 52 | \item{\code{"expectedInfluence"}}{Expected Influence} 53 | \item{\code{"outExpectedInfluence"}}{Outgoing expected influence} 54 | \item{\code{"inExpectedInfluence"}}{Incoming expected influence} 55 | \item{\code{"bridgeInDegree"}}{Bridge in-degree (see \code{\link[networktools]{bridge}})} 56 | \item{\code{"bridgeOutDegree"}}{Bridge out-degree (see \code{\link[networktools]{bridge}})} 57 | \item{\code{"bridgeStrength"}}{Bridge-strength (see \code{\link[networktools]{bridge}})} 58 | \item{\code{"bridgeCloseness"}}{Bridge-closeness (see \code{\link[networktools]{bridge}})} 59 | \item{\code{"bridgeBetweenness"}}{Bridge-betweenness (see \code{\link[networktools]{bridge}})} 60 | \item{\code{"rspbc"}}{Randomized shortest paths betweenness centrality (see \code{\link[NetworkToolbox]{rspbc}})} 61 | \item{\code{"hybrid"}}{Hybrid centrality (see \code{\link[NetworkToolbox]{hybrid}})} 62 | \item{\code{"eigenvector"}}{Eigenvector centrality (see \code{\link[NetworkToolbox]{eigenvector}})} 63 | } 64 | Can contain \code{"edge"}, \code{"strength"}, \code{"closeness"}, \code{"betweenness"}, \code{"length"}, \code{"distance"}, \code{"expectedInfluence"}, \code{"inExpectedInfluence"}, \code{"outExpectedInfluence"}. By default, length and distance are not stored. 
65 | } 66 | \item{model}{ 67 | The modeling framework to use. Automatically detects if data is binary or not. 68 | } 69 | \item{fun}{ 70 | A custom estimation function, when no default set is used. This must be a function that takes the data as input (first argument) and returns either a weights matrix or a list containing the elements \code{"graph"} for the weights matrix, \code{"intercepts"} for the intercepts (optional) and \code{"results"} for the full estimation results (optional). 71 | } 72 | \item{verbose}{ 73 | Logical. Should progress of the function be printed to the console? 74 | } 75 | \item{labels}{ 76 | A character vector containing the node labels. If omitted the column names of the data are used. 77 | } 78 | \item{alpha}{ 79 | The centrality tuning parameter as used in \code{\link[qgraph]{centrality}}. 80 | } 81 | \item{subNodes}{ 82 | Range of nodes to sample in node-drop bootstrap 83 | } 84 | \item{caseMin}{Minimum proportion of cases to drop when \code{type = "case"}.} 85 | \item{caseMax}{Maximum proportion of cases to drop when \code{type = "case"}.} 86 | \item{caseN}{Number of sampling levels to test when \code{type = "case"}.} 87 | \item{subCases}{ 88 | Range of persons to sample in person-drop bootstrap 89 | } 90 | \item{computeCentrality}{ 91 | Logical, should centrality be computed? 92 | } 93 | \item{propBoot}{ 94 | Proportion of persons to sample in bootstraps. Set to lower than 1 for m out of n bootstrap 95 | } 96 | \item{replacement}{ 97 | Logical, should replacement be used in bootstrap sampling? 98 | } 99 | \item{graph}{A given network structure to use in parametric bootstrap.} 100 | \item{sampleSize}{The samplesize to use in parametric bootstrap.} 101 | \item{intercepts}{Intercepts to use in parametric bootstrap.} 102 | \item{weighted}{Logical, should the analyzed network be weighted?} 103 | \item{signed}{Logical, should the analyzed network be signed?} 104 | \item{directed}{Logical, is the analyzed network directed? 
Usually does not have to be set and is detected automatically.} 105 | \item{includeDiagonal}{Logical, should diagonal elements (self-loops) be included in the bootstrap? Only used when \code{directed = TRUE}.} 106 | \item{communities}{ 107 | Used for bridge centrality measures (see \code{\link[networktools]{bridge}}). 108 | } 109 | \item{useCommunities}{ 110 | Used for bridge centrality measures (see \code{\link[networktools]{bridge}}). 111 | } 112 | \item{library}{ 113 | Library location to be used in parallel computing. 114 | } 115 | \item{memorysaver}{ 116 | Logical. If TRUE (recommended) then raw bootstrapped data and results are not stored in the output object. This saves a lot of memory. Set this only to FALSE if you need the raw results or bootstrap data.} 117 | \item{bridgeArgs}{ 118 | List of arguments used in the 'bridge' function for computing bridge centrality 119 | } 120 | \item{\dots}{ 121 | Additional arguments used in the estimator function. 122 | } 123 | } 124 | 125 | \value{ 126 | A \code{bootnet} object with the following elements: 127 | \item{sampleTable}{ A data frame containing all estimated values on the real sample. } 128 | \item{bootTable}{ A data frame containing all estimated values on all bootstrapped samples. } 129 | \item{sample}{ A \code{bootnetResult} object with plot and print method containing the estimated network of the real sample. } 130 | \item{boots}{A list of \code{bootnetResult} objects containing the raw bootstrap results.} 131 | } 132 | 133 | \references{ 134 | Epskamp, S., Borsboom, D., & Fried, E. I. (2018). Estimating psychological networks and their accuracy: A tutorial paper. Behavior Research Methods, 50(1), 195-212. 
135 | } 136 | \author{ 137 | Sacha Epskamp 138 | } 139 | \seealso{ 140 | \code{\link{estimateNetwork}}, \code{\link{differenceTest}}, \code{\link{corStability}}, \code{\link{plot.bootnet}}, \code{\link{summary.bootnet}} 141 | } 142 | \examples{ 143 | # BFI Extraversion data from psychTools package: 144 | library("psychTools") 145 | data(bfi) 146 | bfiSub <- bfi[,1:25] 147 | 148 | # Estimate network: 149 | Network <- estimateNetwork(bfiSub, default = "EBICglasso") 150 | 151 | # Centrality indices: 152 | library("qgraph") 153 | centralityPlot(Network) 154 | 155 | \donttest{ 156 | # Estimated network: 157 | plot(Network, layout = 'spring') 158 | 159 | ### Non-parametric bootstrap ### 160 | # Bootstrap 1000 values, using 8 cores: 161 | Results1 <- bootnet(Network, nBoots = 1000, nCores = 8) 162 | 163 | # Plot bootstrapped edge CIs: 164 | plot(Results1, labels = FALSE, order = "sample") 165 | 166 | # Plot significant differences (alpha = 0.05) of edges: 167 | plot(Results1, "edge", plot = "difference",onlyNonZero = TRUE, 168 | order = "sample") 169 | 170 | # Plot significant differences (alpha = 0.05) of node strength: 171 | plot(Results1, "strength", plot = "difference") 172 | 173 | # Test for difference in strength between node "A1" and "C2": 174 | differenceTest(Results1, "A1", "C2", "strength") 175 | 176 | ### Case-drop bootstrap ### 177 | # Bootstrap 1000 values, using 8 cores: 178 | Results2 <- bootnet(Network, nBoots = 1000, nCores = 8, 179 | type = "case") 180 | 181 | # Plot centrality stability: 182 | plot(Results2) 183 | 184 | # Compute CS-coefficients: 185 | corStability(Results2) 186 | } 187 | } 188 | -------------------------------------------------------------------------------- /man/corStability.Rd: -------------------------------------------------------------------------------- 1 | \name{corStability} 2 | \alias{corStability} 3 | %- Also NEED an '\alias' for EACH other topic documented here. 
4 | \title{ 5 | Correlation stability coefficient 6 | } 7 | \description{ 8 | This coefficient denotes the estimated maximum number of cases that can be dropped from the data to retain, with 95\% probability, a correlation of at least 0.7 (default) between statistics based on the original network and statistics computed with fewer cases. This coefficient should not be below 0.25 and is preferably above 0.5. See also Epskamp, Borsboom and Fried (2016) for more details. 9 | } 10 | \usage{ 11 | corStability(x, cor = 0.7, statistics = "all", verbose = TRUE) 12 | } 13 | %- maybe also 'usage' for other objects documented here. 14 | \arguments{ 15 | \item{x}{ 16 | Output of \code{\link{bootnet}}. Must be case-drop bootstrap. 17 | } 18 | \item{cor}{ 19 | The correlation level to test at. 20 | } 21 | \item{statistics}{ 22 | The statistic(s) to test for. Can also be \code{"all"}. 23 | } 24 | \item{verbose}{Logical, should information on the progress be printed to the console?} 25 | } 26 | \references{ 27 | Epskamp, S., Borsboom, D., & Fried, E. I. (2016). Estimating psychological networks and their accuracy: a tutorial paper. arXiv preprint, arXiv:1604.08462. 
28 | } 29 | \author{ 30 | Sacha Epskamp 31 | } 32 | 33 | \seealso{ 34 | \code{\link{bootnet}} 35 | } 36 | 37 | \examples{ 38 | \dontrun{ 39 | # BFI Extraversion data from psychTools package: 40 | library("psychTools") 41 | data(bfi) 42 | bfiSub <- bfi[,1:25] 43 | 44 | # Estimate network: 45 | Network <- estimateNetwork(bfiSub, default = "EBICglasso") 46 | 47 | # Bootstrap 1000 values, using 8 cores: 48 | # Bootstrap 1000 values, using 8 cores: 49 | Results2 <- bootnet(Network, nBoots = 1000, nCores = 8, 50 | type = "case") 51 | 52 | # Compute CS-coefficients: 53 | corStability(Results2) 54 | } 55 | } 56 | -------------------------------------------------------------------------------- /man/differenceTest.Rd: -------------------------------------------------------------------------------- 1 | \name{differenceTest} 2 | \alias{differenceTest} 3 | %- Also NEED an '\alias' for EACH other topic documented here. 4 | \title{ 5 | Bootstrapped difference test 6 | } 7 | \description{ 8 | This function computes the bootstrapped difference test for edge-weights and centrality indices. A confidence interval is constructed on the difference of two values, and the test is deemed significant if zero is not in this confidence interval. See also Epskamp, Borsboom and Fried (2016) for more details. 9 | } 10 | \usage{ 11 | differenceTest(bootobject, x, y, measure = c("strength", "closeness", "betweenness"), 12 | alpha = 0.05, x2, y2, verbose = TRUE) 13 | } 14 | %- maybe also 'usage' for other objects documented here. 15 | \arguments{ 16 | \item{bootobject}{ 17 | Output of \code{\link{bootnet}}. Must be nonparametric or parametric bootstrap. 18 | } 19 | \item{x}{ 20 | A character string indicating the ID of a node or an edge, or a number indicating the node or edge. For an edge, can be e.g., \code{"1--2"} or \code{"x = 1, x2 = 2"}. 21 | } 22 | \item{y}{ 23 | A character string indicating the ID of a node or an edge, or a number indicating the node or edge. 
For an edge, can be e.g., \code{"1--2"} or \code{"y = 1, y2 = 2"}. 24 | } 25 | \item{measure}{ 26 | Measure to test. Can be \code{"strength"}, \code{"closeness"}, \code{"betweenness"}, \code{"edge"} or \code{"distance"}. 27 | } 28 | \item{alpha}{ 29 | Significance level to test at. Note that the actual significance level is influenced by the number of bootstrap samples, and is returned in a message. 30 | } 31 | \item{x2}{ 32 | Second node in an edge. Optional. 33 | } 34 | \item{y2}{ 35 | Second node in an edge. Optional. 36 | } 37 | \item{verbose}{ 38 | Logical, should the message indicating the actual significance level be printed? 39 | } 40 | } 41 | 42 | \references{ 43 | Epskamp, S., Borsboom, D., & Fried, E. I. (2016). Estimating psychological networks and their accuracy: a tutorial paper. arXiv preprint, arXiv:1604.08462. 44 | } 45 | \author{ 46 | Sacha Epskamp 47 | } 48 | 49 | \seealso{ 50 | \code{\link{bootnet}} 51 | } 52 | 53 | \examples{ 54 | \dontrun{ 55 | # BFI Extraversion data from psychTools package: 56 | library("psychTools") 57 | data(bfi) 58 | bfiSub <- bfi[,1:25] 59 | 60 | # Estimate network: 61 | Network <- estimateNetwork(bfiSub, default = "EBICglasso") 62 | 63 | # Bootstrap 1000 values, using 8 cores: 64 | Results1 <- bootnet(Network, nBoots = 1000, nCores = 8) 65 | 66 | # Test for difference in strength between node "A1" and "C2": 67 | differenceTest(Results1, "A1", "C2", "strength") 68 | 69 | # Test for difference between edge N1--N2 and N3--N4: 70 | differenceTest(Results1, "N1--N2", "N3--N4", "edge") 71 | 72 | # Alternative: 73 | differenceTest(Results1, x = "N1", x2 = "N2", y = "N3", 74 | y2 = "N4", measure = "edge") 75 | } 76 | } 77 | -------------------------------------------------------------------------------- /man/estimateNetwork.Rd: -------------------------------------------------------------------------------- 1 | \name{estimateNetwork} 2 | \alias{estimateNetwork} 3 | \alias{bootnet_EBICglasso} 4 | \alias{bootnet_pcor} 5 | 
\alias{bootnet_cor} 6 | \alias{bootnet_IsingFit} 7 | \alias{bootnet_IsingSampler} 8 | \alias{bootnet_adalasso} 9 | \alias{bootnet_huge} 10 | \alias{bootnet_mgm} 11 | \alias{bootnet_relimp} 12 | \alias{bootnet_TMFG} 13 | \alias{bootnet_LoGo} 14 | \alias{bootnet_ggmModSelect} 15 | \alias{bootnet_graphicalVAR} 16 | \alias{bootnet_SVAR_lavaan} 17 | \alias{bootnet_piecewiseIsing} 18 | \alias{bootnet_GGMncv} 19 | %- Also NEED an '\alias' for EACH other topic documented here. 20 | \title{ 21 | Estimate a network structure 22 | } 23 | \description{ 24 | This function allows for flexible estimation of a network structure using various R packages and model frameworks. This is typically done by using one of the default sets. See details for manual specification. See also Epskamp, Borsboom and Fried (2016) for more details. IMPORTANT: THE ESTIMATOR FUNCTIONS (e.g., \code{fun = bootnet_pcor}) ARE NOT INTENDED TO BE USED MANUALY (see details). 25 | } 26 | \usage{ 27 | estimateNetwork(data, default = c("none", "EBICglasso", "pcor", 28 | "IsingFit", "IsingSampler", "huge", "adalasso", "mgm", 29 | "relimp", "cor", "TMFG", "ggmModSelect", "LoGo", 30 | "graphicalVAR", "piecewiseIsing", "SVAR_lavaan", "GGMncv"), 31 | fun, labels, verbose = TRUE, .dots = list(), weighted = TRUE, 32 | signed = TRUE, directed, datatype, checkNumeric = FALSE, ..., 33 | .input, memorysaver = FALSE) 34 | 35 | bootnet_EBICglasso(data, tuning = 0.5, corMethod = c("cor", "cov", 36 | "cor_auto", "npn", "spearman"), missing = 37 | c("pairwise", "listwise", "fiml", "stop"), sampleSize 38 | = c("pairwise_average", "maximum", "minimum", 39 | "pairwise_maximum", "pairwise_minimum", 40 | "pairwise_average_v1.5", "pairwise_maximum_v1.5", 41 | "pairwise_minimum_v1.5"), verbose = TRUE, corArgs = 42 | list(), refit = FALSE, principalDirection = FALSE, 43 | lambda.min.ratio = 0.01, nlambda = 100, threshold = 44 | FALSE, unlock = FALSE, nonPositiveDefinite = c("stop", 45 | "continue"), transform = c("none", "rank", 46 | 
"quantile"), ...) 47 | 48 | bootnet_pcor(data, corMethod = c("cor", "cov", "cor_auto", "npn", 49 | "spearman"), missing = c("pairwise", "listwise", 50 | "fiml", "stop"), sampleSize = c("pairwise_average", 51 | "maximum", "minimum", "pairwise_maximum", 52 | "pairwise_minimum", "pairwise_average_v1.5", 53 | "pairwise_maximum_v1.5", "pairwise_minimum_v1.5"), 54 | verbose = TRUE, corArgs = list(), threshold = 0, alpha 55 | = 0.05, adjacency, principalDirection = FALSE, unlock 56 | = FALSE, nonPositiveDefinite = c("stop", "continue"), 57 | transform = c("none", "rank", "quantile")) 58 | 59 | bootnet_cor(data, corMethod = c("cor", "cov", "cor_auto", "npn", 60 | "spearman"), missing = c("pairwise", "listwise", 61 | "fiml", "stop"), sampleSize = c("pairwise_average", 62 | "maximum", "minimum", "pairwise_maximum", 63 | "pairwise_minimum", "pairwise_average_v1.5", 64 | "pairwise_maximum_v1.5", "pairwise_minimum_v1.5"), 65 | verbose = TRUE, corArgs = list(), threshold = 0, alpha 66 | = 0.05, principalDirection = FALSE, unlock = FALSE, 67 | nonPositiveDefinite = c("stop", "continue"), transform 68 | = c("none", "rank", "quantile")) 69 | 70 | bootnet_IsingFit(data, tuning = 0.25, missing = c("listwise", "stop"), 71 | verbose = TRUE, rule = c("AND", "OR"), split = 72 | "median", principalDirection = FALSE, 73 | min_sum = -Inf, unlock = FALSE) 74 | 75 | bootnet_IsingSampler(data, missing = c("listwise", "stop"), verbose = TRUE, 76 | split = "median", method = c("uni", "ll", "pl", "bi"), 77 | principalDirection = FALSE, unlock = FALSE, threshold 78 | = FALSE, alpha = 0.01, min_sum = -Inf, rule = c("AND", 79 | "OR")) 80 | 81 | bootnet_adalasso(data, missing = c("listwise", "stop"), verbose = TRUE, 82 | nFolds = 10, principalDirection = FALSE, unlock = 83 | FALSE, transform = c("none", "rank", "quantile"), ...) 
84 | 85 | bootnet_huge(data, tuning = 0.5, missing = c("listwise", "stop"), 86 | verbose = TRUE, npn = TRUE, criterion = c("ebic", 87 | "ric", "stars"), principalDirection = FALSE, 88 | lambda.min.ratio = 0.01, nlambda = 100, unlock = 89 | FALSE, transform = c("none", "rank", "quantile"), ...) 90 | 91 | bootnet_mgm(data, type, level, tuning = 0.25, missing = 92 | c("listwise", "stop"), verbose = TRUE, criterion = 93 | c("EBIC", "CV"), nFolds = 10, order = 2, rule = 94 | c("AND", "OR"), binarySign, unlock = FALSE, transform 95 | = c("none", "rank", "quantile"), ...) 96 | 97 | bootnet_relimp(data, normalized = TRUE, type = "lmg", 98 | structureDefault = c("none", "custom", "EBICglasso", 99 | "pcor", "IsingFit", "IsingSampler", "huge", 100 | "adalasso", "mgm", "cor", "TMFG", "ggmModSelect", 101 | "LoGo"), missing = c("listwise", "stop"), ..., verbose 102 | = TRUE, threshold = 0, unlock = FALSE, transform = 103 | c("none", "rank", "quantile")) 104 | 105 | bootnet_TMFG(data, graphType = c("cor", "pcor"), corMethod = 106 | c("cor", "cov", "cor", "npn", "cor_auto"), missing = 107 | c("pairwise", "listwise", "fiml", "stop"), verbose = 108 | TRUE, corArgs = list(), principalDirection = FALSE, 109 | unlock = FALSE, transform = c("none", "rank", 110 | "quantile"), ...) 111 | 112 | bootnet_LoGo(data, corMethod = c("cor", "cov", "cor", "npn", 113 | "cor_auto"), missing = c("pairwise", "listwise", 114 | "fiml", "stop"), verbose = TRUE, corArgs = list(), 115 | principalDirection = FALSE, unlock = FALSE, transform 116 | = c("none", "rank", "quantile"), ...) 117 | 118 | bootnet_graphicalVAR(data, tuning = 0.5, verbose = TRUE, principalDirection 119 | = FALSE, missing = c("listwise", "stop"), unlock = 120 | FALSE, transform = c("none", "rank", "quantile"), ...) 
121 | 122 | bootnet_ggmModSelect(data, tuning = 0, corMethod = c("cor", "cov", 123 | "cor_auto", "npn", "spearman"), missing = 124 | c("pairwise", "listwise", "fiml", "stop"), sampleSize 125 | = c("pairwise_average", "maximum", "minimum", 126 | "pairwise_maximum", "pairwise_minimum", 127 | "pairwise_average_v1.5", "pairwise_maximum_v1.5", 128 | "pairwise_minimum_v1.5"), verbose = TRUE, corArgs = 129 | list(), principalDirection = FALSE, start = 130 | c("glasso", "empty", "full"), stepwise = TRUE, nCores 131 | = 1, unlock = FALSE, nonPositiveDefinite = c("stop", 132 | "continue"), transform = c("none", "rank", 133 | "quantile"), ...) 134 | 135 | bootnet_piecewiseIsing(data, cutoff, missing = c("listwise", "stop"), verbose 136 | = TRUE, IsingDefault = c("IsingSampler", "IsingFit", 137 | "custom"), zeroThreshold = 1, minimalN = ncol(data) + 138 | 1, unlock = FALSE, ...) 139 | 140 | bootnet_SVAR_lavaan(data, verbose = TRUE, principalDirection = FALSE, 141 | missing = c("listwise", "stop"), criterion = "bic", 142 | eqThreshold = 1e-04, tempWhitelist, tempBlacklist, 143 | contWhitelist, contBlacklist, minimalModInd = 10, 144 | unlock = FALSE, transform = c("none", "rank", 145 | "quantile"), ...) 146 | 147 | bootnet_GGMncv(data, penalty = c("atan", "selo", "exp", "log", 148 | "sica", "scad", "mcp", "lasso"), corMethod = c("cor", 149 | "cov", "cor_auto", "npn", "spearman"), missing = 150 | c("pairwise", "listwise", "fiml", "stop"), sampleSize 151 | = c("pairwise_average", "maximum", "minimum", 152 | "pairwise_maximum", "pairwise_minimum", 153 | "pairwise_average_v1.5", "pairwise_maximum_v1.5", 154 | "pairwise_minimum_v1.5"), verbose = TRUE, corArgs = 155 | list(), principalDirection = FALSE, unlock = FALSE, 156 | nonPositiveDefinite = c("stop", "continue"), transform 157 | = c("none", "rank", "quantile"), ...) 158 | } 159 | %- maybe also 'usage' for other objects documented here. 160 | \arguments{ 161 | \item{data}{ 162 | A data frame or matrix containing the raw data. 
Must be numeric, integer or ordered factors. 163 | } 164 | \item{default}{ 165 | A string indicating the method to use. Specifying a \code{default} sets default values to \code{prepFun}, \code{prepArgs}, \code{estFun}, \code{estArgs}, \code{graphFun}, \code{graphArgs}, \code{intFun} and \code{intArgs}. Setting a \code{default} can be omitted but that does require specifying all above mentioned arguments. Current options are: 166 | \describe{ 167 | \item{\code{"EBICglasso"}}{Gaussian Markov random field estimation using graphical LASSO and extended Bayesian information criterion to select optimal regularization parameter. Using \code{\link[qgraph]{EBICglasso}} from the qgraph package. Calls \code{bootnet_EBICglasso}.} 168 | \item{\code{"IsingFit"}}{Ising model estimation using LASSO regularized nodewise logistic regression and extended Bayesian information criterion to select optimal regularization parameter. Using \code{\link[IsingFit]{IsingFit}} from the IsingFit package. Calls \code{bootnet_IsingFit}.} 169 | \item{\code{"IsingSampler"}}{Calls the \code{\link[IsingSampler]{EstimateIsing}} function from the IsingSampler package. } 170 | \item{\code{"pcor"}}{Partial correlation network (non-regularized Gaussian Markov random field), using \code{\link[corpcor]{cor2pcor}} from the corpcor package. Calls \code{bootnet_pcor}.} 171 | \item{\code{"cor"}}{Correlation network.} 172 | % \item{\code{"adalasso"}}{Uses the \code{\link[parcor]{adalasso.net}} from the parcor package. Calls \code{bootnet_adalasso}.} 173 | \item{\code{"huge"}}{Uses EBIC model selection of GGM networks estimated via the glasso algorithm as implemented in the huge package (as opposed to glasso and qgraph packages used in \code{default = "EBICglasso"}). Uses nonparanormal transformation in preparing the data and does not use polychoric correlations. 
Calls \code{bootnet_huge}.} 174 | \item{\code{"mgm"}}{Estimates a Mixed graphical model by using the the mgm (or mgmfit in older versions) function of the mgm package. Calls \code{bootnet_mgm}.} 175 | \item{\code{"TMFG"}}{Estimates a Triangulated Maximally Filtered Graph, using the function \code{TMFG} of the NetworkToolbox package. Calls \code{bootnet_TMFG}. Note that this estimates a *correlation network* by default (use the 'graphType' argument to estimate a partial correlation network instead).} 176 | \item{\code{"LoGo"}}{Estimates a Local/Global Sparse Inverse Covariance Matrix, using the function \code{LoGo} of the NetworkToolbox package. Calls \code{bootnet_LoGo}.} 177 | \item{\code{"relimp"}}{Estimates a (directed) relative importance network, using the function 'calc.relimp' of the 'relaimpo' package. The 'structureDefault' argument can be used to use a different default set for estimating the structure of the graph. Calls \code{bootnet_relimp}.} 178 | \item{\code{"ggmModSelect"}}{Estimates an unregularized GGM using the glasso algorithm and stepwise model selection, using the 'ggmModSelect' function from the qgraph package. Calls \code{bootnet_ggmModSelect}.} 179 | \item{\code{"graphicalVAR"}}{Estimates a graphical VAR model using the graphicalVAR package. This results in two networks which can be plotted using the 'graph' argument in the plot method. Calls \code{bootnet_graphicalVAR}.} 180 | } 181 | See details section for a more detailed description. 182 | } 183 | \item{fun}{ 184 | A custom estimation function, when no default set is used. This must be a function that takes the data as input (first argument) and returns either a weights matrix or a list containing the elements \code{"graph"} for the weights matrix, \code{"intercepts"} for the intercepts (optional) and \code{"results"} for the full estimation results (optional). 
185 | } 186 | \item{tuning}{EBIC tuning parameter, used in 'EBICglasso', 'IsingFit', 'huge', 'mgm' and 'ggmModSelect' default sets. Note that the default value differs: 'EBICglasso', 'huge' and 'mgm' use 0.5, 'IsingFit' uses 0.25 and 'ggmModSelect uses 0.} 187 | \item{corMethod}{Correlation method, used in 'EBICglasso' and 'pcor' default sets. \code{"cor_auto"} uses \code{\link[qgraph]{cor_auto}} for polychoric and polyserial correlations, \code{"cov"} uses the \code{\link{cov}} function for covariances, \code{"cor"} will use the \code{\link{cor}} function for correlations and \code{"npn"} will apply the nonparanormal transformation (via \code{\link[huge]{huge.npn}}) and then compute correlations.} 188 | \item{missing}{How to handle missing data? \code{"pairwise"} for pairwise deletion, \code{"listwise"} for listwise deletion, \code{"fiml"} for full-information maximum likelihood and \code{"stop"} to stop with an error.} 189 | \item{sampleSize}{How will sample size be computed in EBICglasso default set? The default \code{"pairwise_average"} will set the sample size to the average of sample sizes used for each individual correlation. Other options are \code{"pairwise_maximum"} (largest sample sized used for each individual correlation), \code{"pairwise_minimum"} (smallest sample sized used for each individual correlation), \code{"maximum"} (takes total number of rows including rows with NA), and \code{"minimum"} (takes total number of rows that contain no NA). The arguments "pairwise_average_v1.5", "pairwise_minimum_v1.5", and "pairwise_maximum_v1.5" can be used to mimic bootnet's behavior in version 1.5 and earlier (which also computed the sample size based on the sample sizes for the variances).} 190 | \item{corArgs}{A list with arguments for the function used defined by \code{corMethod}.} 191 | \item{threshold}{ 192 | Thresholding to use in partial correlation networks. 
Can be a fixed number to threshold all absolute edges below this value, \code{'locfdr'} for local FDR, or any option corresponding to adjustments in \code{\link[psych]{corr.p}} (\code{'none'}, \code{'sig'}, \code{'holm'}, \code{'hochberg'}, \code{'hommel'}, \code{'bonferroni'}, \code{'BH'}, \code{'BY'} or \code{'fdr'})). Can also be used for \code{default = "IsingSampler"} but can only be set to a logical enabling or disabling significance thresholding. 193 | } 194 | \item{refit}{Logical used in EBICglasso default set: should the estimated model structure be refitted without LASSO regularization?} 195 | \item{rule}{The rule to use to select an edge in nodewise estimation. \code{"AND"} to only select in edge if both regression coefficients are nonzero and \code{"OR"} if only one is nonzero. Used in 'IsingFit' and 'mgm' default sets.} 196 | \item{split}{A function or character string (\code{"median"} or \code{"mean"}) indicating how to binarize values when estimating an Ising model.} 197 | \item{method}{The estimation method used in the IsingSampler default set (see \code{\link[IsingSampler]{EstimateIsing}}).} 198 | \item{npn}{Logical, should nonparanormal be used in huge default set?} 199 | \item{criterion}{The criterion used in model selection. \code{"ebic"}, \code{"ric"} or \code{"stars"} in the huge default set or \code{"EBIC"} or \code{"CV"} in the mgm default set.} 200 | \item{nFolds}{Number of folds used in k-fold cross-validation.} 201 | \item{type}{For mgm, see mgm or mgmfit; for relative importance networks, see \code{\link[relaimpo]{calc.relimp}}} 202 | \item{level}{See \code{\link[mgm]{mgm}}. Automatically set if not assigned.} 203 | \item{order}{Order up until including which interactions are included in the model. See \code{\link[mgm]{mgm}}. Automatically set if not assigned.} 204 | \item{binarySign}{See \code{\link[mgm]{mgm}}. 
Automatically set if not assigned.} 205 | \item{normalized}{Should normalized relative importance be used in relative importance networks?} 206 | \item{structureDefault}{In relative importance networks, default set used to compute the graph structure. Any other arguments used (using ...) are sent to the graph estimator function as well.} 207 | \item{graphType}{\code{"cor"} to estimate a correlation network and \code{"pcor"} to estimate a partial correlation network (GGM)} 208 | \item{alpha}{Significance level to test at.} 209 | \item{principalDirection}{ 210 | Rescales variables according to the sign of the first eigen-vector. This will lead to most correlations to be positive (positive manifold), leading to negative edges to be substantively interpretable. 211 | } 212 | \item{stepwise}{Logical indicating if 'ggmModSelect' should use stepwise estimation.} 213 | \item{start}{See \code{\link[qgraph]{ggmModSelect}}} 214 | \item{labels}{ 215 | A character vector containing the node labels. If omitted the column names of the data are used. 216 | } 217 | \item{verbose}{ 218 | Logical, currently only used when \code{default = "EBICglasso"} in the \code{cor_auto} function. 219 | } 220 | 221 | \item{weighted}{Logical, should the analyzed network be weighted?} 222 | \item{signed}{Logical, should the analyzed network be signed?} 223 | \item{directed}{Logical, is the analyzed network directed? Usually does not have to be set and is detected automatically.} 224 | \item{datatype}{ 225 | \code{"normal"} if the data argument is a data frame, or \code{"graphicalVAR"} if the data argument is a data list that can be used as input to the graphicalVAR package.} 226 | \item{checkNumeric}{ 227 | Logical: should the data be checked to be numeric? 
228 | } 229 | \item{lambda.min.ratio}{ 230 | Minimal lambda ratio (LASSO tuning parameter) 231 | } 232 | \item{nlambda}{ 233 | Number of LASSO tuning parameters to test 234 | } 235 | \item{nCores}{ 236 | Number of cores to use in estimating networks 237 | } 238 | \item{.dots}{ 239 | A list of arguments used in the estimation function set by a default set or by the \code{fun} argument. 240 | } 241 | \item{\dots}{ 242 | A list of arguments used in the estimation function set by a default set or by the \code{fun} argument. 243 | } 244 | \item{.input}{Used internally in the bootnet function. Do not use.} 245 | \item{memorysaver}{ 246 | Logical. If TRUE attempts to save memory (RAM) by removing some objects from the output. Used by bootnet by default for bootstraps.} 247 | \item{cutoff}{ 248 | Cutoff score for sum-score to condition on when using \code{default = "piecewiseIsing"}. This is *experimental*! 249 | } 250 | \item{IsingDefault}{ 251 | Default set for Ising model structure estimation in piecewise Ising estimation. This is *experimental*! 252 | } 253 | \item{zeroThreshold}{ 254 | Used in piecewise Ising estimation. Proportion of edges needed to be exactly 0 in pieces to set edge to zero in final network. This is *experimental*! 255 | } 256 | \item{minimalN}{ 257 | Used in piecewise Ising estimation. Minimal sample sizes needed in piece estimation. This is *experimental*! 258 | } 259 | \item{eqThreshold}{ 260 | Used in SVAR_lavaan estimation (stepup SVAR estimation). This is *experimental*! Maximum difference in criterion to decide if two models are equivalent (and select one at random). 261 | } 262 | \item{tempWhitelist}{ 263 | Used in SVAR_lavaan estimation (step up SVAR estimation). This is *experimental*! Matrix with edges to be whitelisted in the temporal model. The matrix should contain two columns and a row for each edge. The elements should be characters indicating the variable names of each edge (from, to). 
264 | } 265 | \item{tempBlacklist}{ 266 | Used in SVAR_lavaan estimation (step up SVAR estimation). This is *experimental*! Matrix with edges to be blacklisted in the temporal model. The matrix should contain two columns and a row for each edge. The elements should be characters indicating the variable names of each edge (from, to). 267 | } 268 | \item{contWhitelist}{ 269 | Used in SVAR_lavaan estimation (step up SVAR estimation). This is *experimental*! Matrix with edges to be whitelisted in the contemporaneous model. The matrix should contain two columns and a row for each edge. The elements should be characters indicating the variable names of each edge (from, to). 270 | } 271 | \item{contBlacklist}{ 272 | Used in SVAR_lavaan estimation (step up SVAR estimation). This is *experimental*! Matrix with edges to be blacklisted in the contemporaneous model. The matrix should contain two columns and a row for each edge. The elements should be characters indicating the variable names of each edge (from, to). 273 | } 274 | \item{minimalModInd}{ 275 | Minimal modification index to consider when adding parameters in SVAR search. 276 | } 277 | \item{adjacency}{ 278 | An 'adjacency' matrix indicating the graph structure (zeroes indicate a missing edge). 279 | } 280 | \item{nonPositiveDefinite}{ 281 | Set to \code{"stop"} to stop with an error when the input matrix is not positive definite, and to \code{"continue"} (old behavior) to continue anyway. 282 | } 283 | \item{unlock}{ 284 | Set to \code{TRUE} to not result in a standard error. This is to prevent using the inner functions separately (see details below). 285 | } 286 | \item{transform}{Should data be transformed before estimating the network? \code{"rank"} will call \code{\link{rank_transformation}} and \code{"quantile"} will call \code{\link{quantile_transformation}}.} 287 | \item{penalty}{Penalty to use in the GGMncv methods.} 288 | \item{min_sum}{ The minimum sum score that is artificially possible in the dataset.
Defaults to -Inf. Set this only if you know a lower sum score is not possible in the data, for example due to selection bias.} 289 | \item{AND}{Logical, should an AND-rule (both regressions need to be significant) or OR-rule (one of the regressions needs to be significant) be used?} 290 | } 291 | \details{ 292 | 293 | The user can manually specify an estimation method by assigning a custom function to the \code{'fun'} argument. This function must take data as input and output an estimated network. The functions \code{bootnet_} correspond to the functions used when using default sets. E.g., \code{default = "pcor"} sets \code{fun = bootnet_pcor}. As the \code{...} leads to any argument to \code{estimateNetwork} to be passed to the estimator function, the arguments described above in these estimator functions can be used for the appropriate default method. For example, if \code{default = "pcor"}, the arguments of \code{fun = bootnet_pcor} can be used in \code{estimateNetwork}. IMPORTANT NOTE: DO NOT USE THE ESTIMATOR FUNCTIONS (e.g., \code{fun = bootnet_pcor}) YOURSELF, THEY ARE ONLY INCLUDED HERE TO SHOW WHICH ARGUMENTS YOU CAN USE IN \code{estimateNetwork}. 294 | 295 | 296 | } 297 | \references{ 298 | Epskamp, S., Borsboom, D., & Fried, E. I. (2016). Estimating psychological networks and their accuracy: a tutorial paper. arXiv preprint, arXiv:1604.08462.
299 | } 300 | \author{ 301 | Sacha Epskamp 302 | } 303 | 304 | \seealso{ 305 | \code{\link{bootnet}} 306 | } 307 | \examples{ 308 | # BFI Extraversion data from psychTools package: 309 | library("psychTools") 310 | data(bfi) 311 | bfiSub <- bfi[,1:25] 312 | 313 | # Estimate network: 314 | Network <- estimateNetwork(bfiSub, default = "EBICglasso") 315 | 316 | \dontrun{ 317 | # Some pointers: 318 | print(Network) 319 | 320 | # Estimated network: 321 | plot(Network, layout = 'spring') 322 | 323 | # Centrality indices: 324 | library("qgraph") 325 | centralityPlot(Network) 326 | 327 | # BIC model selection: 328 | Network_BIC <- estimateNetwork(bfiSub, default = "EBICglasso", tuning = 0) 329 | 330 | # Ising model: 331 | Network_BIC <- estimateNetwork(bfiSub, default = "IsingFit") 332 | } 333 | } 334 | -------------------------------------------------------------------------------- /man/genGGM.Rd: -------------------------------------------------------------------------------- 1 | \name{genGGM} 2 | \alias{genGGM} 3 | %- Also NEED an '\alias' for EACH other topic documented here. 4 | \title{ 5 | Generates a GGM small-world network. 6 | } 7 | \description{ 8 | Simulates a GGM as described by Yin and Li (2011), using the Watts and Strogatz (1998) algorithm for generating the graph structure (see \code{\link[igraph]{watts.strogatz.game}}). 9 | } 10 | \usage{ 11 | genGGM(Nvar, p = 0, nei = 1, parRange = c(0.5,1), constant = 1.5, propPositive = 0.5, 12 | clusters = NULL, graph = c("smallworld","random", "scalefree", "hub", "cluster")) 13 | } 14 | %- maybe also 'usage' for other objects documented here. 15 | \arguments{ 16 | \item{Nvar}{ 17 | Number of nodes 18 | } 19 | \item{p}{ 20 | Rewiring probability if graph = "smallworld" or "cluster", or connection probability if graph = "random". If cluster, can add multiple p's for each cluster, e.g., "c(.1, .5)" 21 | } 22 | \item{nei}{ 23 | Neighborhood (see \code{\link[igraph]{watts.strogatz.game}}). 
24 | } 25 | \item{parRange}{ 26 | Range of partial correlation coefficients to be originally sampled. 27 | } 28 | \item{constant}{ 29 | A constant as described by Yin and Li (2011). 30 | } 31 | \item{propPositive}{ 32 | Proportion of edges to be set positive. 33 | } 34 | \item{clusters}{ 35 | Number of clusters if graph = "cluster" 36 | } 37 | \item{graph}{Type of graph to simulate} 38 | } 39 | \references{ 40 | Yin, J., and Li, H. (2011). A sparse conditional gaussian graphical model for analysis of genetical genomics data. The annals of applied statistics, 5(4), 2630. 41 | 42 | Watts, D. J., & Strogatz, S. H. (1998). Collective dynamics of `small-world' networks. nature, 393(6684), 440-442. 43 | } 44 | \author{ 45 | Sacha Epskamp 46 | } 47 | 48 | -------------------------------------------------------------------------------- /man/ggmGenerator.Rd: -------------------------------------------------------------------------------- 1 | \name{ggmGenerator} 2 | \alias{ggmGenerator} 3 | %- Also NEED an '\alias' for EACH other topic documented here. 4 | \title{ 5 | Generates a function that simulates data from the Gaussian graphical model (GGM) 6 | } 7 | \description{ 8 | Generates data given a partial correlation network. Data can be made ordinal by using a threshold model with equally spaced thresholds. 9 | } 10 | \usage{ 11 | ggmGenerator(ordinal = FALSE, nLevels = 4, skewFactor = 1, type = 12 | c("uniform", "random"), missing = 0) 13 | } 14 | %- maybe also 'usage' for other objects documented here. 15 | \arguments{ 16 | \item{ordinal}{ 17 | Logical, should ordinal data be generated? 18 | } 19 | \item{nLevels}{ 20 | Number of levels used in ordinal data. 21 | } 22 | \item{skewFactor}{ 23 | How skewed should ordinal data be? 1 indicates uniform data and higher values increase skewedness. 24 | } 25 | \item{type}{ 26 | Should thresholds for ordinal data be sampled at random or determined uniformly? 
27 | } 28 | \item{missing}{ 29 | Proportion of data that should be simulated to be missing. 30 | } 31 | } 32 | \author{ 33 | Sacha Epskamp 34 | } -------------------------------------------------------------------------------- /man/multiverse.Rd: -------------------------------------------------------------------------------- 1 | \name{multiverse} 2 | \alias{multiverse} 3 | %- Also NEED an '\alias' for EACH other topic documented here. 4 | \title{ 5 | Multiverse plot of bootnet results 6 | } 7 | \description{ 8 | This function makes a 'multiverse' plot of bootstrap results. Every row indicates an edge and every column a bootstrap; colors are in line of the edge strength as drawn with \code{plot.bootnetResult}. 9 | } 10 | \usage{ 11 | multiverse(x, labels = FALSE) 12 | } 13 | %- maybe also 'usage' for other objects documented here. 14 | \arguments{ 15 | \item{x}{ 16 | Results from \code{\link{bootnet}}} 17 | \item{labels}{ 18 | Logical, should labels be printed next to the plot? 19 | } 20 | } 21 | 22 | \author{ 23 | Sacha Epskamp 24 | } 25 | 26 | -------------------------------------------------------------------------------- /man/netSimulator.Rd: -------------------------------------------------------------------------------- 1 | \name{netSimulator} 2 | \alias{netSimulator} 3 | \alias{replicationSimulator} 4 | %- Also NEED an '\alias' for EACH other topic documented here. 5 | \title{ 6 | Network Estimation Performance 7 | } 8 | \description{ 9 | This function can be used to run a simulation study on the performance of network estimation by varying sample size or any argument used as input to \code{\link{estimateNetwork}}. The purpose of this function is to provide a way to assess the required sample size given a network structure, as well as to easily perform simulation studies. By default, the function uses \code{\link{genGGM}} to simulate a chain graph or small-world network. See details for more information. 
The \code{replicationSimulator} function instead assesses how well a network based on a second independent sample would replicate the network based on the first independent sample. 10 | } 11 | \usage{ 12 | netSimulator( 13 | input = genGGM(Nvar = 10), 14 | nCases = c(50, 100, 250, 500, 1000, 2500), 15 | nReps = 100, 16 | nCores = 1, 17 | default, 18 | dataGenerator, 19 | ..., 20 | moreArgs = list(), 21 | moreOutput = list()) 22 | 23 | replicationSimulator( 24 | input = genGGM(Nvar = 10), 25 | nCases = c(50, 100, 250, 500, 1000, 2500), 26 | nReps = 100, 27 | nCores = 1, 28 | default, 29 | dataGenerator, 30 | ..., 31 | moreArgs = list()) 32 | } 33 | %- maybe also 'usage' for other objects documented here. 34 | \arguments{ 35 | \item{input}{ 36 | Either a weights matrix, a list containing elements \code{graph} (encoding the weights matrix) and \code{intercepts} (encoding the intercepts), or a function generating such objects. By default, \code{\link{genGGM}} is used to generate a Gaussian graphical model. However, it is reccomended to replace this with a prior expected graph structure. 37 | } 38 | \item{nCases}{ 39 | The sample sizes to test for. 40 | } 41 | \item{nReps}{ 42 | Number of repetitions per sampling level. 43 | } 44 | \item{nCores}{ 45 | Number of cores to use. Set to more than 1 to use parallel computing. 46 | } 47 | \item{default}{ 48 | Default set used (see \code{\link{estimateNetwork}}). In most cases, this will set \code{dataGenerator} to the relevant generator. 49 | } 50 | \item{dataGenerator}{ 51 | A function that generates data. The first argument must be the sample size, the second argument must be the output of \code{input}. Can often be ignored if \code{default} is set. 52 | } 53 | \item{moreArgs}{A named list of arguments to be used when estimating the network, but which should not be interpreted as different conditions. 
Use this argument to assign arguments that require vectors.} 54 | \item{moreOutput}{List with functions that take the estimated weights matrix as first argument and the true weights matrix as second argument to produce some output.} 55 | \item{\dots}{ 56 | Arguments used by \code{\link{estimateNetwork}} to estimate the network structure. Providing a vector for any argument will simulate under each value. This way, any argument in \code{\link{estimateNetwork}} can be used in a simulation study. 57 | } 58 | } 59 | \details{ 60 | *any* argument to \code{\link{estimateNetwork}} can be used in a simulation study, with a vector (e.g., \code{rule = c("AND","OR")}) specifying that both conditions are tested. Adding too many conditions can quickly make any simulation study intractable, so only vary some arguments! The \code{dataGenerator} argument can be any function that generates data. Currently, only \code{\link{ggmGenerator}} and \code{\link{IsingGenerator}} are implemented in bootnet itself, which generate data given a Gaussian graphical model or Ising model, respectively.
61 | } 62 | 63 | \author{ 64 | Sacha Epskamp 65 | } 66 | 67 | \examples{ 68 | # 5-node GGM chain graph: 69 | trueNetwork <- genGGM(5) 70 | 71 | # Simulate: 72 | Res <- netSimulator(trueNetwork, nReps = 10) 73 | 74 | # Results: 75 | Res 76 | 77 | 78 | \donttest{ 79 | # Plot: 80 | plot(Res) 81 | 82 | # BFI example: 83 | # Load data: 84 | library("psychTools") 85 | data(bfi) 86 | bfiData <- bfi[,1:25] 87 | 88 | # Estimate a network structure, with parameters refitted without LASSO regularization: 89 | library("qgraph") 90 | Network <- EBICglasso(cor_auto(bfiData), nrow(bfiData), refit = TRUE) 91 | 92 | # Simulate 100 repititions in 8 cores under different sampling levels: 93 | Sim1 <- netSimulator(Network, 94 | default = "EBICglasso", 95 | nCases = c(100,250,500), 96 | nReps = 100, 97 | nCores = 8) 98 | 99 | # Table of results: 100 | Sim1 101 | 102 | # Plot results: 103 | plot(Sim1) 104 | 105 | # Compare different default set at two sampling levels: 106 | Sim2_EBICglasso <- netSimulator(Network, 107 | default = c("EBICglasso"), 108 | nCases = c(100,250,500), 109 | nReps = 100, 110 | nCores = 8) 111 | 112 | Sim2_pcor <- netSimulator(Network, 113 | default = c("pcor"), 114 | nCases = c(100,250,500), 115 | nReps = 100, 116 | nCores = 8) 117 | 118 | Sim2_huge <- netSimulator(Network, 119 | default = c("huge"), 120 | nCases = c(100,250,500), 121 | nReps = 100, 122 | nCores = 8) 123 | 124 | Sim2 <- rbind(Sim2_EBICglasso, Sim2_pcor, Sim2_huge) 125 | 126 | # Print results: 127 | Sim2 128 | 129 | # Plot results: 130 | plot(Sim2, xfacet = "default", yvar = "correlation") 131 | 132 | # Difference using polychoric or pearson correlations in ordinal data: 133 | Sim3 <- netSimulator(Network, 134 | dataGenerator = ggmGenerator(ordinal = TRUE, nLevels = 4), 135 | default = "EBICglasso", 136 | corMethod = c("cor","cor_auto"), 137 | nCases = c(100,250, 500), 138 | nReps = 100, 139 | nCores = 8) 140 | 141 | # Print results: 142 | Sim3 143 | 144 | # Plot results: 145 | plot(Sim3, color = 
"corMethod") 146 | 147 | # Ising model: 148 | trueNetwork <- read.csv('http://sachaepskamp.com/files/weiadj.csv')[,-1] 149 | trueNetwork <- as.matrix(trueNetwork) 150 | Symptoms <- rownames(trueNetwork) <- colnames(trueNetwork) 151 | Thresholds <- read.csv('http://sachaepskamp.com/files/thr.csv')[,-1] 152 | 153 | # Create an input list (intercepts now needed) 154 | input <- list(graph=trueNetwork,intercepts=Thresholds) 155 | 156 | # Simulate under different sampling levels: 157 | Sim4 <- netSimulator( 158 | input = input, 159 | default = "IsingFit", 160 | nCases = c(250,500,1000), 161 | nReps = 100, 162 | nCores = 8) 163 | 164 | # Results: 165 | Sim4 166 | 167 | # Plot: 168 | plot(Sim4) 169 | 170 | # Compare AND and OR rule: 171 | Sim5 <- netSimulator( 172 | input = input, 173 | default = "IsingFit", 174 | nCases = c(250,500,1000), 175 | rule = c("AND","OR"), 176 | nReps = 100, 177 | nCores = 8) 178 | 179 | # Print: 180 | Sim5 181 | 182 | # Plot: 183 | plot(Sim5, yfacet = "rule") 184 | 185 | } 186 | } 187 | -------------------------------------------------------------------------------- /man/null.Rd: -------------------------------------------------------------------------------- 1 | \name{null} 2 | \alias{null} 3 | %- Also NEED an '\alias' for EACH other topic documented here. 4 | \title{ 5 | Returns NULL 6 | } 7 | \description{ 8 | This function simply returns \code{NULL}. 9 | } 10 | \usage{ 11 | null(...) 12 | } 13 | %- maybe also 'usage' for other objects documented here. 14 | \arguments{ 15 | \item{\dots}{ 16 | Anything 17 | } 18 | } 19 | \author{ 20 | Sacha Epskamp 21 | } 22 | 23 | \examples{ 24 | null("Not NULL") 25 | } 26 | -------------------------------------------------------------------------------- /man/plot.bootnet.Rd: -------------------------------------------------------------------------------- 1 | \name{plot.bootnet} 2 | \alias{plot.bootnet} 3 | %- Also NEED an '\alias' for EACH other topic documented here. 
4 | \title{ 5 | Plots bootnet results 6 | } 7 | \description{ 8 | This function can be used to plot bootnet results by plotting all bootstrapped statistics as lines or by plotting confidence intervals. 9 | } 10 | \usage{ 11 | \method{plot}{bootnet}(x, statistics, plot, graph, CIstyle = c( 12 | "quantiles", "SE"), rank = FALSE, sampleColor = 13 | "darkred", samplelwd = 1, meanColor = "black", meanlwd 14 | = 0.5, bootColor = "black", bootAlpha = 0.01, bootlwd 15 | = 0.9, areaAlpha = 0.1, order = c("id", "sample", 16 | "mean"), decreasing = TRUE, perNode = FALSE, 17 | legendNcol = 2, labels = TRUE, legend = TRUE, 18 | subsetRange = c(100, 0), area = !perNode, alpha = 19 | 0.05, onlyNonZero = FALSE, differenceShowValue, 20 | differenceEdgeColor = TRUE, verbose = TRUE, panels = 21 | TRUE, split0 = FALSE, prop0 = ifelse(split0, TRUE, 22 | FALSE), prop0_cex = 1, prop0_alpha = 0.8, 23 | prop0_minAlpha = 0.25, subset, ...) 24 | } 25 | %- maybe also 'usage' for other objects documented here. 26 | \arguments{ 27 | \item{x}{ 28 | A \code{bootnet} object 29 | } 30 | \item{statistics}{ 31 | The types of statistics to plot. Defaults to \code{"edge"} for regular bootstrap and \code{c("strength", "outStrength", "inStrength")} for node and person drop bootstrap. Use \code{"all"} to obtain all pairwise statistics tested for regular bootstraps and all nodewise statistics tested for person and node drop bootstraps.} 32 | \item{plot}{ 33 | Character string indicating what to plot. Can be \code{"area"} to produce a graph with the area indicating the confidence region, or \code{"difference"} producing a plot showing significant differences. Other options are \code{"line"} and \code{"interval"}, which are currently unstable and not recommended to use. 34 | } 35 | \item{graph}{If multiple graphs are estimated, which graph should be plotted?
Currently used for \code{default = "graphicalVAR"} to plot a temporal network using \code{graph = "temporal"} or a contemporaneous network using \code{graph = "contemporaneous"}} 36 | \item{CIstyle}{ 37 | Style of CIs to construct. \code{"SE"} shows the sample statistic plus and minus two times the standard deviation of bootstraps, and \code{"quantiles"} the area between the 2.5th and 97.5th quantile. Defaults to \code{"quantiles"}. 38 | } 39 | \item{rank}{ 40 | Logical, should plots show rank of statistics instead of statistics? 41 | } 42 | \item{sampleColor}{ 43 | Color of the original sample line 44 | } 45 | \item{samplelwd}{ 46 | Line width of the original sample line 47 | } 48 | \item{bootColor}{ 49 | Color of the bootstrap lines 50 | } 51 | \item{bootAlpha}{ 52 | Alpha of the bootstrap lines 53 | } 54 | \item{bootlwd}{ 55 | Line width of the bootstrap lines 56 | } 57 | \item{areaAlpha}{ 58 | Alpha of the area 59 | } 60 | \item{order}{ 61 | String indicating how to order nodes. \code{"id"} will order nodes based on their name, \code{"mean"} will order nodes based on the average bootstrapped value of the first statistic in \code{statistics}, and \code{"sample"} will order the nodes as done in \code{"mean"} but orders ties based on their sample value. 62 | } 63 | \item{decreasing}{ 64 | Logical indicating if the ordering is decreasing or increasing. 65 | } 66 | \item{perNode}{ 67 | Logical, should centrality estimates per node be plotted instead of correlation with original parameter. Only used in node and person drop bootstrap. 68 | } 69 | \item{legendNcol}{ 70 | Number of columns in the legend if \code{perNode = TRUE}. 71 | } 72 | \item{labels}{ 73 | Logical, should labels be plotted? 74 | } 75 | \item{legend}{ 76 | Logical, should the legend be plotted? 77 | } 78 | \item{subsetRange}{ 79 | Range in percentages of the x-axis in node and person drop plots. 80 | } 81 | \item{area}{ 82 | Logical, should the confidence area be plotted? 
83 | } 84 | \item{alpha}{ 85 | Significance level used in \code{plot = "difference"}. 86 | } 87 | \item{onlyNonZero}{Logical used when \code{plot = "difference", statistics = "edge"}, should only edges be included that were nonzero in the estimated network structure?} 88 | \item{differenceShowValue}{Logical used when \code{plot = "difference"}. Should values be shown in the diagonal of the difference plot? 89 | } 90 | \item{differenceEdgeColor}{Logical used when \code{plot = "difference", statistics = "edge"}. Should diagonal blocks be colored according to default edge colors? 91 | } 92 | \item{verbose}{Should expected alpha be printed?} 93 | \item{panels}{Logical, should panel titles be printed?} 94 | \item{meanColor}{Color of the bootstrap means.} 95 | \item{meanlwd}{Line width of the bootstrap means} 96 | \item{split0}{Logical. When set to TRUE, the displayed intervals are based on occasions when the parameter was not estimated to be zero, and an extra box is added indicating the number of times a parameter is estimated to be zero.} 97 | \item{prop0}{Logical, should boxes indicating the proportion of times parameters were estimated to be zero be added to the plot?} 98 | \item{prop0_cex}{Size of the boxes indicating number of times a parameter was set to zero.} 99 | \item{prop0_alpha}{Transparency of the boxes indicating number of times a parameter was set to zero.} 100 | \item{prop0_minAlpha}{Minimal transparency of the *lines* of plotted intervals as the proportion of times an edge was not included goes to 0.} 101 | \item{subset}{Vector indicating labels of nodes to include in the plot. This can be used to show, for example, only edges related to one particular node.} 102 | \item{\dots}{Not used.} 103 | } 104 | 105 | \value{ 106 | A \code{ggplot2} object.
107 | } 108 | \author{ 109 | Sacha Epskamp 110 | } 111 | -------------------------------------------------------------------------------- /man/plot.netSimulator.Rd: -------------------------------------------------------------------------------- 1 | \name{netSimulator and replicationSimulator methods} 2 | \alias{plot.netSimulator} 3 | \alias{print.netSimulator} 4 | \alias{summary.netSimulator} 5 | \alias{plot.replicationSimulator} 6 | \alias{print.replicationSimulator} 7 | \alias{summary.replicationSimulator} 8 | %- Also NEED an '\alias' for EACH other topic documented here. 9 | \title{ 10 | netSimulator S3 methods 11 | } 12 | \description{ 13 | Plot, print and summary methods for \code{\link{netSimulator}} output. 14 | } 15 | \usage{ 16 | \method{plot}{netSimulator}(x, xvar = "factor(nCases)", yvar = c("sensitivity", 17 | "specificity", "correlation"), xfacet = "measure", 18 | yfacet = ".", color = NULL, ylim = c(0, 1), print = 19 | TRUE, xlab = "Number of cases", ylab, outlier.size = 20 | 0.5, boxplot.lwd = 0.5, style = c("fancy", "basic"), 21 | ...) 22 | \method{print}{netSimulator}(x, digits = 2, ...) 23 | \method{summary}{netSimulator}(object, digits = 2, ...) 24 | 25 | \method{plot}{replicationSimulator}(x, yvar = c("correlation", "jaccard", 26 | "replicatedEdges", "replicatedZeroes"), ...) 27 | \method{print}{replicationSimulator}(x, digits = 2, ...) 28 | \method{summary}{replicationSimulator}(object, digits = 2, ...) 29 | } 30 | %- maybe also 'usage' for other objects documented here. 31 | \arguments{ 32 | \item{x}{ 33 | Output of \code{\link{netSimulator}}. 34 | } 35 | \item{object}{ 36 | Output of \code{\link{netSimulator}}. 37 | } 38 | \item{xvar}{ 39 | String indicating the variable to be used on the x-axis. 40 | } 41 | \item{yvar}{ 42 | String vector indicating the variable(s) to be used on the y-axis. 43 | } 44 | \item{xfacet}{ 45 | String indicating the variable to be used on the horizontal facets (or \code{"."} to omit). 
46 | } 47 | \item{yfacet}{ 48 | String indicating the variable to be used on the vertical facets (or \code{"."} to omit). 49 | } 50 | \item{color}{ 51 | String indicating the variable to be used in coloring boxplots. 52 | } 53 | \item{ylim}{ 54 | Y-axis limits. 55 | } 56 | \item{print}{ 57 | Logical, should the plot be printed? This helps printing the plots to PDF files. 58 | } 59 | \item{digits}{ 60 | Number of digits to be used in print and summary method. 61 | } 62 | \item{xlab}{ 63 | X-axis label 64 | } 65 | \item{ylab}{ 66 | Y-axis label. Defaults to combining the values in \code{yvar}. Is hidden when \code{xfacet = "measure"}, as then it is clear what the y-axis represents from the facet labels. 67 | } 68 | \item{outlier.size}{Size of the outliers as plotted in boxplots.} 69 | \item{boxplot.lwd}{Line width of the boxplots} 70 | \item{style}{\code{"fancy"} for a style including several aesthetic enhancements, and \code{"basic"} for an as-simple-as-possible style.} 71 | \item{\dots}{ 72 | Arguments sent to \code{"plot.netSimulator"} from \code{"plot.replicationSimulator"} 73 | } 74 | } 75 | \author{ 76 | Sacha Epskamp 77 | } -------------------------------------------------------------------------------- /man/plotBootnetResult.Rd: -------------------------------------------------------------------------------- 1 | \name{plot.bootnetResult} 2 | \alias{plot.bootnetResult} 3 | \title{ 4 | Plot method for bootnetResult objects 5 | } 6 | \description{ 7 | Plots the graph using the qgraph package and the \code{\link[qgraph]{qgraph}} function. Defined as \code{qgraph::qgraph(x[['graph']],labels=x[['labels']],...)} 8 | } 9 | \usage{ 10 | \method{plot}{bootnetResult}(x, graph, weighted, signed, directed, labels, layout = 11 | "spring", parallelEdge = TRUE, cut = 0, theme = 12 | "colorblind", bootIncludeOverwrite = TRUE, ...) 13 | } 14 | \arguments{ 15 | \item{x}{ 16 | A \code{bootnetResult} object 17 | } 18 | \item{graph}{Numeric or string indicating which graph to plot.
Only needed when multiple graphs are estimated. For example, when using \code{default = "graphicalVAR"}, \code{graph = "temporal"} plots the temporal network and \code{graph = "contemporaneous"} plots the contemporaneous network.} 19 | \item{weighted}{Logical, should the analyzed network be weighted?} 20 | \item{signed}{Logical, should the analyzed network be signed?} 21 | \item{directed}{Logical, is the analyzed network directed? Usually does not have to be set and is detected automatically.} 22 | \item{labels}{Labels of the nodes. Defaults to the column names of the data if missing.} 23 | \item{layout}{Placement of the nodes. See \code{\link[qgraph]{qgraph}}. Always defaults to \code{"spring"}.} 24 | \item{parallelEdge}{Should edges in directed networks be plotted parallel? See \code{\link[qgraph]{qgraph}}. Defaults to \code{TRUE} instead of \code{FALSE} (as in qgraph).} 25 | \item{cut}{Should scaling in width and saturation of edges be split? See \code{\link[qgraph]{qgraph}}. Defaults to \code{0} to disable cut (qgraph chooses a cutoff with at least 20 nodes).} 26 | \item{theme}{Theme of the edge and node colors. See \code{\link[qgraph]{qgraph}}. Defaults to \code{"colorblind"} rather than the default used in qgraph (\code{"classic"}).} 27 | \item{bootIncludeOverwrite}{ 28 | Logical. If \code{TRUE}, several plot defaults are overwritten when the input is the result of \code{\link{bootInclude}} (e.g., edge colors are set to black and white). 29 | } 30 | \item{\dots}{ 31 | Arguments sent to \code{\link[qgraph]{qgraph}}. 
32 | } 33 | } 34 | \author{ 35 | Sacha Epskamp 36 | } -------------------------------------------------------------------------------- /man/print.bootnet.Rd: -------------------------------------------------------------------------------- 1 | \name{print.bootnet} 2 | \alias{print.bootnet} 3 | \alias{print.bootnetResult} 4 | \alias{summary.bootnetResult} 5 | \title{ 6 | Print method for bootnet and bootnetResult objects 7 | } 8 | \description{ 9 | Prints a short overview of the results of \code{\link{bootnet}} 10 | } 11 | \usage{ 12 | \method{print}{bootnet}(x, ...) 13 | \method{print}{bootnetResult}(x, ...) 14 | \method{summary}{bootnetResult}(object, ...) 15 | } 16 | \arguments{ 17 | \item{x}{ 18 | A \code{bootnet} or \code{bootnetResult} object 19 | } 20 | \item{object}{ 21 | A \code{bootnetResult} object 22 | } 23 | \item{\dots}{ 24 | Not used. 25 | } 26 | } 27 | \author{ 28 | Sacha Epskamp 29 | } -------------------------------------------------------------------------------- /man/summary.bootnet.Rd: -------------------------------------------------------------------------------- 1 | \name{summary.bootnet} 2 | \alias{summary.bootnet} 3 | \title{ 4 | Summarize bootnet results 5 | } 6 | \description{ 7 | Creates a data frame (wrapped as \code{\link[dplyr]{tbl_df}}) containing summarized results of the bootstraps. 8 | } 9 | \usage{ 10 | \method{summary}{bootnet}(object, graph, statistics = c("edge", "intercept", 11 | "strength", "closeness", "betweenness", "distance"), 12 | perNode = FALSE, rank = FALSE, tol = 13 | sqrt(.Machine$double.eps), ...) 14 | } 15 | \arguments{ 16 | \item{object}{ 17 | A \code{bootnet} object 18 | } 19 | \item{graph}{Numeric or string indicating which graph to summarize. Only needed when multiple graphs are estimated. 
For example, when using \code{default = "graphicalVAR"}, \code{graph = "temporal"} plots the temporal network and \code{graph = "contemporaneous"} plots the contemporaneous network.} 20 | \item{statistics}{ 21 | The types of statistics to include in the summary table 22 | } 23 | \item{perNode}{ 24 | Logical, should centrality estimates per node be plotted instead of correlation with original parameter. Only used in node and person drop bootstrap. 25 | } 26 | \item{rank}{ 27 | Logical, should plots show rank of statistics instead of statistics? 28 | } 29 | \item{tol}{Tolerance level for assuming an edge is set to zero.} 30 | \item{\dots}{Not used.} 31 | } 32 | \value{ 33 | A \code{tbl_df} (data frame) containing summarized statistics. 34 | } 35 | \author{ 36 | Sacha Epskamp 37 | } -------------------------------------------------------------------------------- /man/transformation.Rd: -------------------------------------------------------------------------------- 1 | \name{transformation} 2 | \alias{quantile_transformation} 3 | \alias{rank_transformation} 4 | %- Also NEED an '\alias' for EACH other topic documented here. 5 | \title{ 6 | Data transformation functions 7 | } 8 | \description{ 9 | Functions to transform data 10 | } 11 | \usage{ 12 | quantile_transformation(x) 13 | rank_transformation(x, ties.method = c("average", "first", 14 | "last", "random", "max", "min")) 15 | } 16 | %- maybe also 'usage' for other objects documented here. 17 | \arguments{ 18 | \item{x}{ 19 | A dataset 20 | } 21 | \item{ties.method}{See \code{rank}.} 22 | } 23 | \author{ 24 | Sacha Epskamp 25 | } 26 | --------------------------------------------------------------------------------