├── R ├── userfriendlyscience.R ├── formatPvalue.R ├── vecTxt.R ├── convert.d.to.eer.R ├── setTabCapNumbering.R ├── trim.R ├── repeatStr.R ├── massConvertToNumeric.R ├── convert.threshold.to.er.R ├── safeRequire.R ├── samplingDistribution.R ├── userfriendlyscienceBasics.R ├── averageFishersZs.R ├── areColors.R ├── ggBarChart.R ├── findShortestInterval.R ├── ad.test_from_nortest.R ├── averagePearsonRs.R ├── diamondCoordinates.R ├── iqrOutlier.R ├── makeScales.R ├── invertItem.R ├── rawDataDiamondLayer.R ├── extractVarName.R ├── frequencies.R ├── sdConfInt.R ├── convertToNumeric.R ├── genlogFunction.R ├── prob.randomizationSuccess.R ├── detStructAddVarLabels.R ├── RsqDist.R ├── fullFact.R ├── ggEasyRidge.R ├── meanConfInt.R ├── validSums.R ├── validMeans.R ├── formatCI.R ├── createSigma.R ├── escapeRegex_(from_Hmisc).R ├── prevalencePower.R ├── sharedSubString.R ├── detStructAddVarNames.R ├── pwr.randomizationSuccess.R ├── ggpie.R ├── convert.d.to.nnc.R ├── faConfInt.R ├── removeExceptionalValues.R ├── erDataSeq.R ├── detStructComputeScales.R ├── varsToDiamondPlotDf.R ├── curfnfinder.R ├── invertItems.R ├── knitFig.R ├── examineBy.R ├── multiVarFreq.R ├── exceptionalScore.R ├── associationsToDiamondPlotDf.R ├── OmegasqDist.R └── adTest_adapted_from_Fbasics.R ├── data ├── Singh.rda ├── testRetestSimData.rda └── therapyMonitorData.rda ├── .Rbuildignore ├── docs ├── reference │ ├── nnc-1.png │ ├── nnc-2.png │ ├── CIBER-1.png │ ├── ggNNC-1.png │ ├── ggNNC-2.png │ ├── ggNNC-3.png │ ├── ggNNC-4.png │ ├── ggNNC-5.png │ ├── ggPie-1.png │ ├── ggqq-1.png │ ├── fanova-1.png │ ├── fanova-2.png │ ├── fanova-3.png │ ├── fanova-4.png │ ├── genlog-1.png │ ├── logRegr-1.png │ ├── logRegr-2.png │ ├── ggBarChart-1.png │ ├── ggBoxplot-1.png │ ├── ggBoxplot-2.png │ ├── powerHist-1.png │ ├── cohensdDist-1.png │ ├── cohensdDist-2.png │ ├── diamondPlot-1.png │ ├── diamondPlot-2.png │ ├── diamondPlot-3.png │ ├── diamondPlot-4.png │ ├── didacticPlot-1.png │ ├── didacticPlot-2.png │ ├── didacticPlot-3.png │ ├── ggEasyPlots-1.png │ ├── ggEasyPlots-2.png │ ├── ggEasyPlots-3.png │ ├── ggEasyPlots-4.png │ ├── piecewiseRegr-1.png │ ├── piecewiseRegr-2.png │ ├── scatterPlot-1.png │ ├── scatterPlot-2.png │ ├── facComAnalysis-1.png │ ├── facComAnalysis-2.png │ ├── genlogFunction-1.png │ ├── meanDiff.multi-1.png │ ├── meanDiff.multi-2.png │ ├── regrInfluential-1.png │ ├── therapyMonitor-1.png │ ├── biAxisDiamondPlot-1.png │ ├── ggConfidenceCurve-1.png │ ├── ggConfidenceCurve-2.png │ ├── meansDiamondPlot-1.png │ ├── meansDiamondPlot-2.png │ ├── meansDiamondPlot-3.png │ ├── testRetestSimData-1.png │ ├── meanSDtoDiamondPlot-1.png │ ├── meanSDtoDiamondPlot-2.png │ ├── meanSDtoDiamondPlot-3.png │ ├── meanSDtoDiamondPlot-4.png │ ├── meanSDtoDiamondPlot-5.png │ ├── meanSDtoDiamondPlot-6.png │ ├── associationsDiamondPlot-1.png │ ├── associationsDiamondPlot-2.png │ ├── asymmetricalScatterMatrix-1.png │ ├── asymmetricalScatterMatrix-2.png │ ├── processLimeSurveyDropouts-1.png │ ├── processLimeSurveyDropouts-2.png │ ├── meansComparisonDiamondPlot-1.png │ ├── meansComparisonDiamondPlot-2.png │ ├── meansComparisonDiamondPlot-3.png │ ├── userfriendlyscience-package-1.png │ ├── userfriendlyscience-package-2.png │ ├── userfriendlyscience-package-3.png │ ├── userfriendlyscience-package-4.png │ ├── userfriendlyscience-package-5.png │ └── userfriendlyscience-package-6.png ├── pkgdown.yml ├── link.svg └── jquery.sticky-kit.min.js ├── packrat └── packrat.opts ├── .gitattributes ├── man ├── findShortestInterval.Rd ├── is.nr.Rd ├── areColors.Rd ├── 
extractVarName.Rd ├── isTrue.Rd ├── averageFishersZs.Rd ├── iqrOutlier.Rd ├── sharedSubString.Rd ├── ggBarChart.Rd ├── ggpie.Rd ├── invertItems.Rd ├── averagePearsonRs.Rd ├── fullFact.Rd ├── processLimeSurveyDropouts.Rd ├── createSigma.Rd ├── multiVarFreq.Rd ├── oddsratio.Rd ├── genlogFunction.Rd ├── regrInfluential.Rd ├── reliability.Rd ├── formatCI.Rd ├── escapeRegex.Rd ├── convert.d.to.nnc.Rd ├── prevalencePower.Rd ├── confIntOmegaSq.Rd ├── multiResponse.Rd ├── confIntProp.Rd ├── curfnfinder.Rd ├── faConfInt.Rd ├── knitFig.Rd ├── therapyMonitorData.Rd ├── removeExceptionalValues.Rd ├── pwr.omegasq.Rd ├── sort.associationMatrix.Rd ├── validComputations.Rd ├── testRetestSimData.Rd ├── ggBoxplot.Rd ├── scatterMatrix.Rd ├── paginatedAsymmetricalScatterMatrix.Rd ├── didacticPlot.Rd ├── freq.Rd ├── Singh.Rd ├── pwr.confIntR.Rd ├── confIntV.Rd ├── omegaSqDist.Rd ├── setCaptionNumbering.Rd ├── exceptionalScore.Rd ├── exceptionalScores.Rd ├── showPearsonPower.Rd ├── examine.Rd ├── itemInspection.Rd ├── descr.Rd ├── setFigCapNumbering.Rd ├── confIntR.Rd └── RsqDist.Rd ├── .gitignore ├── inst └── CITATION ├── DESCRIPTION └── .travis.yml /R/userfriendlyscience.R: -------------------------------------------------------------------------------- 1 | -------------------------------------------------------------------------------- /R/formatPvalue.R: -------------------------------------------------------------------------------- 1 | formatPvalue <- ufs::formatPvalue; 2 | -------------------------------------------------------------------------------- /R/vecTxt.R: -------------------------------------------------------------------------------- 1 | vecTxtQ <- ufs::vecTxtQ; 2 | 3 | vecTxt <- ufs::vecTxt; 4 | -------------------------------------------------------------------------------- /data/Singh.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/data/Singh.rda -------------------------------------------------------------------------------- /.Rbuildignore: -------------------------------------------------------------------------------- 1 | ^.*\.Rproj$ 2 | ^\.Rproj\.user$ 3 | ^packrat/ 4 | ^\.Rprofile$ 5 | ^\.travis\.yml$ 6 | ^docs/ 7 | -------------------------------------------------------------------------------- /docs/reference/nnc-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/nnc-1.png -------------------------------------------------------------------------------- /docs/reference/nnc-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/nnc-2.png -------------------------------------------------------------------------------- /data/testRetestSimData.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/data/testRetestSimData.rda -------------------------------------------------------------------------------- /docs/reference/CIBER-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/CIBER-1.png -------------------------------------------------------------------------------- /docs/reference/ggNNC-1.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/ggNNC-1.png -------------------------------------------------------------------------------- /docs/reference/ggNNC-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/ggNNC-2.png -------------------------------------------------------------------------------- /docs/reference/ggNNC-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/ggNNC-3.png -------------------------------------------------------------------------------- /docs/reference/ggNNC-4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/ggNNC-4.png -------------------------------------------------------------------------------- /docs/reference/ggNNC-5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/ggNNC-5.png -------------------------------------------------------------------------------- /docs/reference/ggPie-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/ggPie-1.png -------------------------------------------------------------------------------- /docs/reference/ggqq-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/ggqq-1.png -------------------------------------------------------------------------------- /data/therapyMonitorData.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/data/therapyMonitorData.rda -------------------------------------------------------------------------------- /docs/reference/fanova-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/fanova-1.png -------------------------------------------------------------------------------- /docs/reference/fanova-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/fanova-2.png -------------------------------------------------------------------------------- /docs/reference/fanova-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/fanova-3.png -------------------------------------------------------------------------------- /docs/reference/fanova-4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/fanova-4.png -------------------------------------------------------------------------------- /docs/reference/genlog-1.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/genlog-1.png -------------------------------------------------------------------------------- /docs/reference/logRegr-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/logRegr-1.png -------------------------------------------------------------------------------- /docs/reference/logRegr-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/logRegr-2.png -------------------------------------------------------------------------------- /docs/reference/ggBarChart-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/ggBarChart-1.png -------------------------------------------------------------------------------- /docs/reference/ggBoxplot-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/ggBoxplot-1.png -------------------------------------------------------------------------------- /docs/reference/ggBoxplot-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/ggBoxplot-2.png -------------------------------------------------------------------------------- /docs/reference/powerHist-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/powerHist-1.png -------------------------------------------------------------------------------- /docs/reference/cohensdDist-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/cohensdDist-1.png -------------------------------------------------------------------------------- /docs/reference/cohensdDist-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/cohensdDist-2.png -------------------------------------------------------------------------------- /docs/reference/diamondPlot-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/diamondPlot-1.png -------------------------------------------------------------------------------- /docs/reference/diamondPlot-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/diamondPlot-2.png -------------------------------------------------------------------------------- /docs/reference/diamondPlot-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/diamondPlot-3.png -------------------------------------------------------------------------------- /docs/reference/diamondPlot-4.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/diamondPlot-4.png -------------------------------------------------------------------------------- /docs/reference/didacticPlot-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/didacticPlot-1.png -------------------------------------------------------------------------------- /docs/reference/didacticPlot-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/didacticPlot-2.png -------------------------------------------------------------------------------- /docs/reference/didacticPlot-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/didacticPlot-3.png -------------------------------------------------------------------------------- /docs/reference/ggEasyPlots-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/ggEasyPlots-1.png -------------------------------------------------------------------------------- /docs/reference/ggEasyPlots-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/ggEasyPlots-2.png -------------------------------------------------------------------------------- /docs/reference/ggEasyPlots-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/ggEasyPlots-3.png -------------------------------------------------------------------------------- /docs/reference/ggEasyPlots-4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/ggEasyPlots-4.png -------------------------------------------------------------------------------- /docs/reference/piecewiseRegr-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/piecewiseRegr-1.png -------------------------------------------------------------------------------- /docs/reference/piecewiseRegr-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/piecewiseRegr-2.png -------------------------------------------------------------------------------- /docs/reference/scatterPlot-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/scatterPlot-1.png -------------------------------------------------------------------------------- /docs/reference/scatterPlot-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/scatterPlot-2.png -------------------------------------------------------------------------------- /docs/reference/facComAnalysis-1.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/facComAnalysis-1.png -------------------------------------------------------------------------------- /docs/reference/facComAnalysis-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/facComAnalysis-2.png -------------------------------------------------------------------------------- /docs/reference/genlogFunction-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/genlogFunction-1.png -------------------------------------------------------------------------------- /docs/reference/meanDiff.multi-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/meanDiff.multi-1.png -------------------------------------------------------------------------------- /docs/reference/meanDiff.multi-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/meanDiff.multi-2.png -------------------------------------------------------------------------------- /docs/reference/regrInfluential-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/regrInfluential-1.png -------------------------------------------------------------------------------- /docs/reference/therapyMonitor-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/therapyMonitor-1.png -------------------------------------------------------------------------------- /docs/pkgdown.yml: -------------------------------------------------------------------------------- 1 | pandoc: 1.19.2.1 2 | pkgdown: 0.1.0.9000 3 | pkgdown_sha: b074c575119c35459f544f15e3dae3a6d617d9bb 4 | articles: [] 5 | 6 | -------------------------------------------------------------------------------- /docs/reference/biAxisDiamondPlot-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/biAxisDiamondPlot-1.png -------------------------------------------------------------------------------- /docs/reference/ggConfidenceCurve-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/ggConfidenceCurve-1.png -------------------------------------------------------------------------------- /docs/reference/ggConfidenceCurve-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/ggConfidenceCurve-2.png -------------------------------------------------------------------------------- /docs/reference/meansDiamondPlot-1.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/meansDiamondPlot-1.png -------------------------------------------------------------------------------- /docs/reference/meansDiamondPlot-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/meansDiamondPlot-2.png -------------------------------------------------------------------------------- /docs/reference/meansDiamondPlot-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/meansDiamondPlot-3.png -------------------------------------------------------------------------------- /docs/reference/testRetestSimData-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/testRetestSimData-1.png -------------------------------------------------------------------------------- /docs/reference/meanSDtoDiamondPlot-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/meanSDtoDiamondPlot-1.png -------------------------------------------------------------------------------- /docs/reference/meanSDtoDiamondPlot-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/meanSDtoDiamondPlot-2.png -------------------------------------------------------------------------------- /docs/reference/meanSDtoDiamondPlot-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/meanSDtoDiamondPlot-3.png -------------------------------------------------------------------------------- /docs/reference/meanSDtoDiamondPlot-4.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/meanSDtoDiamondPlot-4.png -------------------------------------------------------------------------------- /docs/reference/meanSDtoDiamondPlot-5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/meanSDtoDiamondPlot-5.png -------------------------------------------------------------------------------- /docs/reference/meanSDtoDiamondPlot-6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/meanSDtoDiamondPlot-6.png -------------------------------------------------------------------------------- /docs/reference/associationsDiamondPlot-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/associationsDiamondPlot-1.png -------------------------------------------------------------------------------- /docs/reference/associationsDiamondPlot-2.png: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/associationsDiamondPlot-2.png -------------------------------------------------------------------------------- /docs/reference/asymmetricalScatterMatrix-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/asymmetricalScatterMatrix-1.png -------------------------------------------------------------------------------- /docs/reference/asymmetricalScatterMatrix-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/asymmetricalScatterMatrix-2.png -------------------------------------------------------------------------------- /docs/reference/processLimeSurveyDropouts-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/processLimeSurveyDropouts-1.png -------------------------------------------------------------------------------- /docs/reference/processLimeSurveyDropouts-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/processLimeSurveyDropouts-2.png -------------------------------------------------------------------------------- /docs/reference/meansComparisonDiamondPlot-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/meansComparisonDiamondPlot-1.png -------------------------------------------------------------------------------- /docs/reference/meansComparisonDiamondPlot-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/meansComparisonDiamondPlot-2.png -------------------------------------------------------------------------------- /docs/reference/meansComparisonDiamondPlot-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/meansComparisonDiamondPlot-3.png -------------------------------------------------------------------------------- /docs/reference/userfriendlyscience-package-1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/userfriendlyscience-package-1.png -------------------------------------------------------------------------------- /docs/reference/userfriendlyscience-package-2.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/userfriendlyscience-package-2.png -------------------------------------------------------------------------------- /docs/reference/userfriendlyscience-package-3.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/userfriendlyscience-package-3.png -------------------------------------------------------------------------------- /docs/reference/userfriendlyscience-package-4.png: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/userfriendlyscience-package-4.png -------------------------------------------------------------------------------- /docs/reference/userfriendlyscience-package-5.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/userfriendlyscience-package-5.png -------------------------------------------------------------------------------- /docs/reference/userfriendlyscience-package-6.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/Matherion/userfriendlyscience/HEAD/docs/reference/userfriendlyscience-package-6.png -------------------------------------------------------------------------------- /R/convert.d.to.eer.R: -------------------------------------------------------------------------------- 1 | convert.d.to.eer <- function(d, cer, eventDesirable=TRUE, eventIfHigher=TRUE) { 2 | if (eventIfHigher) { 3 | return(pnorm((qnorm(cer) + d))); 4 | } else { 5 | return(1 - pnorm((qnorm(1-cer) + d))); 6 | } 7 | } 8 | -------------------------------------------------------------------------------- /packrat/packrat.opts: -------------------------------------------------------------------------------- 1 | auto.snapshot: TRUE 2 | use.cache: FALSE 3 | print.banner.on.startup: auto 4 | vcs.ignore.lib: TRUE 5 | vcs.ignore.src: FALSE 6 | external.packages: 7 | local.repos: 8 | load.external.packages.on.startup: TRUE 9 | ignored.packages: 10 | quiet.package.installation: TRUE 11 | snapshot.recommended.packages: FALSE 12 | -------------------------------------------------------------------------------- /R/setTabCapNumbering.R: -------------------------------------------------------------------------------- 1 | setTabCapNumbering <- function(table_counter_str = ":Table %s: ", 2 | resetCounterTo = 1) { 3 | setCaptionNumbering(captionName = 'tab.cap', 4 | prefix = table_counter_str, 5 | suffix = "", 6 | resetCounterTo = resetCounterTo); 7 | } 8 | -------------------------------------------------------------------------------- /R/trim.R: -------------------------------------------------------------------------------- 1 | ### trim simply trims spaces from the start and end of a string 2 | trim <- function(str) { 3 | ### Based on 'trim' in package Gdata by 4 | ### Gregory R. 
Warnes and others 5 | str <- sub(pattern="^ +", replacement="", x=str) 6 | str <- sub(pattern=" +$", replacement="", x=str) 7 | str <- sub(pattern="^\t+", replacement="", x=str) 8 | str <- sub(pattern="\t+$", replacement="", x=str) 9 | return(str); 10 | } -------------------------------------------------------------------------------- /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | 4 | # Custom for Visual Studio 5 | *.cs diff=csharp 6 | 7 | # Standard to msysgit 8 | *.doc diff=astextplain 9 | *.DOC diff=astextplain 10 | *.docx diff=astextplain 11 | *.DOCX diff=astextplain 12 | *.dot diff=astextplain 13 | *.DOT diff=astextplain 14 | *.pdf diff=astextplain 15 | *.PDF diff=astextplain 16 | *.rtf diff=astextplain 17 | *.RTF diff=astextplain 18 | -------------------------------------------------------------------------------- /R/repeatStr.R: -------------------------------------------------------------------------------- 1 | ### repeat a string a given number of times 2 | repeatStr <- repStr <- function (n = 1, str = " ") { 3 | if (is.character(n) && is.numeric(str)) { 4 | ### The input was switched. 5 | tmp <- n; 6 | n <- str; 7 | str <- tmp; 8 | rm(tmp); 9 | } 10 | if (n < 1) { 11 | return(""); 12 | } 13 | else if (n == 1) { 14 | return(str); 15 | } 16 | else { 17 | res <- str; 18 | for(i in c(1:(n-1))) { 19 | res <- paste0(res, str); 20 | } 21 | return(res); 22 | } 23 | } 24 | -------------------------------------------------------------------------------- /R/massConvertToNumeric.R: -------------------------------------------------------------------------------- 1 | massConvertToNumeric <- function(dat, byFactorLabel = FALSE, 2 | ignoreCharacter = TRUE, 3 | stringsAsFactors = FALSE) { 4 | storedAttributes <- attributes(dat); 5 | dat <- data.frame(lapply(dat, function(x) { 6 | if (is.character(x) && ignoreCharacter) { 7 | return(x); 8 | } 9 | else { 10 | return(convertToNumeric(x, byFactorLabel = byFactorLabel)); 11 | } 12 | }), stringsAsFactors=stringsAsFactors); 13 | attributes(dat) <- storedAttributes; 14 | return(dat); 15 | } 16 | -------------------------------------------------------------------------------- /R/convert.threshold.to.er.R: -------------------------------------------------------------------------------- 1 | convert.threshold.to.er <- function(threshold, mean, sd, 2 | eventIfHigher = TRUE, 3 | pdist = pnorm) { 4 | return(pdist(threshold, mean=mean, sd=sd, lower.tail=!eventIfHigher)); 5 | } 6 | 7 | convert.er.to.threshold <- function(er, mean, sd, 8 | eventIfHigher = TRUE, 9 | qdist = qnorm) { 10 | q <- qdist(er); 11 | if (eventIfHigher) { 12 | return(mean - q * sd); 13 | } else { 14 | return(mean + q * sd); 15 | } 16 | } 17 | -------------------------------------------------------------------------------- /R/safeRequire.R: -------------------------------------------------------------------------------- 1 | ### This function checks whether a package is installed; 2 | ### if not, it installs it. It then loads the package. 
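### A minimal usage sketch for the function defined below ('pander' and the
### mirror index are arbitrary examples): the call installs the package from
### CRAN when it is missing and then attaches it quietly. Wrapped in
### `if (FALSE)` so that sourcing this file never triggers an installation.
if (FALSE) {
  safeRequire("pander");                   # install (if needed), then attach
  safeRequire("pander", mirrorIndex = 1);  # also picks the first CRAN mirror
}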
3 | safeRequire <- function(packageName, mirrorIndex=NULL) { 4 | if (!is.element(packageName, installed.packages()[,1])) { 5 | if (!is.null(mirrorIndex)) { 6 | chooseCRANmirror(ind=mirrorIndex); 7 | } 8 | install.packages(packageName, dependencies=TRUE); 9 | } 10 | suppressPackageStartupMessages(require(package = packageName, 11 | character.only=TRUE, 12 | quietly=TRUE)); 13 | } -------------------------------------------------------------------------------- /R/samplingDistribution.R: -------------------------------------------------------------------------------- 1 | samplingDistribution <- function(popValues = c(0, 1), popFrequencies = c(50, 50), 2 | sampleSize = NULL, sampleFromPop = FALSE, ...) { 3 | 4 | if (is.null(sampleSize)) { 5 | sampleSize <- sum(popFrequencies); 6 | } 7 | 8 | if (sampleFromPop) { 9 | sampleVector <- sample(popValues, size=sampleSize, 10 | replace=TRUE, prob=popFrequencies); 11 | } 12 | else { 13 | sampleVector <- rep(popValues, times=popFrequencies); 14 | } 15 | 16 | return(normalityAssessment(sampleVector = sampleVector, ...)); 17 | 18 | } -------------------------------------------------------------------------------- /man/findShortestInterval.Rd: -------------------------------------------------------------------------------- 1 | \name{findShortestInterval} 2 | \alias{findShortestInterval} 3 | \title{ 4 | Find the shortest interval 5 | } 6 | \description{ 7 | This function takes a numeric vector, sorts it, and then finds the shortest interval and returns its length. 8 | } 9 | \usage{ 10 | findShortestInterval(x) 11 | } 12 | \arguments{ 13 | \item{x}{ 14 | The numeric vector. 15 | } 16 | } 17 | \value{ 18 | The length of the shortest interval. 19 | } 20 | \author{ 21 | Gjalt-Jorn Peters 22 | 23 | Maintainer: Gjalt-Jorn Peters 24 | } 25 | \examples{ 26 | findShortestInterval(c(1, 2, 4, 7, 20, 10, 15)); 27 | } 28 | \keyword{ utilities } 29 | -------------------------------------------------------------------------------- /man/is.nr.Rd: -------------------------------------------------------------------------------- 1 | \name{is.nr} 2 | \alias{is.nr} 3 | %- Also NEED an '\alias' for EACH other topic documented here. 4 | \title{ 5 | is.nr 6 | } 7 | \description{ 8 | Convenience function that returns TRUE if the argument is not null, not NA, and is.numeric. 9 | } 10 | \usage{ 11 | is.nr(x) 12 | } 13 | %- maybe also 'usage' for other objects documented here. 14 | \arguments{ 15 | \item{x}{ 16 | The value or vector to check. 17 | } 18 | } 19 | \value{ 20 | TRUE or FALSE. 21 | } 22 | \author{ 23 | Gjalt-Jorn Peters 24 | 25 | Maintainer: Gjalt-Jorn Peters 26 | } 27 | \examples{ 28 | is.nr(8); ### Returns TRUE 29 | is.nr(NULL); ### Returns FALSE 30 | is.nr(NA); ### Returns FALSE 31 | } -------------------------------------------------------------------------------- /R/userfriendlyscienceBasics.R: -------------------------------------------------------------------------------- 1 | ########################################################### 2 | ########################################################### 3 | ### 4 | ### Collection of very basic functions 5 | ### 6 | ### File created by Gjalt-Jorn Peters. Questions? You can 7 | ### contact me through http://behaviorchange.eu. 
 8 | ### 9 | ########################################################### 10 | ########################################################### 11 | 12 | noZero <- ufs::noZero; 13 | 14 | formatR <- ufs::formatR; 15 | 16 | ifelseObj <- ufs::ifelseObj; 17 | 18 | is.odd <- ufs::is.odd 19 | is.even <- ufs::is.even 20 | 21 | `%IN%` <- ufs::`%IN%`; 22 | 23 | cat0 <- ufs::cat0; 24 | 25 | isTrue <- ufs::isTrue; 26 | 27 | is.nr <- ufs::is.nr; 28 | -------------------------------------------------------------------------------- /man/areColors.Rd: -------------------------------------------------------------------------------- 1 | \name{areColors} 2 | \alias{areColors} 3 | \title{ 4 | Check whether elements of a vector are valid colors 5 | } 6 | \description{ 7 | This function by Josh O'Brien checks whether elements of a vector are valid colors. It has been copied from a Stack Exchange answer (see \url{http://stackoverflow.com/questions/13289009/check-if-character-string-is-a-valid-color-representation}). 8 | } 9 | \usage{ 10 | areColors(x) 11 | } 12 | \arguments{ 13 | \item{x}{ 14 | The vector. 15 | } 16 | } 17 | \value{ 18 | A logical vector. 19 | } 20 | \author{ 21 | Josh O'Brien 22 | 23 | Maintainer: Gjalt-Jorn Peters 24 | } 25 | \examples{ 26 | areColors(c(NA, "black", "blackk", "1", "#00", "#000000")); 27 | } 28 | \keyword{ utilities } 29 | -------------------------------------------------------------------------------- /man/extractVarName.Rd: -------------------------------------------------------------------------------- 1 | \name{extractVarName} 2 | \alias{extractVarName} 3 | \title{ 4 | Extract variable names 5 | } 6 | \description{ 7 | Functions often get passed variables from within dataframes or other lists. However, printing these names with all their dollar signs isn't very userfriendly. This function simply uses a regular expression to extract the actual name. 8 | } 9 | \usage{ 10 | extractVarName(x) 11 | } 12 | \arguments{ 13 | \item{x}{ 14 | A character vector of one or more variable names. 15 | } 16 | } 17 | \value{ 18 | The actual variable name, with all containing objects stripped off. 19 | } 20 | \author{ 21 | Gjalt-Jorn Peters 22 | 23 | Maintainer: Gjalt-Jorn Peters 24 | } 25 | \examples{ 26 | extractVarName('mtcars$mpg'); 27 | } 28 | \keyword{ utils } 29 | -------------------------------------------------------------------------------- /man/isTrue.Rd: -------------------------------------------------------------------------------- 1 | \name{isTrue} 2 | \alias{isTrue} 3 | %- Also NEED an '\alias' for EACH other topic documented here. 4 | \title{ 5 | isTrue 6 | } 7 | \description{ 8 | Returns TRUE for TRUE elements, FALSE for FALSE elements, and whatever is 9 | specified in na for NA items. 10 | } 11 | \usage{ 12 | isTrue(x, na = FALSE) 13 | } 14 | %- maybe also 'usage' for other objects documented here. 15 | \arguments{ 16 | \item{x}{ 17 | The vector to check for TRUE, FALSE, and NA values. 18 | } 19 | \item{na}{ 20 | What to return for NA values. 21 | } 22 | } 23 | \value{ 24 | A logical vector. 
25 | } 26 | \author{ 27 | Gjalt-Jorn Peters 28 | 29 | Maintainer: Gjalt-Jorn Peters 30 | } 31 | \examples{ 32 | isTrue(c(TRUE, FALSE, NA)); 33 | isTrue(c(TRUE, FALSE, NA), na=TRUE); 34 | } 35 | \keyword{ univariate } -------------------------------------------------------------------------------- /man/averageFishersZs.Rd: -------------------------------------------------------------------------------- 1 | \name{averageFishersZs} 2 | \alias{averageFishersZs} 3 | %- Also NEED an '\alias' for EACH other topic documented here. 4 | \title{ 5 | averageFishersZs 6 | } 7 | \description{ 8 | Takes pairs of Fisher's z's and the accompanying n's (sample sizes) and 9 | returns their average. 10 | } 11 | \usage{ 12 | averageFishersZs(zs, ns) 13 | } 14 | %- maybe also 'usage' for other objects documented here. 15 | \arguments{ 16 | \item{zs}{ 17 | The values of Fisher's z. 18 | } 19 | \item{ns}{ 20 | The sample sizes (ns). 21 | } 22 | } 23 | \value{ 24 | The average of the Fisher's z values. 25 | } 26 | \seealso{ 27 | \code{\link{averagePearsonRs}} 28 | } 29 | \author{ 30 | Gjalt-Jorn Peters 31 | 32 | Maintainer: Gjalt-Jorn Peters 33 | } 34 | \examples{ 35 | averageFishersZs(c(1.1, 5.4), c(10, 30)); 36 | } -------------------------------------------------------------------------------- /docs/link.svg: -------------------------------------------------------------------------------- 1 | 2 | 3 | 5 | 8 | 12 | 13 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Windows image file caches 2 | Thumbs.db 3 | ehthumbs.db 4 | 5 | # Folder config file 6 | Desktop.ini 7 | # Recycle Bin used on file shares 8 | $RECYCLE.BIN/ 9 | # Windows Installer files 10 | *.cab 11 | *.msi 12 | *.msm 13 | *.msp 14 | # Windows shortcuts 15 | *.lnk 16 | # ========================= 17 | # Operating System Files 18 | # OSX 19 | .DS_Store 20 | .AppleDouble 21 | .LSOverride 22 | # Thumbnails 23 | ._* 24 | # Files that might appear in the root of a volume 25 | .DocumentRevisions-V100 26 | .fseventsd 27 | .Spotlight-V100 28 | .TemporaryItems 29 | .Trashes 30 | .VolumeIcon.icns 31 | # Directories potentially created on remote AFP share 32 | .AppleDB 33 | .AppleDesktop 34 | Network Trash Folder 35 | Temporary Items 36 | .apdisk 37 | # R Studio files 38 | .Rproj.user 39 | .Rhistory 40 | userfriendlyscience.Rproj 41 | packrat/src 42 | packrat/lib*/ 43 | -------------------------------------------------------------------------------- /R/averageFishersZs.R: -------------------------------------------------------------------------------- 1 | #' averageFishersZs 2 | #' 3 | #' Takes pairs of Fisher's z's and the accompanying n's (sample sizes) and 4 | #' returns their average. 5 | #' 6 | #' 7 | #' @param zs The values of Fisher's z. 8 | #' @param ns The sample sizes (ns). 9 | #' @return The average of the Fisher's z values. 
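#' @details The average is a weighted mean of the z values in which each z is
#'   weighted by its sample size minus three, i.e.
#'   \code{sum((ns - 3) * zs) / sum(ns - 3)}; the denominator used below,
#'   \code{sum(ns) - 3 * length(ns)}, is algebraically the same quantity.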
10 | #' @author Gjalt-Jorn Peters 11 | #' 12 | #' Maintainer: Gjalt-Jorn Peters 13 | #' @seealso \code{\link{averagePearsonRs}} 14 | #' @examples 15 | #' 16 | #' averageFishersZs(c(1.1, 5.4), c(10, 30)); 17 | #' 18 | #' @export averageFishersZs 19 | averageFishersZs <- function(zs, ns) { 20 | if (length(zs) != length(ns)) { 21 | stop("Vector 'zs' (current length: ", length(zs), 22 | ") and vector 'ns' (current length: ", length(ns), 23 | ") must be the same length!"); 24 | } 25 | return( sum((ns - 3)* zs) / (sum(ns) - 3 * length(ns))); 26 | } 27 | -------------------------------------------------------------------------------- /inst/CITATION: -------------------------------------------------------------------------------- 1 | citHeader("To cite userfriendlyscience in publications, please use any of:") 2 | 3 | year <- sub("-.*", "", meta$Date) 4 | note <- sprintf("R package version %s", meta$Version) 5 | 6 | bibentry(bibtype = "Manual", 7 | title = "{userfriendlyscience}: Quantitative analysis made accessible", 8 | author = c(person("Gjalt-Jorn Ygram", "Peters")), 9 | year = year, 10 | note = note, 11 | url = "https://userfriendlyscience.com", 12 | doi = "10.17605/osf.io/txequ") 13 | 14 | bibentry(bibtype = "Article", 15 | title = "Diamond plots: a tutorial to introduce a visualisation tool that facilitates interpretation and comparison of multiple sample estimates while respecting their inaccuracy", 16 | author = c(person("Gjalt-Jorn Ygram", "Peters")), 17 | journal= "PsyArXiv", 18 | year = 2017, 19 | url = "https://psyarxiv.com/fzh6c") 20 | -------------------------------------------------------------------------------- /man/iqrOutlier.Rd: -------------------------------------------------------------------------------- 1 | \name{iqrOutlier} 2 | \alias{iqrOutlier} 3 | \title{ 4 | Identify outliers according to the IQR criterion 5 | } 6 | \description{ 7 | The IQR criterion holds that any value lower than one-and-a-half times the interquartile range below the first quartile, or higher than one-and-a-half times the interquartile range above the third quartile, is an outlier. This function returns a logical vector that identifies those outliers. 8 | } 9 | \usage{ 10 | iqrOutlier(x) 11 | } 12 | \arguments{ 13 | \item{x}{ 14 | The vector to scan for outliers. 15 | } 16 | } 17 | \value{ 18 | A logical vector where TRUE identifies outliers. 19 | } 20 | \author{ 21 | Gjalt-Jorn Peters 22 | 23 | Maintainer: Gjalt-Jorn Peters 24 | } 25 | \seealso{ 26 | \code{\link{IQR}} 27 | } 28 | \examples{ 29 | ### One outlier in the miles per gallon 30 | iqrOutlier(mtcars$mpg); 31 | } 32 | \keyword{ univariate } 33 | -------------------------------------------------------------------------------- /man/sharedSubString.Rd: -------------------------------------------------------------------------------- 1 | \name{sharedSubString} 2 | \alias{sharedSubString} 3 | \title{ 4 | sharedSubString 5 | } 6 | \description{ 7 | A function to find the longest shared substring in a character vector. 8 | } 9 | \usage{ 10 | sharedSubString(x, y = NULL) 11 | } 12 | \arguments{ 13 | \item{x}{ 14 | The character vector to process. 15 | } 16 | \item{y}{ 17 | Optionally, two single values can be specified. This is probably not useful to end users, but it's used by the function when it calls itself. 18 | } 19 | } 20 | \value{ 21 | A vector of length one with either the longest substring that occurs in all values of the character vector, or NA if no overlap can be found. 
22 | } 23 | \author{ 24 | Gjalt-Jorn Peters 25 | 26 | Maintainer: Gjalt-Jorn Peters 27 | } 28 | \examples{ 29 | sharedSubString(c("t0_responseTime", "t1_responseTime", "t2_responseTime")); 30 | ### Returns "_responseTime" 31 | } 32 | \keyword{ character } 33 | -------------------------------------------------------------------------------- /R/areColors.R: -------------------------------------------------------------------------------- 1 | ### http://stackoverflow.com/questions/13289009/check-if-character-string-is-a-valid-color-representation 2 | 3 | 4 | 5 | #' Check whether elements of a vector are valid colors 6 | #' 7 | #' This function by Josh O'Brien checks whether elements of a vector are valid 8 | #' colors. It has been copied from a Stack Exchange answer (see 9 | #' \url{http://stackoverflow.com/questions/13289009/check-if-character-string-is-a-valid-color-representation}). 10 | #' 11 | #' 12 | #' @param x The vector. 13 | #' @return A logical vector. 14 | #' @author Josh O'Brien 15 | #' 16 | #' Maintainer: Gjalt-Jorn Peters 17 | #' @keywords utilities 18 | #' @examples 19 | #' 20 | #' areColors(c(NA, "black", "blackk", "1", "#00", "#000000")); 21 | #' 22 | #' @export areColors 23 | areColors <- function(x) { 24 | sapply(x, function(X) { 25 | tryCatch(is.matrix(col2rgb(X)), 26 | error = function(e) FALSE) 27 | }) 28 | } 29 | -------------------------------------------------------------------------------- /R/ggBarChart.R: -------------------------------------------------------------------------------- 1 | #' Bar chart using ggplot 2 | #' 3 | #' This function provides a simple interface to create a \code{\link{ggplot}} 4 | #' bar chart. 5 | #' 6 | #' 7 | #' @param vector The vector to display in the bar chart. 8 | #' @param plotTheme The theme to apply. 9 | #' @param \dots Any additional arguments are passed to \code{\link{geom_bar}}. 10 | #' @return A \code{\link{ggplot}} plot is returned. 11 | #' @author Gjalt-Jorn Peters 12 | #' 13 | #' Maintainer: Gjalt-Jorn Peters 14 | #' @seealso \code{\link{geom_bar}} 15 | #' @keywords hplot 16 | #' @examples 17 | #' 18 | #' ggBarChart(mtcars$cyl); 19 | #' 20 | #' @export ggBarChart 21 | ggBarChart <- function(vector, plotTheme = theme_bw(), ...) { 22 | varName <- extractVarName(deparse(substitute(vector))); 23 | tmpDf <- as.data.frame(na.omit(vector)); 24 | names(tmpDf) <- varName; 25 | ggplot(tmpDf, aes_string(x=varName)) + 26 | geom_bar(...) + plotTheme; 27 | } 28 | -------------------------------------------------------------------------------- /man/ggBarChart.Rd: -------------------------------------------------------------------------------- 1 | \name{ggBarChart} 2 | \alias{ggBarChart} 3 | \title{ 4 | Bar chart using ggplot 5 | } 6 | \description{ 7 | This function provides a simple interface to create a \code{\link{ggplot}} bar chart. 8 | } 9 | \usage{ 10 | ggBarChart(vector, plotTheme = theme_bw(), ...) 11 | } 12 | \arguments{ 13 | \item{vector}{ 14 | The vector to display in the bar chart. 15 | } 16 | \item{plotTheme}{ 17 | The theme to apply. 18 | } 19 | \item{\dots}{ 20 | Any additional arguments are passed to \code{\link{geom_bar}}. 21 | } 22 | } 23 | \value{ 24 | A \code{\link{ggplot}} plot is returned. 25 | } 26 | \author{ 27 | Gjalt-Jorn Peters 28 | 29 | Maintainer: Gjalt-Jorn Peters 30 | }\seealso{ 31 | \code{\link{geom_bar}} 32 | } 33 | \examples{ 34 | ggBarChart(mtcars$cyl); 35 | } 36 | % Add one or more standard keywords, see file 'KEYWORDS' in the 37 | % R documentation directory. 
38 | \keyword{ hplot } 39 | 40 | -------------------------------------------------------------------------------- /R/findShortestInterval.R: -------------------------------------------------------------------------------- 1 | ### Function to find the shortest interval in a numeric vector 2 | 3 | 4 | #' Find the shortest interval 5 | #' 6 | #' This function takes a numeric vector, sorts it, and then finds the shortest 7 | #' interval and returns its length. 8 | #' 9 | #' 10 | #' @param x The numeric vector. 11 | #' @return The length of the shortest interval. 12 | #' @author Gjalt-Jorn Peters 13 | #' 14 | #' Maintainer: Gjalt-Jorn Peters 15 | #' @keywords utilities 16 | #' @examples 17 | #' 18 | #' findShortestInterval(c(1, 2, 4, 7, 20, 10, 15)); 19 | #' 20 | #' @export findShortestInterval 21 | findShortestInterval <- function(x) { 22 | if (!is.numeric(x)) 23 | stop("This function only accepts numeric vectors as input."); 24 | if (length(x) == 1) 25 | return(x); 26 | x <- sort(x); 27 | i <- 1; 28 | res <- abs(x[i] - x[i+1]); 29 | while (i < length(x)) { 30 | if (res > abs(x[i] - x[i+1])) res <- abs(x[i] - x[i+1]); 31 | i <- i + 1; 32 | } 33 | return(res); 34 | } 35 | -------------------------------------------------------------------------------- /man/ggpie.Rd: -------------------------------------------------------------------------------- 1 | \name{ggPie} 2 | \alias{ggPie} 3 | \title{ 4 | A ggplot pie chart 5 | } 6 | \description{ 7 | This function creates a pie chart. Note that these are generally quite strongly advised against, as people are not good at interpreting relative frequencies on the basis of pie charts. 8 | } 9 | \usage{ 10 | ggPie(vector, 11 | scale_fill = scale_fill_viridis(discrete=TRUE)) 12 | } 13 | \arguments{ 14 | \item{vector}{ 15 | The vector (best to pass a factor). 16 | } 17 | \item{scale_fill}{ 18 | The ggplot scale fill function to use for the colors. 19 | } 20 | } 21 | \value{ 22 | A ggplot pie chart. 23 | } 24 | \note{ 25 | This function is very strongly based on the Mathematical Coffee post at http://mathematicalcoffee.blogspot.com/2014/06/ggpie-pie-graphs-in-ggplot2.html. 26 | } 27 | \author{ 28 | Amy Chan; implemented in this package (and tweaked a bit) by Gjalt-Jorn Peters. 
29 | 30 | Maintainer: Gjalt-Jorn Peters 31 | } 32 | \examples{ 33 | ggPie(mtcars$cyl); 34 | } 35 | \keyword{ hplot } 36 | -------------------------------------------------------------------------------- /R/ad.test_from_nortest.R: -------------------------------------------------------------------------------- 1 | ad.test_from_nortest <- function (x) { 2 | DNAME <- deparse(substitute(x)) 3 | x <- sort(x[complete.cases(x)]) 4 | n <- length(x) 5 | if (n < 8) 6 | stop("sample size must be greater than 7") 7 | logp1 <- pnorm((x - mean(x))/sd(x), log.p = TRUE) 8 | logp2 <- pnorm(-(x - mean(x))/sd(x), log.p = TRUE) 9 | h <- (2 * seq(1:n) - 1) * (logp1 + rev(logp2)) 10 | A <- -n - mean(h) 11 | AA <- (1 + 0.75/n + 2.25/n^2) * A 12 | if (AA < 0.2) { 13 | pval <- 1 - exp(-13.436 + 101.14 * AA - 223.73 * AA^2) 14 | } 15 | else if (AA < 0.34) { 16 | pval <- 1 - exp(-8.318 + 42.796 * AA - 59.938 * AA^2) 17 | } 18 | else if (AA < 0.6) { 19 | pval <- exp(0.9177 - 4.279 * AA - 1.38 * AA^2) 20 | } 21 | else if (AA < 10) { 22 | pval <- exp(1.2937 - 5.709 * AA + 0.0186 * AA^2) 23 | } 24 | else pval <- 3.7e-24 25 | RVAL <- list(statistic = c(A = A), p.value = pval, method = "Anderson-Darling normality test", 26 | data.name = DNAME) 27 | class(RVAL) <- "htest" 28 | return(RVAL) 29 | } 30 | -------------------------------------------------------------------------------- /R/averagePearsonRs.R: -------------------------------------------------------------------------------- 1 | #' averagePearsonRs 2 | #' 3 | #' Takes pairs of Pearson r's (correlation coefficients) and the accompanying 4 | #' n's (sample sizes) and returns their average. 5 | #' 6 | #' 7 | #' @param rs The correlation coefficients. 8 | #' @param ns The sample sizes. 9 | #' @param FishersZ Whether to compute the average through Fisher's z (only 10 | #' method implemented as of the writing of this document). 
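#' @return The average Pearson r. With \code{FishersZ=TRUE} (currently the only
#'   implemented method), the correlations are converted to Fisher's z values,
#'   averaged via \code{\link{averageFishersZs}} (which weights each z by its
#'   sample size minus three), and the result is converted back to r.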
11 | #' @author Gjalt-Jorn Peters 12 | #' 13 | #' Maintainer: Gjalt-Jorn Peters 14 | #' @seealso \code{\link{averageFishersZs}}, \code{\link{convert.r.to.fisherz}} 15 | #' @keywords univariate 16 | #' @examples 17 | #' 18 | #' averagePearsonRs(c(.3, .4, .6), c(70, 80, 50)); 19 | #' 20 | #' @export averagePearsonRs 21 | averagePearsonRs <- function(rs, ns, FishersZ=TRUE) { 22 | if (FishersZ) { 23 | return(convert.fisherz.to.r(averageFishersZs(convert.r.to.fisherz(rs), ns))); 24 | } else { 25 | warning("Sorry, Alexander's method (1990, Bulletin of the Psychonomic Society) not yet implemented!"); 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /R/diamondCoordinates.R: -------------------------------------------------------------------------------- 1 | diamondCoordinates <- function(values, otherAxisValue = 1, 2 | direction = 'horizontal', 3 | autoSize=NULL, fixedSize=.15) { 4 | if (length(values) < 2) { 5 | stop("Specify at least two values!"); 6 | } 7 | 8 | min <- min(values); 9 | max <- max(values); 10 | mid <- median(values); 11 | 12 | if (is.null(autoSize) && !is.null(fixedSize)) { 13 | size <- fixedSize; 14 | } else if (!is.null(autoSize)) { 15 | size <- (.5*autoSize) * (max-min); 16 | } else { 17 | size <- .25 * (max-min); 18 | } 19 | 20 | if (direction=='horizontal') { 21 | xValues <- c(min, mid, max, mid, min); 22 | yValues <- c(otherAxisValue, otherAxisValue - size, otherAxisValue, otherAxisValue + size, otherAxisValue); 23 | } else if (direction=='vertical') { 24 | xValues <- c(otherAxisValue, otherAxisValue - size, otherAxisValue, otherAxisValue + size, otherAxisValue); 25 | yValues <- c(min, mid, max, mid, min); 26 | } 27 | 28 | return(data.frame(x=xValues, y=yValues)); 29 | 30 | } 31 | -------------------------------------------------------------------------------- /R/iqrOutlier.R: -------------------------------------------------------------------------------- 1 | ### Based on JasonAizkalns' answer at 2 | ### http://stackoverflow.com/questions/33524669/labeling-outliers-of-boxplots-in-r 3 | 4 | 5 | 6 | #' Identify outliers according to the IQR criterion 7 | #' 8 | #' The IQR criterion holds that any value lower than one-and-a-half times the 9 | #' interquartile range below the first quartile, or higher than one-and-a-half 10 | #' times the interquartile range above the third quartile, is an outlier. This 11 | #' function returns a logical vector that identifies those outliers. 12 | #' 13 | #' 14 | #' @param x The vector to scan for outliers. 15 | #' @return A logical vector where TRUE identifies outliers. 
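#' @details Concretely, a value is flagged as an outlier when it lies below
#'   \code{quantile(x, .25) - 1.5 * IQR(x)} or above
#'   \code{quantile(x, .75) + 1.5 * IQR(x)}.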
16 | #' @author Gjalt-Jorn Peters 17 | #' 18 | #' Maintainer: Gjalt-Jorn Peters 19 | #' @seealso \code{\link{IQR}} 20 | #' @keywords univariate 21 | #' @examples 22 | #' 23 | #' ### One outlier in the miles per gallon 24 | #' iqrOutlier(mtcars$mpg); 25 | #' 26 | #' @export iqrOutlier 27 | iqrOutlier <- function(x) { 28 | return(x < quantile(x, 0.25) - 1.5 * IQR(x) | x > quantile(x, 0.75) + 1.5 * IQR(x)) 29 | } 30 | -------------------------------------------------------------------------------- /R/makeScales.R: -------------------------------------------------------------------------------- 1 | ### This function actually makes the scales 2 | makeScales <- function(dat, scales, append=TRUE) { 3 | resDat <- dat[, FALSE]; 4 | for (currentScale in 1:length(scales)) { 5 | if (length(unlist(scales[currentScale])) > 1) { 6 | resDat[[names(scales[currentScale])]] <- 7 | rowMeans(dat[, unlist(scales[currentScale])], na.rm=TRUE); 8 | resDat[[names(scales[currentScale])]] <- 9 | ifelse(is.nan(resDat[[names(scales[currentScale])]]), 10 | NA, 11 | resDat[[names(scales[currentScale])]]); 12 | attributes(resDat[[names(scales[currentScale])]])$scale_item_names <- 13 | unname(unlist(scales[currentScale])); 14 | } 15 | else if (length(unlist(scales[currentScale])) == 1) { 16 | resDat[[names(scales[currentScale])]] <- dat[[unlist(scales[currentScale])]]; 17 | attributes(resDat[[names(scales[currentScale])]])$scale_item_names <- 18 | unname(unlist(scales[currentScale])); 19 | } 20 | } 21 | if (append) { 22 | return(cbind(dat, resDat)); 23 | } else { 24 | return(resDat); 25 | } 26 | } 27 | -------------------------------------------------------------------------------- /R/invertItem.R: -------------------------------------------------------------------------------- 1 | ### To invert mirrored items 2 | invertItem <- function(item, fullRange=NULL, ignorePreviousInversion = FALSE) { 3 | ### Check whether this was already inverted 4 | if (!is.null(attr(item, "inverted"))) { 5 | if ((attr(item, "inverted") == TRUE) & !(ignorePreviousInversion)) { 6 | warning("Vector '", deparse(substitute(item)), 7 | "' has already been inverted! ", 8 | "Set ignorePreviousInversion to TRUE to override this ", 9 | "check and invert the vector anyway."); 10 | } 11 | } 12 | 13 | ### Not inverted yet (or ignorePreviousInversion set to TRUE) 14 | if (is.numeric(item)) { 15 | if (is.null(fullRange)) { 16 | fullRange <- range(item, na.rm=TRUE); 17 | } 18 | else { 19 | fullRange <- range(fullRange); 20 | } 21 | res <- sum(fullRange) - item; 22 | } 23 | else { 24 | stop("Provide a numeric vector!"); 25 | } 26 | if (is.null(attr(item, "inverted"))) { 27 | attr(res, "inverted") <- TRUE; 28 | } else { 29 | attr(res, "inverted") <- !(attr(res, "inverted")); 30 | } 31 | return(res); 32 | } 33 | -------------------------------------------------------------------------------- /R/rawDataDiamondLayer.R: -------------------------------------------------------------------------------- 1 | rawDataDiamondLayer <- function(dat, items = NULL, itemOrder = 1:length(items), 2 | dataAlpha = .1, 3 | dataColor = "#444444", 4 | jitterWidth = .5, 5 | jitterHeight = .4, 6 | size=3, 7 | ...) 
{ 8 | 9 | rawData <- na.omit(data.frame(value = unlist(dat[, items[itemOrder]]), 10 | labels = rep(1:length(items), 11 | each=nrow(dat)))); 12 | 13 | rawDataLayer <- geom_jitter(data=rawData, 14 | mapping=aes_string(x='value', y='labels'), 15 | size = size, 16 | color = dataColor, 17 | alpha = dataAlpha, 18 | stroke = 0, 19 | width=jitterWidth, 20 | height=jitterHeight, 21 | ...); 22 | 23 | return(rawDataLayer); 24 | 25 | } 26 | -------------------------------------------------------------------------------- /man/invertItems.Rd: -------------------------------------------------------------------------------- 1 | \name{invertItems} 2 | \alias{invertItems} 3 | %- Also NEED an '\alias' for EACH other topic documented here. 4 | \title{ 5 | invertItems 6 | } 7 | \description{ 8 | Inverts items (as in, in a questionnaire), by calling \code{\link{invertItem}} 9 | on all relevant items. 10 | } 11 | \usage{ 12 | invertItems(dat, items = NULL, ...) 13 | } 14 | %- maybe also 'usage' for other objects documented here. 15 | \arguments{ 16 | \item{dat}{ 17 | The dataframe containing the variables to invert. 18 | } 19 | \item{items}{ 20 | The names or indices of the variables to invert. If not supplied (i.e. NULL), 21 | all variables in the dataframe will be inverted. 22 | } 23 | \item{\dots}{ 24 | Arguments (parameters) passed on to data.frame when recreating that after 25 | having used lapply. 26 | } 27 | } 28 | \value{ 29 | The dataframe with the specified items inverted. 30 | } 31 | \author{ 32 | Gjalt-Jorn Peters 33 | 34 | Maintainer: Gjalt-Jorn Peters 35 | } 36 | \seealso{ 37 | \code{\link{invertItem}} 38 | } 39 | \examples{ 40 | invertItems(mtcars, c('cyl')); 41 | } 42 | 43 | \keyword{ univariate } 44 | -------------------------------------------------------------------------------- /R/extractVarName.R: -------------------------------------------------------------------------------- 1 | #' Extract variable names 2 | #' 3 | #' Functions often get passed variables from within dataframes or other lists. 4 | #' However, printing these names with all their dollar signs isn't very 5 | #' userfriendly. This function simply uses a regular expression to extract the 6 | #' actual name. 7 | #' 8 | #' 9 | #' @param x A character vector of one or more variable names. 10 | #' @return The actual variables name, with all containing objectes stripped 11 | #' off. 12 | #' @author Gjalt-Jorn Peters 13 | #' 14 | #' Maintainer: Gjalt-Jorn Peters 15 | #' @keywords utils 16 | #' @examples 17 | #' 18 | #' extractVarName('mtcars$mpg'); 19 | #' 20 | #' @export extractVarName 21 | extractVarName <- function(x) { 22 | regexpr <- "[[:alnum:]]+\\[[[:alnum:]]*,[[:blank:]]*['\"]([[:alnum:]]+)['\"]\\]"; 23 | if (grepl(regexpr, x)) 24 | return (sub(regexpr, "\\1", x)) 25 | else 26 | return(sub(".*\\$(.*?)[])]*$", '\\1', x)); 27 | ### Extract last expression following a dollar sign and possibly 28 | ### followed by parentheses or brackets 29 | #return(sub(".*\\$(.*?)[])]*$", '\\1', x)); 30 | } 31 | -------------------------------------------------------------------------------- /man/averagePearsonRs.Rd: -------------------------------------------------------------------------------- 1 | \name{averagePearsonRs} 2 | \alias{averagePearsonRs} 3 | %- Also NEED an '\alias' for EACH other topic documented here. 4 | \title{ 5 | averagePearsonRs 6 | } 7 | \description{ 8 | Takes pairs of Pearson r's (correlation coefficients) and the 9 | accompanying n's (sample sizes) and returns their average. 
10 | } 11 | \usage{ 12 | averagePearsonRs(rs, ns, FishersZ = TRUE) 13 | } 14 | %- maybe also 'usage' for other objects documented here. 15 | \arguments{ 16 | \item{rs}{ 17 | The correlation coefficients. 18 | } 19 | \item{ns}{ 20 | The sample sizes. 21 | } 22 | \item{FishersZ}{ 23 | Whether to compute the average through Fisher's z (only method implemented 24 | as of the writing of this document). 25 | } 26 | } 27 | \author{ 28 | Gjalt-Jorn Peters 29 | 30 | Maintainer: Gjalt-Jorn Peters 31 | } 32 | \seealso{ 33 | \code{\link{averageFishersZs}}, \code{\link{convert.r.to.fisherz}} 34 | } 35 | \examples{ 36 | averagePearsonRs(c(.3, .4, .6), c(70, 80, 50)); 37 | } 38 | % Add one or more standard keywords, see file 'KEYWORDS' in the 39 | % R documentation directory. 40 | \keyword{ univariate } 41 | -------------------------------------------------------------------------------- /R/frequencies.R: -------------------------------------------------------------------------------- 1 | frequencies <- function(..., digits = 1, nsmall=1, transposed=FALSE, round=1, 2 | plot=FALSE, plotTheme = theme_bw()) { 3 | 4 | ### Call functions to explore the variables 5 | res <- lapply(list(...), function(x) { 6 | rsl <- list(); 7 | rsl$freq <- freq(x, digits=digits, nsmall=nsmall, 8 | transposed=transposed, round=round, 9 | plot=plot, plotTheme=plotTheme); 10 | return(rsl); 11 | }); 12 | 13 | ### Get the variable names 14 | names(res) <- unlist(as.list(substitute(list(...)))[-1]); 15 | 16 | ### Set class for correct printing and return result 17 | class(res) <- 'frequencies'; 18 | return(res); 19 | } 20 | 21 | print.frequencies <- function(x, ...) { 22 | for (currentName in names(x)) { 23 | cat0("### Frequencies for '", extractVarName(currentName), "'\n\n"); 24 | print(x[[currentName]]$freq); 25 | cat("\n"); 26 | } 27 | } 28 | 29 | pander.frequencies <- function(x, prefix="###", ...) { 30 | for (currentName in names(x)) { 31 | cat0(prefix, " Frequencies for '", extractVarName(currentName), "'\n\n"); 32 | pander(x[[currentName]]$freq); 33 | cat("\n"); 34 | } 35 | } 36 | -------------------------------------------------------------------------------- /man/fullFact.Rd: -------------------------------------------------------------------------------- 1 | \name{fullFact} 2 | \alias{fullFact} 3 | %- Also NEED an '\alias' for EACH other topic documented here. 4 | \title{ 5 | fullFact 6 | } 7 | \description{ 8 | This function provides a userfriendly interface to a number of advanced factor 9 | analysis functions in the \code{\link{psych}} package. 10 | } 11 | \usage{ 12 | fullFact(dat = NULL, items = NULL, rotate = "oblimin") 13 | } 14 | %- maybe also 'usage' for other objects documented here. 15 | \arguments{ 16 | \item{dat}{ 17 | Datafile to analyse; if NULL, a pop-up is provided to select a file. 18 | } 19 | \item{items}{ 20 | Which variables (items) to factor-analyse. If NULL, all are selected. 21 | } 22 | \item{rotate}{ 23 | Which rotation to use (see \code{\link{psych}} package). 24 | } 25 | } 26 | \value{ 27 | The outcomes, which are printed to the screen unless assigned. 
28 | } 29 | \author{ 30 | Gjalt-Jorn Peters 31 | 32 | Maintainer: Gjalt-Jorn Peters 33 | } 34 | \seealso{ 35 | \code{\link{fa.parallel}}, \code{\link{vss}} 36 | } 37 | \examples{ 38 | \dontrun{ 39 | ### Not run to save processing during package testing 40 | fullFact(attitude); 41 | } 42 | } 43 | \keyword{ univariate } 44 | -------------------------------------------------------------------------------- /R/sdConfInt.R: -------------------------------------------------------------------------------- 1 | ### This function generates a confidence level for a standard deviation 2 | ### http://www.graphpad.com/guides/prism/6/statistics/index.htm?stat_confidence_interval_of_a_stand.htm 3 | ### https://www.wolframalpha.com/input/?i=confidence+interval+for+a+standard+deviation&lk=3 4 | sdConfInt <- function(vector=NULL, sd=NULL, n=NULL, conf.level=.95) { 5 | if (is.null(sd) & is.null(n)) { 6 | if (is.null(vector)) { 7 | stop("Please specify either vector, or sd and n!"); 8 | } 9 | sd <- sd(vector); 10 | n <- length(vector); 11 | } 12 | res <- list(); 13 | res$input <- list(vector=vector, sd=sd, n=n, conf.level=conf.level); 14 | res$intermediate <- list(alpha = 1-conf.level); 15 | res$intermediate$chisq.bound.lo <- qchisq(1-(res$intermediate$alpha/2), n-1); 16 | res$intermediate$chisq.bound.hi <- qchisq(res$intermediate$alpha/2, n-1); 17 | ci.lo <- sqrt(((n-1)*sd^2)/res$intermediate$chisq.bound.lo); 18 | ci.hi <- sqrt(((n-1)*sd^2)/res$intermediate$chisq.bound.hi); 19 | res$output <- list(ci = c(ci.lo, ci.hi)); 20 | class(res) <- 'sdConfInt'; 21 | return(res); 22 | } 23 | 24 | print.sdConfInt <- function(x, digits=2, ...) { 25 | print(x$output$ci, digits=digits, ...); 26 | } 27 | -------------------------------------------------------------------------------- /man/processLimeSurveyDropouts.Rd: -------------------------------------------------------------------------------- 1 | \name{processLimeSurveyDropouts} 2 | \alias{processLimeSurveyDropouts} 3 | \title{ 4 | processLimeSurveyDropouts 5 | } 6 | \description{ 7 | This function makes it easy to parse the dropouts from a LimeSurvey questionnaire. 8 | } 9 | \usage{ 10 | processLimeSurveyDropouts(lastpage, 11 | pagenames = NULL, 12 | relevantPagenames = NULL) 13 | } 14 | %- maybe also 'usage' for other objects documented here. 15 | \arguments{ 16 | \item{lastpage}{ 17 | A vector with the 'lastpage' variable as LimeSurvey stores it (an integer denoting the last page a participant visited, in other words, where they dropped out). 18 | } 19 | \item{pagenames}{ 20 | Optional: names for each page. 21 | } 22 | \item{relevantPagenames}{ 23 | Optional: the names of those pages that should be included. 24 | } 25 | } 26 | \details{ 27 | This will be described in more detail in a forthcoming publication. 28 | } 29 | \value{ 30 | A list with information about the dropout, including \code{\link{ggplot}}s. 31 | } 32 | \author{ 33 | Gjalt-Jorn Peters 34 | 35 | Maintainer: Gjalt-Jorn Peters 36 | } 37 | \examples{ 38 | processLimeSurveyDropouts(c(1,2,1,1,2,3,2,2,3,2,1)); 39 | } 40 | \keyword{ utilities } 41 | -------------------------------------------------------------------------------- /man/createSigma.Rd: -------------------------------------------------------------------------------- 1 | \name{createSigma} 2 | \alias{createSigma} 3 | \title{ 4 | createSigma: convenience function for mvrnorm 5 | } 6 | \description{ 7 | This function is made to quickly generate a Sigma matrix of the type required by \code{\link{mvrnorm}}.
By specifying the number of variables, the mean correlation, and how much variation there should be in the correlations, it's easy to quickly generate a correlation matrix. 8 | } 9 | \usage{ 10 | createSigma(nVar, meanR = 0.3, sdR = 0, diagonal = 1) 11 | } 12 | \arguments{ 13 | \item{nVar}{ 14 | The number of variables in the correlation matrix. 15 | } 16 | \item{meanR}{ 17 | The average correlation, provided to \code{\link{rnorm}} together with \code{sdR} to generate the correlations. 18 | } 19 | \item{sdR}{ 20 | The variation in the correlations, provided to \code{\link{rnorm}} together with \code{meanR} to generate the correlations. 21 | } 22 | \item{diagonal}{ 23 | The value on the diagonal of the returned matrix: will normally be 1. 24 | } 25 | } 26 | \value{ 27 | A matrix of nVar x nVar. 28 | } 29 | \author{ 30 | Gjalt-Jorn Peters 31 | 32 | Maintainer: Gjalt-Jorn Peters 33 | } 34 | \seealso{ 35 | \code{\link{mvrnorm}}, \code{\link{rnorm}}, \code{\link{matrix}} 36 | } 37 | \examples{ 38 | createSigma(3, .5, .1); 39 | } 40 | \keyword{ datagen } 41 | -------------------------------------------------------------------------------- /man/multiVarFreq.Rd: -------------------------------------------------------------------------------- 1 | \name{multiVarFreq} 2 | \alias{multiVarFreq} 3 | \title{ 4 | Generate a table collapsing frequencies of multiple variables 5 | } 6 | \description{ 7 | This function can be used to efficiently combine the frequencies of variables with the same possible values. The frequencies are collapsed into a table with the variable names as row names and the possible values as column (variable) names. 8 | } 9 | \usage{ 10 | multiVarFreq(data, 11 | items = NULL, 12 | labels = NULL, 13 | sortByMean = TRUE) 14 | } 15 | \arguments{ 16 | \item{data}{ 17 | The dataframe containing the variables. 18 | } 19 | \item{items}{ 20 | The variable names. 21 | } 22 | \item{labels}{ 23 | Labels can be provided which will be set as row names when provided. 24 | } 25 | \item{sortByMean}{ 26 | Whether to sort the rows by mean value for each variable (only sensible if the possible values are numeric). 27 | } 28 | } 29 | \value{ 30 | The resulting dataframe, but with class 'multiVarFreq' prepended to allow pretty printing. 31 | } 32 | \author{ 33 | Gjalt-Jorn Peters 34 | 35 | Maintainer: Gjalt-Jorn Peters 36 | } 37 | \seealso{ 38 | \code{\link{table}}, \code{\link{freq}} 39 | } 40 | \examples{ 41 | multiVarFreq(mtcars, c('gear', 'carb')); 42 | } 43 | \keyword{ utilities } 44 | -------------------------------------------------------------------------------- /man/oddsratio.Rd: -------------------------------------------------------------------------------- 1 | \name{oddsratio} 2 | \alias{oddsratio} 3 | \title{ 4 | oddsratio 5 | } 6 | \description{ 7 | The oddsratio function simply computes a point estimate and confidence interval for an odds ratio. 8 | } 9 | \usage{ 10 | oddsratio(x, y = NULL, conf.level = .95, digits=2) 11 | } 12 | \arguments{ 13 | \item{x}{ 14 | x can be either a table (then y can be NULL) or a factor. 15 | } 16 | \item{y}{ 17 | If x is a factor, y also has to be a factor; x and y are then used to create the crosstable. 18 | } 19 | \item{conf.level}{ 20 | The confidence level of the confidence interval. 21 | } 22 | \item{digits}{ 23 | Number of digits to round output to 24 | } 25 | } 26 | \value{ 27 | The oddsratio function returns an object with the input and output. 
28 | \item{input}{List with input arguments} 29 | \item{or}{Point estimate for odds ratio} 30 | \item{or.ci}{Confidence interval for odds ratio} 31 | } 32 | \examples{ 33 | ### Generate two factor vectors 34 | treatment <- factor(c(rep(0, 33), rep(1, 45), rep(0, 63), rep(1, 21)), 35 | levels=c(0,1), labels=c("no", "yes")); 36 | survival <- factor(c(rep(0, 78), rep(1, 84)), 37 | levels=c(0, 1), labels=c("no", "yes")); 38 | 39 | ### Compute and display odds ratio 40 | oddsratio(treatment, survival); 41 | 42 | ### Or present a table 43 | oddsratio(table(treatment, survival)); 44 | } 45 | \keyword{ utilities } -------------------------------------------------------------------------------- /R/convertToNumeric.R: -------------------------------------------------------------------------------- 1 | ### Convert a vector to numeric values, trying to be smart about it. 2 | convertToNumeric <- function(vector, byFactorLabel = FALSE) { 3 | ### Check whether the vector is datetime 4 | if (sum(sapply(class(vector), grepl, pattern='POSIX')) > 0) { 5 | return(vector); 6 | } 7 | if (!(is.factor(vector) | is.numeric(vector) | 8 | is.character(vector) | is.logical(vector))) { 9 | stop("Argument 'vector' must be a vector! Current class = '", 10 | class(vector), "'. To mass convert e.g. a dataframe, ", 11 | "use massConvertToNumeric."); 12 | } 13 | if(is.factor(vector) && byFactorLabel) { 14 | ### Decimal symbol might be a comma instead of a period: convert 15 | ### factor to character vector and replace commas with periods 16 | vector <- as.numeric(gsub(as.character(vector), pattern=",", 17 | replacement=".", 18 | fixed=TRUE)); 19 | return(vector); 20 | } 21 | else if (is.character(vector)) { 22 | return(suppressWarnings(as.numeric(gsub(as.character(vector), 23 | pattern=",", replacement=".")))); 24 | } 25 | else { 26 | ### Thus, for numeric vectors; factors to be converted by index of the levels 27 | ### instead of by their labels; and logical vectors. 28 | return(as.numeric(vector)); 29 | } 30 | } 31 | -------------------------------------------------------------------------------- /man/genlogFunction.Rd: -------------------------------------------------------------------------------- 1 | \name{genlogFunction} 2 | \alias{genlogFunction} 3 | \title{ 4 | Generalized Logistic Function 5 | } 6 | \description{ 7 | This is the core function of the generalized logistic analysis used in \code{\link{genlog}}. 8 | } 9 | \usage{ 10 | genlogFunction(x, x0, Ab, At, B, v) 11 | } 12 | \arguments{ 13 | \item{x}{ 14 | A numeric vector with measurement moments or indices of measurement moments. 15 | } 16 | \item{x0}{ 17 | A single numeric value specifying at which moment the curve is at its midpoint (when \code{v} = 1). 18 | } 19 | \item{Ab, At}{ 20 | Respectively the lowest and highest possible values of the dependent variable. 21 | } 22 | \item{B}{ 23 | The growth rate (curve steepness). 24 | } 25 | \item{v}{ 26 | The parameter describing the asymmetry of the curve, i.e. near which asymptote maximum growth occurs (when \code{v} = 1, the curve is symmetrical around \code{x0}). 27 | } 28 | } 29 | \details{ 30 | For details, see Verboon & Peters (2017). 31 | } 32 | \references{ 33 | Verboon, P. & Peters, G.-J. Y. (2017) Applying the generalised logistic model in SCD to deal with ceiling effects.
\emph{PsyArXiv} http://INSERTLINK 34 | } 35 | \author{ 36 | Peter Verboon (Open University of the Netherlands) 37 | 38 | Maintainer: Gjalt-Jorn Peters 39 | } 40 | \seealso{ 41 | \code{\link{genlog}} 42 | } 43 | \examples{ 44 | time <- 1:20; 45 | yVar <- genlogFunction(1:20, 10, 1, 7, 1, 1); 46 | plot(time, yVar, type='l', xlab='time', ylab='y'); 47 | } 48 | \keyword{ utilities } 49 | -------------------------------------------------------------------------------- /man/regrInfluential.Rd: -------------------------------------------------------------------------------- 1 | \name{regrInfluential} 2 | \alias{regrInfluential} 3 | \title{ 4 | Detecting influential cases in regression analyses 5 | } 6 | \description{ 7 | This function combines a number of criteria for determining whether a datapoint is an influential case in a regression analysis. It then sums the criteria to compute an index of influentiality. A list of cases with an index of influentiality of 1 or more is then displayed, after which the regression analysis is repeated without those influential cases. A scattermatrix is also displayed, showing the density curves of each variable and, in the scattermatrix, points that are colored depending on how influential each case is. 8 | } 9 | \usage{ 10 | regrInfluential(formula, data) 11 | } 12 | %- maybe also 'usage' for other objects documented here. 13 | \arguments{ 14 | \item{formula}{ 15 | The formula of the regression analysis. 16 | } 17 | \item{data}{ 18 | The data to use for the analysis. 19 | } 20 | } 21 | \value{ 22 | A \code{regrInfluential} object, which, if printed, shows the influential cases, the regression analyses repeated without those cases, and the scatter matrix. 23 | } 24 | \author{ 25 | Gjalt-Jorn Peters & Marwin Snippe 26 | 27 | Maintainer: Gjalt-Jorn Peters 28 | } 29 | \examples{ 30 | regrInfluential(mpg ~ hp, mtcars); 31 | } 32 | \keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS") 33 | \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line 34 | -------------------------------------------------------------------------------- /R/genlogFunction.R: -------------------------------------------------------------------------------- 1 | #### Definition Generalized Logistic function (NB "B" is in exp()) with scaling factor 2 | 3 | 4 | #' Generalized Logistic Function 5 | #' 6 | #' This is the core function of the generalized logistic analysis used in 7 | #' \code{\link{genlog}}. 8 | #' 9 | #' For details, see Verboon & Peters (2017). 10 | #' 11 | #' @param x A numeric vector with measurement moments or indices of measurement 12 | #' moments. 13 | #' @param x0 A single numeric value specifying at which moment the curve is at 14 | #' its midpoint (when \code{v} = 1). 15 | #' @param Ab,At Respectively the lowest and highest possible values of the 16 | #' dependent variable. 17 | #' @param B The growth rate (curve steepness). 18 | #' @param v The parameter describing the asymmetry of the curve, i.e. near which asymptote maximum growth occurs (when \code{v} = 1, the curve is symmetrical around \code{x0}). 19 | #' @author Peter Verboon (Open University of the Netherlands) 20 | #' 21 | #' Maintainer: Gjalt-Jorn Peters 22 | #' @seealso \code{\link{genlog}} 23 | #' @references Verboon, P. & Peters, G.-J. Y. (2017) Applying the generalised 24 | #' logistic model in SCD to deal with ceiling effects. \emph{PsyArXiv}
\emph{PsyArXiv} 25 | #' http://INSERTLINK 26 | #' @keywords utilities 27 | #' @examples 28 | #' 29 | #' time <- 1:20; 30 | #' yVar <- genlogFunction(1:20, 10, 1, 7, 1, 1); 31 | #' plot(time, yVar, type='l', xlab='time', ylab='y'); 32 | #' 33 | #' @export genlogFunction 34 | genlogFunction <- function(x, x0, Ab, At, B, v) { 35 | return(Ab + ((At - Ab)/ (1 + exp(-B*(x-x0)))**(1/v))); 36 | } 37 | -------------------------------------------------------------------------------- /man/reliability.Rd: -------------------------------------------------------------------------------- 1 | \name{reliability} 2 | \alias{reliability} 3 | \title{ 4 | Reliability function similar to the SPSS RELIABILITY command 5 | } 6 | \description{ 7 | This function was developed to offer a function that roughly works similar to the SPSS RELIABILITY command. 8 | } 9 | \usage{ 10 | reliability(data, 11 | items = NULL, 12 | itemDiagnostics = FALSE, 13 | digits = 2) 14 | } 15 | \arguments{ 16 | \item{data}{ 17 | The dataframe containing the variables (items, questions) of interest. 18 | } 19 | \item{items}{ 20 | Optionally, the variables (items, questions) of interest. If omitted, all variables (items, questions) in the dataframe will be used. 21 | } 22 | \item{itemDiagnostics}{ 23 | Whether to also display the item diagnostics (specifically, the corrected item-total correlation, mean and variance excluding each item, and the reliability coefficients excluding each item). 24 | } 25 | \item{digits}{ 26 | The number of digits to use when displaying the results. 27 | } 28 | } 29 | \author{ 30 | Gjalt-Jorn Peters 31 | 32 | Maintainer: Gjalt-Jorn Peters 33 | } 34 | \seealso{ 35 | \code{\link{scaleStructure}}, the excellent \code{\link{psych}} package 36 | } 37 | \examples{ 38 | \dontrun{ 39 | ## (Not run to test because it takes a long time.) 40 | 41 | data(testRetestSimData); 42 | reliability(testRetestSimData[, 2:11], itemDiagnostics = TRUE); 43 | } 44 | } 45 | \keyword{ univar } 46 | -------------------------------------------------------------------------------- /R/prob.randomizationSuccess.R: -------------------------------------------------------------------------------- 1 | prob.randomizationSuccess <- function(n = 1000, 2 | dNonequivalence = .2, 3 | nNuisanceVars = 100) { 4 | 5 | res <- sapply(dNonequivalence, 6 | function(dNonequival) { 7 | return(sapply(n, function(nSize, dNonequiv = dNonequival) { 8 | return(sapply(nNuisanceVars, function(nNuisance, sampleSize = nSize, dNoneq = dNonequiv) { 9 | return(pdMild(dNoneq, sampleSize)^nNuisance); 10 | })); 11 | })); 12 | }); 13 | 14 | res <- array(unclass(res), dim = c(length(nNuisanceVars), 15 | length(n), 16 | length(dNonequivalence)), 17 | dimnames=list(paste("Nuisance var:", nNuisanceVars), 18 | paste("N:", n), 19 | paste('Nonequival. 
at: d=', dNonequivalence))); 20 | 21 | if (sum(dim(res) == 1) > 1) { 22 | return(as.vector(res)); 23 | } else if(sum(dim(res) == 1) == 1) { 24 | 25 | dims <- dim(res); 26 | dimNms <- dimnames(res); 27 | 28 | dimNms <- dimNms[dims > 1]; 29 | dims <- dims[dims > 1]; 30 | 31 | res <- matrix(res, ncol=dims[1], 32 | dimnames=dimNms); 33 | 34 | return(res); 35 | 36 | } else { 37 | return(res); 38 | } 39 | 40 | 41 | } 42 | -------------------------------------------------------------------------------- /R/detStructAddVarLabels.R: -------------------------------------------------------------------------------- 1 | detStructAddVarLabels <- function(determinantStructure, 2 | varLabelDf, 3 | varNameCol = 'varNames.cln', 4 | leftAnchorCol = 'leftAnchors', 5 | rightAnchorCol = 'rightAnchors', 6 | subQuestionCol = 'subQuestions', 7 | questionTextCol = 'questionText') { 8 | 9 | determinantStructure$Do(function(currentNode) { 10 | currentNode$leftAnchors <- varLabelDf[varLabelDf[, varNameCol] %in% 11 | unlist(currentNode$varNames), 12 | leftAnchorCol]; 13 | currentNode$rightAnchors <- varLabelDf[varLabelDf[, varNameCol] %in% 14 | unlist(currentNode$varNames), 15 | rightAnchorCol]; 16 | currentNode$subQuestions <- varLabelDf[varLabelDf[, varNameCol] %in% 17 | unlist(currentNode$varNames), 18 | subQuestionCol]; 19 | currentNode$questionTexts <- varLabelDf[varLabelDf[, varNameCol] %in% 20 | unlist(currentNode$varNames), 21 | questionTextCol]; 22 | }, traversal = 'level', filterFun = function(x) return(!is.null(x$varNames))); 23 | 24 | } 25 | -------------------------------------------------------------------------------- /man/formatCI.Rd: -------------------------------------------------------------------------------- 1 | \name{formatCI} 2 | \alias{formatCI} 3 | \title{ 4 | Pretty formatting of confidence intervals 5 | } 6 | \description{ 7 | Pretty much does what the title says. 8 | } 9 | \usage{ 10 | formatCI(ci, sep = "; ", 11 | prefix = "[", suffix = "]", 12 | digits = 2, noZero = FALSE) 13 | } 14 | \arguments{ 15 | \item{ci}{ 16 | A confidence interval (a vector of 2 elements; longer vectors work, but I guess that wouldn't make sense). 17 | } 18 | \item{sep}{ 19 | The separator of the values, usually "; " or ", ". 20 | } 21 | \item{prefix}{ 22 | The prefix, usually a type of opening parenthesis/bracket. 23 | } 24 | \item{suffix}{ 25 | The suffix, usually a type of closing parenthesis/bracket. 26 | } 27 | \item{digits}{ 28 | The number of digits to which to round the values. 29 | } 30 | \item{noZero}{ 31 | Whether to strip the leading zero (before the decimal point), as is typically done when following APA style and displaying correlations, \emph{p} values, and other numbers that cannot reach 1 or more. 32 | } 33 | } 34 | \value{ 35 | A character vector of one element. 36 | } 37 | \author{ 38 | Gjalt-Jorn Peters 39 | 40 | Maintainer: Gjalt-Jorn Peters 41 | } 42 | \seealso{ 43 | \code{\link{noZero}}, \code{\link{formatR}}, \code{\link{formatPvalue}} 44 | } 45 | \examples{ 46 | ### With leading zero ... 47 | formatCI(c(0.55, 0.021)); 48 | 49 | ### ... 
and without 50 | formatCI(c(0.55, 0.021), noZero=TRUE); 51 | } 52 | \keyword{ utilities } 53 | -------------------------------------------------------------------------------- /R/RsqDist.R: -------------------------------------------------------------------------------- 1 | ### From http://stats.stackexchange.com/questions/130069/what-is-the-distribution-of-r2-in-linear-regression-under-the-null-hypothesis 2 | 3 | dRsq <- function(x, nPredictors, sampleSize, populationRsq = 0) { 4 | if (populationRsq != 0) { 5 | cat0("Noncentrality parameters not implemented yet, sorry!\n"); 6 | } 7 | ### Return density for given R squared 8 | return(dbeta(x, (nPredictors-1)/2, (sampleSize - nPredictors) / 2)); 9 | } 10 | 11 | pRsq <- function(q, nPredictors, sampleSize, populationRsq = 0, lower.tail=TRUE) { 12 | if (populationRsq != 0) { 13 | cat0("Noncentrality parameters not implemented yet, sorry!\n"); 14 | } 15 | ### Return p-value for given R squared 16 | pValue <- pbeta(q, (nPredictors-1)/2, (sampleSize - nPredictors) / 2); 17 | return(ifelse(lower.tail, pValue, 1-pValue)); 18 | } 19 | 20 | qRsq <- function(p, nPredictors, sampleSize, populationRsq = 0, lower.tail=TRUE) { 21 | if (populationRsq != 0) { 22 | cat0("Noncentrality parameters not implemented yet, sorry!\n"); 23 | } 24 | p <- ifelse(lower.tail, p, 1-p); 25 | ### Return R squared for given p-value 26 | return(qbeta(1-p, (nPredictors-1)/2, (sampleSize - nPredictors) / 2)); 27 | } 28 | 29 | rRsq <- function(n, nPredictors, sampleSize, populationRsq = 0) { 30 | if (populationRsq != 0) { 31 | cat0("Noncentrality parameters not implemented yet, sorry!\n"); 32 | } 33 | ### Return random R squared value(s) 34 | return(rbeta(n, (nPredictors-1)/2, (sampleSize - nPredictors) / 2)); 35 | } 36 | -------------------------------------------------------------------------------- /R/fullFact.R: -------------------------------------------------------------------------------- 1 | #' fullFact 2 | #' 3 | #' This function provides a userfriendly interface to a number of advanced 4 | #' factor analysis functions in the \code{\link{psych}} package. 5 | #' 6 | #' 7 | #' @param dat Datafile to analyse; if NULL, a pop-up is provided to select a 8 | #' file. 9 | #' @param items Which variables (items) to factor-analyse. If NULL, all are 10 | #' selected. 11 | #' @param rotate Which rotation to use (see \code{\link{psych}} package). 12 | #' @return The outcomes, which are printed to the screen unless assigned. 13 | #' @author Gjalt-Jorn Peters 14 | #' 15 | #' Maintainer: Gjalt-Jorn Peters 16 | #' @seealso \code{\link{fa.parallel}}, \code{\link{vss}} 17 | #' @keywords univariate 18 | #' @examples 19 | #' 20 | #' \dontrun{ 21 | #' ### Not run to save processing during package testing 22 | #' fullFact(attitude); 23 | #' } 24 | #' 25 | #' @export fullFact 26 | fullFact <- function(dat = NULL, items=NULL, rotate='oblimin') { 27 | 28 | res <- list(input = as.list(environment()), 29 | intermediate = list(), 30 | output = list()); 31 | 32 | if (is.null(dat)) { 33 | dat <- getData(); 34 | } 35 | 36 | if (is.null(items)) { 37 | items <- names(dat); 38 | } 39 | 40 | res$output$parallel <- fa.parallel(dat[, items]); 41 | res$output$vss <- vss(dat[, items], rotate=rotate); 42 | 43 | class(res) <- 'fullFact'; 44 | 45 | return(res); 46 | 47 | } 48 | 49 | print.fullFact <- function(x, ...) 
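### S3 print method for 'fullFact' objects: prints the stored psych output (the fa.parallel and vss results).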
{ 50 | print(x$output); 51 | } 52 | 53 | -------------------------------------------------------------------------------- /R/ggEasyRidge.R: -------------------------------------------------------------------------------- 1 | ggEasyRidge <- function(data, items = NULL, 2 | labels = NULL, sortByMean = TRUE, 3 | xlab = NULL, ylab = NULL) { 4 | 5 | if (is.null(items)) { 6 | items <- names(data); 7 | } 8 | 9 | if (!all(items %in% names(data))) { 10 | stop("You specified items that do not exist in the data you provided (specifically, ", 11 | vecTxtQ(items[!items %in% names(data)]), ")."); 12 | } 13 | 14 | if (sortByMean && length(items) > 1) { 15 | tmpVarOrder <- order(colMeans(data[, items], 16 | na.rm=TRUE), 17 | decreasing=TRUE); 18 | } else { 19 | tmpVarOrder <- 1:length(items); 20 | } 21 | 22 | if (is.null(labels)) { 23 | labels <- items; 24 | } 25 | 26 | tmpDf <- data.frame(var = factor(rep(unlist(items), 27 | each=nrow(data)), 28 | levels=items[tmpVarOrder], 29 | labels=labels[tmpVarOrder], 30 | ordered=TRUE), 31 | val = unlist(data[, items])); 32 | 33 | ### Actual plot 34 | res <- 35 | ggplot(data = tmpDf, 36 | mapping = aes_string(x='val', 37 | y='var')) + 38 | geom_density_ridges(na.rm=TRUE, 39 | alpha=.25) + 40 | theme_minimal() + 41 | labs(x=xlab, 42 | y=ylab) + 43 | theme(axis.ticks.x = element_line()); 44 | 45 | return(res); 46 | } 47 | -------------------------------------------------------------------------------- /R/meanConfInt.R: -------------------------------------------------------------------------------- 1 | ### This function generates a confidence level for a single mean 2 | meanConfInt <- function(vector=NULL, mean=NULL, sd=NULL, n=NULL, se=NULL, conf.level=.95) { 3 | if (is.null(mean) & is.null(sd) & is.null(n) & is.null(se)) { 4 | if (is.null(vector)) { 5 | stop("Please specify either a vector with datapoints, or a mean and then also either sd and n or se!"); 6 | } 7 | mean <- mean(vector); 8 | sd <- sd(vector); 9 | n <- length(vector); 10 | se <- sd/sqrt(n); 11 | } 12 | else if (!is.null(mean) & !is.null(sd) & !is.null(n)) { 13 | se <- sd/sqrt(n); 14 | } 15 | else if (is.null(mean) | is.null(se)) { 16 | stop("Please specify either a vector with datapoints, or a mean and then also either sd and n or se!"); 17 | } 18 | 19 | res <- list(); 20 | res$input <- list(vector=vector, mean=mean, sd=sd, n=n, se=se, conf.level=conf.level); 21 | res$intermediate <- list(alpha = 1-conf.level); 22 | res$intermediate$t.bound.lo <- qt(res$intermediate$alpha/2, df=n-1); 23 | res$intermediate$t.bound.hi <- qt(1-res$intermediate$alpha/2, df=n-1); 24 | ci.lo <- mean + res$intermediate$t.bound.lo * se; 25 | ci.hi <- mean + res$intermediate$t.bound.hi * se; 26 | res$output <- list(ci = matrix(c(ci.lo, ci.hi), ncol=2)); 27 | colnames(res$output$ci) <- c('ci.lo', 'ci.hi'); 28 | rownames(res$output$ci) <- sprintf("(mean=%.2f)", mean); 29 | class(res) <- 'meanConfInt'; 30 | return(res); 31 | } 32 | 33 | print.meanConfInt <- function(x, digits=2, ...) 
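### S3 print method for 'meanConfInt' objects: prints the confidence interval matrix, rounded to 'digits' decimal places. Illustrative calls to meanConfInt itself (a sketch): meanConfInt(mtcars$mpg); or, without raw data, meanConfInt(mean=5, sd=2, n=30);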
{ 34 | print(round(x$output$ci, digits=digits), ...); 35 | } 36 | -------------------------------------------------------------------------------- /R/validSums.R: -------------------------------------------------------------------------------- 1 | validSums <- function(..., 2 | requiredValidValues = 0, 3 | returnIfInvalid = NA, 4 | silent = FALSE) { 5 | dat <- list(...); 6 | if ((length(dat) == 1) && is.data.frame(dat[[1]])) { 7 | dat <- dat[[1]]; 8 | } else if (length(unique(lapply(dat, length)))==1) { 9 | dat <- as.data.frame(dat); 10 | } else { 11 | stop("The vectors you provided do not have equal lengths! Either provide a dataframe or vectors of the same length."); 12 | } 13 | if (requiredValidValues == "all") { 14 | requiredValidValues <- ncol(dat); 15 | } else if (!is.numeric(requiredValidValues)) { 16 | stop("Argument 'requiredValidValues' must be numeric or 'all', ", 17 | "but it is not 'all' and has class ", 18 | class(requiredValidValues), "."); 19 | } else if (requiredValidValues < 1) { 20 | requiredValidValuesPercentages <- requiredValidValues; 21 | requiredValidValues <- ceiling(requiredValidValues * ncol(dat)); 22 | if (!silent) { 23 | cat0("Argument 'requiredValidValues' was set to a proportion (", 24 | requiredValidValuesPercentages, "), so only computing a mean for cases ", 25 | "where that proportion of variables (i.e. ", 26 | 100 * requiredValidValuesPercentages, 27 | "%, or ", requiredValidValues, " variables) have valid values.\n"); 28 | } 29 | } 30 | nrOfValidValues <- rowSums(!is.na(dat)) >= requiredValidValues; 31 | return(ifelse(nrOfValidValues, rowSums(dat, na.rm=TRUE), returnIfInvalid)); 32 | } 33 | -------------------------------------------------------------------------------- /R/validMeans.R: -------------------------------------------------------------------------------- 1 | validMeans <- function(..., 2 | requiredValidValues = 0, 3 | returnIfInvalid = NA, 4 | silent = FALSE) { 5 | dat <- list(...); 6 | if ((length(dat) == 1) && is.data.frame(dat[[1]])) { 7 | dat <- dat[[1]]; 8 | } else if (length(unique(lapply(dat, length)))==1) { 9 | dat <- as.data.frame(dat); 10 | } else { 11 | stop("The vectors you provided do not have equal lengths! Either provide a dataframe or vectors of the same length."); 12 | } 13 | if (requiredValidValues == "all") { 14 | requiredValidValues <- ncol(dat); 15 | } else if (!is.numeric(requiredValidValues)) { 16 | stop("Argument 'requiredValidValues' must be numeric or 'all', ", 17 | "but it is not 'all' and has class ", 18 | class(requiredValidValues), "."); 19 | } else if (requiredValidValues < 1) { 20 | requiredValidValuesPercentages <- requiredValidValues; 21 | requiredValidValues <- ceiling(requiredValidValues * ncol(dat)); 22 | if (!silent) { 23 | cat0("Argument 'requiredValidValues' was set to a proportion (", 24 | requiredValidValuesPercentages, "), so only computing a mean for cases ", 25 | "where that proportion of variables (i.e. ", 26 | 100 * requiredValidValuesPercentages, 27 | "%, or ", requiredValidValues, " variables) have valid values.\n"); 28 | } 29 | } 30 | nrOfValidValues <- rowSums(!is.na(dat)) >= requiredValidValues; 31 | return(ifelse(nrOfValidValues, rowMeans(dat, na.rm=TRUE), returnIfInvalid)); 32 | } 33 | -------------------------------------------------------------------------------- /R/formatCI.R: -------------------------------------------------------------------------------- 1 | #' Pretty formatting of confidence intervals 2 | #' 3 | #' Pretty much does what the title says. 
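#' For instance, with the default settings \code{formatCI(c(0.55, 0.021))} yields \code{"[0.55; 0.02]"}.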
4 | #' 5 | #' 6 | #' @param ci A confidence interval (a vector of 2 elements; longer vectors 7 | #' work, but I guess that wouldn't make sense). 8 | #' @param sep The separator of the values, usually "; " or ", ". 9 | #' @param prefix The prefix, usually a type of opening parenthesis/bracket. 10 | #' @param suffix The suffix, usually a type of closing parenthesis/bracket. 11 | #' @param digits The number of digits to which to round the values. 12 | #' @param noZero Whether to strip the leading zero (before the decimal point), 13 | #' as is typically done when following APA style and displaying correlations, 14 | #' \emph{p} values, and other numbers that cannot reach 1 or more. 15 | #' @return A character vector of one element. 16 | #' @author Gjalt-Jorn Peters 17 | #' 18 | #' Maintainer: Gjalt-Jorn Peters 19 | #' @seealso \code{\link{noZero}}, \code{\link{formatR}}, 20 | #' \code{\link{formatPvalue}} 21 | #' @keywords utilities 22 | #' @examples 23 | #' 24 | #' ### With leading zero ... 25 | #' formatCI(c(0.55, 0.021)); 26 | #' 27 | #' ### ... and without 28 | #' formatCI(c(0.55, 0.021), noZero=TRUE); 29 | #' 30 | #' @export formatCI 31 | formatCI <- function(ci, sep='; ', prefix='[', suffix=']', digits=2, noZero=FALSE) { 32 | if (noZero) { 33 | return(paste0(prefix, paste0(noZero(round(ci, digits)), collapse=sep), suffix)); 34 | } else { 35 | return(paste0(prefix, paste0(round(ci, digits), collapse=sep), suffix)); 36 | } 37 | } 38 | -------------------------------------------------------------------------------- /man/escapeRegex.Rd: -------------------------------------------------------------------------------- 1 | \name{escapeRegex} 2 | \alias{escapeRegex} 3 | \alias{escapeBS} 4 | \title{ Escapes any characters that would have special meaning in a reqular expression. } 5 | \description{ 6 | Escapes any characters that would have special meaning in a reqular expression. 7 | } 8 | \usage{ 9 | escapeRegex(string) 10 | } 11 | \arguments{ 12 | \item{string}{ string being operated on. } 13 | } 14 | \details{ 15 | \code{escapeRegex} will escape any characters that would have 16 | special meaning in a reqular expression. For any string 17 | \code{grep(regexpEscape(string), string)} will always be true. 18 | } 19 | \value{ 20 | The value of the string with any characters that would have 21 | special meaning in a reqular expression escaped. 22 | } 23 | \note{ 24 | Note that this function was copied literally from the \code{Hmisc} package (to prevent importing the entire package for one line of code). 25 | } 26 | \author{ 27 | Charles Dupont\cr 28 | Department of Biostatistics\cr 29 | Vanderbilt University 30 | 31 | Maintainer: Gjalt-Jorn Peters 32 | } 33 | \seealso{ \code{\link[base]{grep}}, \code{Hmisc}, \url{http://biostat.mc.vanderbilt.edu/wiki/Main/Hmisc}, \url{https://github.com/harrelfe/Hmisc} } 34 | \examples{ 35 | string <- "this\\\\(system) {is} [full]." 
36 | escapeRegex(string) 37 | \dontshow{ 38 | if(!any(grep(escapeRegex(string), string))) { 39 | stop("function escapeRegex failed test") 40 | } 41 | } 42 | } 43 | \keyword{ manip }% at least one, from doc/KEYWORDS 44 | \keyword{ character }% __ONLY ONE__ keyword per line 45 | \keyword{ programming } 46 | -------------------------------------------------------------------------------- /R/createSigma.R: -------------------------------------------------------------------------------- 1 | #' createSigma: convenience function for mvrnorm 2 | #' 3 | #' This function is made to quickly generate a Sigma matrix of the type 4 | #' required by \code{\link{mvrnorm}}. By specifying the number of variables, 5 | #' the mean correlation, and how much variation there should be in the 6 | #' correlations, it's easy to quickly generate a correlation matrix. 7 | #' 8 | #' 9 | #' @param nVar The number of variables in the correlation matrix. 10 | #' @param meanR The average correlation, provided to \code{\link{rnorm}} 11 | #' together with \code{sdR} to generate the correlations. 12 | #' @param sdR The variation in the correlations, provided to 13 | #' \code{\link{rnorm}} together with \code{meanR} to generate the correlations. 14 | #' @param diagonal The value on the diagonal of the returned matrix: will 15 | #' normally be 1. 16 | #' @return A matrix of nVar x nVar. 17 | #' @author Gjalt-Jorn Peters 18 | #' 19 | #' Maintainer: Gjalt-Jorn Peters 20 | #' @seealso \code{\link{mvrnorm}}, \code{\link{rnorm}}, \code{\link{matrix}} 21 | #' @keywords datagen 22 | #' @examples 23 | #' 24 | #' createSigma(3, .5, .1); 25 | #' 26 | #' @export createSigma 27 | createSigma <- function(nVar, meanR = .3, sdR = 0, diagonal = 1) { 28 | Sigma <- matrix(rnorm(n = nVar^2, 29 | mean = meanR, 30 | sd = sdR), 31 | ncol = nVar); 32 | Sigma[(Sigma < -1) | (Sigma > 1)] <- 1; 33 | Sigma[upper.tri(Sigma)] <- t(Sigma[lower.tri(Sigma)]) 34 | if (!is.null(diagonal)) { 35 | diag(Sigma) <- diagonal; 36 | } 37 | return(Sigma); 38 | } 39 | -------------------------------------------------------------------------------- /R/escapeRegex_(from_Hmisc).R: -------------------------------------------------------------------------------- 1 | ### Taken directly from Hmisc (to avoid importing the package for just this function) 2 | 3 | 4 | #' Escapes any characters that would have special meaning in a reqular 5 | #' expression. 6 | #' 7 | #' Escapes any characters that would have special meaning in a reqular 8 | #' expression. 9 | #' 10 | #' \code{escapeRegex} will escape any characters that would have special 11 | #' meaning in a reqular expression. For any string 12 | #' \code{grep(regexpEscape(string), string)} will always be true. 13 | #' 14 | #' @aliases escapeRegex escapeBS 15 | #' @param string string being operated on. 16 | #' @return The value of the string with any characters that would have special 17 | #' meaning in a reqular expression escaped. 18 | #' @note Note that this function was copied literally from the \code{Hmisc} 19 | #' package (to prevent importing the entire package for one line of code). 20 | #' @author Charles Dupont\cr Department of Biostatistics\cr Vanderbilt 21 | #' University 22 | #' 23 | #' Maintainer: Gjalt-Jorn Peters 24 | #' @seealso \code{\link[base]{grep}}, \code{Hmisc}, 25 | #' \url{http://biostat.mc.vanderbilt.edu/wiki/Main/Hmisc}, 26 | #' \url{https://github.com/harrelfe/Hmisc} 27 | #' @keywords manip character programming 28 | #' @examples 29 | #' 30 | #' string <- "this\\(system) {is} [full]." 
31 | #' escapeRegex(string) 32 | #' \dontshow{ 33 | #' if(!any(grep(escapeRegex(string), string))) { 34 | #' stop("function escapeRegex failed test") 35 | #' } 36 | #' } 37 | #' 38 | #' @export escapeRegex 39 | escapeRegex <- function (string) 40 | { 41 | gsub("([.|()\\^{}+$*?]|\\[|\\])", "\\\\\\1", string) 42 | } 43 | -------------------------------------------------------------------------------- /man/convert.d.to.nnc.Rd: -------------------------------------------------------------------------------- 1 | \name{convert.d.to.nnc} 2 | \alias{convert.d.to.nnc} 3 | \alias{convert.d.to.eer} 4 | \title{ 5 | Helper functions for Numbers Needed for Change 6 | } 7 | \description{ 8 | These two functions are used by \code{\link{nnc}} to compute the 9 | Numbers Needed for Change. 10 | } 11 | \usage{ 12 | convert.d.to.nnc(d, cer, r = 1, 13 | eventDesirable = TRUE, 14 | eventIfHigher = TRUE) 15 | convert.d.to.eer(d, cer, 16 | eventDesirable = TRUE, 17 | eventIfHigher = TRUE) 18 | } 19 | \arguments{ 20 | \item{d}{ 21 | The value of Cohen's \emph{d}. 22 | } 23 | \item{cer}{ 24 | The Control Event Rate. 25 | } 26 | \item{r}{ 27 | The correlation between the determinant and behavior (for mediated 28 | Numbers Needed for Change). 29 | } 30 | \item{eventDesirable}{ 31 | Whether an event is desirable or undesirable. 32 | } 33 | \item{eventIfHigher}{ 34 | Whether scores above or below the threshold are considered 'an event'. 35 | } 36 | } 37 | \details{ 38 | These two functions are used by \code{\link{nnc}} to compute the 39 | Numbers Needed for Change. 40 | } 41 | \value{ 42 | The converted value. 43 | } 44 | \references{ 45 | Gruijters, S. L. K., & Peters, G.-J. Y. (2017). Introducing the Numbers Needed for Change (NNC): A practical measure of effect size for intervention research. 46 | } 47 | \author{ 48 | Gjalt-Jorn Peters & Stefan Gruijters 49 | 50 | Maintainer: Gjalt-Jorn Peters 51 | } 52 | \seealso{ 53 | \code{\link{nnc}} 54 | } 55 | \examples{ 56 | convert.d.to.eer(d=.5, cer=.25); 57 | convert.d.to.nnc(d=.5, cer=.25); 58 | } 59 | \keyword{ utilities } 60 | -------------------------------------------------------------------------------- /man/prevalencePower.Rd: -------------------------------------------------------------------------------- 1 | \name{prevalencePower} 2 | \alias{prevalencePower} 3 | \title{ 4 | Power analysis for establishing a prevalence 5 | } 6 | \description{ 7 | This function can be used to establish how many participants are required to establish a prevalence rate with a given margin of error. 8 | } 9 | \usage{ 10 | prevalencePower(expectedPrevalence, 11 | marginOfError = 0.05, 12 | conf.level = 0.95) 13 | } 14 | \arguments{ 15 | \item{expectedPrevalence}{ 16 | The expected prevalence. 17 | } 18 | \item{marginOfError}{ 19 | The desired precision. 20 | } 21 | \item{conf.level}{ 22 | The confidence of the confidence interval. 23 | } 24 | } 25 | \details{ 26 | Note that when uncertain as to the expected prevalence, it's better to assume a prevalence closer to 50\%. Prevalences closer to 0\% or 100\% are easier to detect and therefore have more power. 27 | } 28 | \value{ 29 | The required number of participants. 
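This is computed as \code{qnorm(1 - (1 - conf.level)/2)^2 * expectedPrevalence * (1 - expectedPrevalence) / marginOfError^2}, i.e. the standard sample size formula for estimating a proportion with a given margin of error.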
30 | } 31 | \author{ 32 | Gjalt-Jorn Peters 33 | 34 | Maintainer: Gjalt-Jorn Peters 35 | } 36 | \seealso{ 37 | \code{\link{convert.percentage.to.se}} 38 | } 39 | \examples{ 40 | ### Required participants for detecting a prevalence of 10\% 41 | ### with a 95\% confidence interval of 10\% wide: 42 | prevalencePower(.1); 43 | 44 | ### Required participants for detecting a prevalence of 10\% 45 | ### with a 95\% confidence interval of 4\% wide: 46 | prevalencePower(.1, .02); 47 | 48 | ### Required participants for detecting a prevalence of 60\% 49 | ### with a 95\% confidence interval of 10\% wide: 50 | prevalencePower(.6); 51 | } 52 | \keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS") 53 | \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line 54 | -------------------------------------------------------------------------------- /man/confIntOmegaSq.Rd: -------------------------------------------------------------------------------- 1 | \name{confIntOmegaSq} 2 | \alias{confIntOmegaSq} 3 | \title{ 4 | Confidence intervals for Omega Squared 5 | } 6 | \description{ 7 | This function used the \link{MBESS} function \code{\link{conf.limits.ncf}} and \code{\link{convert.ncf.to.omegasq}} to compute the point estimate and confidence interval for Omega Squared. 8 | } 9 | \usage{ 10 | confIntOmegaSq(var1, var2, conf.level = 0.95) 11 | } 12 | \arguments{ 13 | \item{var1, var2}{ 14 | The two variables: one should be a factor (or will be made a factor), the other should have at least interval level of measurement. If none of the variables is a factor, the function will look for the variable with the least unique values and change it into a factor. 15 | } 16 | \item{conf.level}{ 17 | Level of confidence for the confidence interval. 18 | } 19 | } 20 | \value{ 21 | 22 | A \code{confIntOmegaSq} object is returned, with as elements: 23 | 24 | \item{input}{The input arguments} 25 | \item{intermediate}{Objects generated while computing the output} 26 | \item{output}{The output of the function, consisting of:} 27 | \item{output$es}{The point estimate} 28 | \item{output$ci}{The confidence interval} 29 | } 30 | \note{ 31 | Formula 16 in Steiger (2004) is used for the conversion in \code{\link{convert.ncf.to.omegasq}}. 32 | } 33 | \references{ 34 | Steiger, J. H. (2004). Beyond the F test: Effect size confidence intervals and tests of close fit in the analysis of variance and contrast analysis. Psychological Methods, 9(2), 164-82. https://doi.org/10.1037/1082-989X.9.2.164 35 | } 36 | \author{ 37 | Gjalt-Jorn Peters 38 | 39 | Maintainer: Gjalt-Jorn Peters 40 | } 41 | \examples{ 42 | 43 | confIntOmegaSq(mtcars$mpg, mtcars$cyl); 44 | 45 | } 46 | \keyword{ bivar } 47 | -------------------------------------------------------------------------------- /man/multiResponse.Rd: -------------------------------------------------------------------------------- 1 | \name{multiResponse} 2 | \alias{multiResponse} 3 | \title{ 4 | Generate a table for multiple response questions 5 | } 6 | \description{ 7 | The \code{multiResponse} function mimics the behavior of the table produced by SPSS for multiple response questions. 8 | } 9 | \usage{ 10 | multiResponse(data, 11 | items = NULL, 12 | regex = NULL, 13 | endorsedOption = 1) 14 | } 15 | \arguments{ 16 | \item{data}{ 17 | Dataframe containing the variables to display. 18 | } 19 | \item{items, regex}{ 20 | Arguments \code{items} and \code{regex} can be used to specify which variables to process. 
\code{items} should contain the variable (column) names (or indices), and \code{regex} should contain a regular expression used to match to the column names of the dataframe. If none is provided, all variables in the dataframe are processed. 21 | } 22 | \item{endorsedOption}{ 23 | Which value represents the endorsed option (note that producing this kind of table requires dichotomous items, where each variable is either endorsed or not endorsed, so this is also a way to treat other variables as dichotomous). 24 | } 25 | } 26 | \value{ 27 | A dataframe with columns \code{Option}, \code{Frequency}, \code{Percentage}, and \code{Percentage of (X) cases}, where X is the number of cases. 28 | } 29 | \references{ 30 | This function is based on the excellent and extensive Stack Exchange answer by Ananda Mahto at https://stackoverflow.com/questions/9265003/analysis-of-multiple-response. 31 | } 32 | \author{ 33 | Ananda Mahto; implemented in this package (and tweaked a bit) by Gjalt-Jorn Peters. 34 | 35 | Maintainer: Gjalt-Jorn Peters 36 | } 37 | \examples{ 38 | multiResponse(mtcars, c('vs', 'am')); 39 | } 40 | \keyword{ utilities } 41 | -------------------------------------------------------------------------------- /R/prevalencePower.R: -------------------------------------------------------------------------------- 1 | #' Power analysis for establishing a prevalence 2 | #' 3 | #' This function can be used to establish how many participants are required to 4 | #' establish a prevalence rate with a given margin of error. 5 | #' 6 | #' Note that when uncertain as to the expected prevalence, it's better to 7 | #' assume a prevalence closer to 50\%. Prevalences closer to 0\% or 100\% are 8 | #' easier to detect and therefore have more power. 9 | #' 10 | #' @param expectedPrevalence The expected prevalence. 11 | #' @param marginOfError The desired precision. 12 | #' @param conf.level The confidence of the confidence interval. 13 | #' @return The required number of participants. 14 | #' @author Gjalt-Jorn Peters 15 | #' 16 | #' Maintainer: Gjalt-Jorn Peters 17 | #' @seealso \code{\link{convert.percentage.to.se}} 18 | #' @keywords ~kwd1 ~kwd2 19 | #' @examples 20 | #' 21 | #' ### Required participants for detecting a prevalence of 10% 22 | #' ### with a 95% confidence interval of 10% wide: 23 | #' prevalencePower(.1); 24 | #' 25 | #' ### Required participants for detecting a prevalence of 10% 26 | #' ### with a 95% confidence interval of 4% wide: 27 | #' prevalencePower(.1, .02); 28 | #' 29 | #' ### Required participants for detecting a prevalence of 60% 30 | #' ### with a 95% confidence interval of 10% wide: 31 | #' prevalencePower(.6); 32 | #' 33 | #' @export prevalencePower 34 | prevalencePower <- function(expectedPrevalence, marginOfError = .05, conf.level = .95) { 35 | ### From http://www.r-tutor.com/elementary-statistics/interval-estimation/sampling-size-population-proportion 36 | ### and http://elearning.winona.edu/projects/N701/Powerpoints/TestingSingleProp.ppt 37 | qnorm(1-((1-conf.level)/2)) ^2 * expectedPrevalence * (1-expectedPrevalence) / marginOfError^2; 38 | } 39 | -------------------------------------------------------------------------------- /man/confIntProp.Rd: -------------------------------------------------------------------------------- 1 | \name{confIntProp} 2 | \alias{confIntProp} 3 | \title{ 4 | Confidence intervals for proportions, vectorized over all arguments 5 | } 6 | \description{ 7 | This function simply computes confidence intervals for proportions.
8 | } 9 | \usage{ 10 | confIntProp(x, n, conf.level = 0.95) 11 | } 12 | \arguments{ 13 | \item{x}{ 14 | The number of 'successes', i.e. the number of events, observations, or cases that one is interested in. 15 | } 16 | \item{n}{ 17 | The total number of cases or observations. 18 | } 19 | \item{conf.level}{ 20 | The confidence level. 21 | } 22 | } 23 | \details{ 24 | This function is the adapted source code of \code{\link{binom.test}}. It uses \code{\link{pbeta}}, with some lines of code taken from the \code{\link{binom.test}} source. Specifically, the count for the low category is specified as first 'shape argument' to \code{\link{pbeta}}, and the total count (either the sum of the count for the low category and the count for the high category, or the total number of cases if \code{compareHiToLo} is \code{FALSE}) minus the count for the low category as the second 'shape argument'. 25 | 26 | 27 | } 28 | \value{ 29 | The confidence interval bounds in a two-dimensional matrix, with the first column containing the lower bound and the second column containing the upper bound. 30 | } 31 | \author{ 32 | Unknown (see \code{\link{binom.test}}; adapted by Gjalt-Jorn Peters) 33 | 34 | Maintainer: Gjalt-Jorn Peters 35 | } 36 | \seealso{ 37 | \code{\link{binom.test}} and \code{\link{ggProportionPlot}}, the function for which this was written. 38 | } 39 | \examples{ 40 | ### Simple case 41 | confIntProp(84, 200); 42 | 43 | ### Using vectors 44 | confIntProp(c(2,3), c(10, 20), conf.level=c(.90, .95, .99)); 45 | } 46 | \keyword{ univar } 47 | \keyword{ htest } 48 | -------------------------------------------------------------------------------- /man/curfnfinder.Rd: -------------------------------------------------------------------------------- 1 | \name{curfnfinder} 2 | \alias{curfnfinder} 3 | \title{ 4 | Function to find the name of the calling function 5 | } 6 | \description{ 7 | This function finds and returns the name of the function calling it. This 8 | can be useful, for example, when generating functions algorithmically. 9 | } 10 | \usage{ 11 | curfnfinder(skipframes = 0, skipnames = "(FUN)|(.+apply)|(replicate)", 12 | retIfNone = "Not in function", retStack = FALSE, 13 | extraPrefPerLevel = "\t") 14 | } 15 | \arguments{ 16 | \item{skipframes}{ 17 | Number of frames to skip; useful when called from an anonymous function. 18 | } 19 | \item{skipnames}{ 20 | A regular expression specifying which substrings to delete. 21 | } 22 | \item{retIfNone}{ 23 | What to return when called from outside a function. 24 | } 25 | \item{retStack}{ 26 | Whether to return the entire stack or just one function. 27 | } 28 | \item{extraPrefPerLevel}{ 29 | Extra prefixes to return for each level of the function. 30 | } 31 | } 32 | \details{ 33 | This function was written by Nick Sabbe for his package \code{addendum}. 34 | He posted it on Stack Exchange at 35 | \url{http://stackoverflow.com/questions/7307987/logging-current-function-name} 36 | and I included it here with his permission. 37 | } 38 | \value{ 39 | The current function.
40 | } 41 | \author{ 42 | Nick Sabbe (Arteveldehogeschool) 43 | 44 | Maintainer: Gjalt-Jorn Peters 45 | } 46 | \examples{ 47 | functionA <- functionB <- function() { 48 | curFn <- curfnfinder(); 49 | if (curFn == 'functionA') { 50 | cat('Doing something\n'); 51 | } else { 52 | cat('Doing something else\n'); 53 | } 54 | cat('Doing something generic.'); 55 | } 56 | functionA(); 57 | functionB(); 58 | } 59 | \keyword{ utilities } 60 | -------------------------------------------------------------------------------- /man/faConfInt.Rd: -------------------------------------------------------------------------------- 1 | \name{faConfInt} 2 | \alias{faConfInt} 3 | \title{ 4 | Extract confidence bounds from psych's factor analysis object 5 | } 6 | \description{ 7 | This function contains some code from a function in \code{\link{psych}} that is not exported, \code{print.psych.fa.ci}, but useful nonetheless. It basically takes the outcomes of a factor analysis and extracts the confidence intervals. 8 | } 9 | \usage{ 10 | faConfInt(fa) 11 | } 12 | \arguments{ 13 | \item{fa}{ 14 | The object produced by the \code{\link{fa}} function from the \code{\link{psych}} package. It is important that the \code{n.iter} argument of \code{\link{fa}} was set to a realistic number, because otherwise, no confidence intervals will be available. 15 | } 16 | } 17 | \details{ 18 | This function extracts confidence interval bounds and combines them with factor loadings using the code from \code{print.psych.fa.ci} in \code{\link{psych}}. 19 | } 20 | \value{ 21 | A list of dataframes, one for each extracted factor, with in each dataframe three variables: 22 | \item{lo}{lower bound of the confidence interval} 23 | \item{est}{point estimate of the factor loading} 24 | \item{hi}{upper bound of the confidence interval} 25 | } 26 | \author{ 27 | William Revelle (extracted by Gjalt-Jorn Peters) 28 | 29 | Maintainer: Gjalt-Jorn Peters 30 | } 31 | \examples{ 32 | \dontrun{ 33 | ### Not run because it takes too long to run to test it, 34 | ### and may produce warnings, both because of the bootstrapping 35 | ### required to generate the confidence intervals in fa 36 | faConfInt(fa(Thurstone.33, 2, n.iter=100, n.obs=100)); 37 | } 38 | } 39 | % Add one or more standard keywords, see file 'KEYWORDS' in the 40 | % R documentation directory. 41 | \keyword{ ~kwd1 }% use one of RShowDoc("KEYWORDS") 42 | \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line 43 | -------------------------------------------------------------------------------- /man/knitFig.Rd: -------------------------------------------------------------------------------- 1 | \name{knitFig} 2 | \alias{knitFig} 3 | \title{ 4 | Easily knit a custom figure fragment 5 | } 6 | \description{ 7 | This function was written to make it easy to knit figures with different, or dynamically generated, widths and heights (and captions) in the same chunk when working with R Markdown. 8 | } 9 | \usage{ 10 | knitFig(plotToDraw, 11 | template = getOption("ufs.knitFig.template", NULL), 12 | figWidth = getOption("ufs.knitFig.figWidth", 16/2.54), 13 | figHeight = getOption("ufs.knitFig.figHeight", 16/2.54), 14 | figCaption = "A plot.", 15 | chunkName = NULL, ...) 16 | } 17 | \arguments{ 18 | \item{plotToDraw}{ 19 | The plot to draw, e.g. a \code{\link{ggplot}} plot. 20 | } 21 | \item{template}{ 22 | A character value with the \code{\link{knit_expand}} template to use. 23 | } 24 | \item{figWidth}{ 25 | The width to set for the figure (in inches).
26 | } 27 | \item{figHeight}{ 28 | The height to set for the figure (in inches). 29 | } 30 | \item{figCaption}{ 31 | The caption to set for the figure. 32 | } 33 | \item{chunkName}{ 34 | Optionally, the name for the chunk. To avoid problems because multiple chunks have the name "\code{unnamed-chunk-1}", if no chunk name is provided, \code{\link{digest}} is used to generate an MD5-hash from \code{\link{Sys.time}}. 35 | } 36 | \item{\dots}{ 37 | Any additional arguments are passed on to \code{\link{knit_expand}}. 38 | } 39 | } 40 | \value{ 41 | This function returns nothing, but uses \code{\link{knit_expand}} and \code{\link{knit}} to \code{\link{cat}} the result. 42 | } 43 | \author{ 44 | Gjalt-Jorn Peters 45 | 46 | Maintainer: Gjalt-Jorn Peters 47 | } 48 | \seealso{ 49 | \code{\link{knit_expand}} and \code{\link{knit}} 50 | } 51 | \examples{ 52 | \dontrun{ 53 | knitFig(ggProportionPlot(mtcars$cyl)) 54 | } 55 | } 56 | \keyword{ utilities } 57 | -------------------------------------------------------------------------------- /man/therapyMonitorData.Rd: -------------------------------------------------------------------------------- 1 | \name{therapyMonitorData} 2 | \alias{therapyMonitorData} 3 | \docType{data} 4 | \title{ 5 | Data originally published with therapyMonitor 6 | } 7 | \description{ 8 | This dataset was originally published along with a Dutch language article that described the \code{\link{therapyMonitor}} function. This version only contains the aggregated scales. 9 | } 10 | \usage{data("therapyMonitorData")} 11 | \format{ 12 | A data frame with 38 observations on the following 12 variables. 13 | \describe{ 14 | \item{\code{time}}{The measurement moment as stored by Google Forms.} 15 | \item{\code{datetime}}{The measurement moment converted to POSIXct, R's time format.} 16 | \item{\code{measurementNumber}}{The rank (number) of each measurement.} 17 | \item{\code{positiveAffect}}{The positive affect scale.} 18 | \item{\code{negativeAffect}}{The negative affect scale.} 19 | \item{\code{selfEsteem}}{A self esteem scale.} 20 | \item{\code{intimacy}}{An intimacy scale.} 21 | \item{\code{erectionMasturbation}}{Erection when masturbating.} 22 | \item{\code{erectionPartnerSex}}{Erection while having sex with partner.} 23 | \item{\code{experienceMasturbation}}{Experience when masturbating.} 24 | \item{\code{experiencePartnerSex}}{Experience while having sex with partner.} 25 | \item{\code{erectionCombined}}{Aggregated scale of both erection experience scales.} 26 | } 27 | } 28 | \details{ 29 | This dataset is an n-of-1 dataset collected during a series of therapy sessions. 30 | } 31 | \source{ 32 | van Lankveld, J., Leusink, P., & Peters, G.-J. Y. (2017). Therapie-monitoring in een blended online en face-to-face behandeling van een jonge man met situatieve erectieproblemen. \emph{Tijdschrift voor Seksuologie, 41}, 15-22. 33 | } 34 | \examples{ 35 | data(therapyMonitorData) 36 | ## maybe str(therapyMonitorData) ; plot(therapyMonitorData) ... 37 | } 38 | \keyword{datasets} 39 | -------------------------------------------------------------------------------- /R/sharedSubString.R: -------------------------------------------------------------------------------- 1 | #' sharedSubString 2 | #' 3 | #' A function to find the longest shared substring in a character vector. 4 | #' 5 | #' 6 | #' @param x The character vector to process. 7 | #' @param y Optionally, two single values can be specified. This is probably 8 | #' not useful to end users, but it's used by the function when it calls itself. 
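#' @details A rough sketch of how the implementation below proceeds (described
#'   in hedged terms; see the code itself for details): for two strings,
#'   characters are stripped from the start of \code{x} until the remainder
#'   occurs in \code{y}, and failing that, from the end of \code{x}; for
#'   vectors with more than two elements, the problem is reduced pairwise,
#'   roughly as \code{sharedSubString(sharedSubString(x[1], x[2]),
#'   sharedSubString(x[-1]))}.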
9 | #' @return A vector of length one with either the longest substring that occurs 10 | #' in all values of the character vector, or NA if no overlap an be found. 11 | #' @author Gjalt-Jorn Peters 12 | #' 13 | #' Maintainer: Gjalt-Jorn Peters 14 | #' @keywords character 15 | #' @examples 16 | #' 17 | #' sharedSubString(c("t0_responseTime", "t1_responseTime", "t2_responseTime")); 18 | #' ### Returns "_responseTime" 19 | #' 20 | #' @export sharedSubString 21 | sharedSubString <- function(x, y=NULL) { 22 | if (!is.null(y)) { 23 | if (length(x) == 1 && length(y) == 1) { 24 | if (is.na(x) || is.na(y)) { 25 | return(NA); 26 | } 27 | startPos <- 1; 28 | while (!grepl(substr(x, startPos, nchar(x)), y)) { 29 | startPos <- startPos + 1; 30 | } 31 | if (startPos < nchar(x)) { 32 | return(substr(x, startPos, nchar(x))); 33 | } else { 34 | endPos <- nchar(x); 35 | while (!grepl(substr(x, 1, endPos), y)) { 36 | endPos <- endPos - 1; 37 | } 38 | if (endPos > 1) { 39 | return(substr(x, 1, endPos)); 40 | } else { 41 | return(NA); 42 | } 43 | } 44 | } else { 45 | stop("When specifying both x and y, each must be just one value."); 46 | } 47 | } else { 48 | if (length(x) == 1) { 49 | return(x); 50 | } else if (length(x) == 2) { 51 | return(sharedSubString(x[1], x[2])); 52 | } else { 53 | return(sharedSubString(sharedSubString(x[1], x[2]), sharedSubString(x[-1]))); 54 | } 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /man/removeExceptionalValues.Rd: -------------------------------------------------------------------------------- 1 | \name{removeExceptionalValues} 2 | \alias{removeExceptionalValues} 3 | %- Also NEED an '\alias' for EACH other topic documented here. 4 | \title{ 5 | removeExceptionalValues 6 | } 7 | \description{ 8 | A function to replace exceptional values with NA. This can be used to quickly 9 | remove impossible values, for example, when participants entered their age as 10 | 344. 11 | } 12 | \usage{ 13 | removeExceptionalValues(dat, items = NULL, exception = 0.005, 14 | silent = FALSE, stringsAsFactors = FALSE) 15 | } 16 | %- maybe also 'usage' for other objects documented here. 17 | \arguments{ 18 | \item{dat}{ 19 | The dataframe containing the items to inspect. 20 | } 21 | \item{items}{ 22 | The items to inspect. 23 | } 24 | \item{exception}{ 25 | How rare a value must be to be considered exceptional (and replaced by NA). 26 | } 27 | \item{silent}{ 28 | Can be used to suppress messages. 29 | } 30 | \item{stringsAsFactors}{ 31 | Whether to convert strings to factors when creating a dataframe from lapply 32 | output. 33 | } 34 | } 35 | \details{ 36 | Note that exceptional values may be errors (e.g. participants accidently 37 | pressed a key twice, or during data entry, something went wrong), but they may 38 | also be indicative of participants who did not seriously participate in the 39 | study. Therefore, it is advised to first use \code{\link{exceptionalScores}} to 40 | look for patterns where participants enter many exceptional scores. 41 | } 42 | \value{ 43 | The dataframe, with exceptional values replaced by NA. 44 | } 45 | \author{ 46 | Gjalt-Jorn Peters 47 | 48 | Maintainer: Gjalt-Jorn Peters 49 | } 50 | \seealso{ 51 | \code{\link{exceptionalScores}} 52 | } 53 | \examples{ 54 | removeExceptionalValues(mtcars, exception=.1); 55 | } 56 | % Add one or more standard keywords, see file 'KEYWORDS' in the 57 | % R documentation directory. 
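\note{
A minimal illustration of the underlying idea (a sketch only; it assumes that
\code{\link{exceptionalScore}} flags values whose relative frequency in the
vector falls below \code{exception} -- see that function for the exact
definition used by the package):
\preformatted{
x <- c(rep(20, 99), 344)
### relative frequencies: 20 -> 0.99, 344 -> 0.01
propTable <- prop.table(table(x))
### with exception = .1, the rare value is replaced by NA:
ifelse(propTable[as.character(x)] < .1, NA, x)
}
}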
58 | \keyword{ utilities } -------------------------------------------------------------------------------- /man/pwr.omegasq.Rd: -------------------------------------------------------------------------------- 1 | \name{pwr.omegasq} 2 | \alias{pwr.omegasq} 3 | \title{ 4 | Power calculations for Omega Squared. 5 | } 6 | \description{ 7 | This function uses \code{\link{pwr.anova.test}} from the \code{\link{pwr}} package in combination with \code{\link{convert.cohensf.to.omegasq}} and \code{\link{convert.omegasq.to.cohensf}} to provide power analyses for Omega Squared. 8 | } 9 | \usage{ 10 | pwr.omegasq(k = NULL, n = NULL, omegasq = NULL, 11 | sig.level = 0.05, power = NULL, digits = 4) 12 | } 13 | %- maybe also 'usage' for other objects documented here. 14 | \arguments{ 15 | \item{k}{ 16 | The number of groups. 17 | } 18 | \item{n}{ 19 | The sample size. 20 | } 21 | \item{omegasq}{ 22 | The Omega Squared value. 23 | } 24 | \item{sig.level}{ 25 | The significance level (alpha). 26 | } 27 | \item{power}{ 28 | The power. 29 | } 30 | \item{digits}{ 31 | The number of digits desired in the output (4, the default, is quite high; but omega squared values tend to be quite low). 32 | } 33 | } 34 | \details{ 35 | This function was written to work similarly to the power functions in the \code{\link{pwr}} package. 36 | } 37 | \value{ 38 | A \code{power.htest.ufs} object that contains a number of input and output values, most notably: 39 | 40 | \item{power}{The (specified or computed) power} 41 | \item{n}{The (specified or computed) sample size in each group} 42 | \item{sig.level}{The (specified or computed) significance level (alpha)} 43 | \item{omegasq}{The (specified or computed) Omega Squared value} 44 | \item{cohensf}{The computed value for the Cohen's \emph{f} effect size measure} 45 | 46 | } 47 | 48 | \author{ 49 | Gjalt-Jorn Peters & Peter Verboon 50 | 51 | Maintainer: Gjalt-Jorn Peters 52 | } 53 | 54 | \seealso{ 55 | \code{\link{pwr.anova.test}}, \code{\link{convert.cohensf.to.omegasq}}, \code{\link{convert.omegasq.to.cohensf}} 56 | } 57 | \examples{ 58 | pwr.omegasq(omegasq=.06, k=3, power=.8) 59 | } 60 | 61 | \keyword{ htest } 62 | -------------------------------------------------------------------------------- /R/detStructAddVarNames.R: -------------------------------------------------------------------------------- 1 | detStructAddVarNames <- function(determinantStructure, 2 | names) { 3 | 4 | ### Get all behaviorRegExes that are set (should only be one) 5 | behaviorRegEx <- determinantStructure$Get('behaviorRegEx', 6 | traversal='level', 7 | filterFun=function(x) return(!is.null(x$behaviorRegEx))); 8 | 9 | ### Remove any duplicates and select the first one in case there are more 10 | behaviorRegEx <- unique(behaviorRegEx)[1]; 11 | 12 | ### Only retain the names matching that behavior regex 13 | names <- grep(behaviorRegEx, names, value=TRUE); 14 | 15 | ### Walk through the determinant structure and select the 16 | ### matching variable names, adding them to the structure 17 | determinantStructure$Do(function(currentNode, allNames = names) { 18 | if (is.list(currentNode$selection)) { 19 | currentNode$varNames <- sapply(currentNode$selection, 20 | function(x) { 21 | res <- sapply(x, 22 | grep, 23 | allNames, 24 | value=TRUE, 25 | simplify=FALSE); 26 | names(res) <- allNames; 27 | return(res); 28 | }, 29 | simplify=FALSE); 30 | names(currentNode$varNames) <- currentNode$selection; 31 | } else { 32 | currentNode$varNames <- sapply(currentNode$selection, 33 | grep, allNames, value=TRUE,
simplify=FALSE); 34 | names(currentNode$varNames) <- currentNode$selection; 35 | } 36 | }, traversal = 'level', filterFun = function(x) return(!is.null(x$selection))); 37 | 38 | } 39 | -------------------------------------------------------------------------------- /man/sort.associationMatrix.Rd: -------------------------------------------------------------------------------- 1 | \name{sort.associationMatrix} 2 | \alias{sort.associationMatrix} 3 | %- Also NEED an '\alias' for EACH other topic documented here. 4 | \title{ 5 | sort.associationMatrix 6 | } 7 | \description{ 8 | This function sorts an \code{\link{associationMatrix}} ascendingly or descendingly by one 9 | of its columns. 10 | } 11 | \usage{ 12 | \method{sort}{associationMatrix}(x, decreasing = TRUE, byColumn = 1, ...) 13 | } 14 | %- maybe also 'usage' for other objects documented here. 15 | \arguments{ 16 | \item{x}{ 17 | The \code{\link{associationMatrix}} object to sort. 18 | } 19 | \item{decreasing}{ 20 | Whether to sort ascendingly (FALSE) or descending (TRUE). 21 | } 22 | \item{byColumn}{ 23 | Which column to sort the matrix by, as an index. 24 | } 25 | \item{\dots}{ 26 | Passed on to \code{\link{sort}}. 27 | } 28 | } 29 | \details{ 30 | Note that if the \code{\link{associationMatrix}} contains values of different effectsizes, 31 | the sorting may be misleading. For example, a value of Cohen's d of .45 is higher 32 | than a value of Pearson's r of .35, and so will end up higher in a 'decreasing' 33 | sort - even though the association represented by an r of .35 is stronger than 34 | that represented by a d of .45. 35 | 36 | Furthermore, only asymmetrical associationMatrices can be sorted; sorting a 37 | symmetrical association matrix would also change the order of the columns, after 38 | all. 39 | } 40 | \value{ 41 | The \code{\link{associationMatrix}}, but sorted. 42 | } 43 | \author{ 44 | Gjalt-Jorn Peters 45 | 46 | Maintainer: Gjalt-Jorn Peters 47 | } 48 | \seealso{ 49 | \code{\link{associationMatrix}} 50 | } 51 | \examples{ 52 | sort(associationMatrix(infert, y=c("parity", "age"), 53 | x=c("induced", "case", "spontaneous"), colNames=TRUE)); 54 | } 55 | % Add one or more standard keywords, see file 'KEYWORDS' in the 56 | % R documentation directory. 57 | \keyword{ ~kwd1 } 58 | \keyword{ ~kwd2 }% __ONLY ONE__ keyword per line 59 | -------------------------------------------------------------------------------- /man/validComputations.Rd: -------------------------------------------------------------------------------- 1 | \name{validComputations} 2 | \alias{validComputations} 3 | \alias{validMeans} 4 | \alias{validSums} 5 | \title{ 6 | Only compute means or sums for cases with enough nonmissings 7 | } 8 | \description{ 9 | These functions have been written as equivalents of SPSS' \code{MEAN.x} and \code{SUM.x} functions, which only compute means and sums if enough cases have valid values. 10 | } 11 | \usage{ 12 | validMeans(..., 13 | requiredValidValues = 0, 14 | returnIfInvalid = NA, 15 | silent = FALSE) 16 | validSums(..., 17 | requiredValidValues = 0, 18 | returnIfInvalid = NA, 19 | silent = FALSE) 20 | } 21 | %- maybe also 'usage' for other objects documented here. 22 | \arguments{ 23 | \item{...}{ 24 | Either a dataframe or vectors for which to compute the mean or sum. 25 | } 26 | \item{requiredValidValues}{ 27 | How many values must be valid (i.e. nonmissing) to compute the mean or sum. If a number lower than 1 is provided, it is interpreted as proportion, and the number of variables is computed. 
For example, if \code{requiredValidValues=.8}, 80\% of the variables must have valid values. If 'all' is specified, all values must be valid (in which case the functions are equal to \code{\link{rowMeans}} and \code{\link{rowSums}}). 28 | } 29 | \item{returnIfInvalid}{ 30 | Wat to return for cases that don't have enough valid values. 31 | } 32 | \item{silent}{ 33 | Whether to show the number of cases that have to be valid if \code{requiredValidValues} is a proportion. 34 | } 35 | } 36 | \value{ 37 | A numeric vector with the resulting means or sums. 38 | } 39 | \author{ 40 | Gjalt-Jorn Peters 41 | 42 | Maintainer: Gjalt-Jorn Peters 43 | } 44 | \seealso{ 45 | \code{\link{rowMeans}}, \code{\link{rowSums}} 46 | } 47 | \examples{ 48 | validMeans(mtcars$cyl, mtcars$disp); 49 | validSums(mtcars$cyl, mtcars$disp, requiredValidValues = .8); 50 | 51 | ### Or specifying a dataframe 52 | validSums(mtcars); 53 | } 54 | \keyword{ manip } 55 | -------------------------------------------------------------------------------- /R/pwr.randomizationSuccess.R: -------------------------------------------------------------------------------- 1 | pwr.randomizationSuccess <- function(dNonequivalence = .2, 2 | pRandomizationSuccess = .95, 3 | nNuisanceVars = 100) { 4 | 5 | res <- sapply(dNonequivalence, 6 | function(dNonequival) { 7 | return(sapply(pRandomizationSuccess, function(rRandSuccess, dNonequiv = dNonequival) { 8 | return(sapply(nNuisanceVars, function(nNuisance, pSuccess = rRandSuccess, dNoneq = dNonequiv) { 9 | n <- 10; 10 | if (pdMild(dNoneq, n)^nNuisance > pSuccess) { 11 | return(n) 12 | } else { 13 | while(pdMild(dNoneq, n)^nNuisance < pSuccess) {n <- n + 100;}; 14 | n <- n - 100; 15 | while(pdMild(dNoneq, n)^nNuisance < pSuccess) {n <- n + 10;}; 16 | n <- n - 10; 17 | while(pdMild(dNoneq, n)^nNuisance < pSuccess) {n <- n + 1;}; 18 | return(n); 19 | 20 | } 21 | })); 22 | })); 23 | }); 24 | 25 | res <- array(unclass(res), dim = c(length(nNuisanceVars), 26 | length(pRandomizationSuccess), 27 | length(dNonequivalence)), 28 | dimnames=list(paste("Nuisance var:", nNuisanceVars), 29 | paste("Equival. prob:", pRandomizationSuccess), 30 | paste('Nonequival. at: d=', dNonequivalence))); 31 | 32 | if (sum(dim(res) == 1) > 1) { 33 | return(as.vector(res)); 34 | } else if(sum(dim(res) == 1) == 1) { 35 | 36 | dims <- dim(res); 37 | dimNms <- dimnames(res); 38 | 39 | dimNms <- dimNms[dims > 1]; 40 | dims <- dims[dims > 1]; 41 | 42 | res <- matrix(res, ncol=dims[1], 43 | dimnames=dimNms); 44 | 45 | return(res); 46 | 47 | } else { 48 | return(res); 49 | } 50 | 51 | } 52 | -------------------------------------------------------------------------------- /R/ggpie.R: -------------------------------------------------------------------------------- 1 | ### Credits: 2 | ### http://mathematicalcoffee.blogspot.nl/2014/06/ggpie-pie-graphs-in-ggplot2.html 3 | 4 | 5 | 6 | #' A ggplot pie chart 7 | #' 8 | #' THis function creates a pie chart. Note that these are generally quite 9 | #' strongly advised against, as people are not good at interpreting relative 10 | #' frequencies on the basis of pie charts. 11 | #' 12 | #' 13 | #' @param vector The vector (best to pass a factor). 14 | #' @param scale_fill The ggplot scale fill function to use for the colors. 15 | #' @return A ggplot pie chart. 16 | #' @note This function is very strongly based on the Mathematical Coffee post 17 | #' at 18 | #' http://mathematicalcoffee.blogspot.com/2014/06/ggpie-pie-graphs-in-ggplot2.html. 
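#'
#' In rough outline (this describes the implementation below): the counts are
#' first drawn as a single stacked bar, along the lines of
#' \code{ggplot(dat, aes(x=factor(1), y=totals, fill=labels)) +}
#' \code{geom_bar(stat='identity')}, which is then wrapped into a circle with
#' \code{coord_polar(theta='y')}; the category labels are placed at the
#' midpoints of the segments by supplying those midpoints as y-axis breaks.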
19 | #' @author Amy Chan; implemented in this package (and tweaked a bit) by 20 | #' Gjalt-Jorn Peters. 21 | #' 22 | #' Maintainer: Gjalt-Jorn Peters 23 | #' @keywords hplot 24 | #' @examples 25 | #' 26 | #' ggPie(mtcars$cyl); 27 | #' 28 | #' @export ggPie 29 | ggPie <- function (vector, scale_fill = scale_fill_viridis(discrete=TRUE)) { 30 | dat <- data.frame(table(vector)); 31 | names(dat) <- c('labels', 'totals'); 32 | totals = 'totals'; 33 | by = 'labels'; 34 | 35 | dat <- dat[dat$totals > 0, ]; 36 | 37 | return(ggplot(dat, aes_string(x=factor(1), y=totals, fill=by)) + 38 | geom_bar(stat='identity', color='black') + 39 | # removes black borders from legend 40 | guides(fill=guide_legend(override.aes=list(color=NA))) + 41 | coord_polar(theta='y') + 42 | scale_y_continuous(breaks=(sum(dat[[totals]]) - (cumsum(dat[[totals]]) - dat[[totals]] / 2)), 43 | labels=dat[[by]]) + 44 | scale_fill + 45 | theme(axis.ticks=element_blank(), 46 | axis.text.y=element_blank(), 47 | axis.text.x=element_text(color='black'), 48 | axis.title=element_blank(), 49 | legend.position="none", 50 | panel.background = element_rect(fill = "white"))); 51 | } 52 | -------------------------------------------------------------------------------- /R/convert.d.to.nnc.R: -------------------------------------------------------------------------------- 1 | #' Helper functions for Numbers Needed for Change 2 | #' 3 | #' These two functions are used by \code{\link{nnc}} to compute the Numbers 4 | #' Needed for Change. 5 | #' 6 | #' These two functions are used by \code{\link{nnc}} to compute the Numbers 7 | #' Needed for Change. 8 | #' 9 | #' @aliases convert.d.to.nnc convert.d.to.eer 10 | #' @param d The value of Cohen's \emph{d}. 11 | #' @param cer The Control Event Rate. 12 | #' @param r The correlation between the determinant and behavior (for mediated 13 | #' Numbers Needed for Change). 14 | #' @param eventDesirable Whether an event is desirable or undesirable. 15 | #' @param eventIfHigher Whether scores above or below the threshold are 16 | #' considered 'an event'. 17 | #' @return The converted value. 18 | #' @author Gjalt-Jorn Peters & Stefan Gruijters 19 | #' 20 | #' Maintainer: Gjalt-Jorn Peters 21 | #' @seealso \code{\link{nnc}} 22 | #' @references Gruijters, S. L. K., & Peters, G.-J. Y. (2017). Introducing the 23 | #' Numbers Needed for Change (NNC): A practical measure of effect size for 24 | #' intervention research. 
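#' @note A rough sketch of the logic (hedged: the experimental event rate is
#'   computed by \code{convert.d.to.eer}, whose exact definition is not shown
#'   here): under a normal model in which an 'event' is a score above some
#'   threshold, a control event rate \code{cer} and a Cohen's \emph{d}
#'   correspond to an experimental event rate of approximately
#'   \code{eer = 1 - pnorm(qnorm(1 - cer) - d)}, after which the Numbers
#'   Needed for Change follow as \code{nnc = 1 / (eer - cer)} for desirable
#'   events and \code{nnc = 1 / (cer - eer)} for undesirable ones.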
25 | #' @keywords utilities 26 | #' @examples 27 | #' 28 | #' convert.d.to.eer(d=.5, cer=.25); 29 | #' convert.d.to.nnc(d=.5, cer=.25); 30 | #' 31 | #' @export convert.d.to.nnc 32 | convert.d.to.nnc <- function(d, cer, r = 1, eventDesirable=TRUE, eventIfHigher=TRUE) { 33 | 34 | ### Based on http://journals.plos.org/plosone/article?id=10.1371/journal.pone.0019070 35 | ### Consistent with http://rpsychologist.com/d3/cohend/ 36 | 37 | d <- convert.r.to.d(convert.d.to.r(d) * r); 38 | 39 | if (is.null(cer)) { 40 | # if (eventDesirable) { 41 | # return(1 / (2 * pnorm(d / sqrt(2)) - 1)); 42 | # } else { 43 | cat0("Not implemented yet!"); 44 | # } 45 | } else { 46 | eer <- convert.d.to.eer(d, cer, eventDesirable=eventDesirable, eventIfHigher=eventIfHigher); 47 | if (eventDesirable) { 48 | nnc <- 1 / (eer - cer); 49 | } else { 50 | nnc <- 1 / (cer - eer); 51 | } 52 | } 53 | attr(nnc, 'eer') <- eer; 54 | return(nnc); 55 | } 56 | -------------------------------------------------------------------------------- /R/faConfInt.R: -------------------------------------------------------------------------------- 1 | #' Extract confidence bounds from psych's factor analysis object 2 | #' 3 | #' This function contains some code from a function in \code{\link{psych}} 4 | #' that's not exported, \code{print.psych.fa.ci}, but is useful nonetheless. It 5 | #' basically takes the outcomes of a factor analysis and extracts the 6 | #' confidence intervals. 7 | #' 8 | #' This function extracts confidence interval bounds and combines them with 9 | #' factor loadings using the code from the \code{print.psych.fa.ci} function in 10 | #' \code{\link{psych}}. 11 | #' 12 | #' @param fa The object produced by the \code{\link{fa}} function from the 13 | #' \code{\link{psych}} package. It is important that the \code{n.iter} argument 14 | #' of \code{\link{fa}} was set to a realistic number, because otherwise, no 15 | #' confidence intervals will be available.
16 | #' @return A list of dataframes, one for each extracted factor, with in each 17 | #' dataframe three variables: \item{lo}{lower bound of the confidence interval} 18 | #' \item{est}{point estimate of the factor loading} \item{hi}{upper bound of 19 | #' the confidence interval} 20 | #' @author William Revelle (extracted by Gjalt-Jorn Peters) 21 | #' 22 | #' Maintainer: Gjalt-Jorn Peters 23 | #' @keywords ~kwd1 ~kwd2 24 | #' @examples 25 | #' 26 | #' \dontrun{ 27 | #' ### Not run because it takes too long to run to test it, 28 | #' ### and may produce warnings, both because of the bootstrapping 29 | #' ### required to generate the confidence intervals in fa 30 | #' faConfInt(fa(Thurstone.33, 2, n.iter=100, n.obs=100)); 31 | #' } 32 | #' 33 | #' @export faConfInt 34 | faConfInt <- function(fa) { 35 | 36 | ### Combine both confidence intervals and factor loadings, using 37 | ### the code from the 'psych:::print.psych.fa.ci' function 38 | lc <- data.frame(unclass(fa$loadings), fa$ci$ci); 39 | ### Create list for CIs per factor 40 | CIs <- list(); 41 | for (i in 1:fa$factors) { 42 | CIs[[i]] <- lc[, c(i + fa$factors, i, i + fa$factors * 2)]; 43 | names(CIs[[i]]) <- c('lo', 'est', 'hi'); 44 | } 45 | 46 | return(CIs); 47 | 48 | } 49 | -------------------------------------------------------------------------------- /man/testRetestSimData.Rd: -------------------------------------------------------------------------------- 1 | \name{testRetestSimData} 2 | \alias{testRetestSimData} 3 | \docType{data} 4 | \title{ 5 | testRetestSimData is a simulated dataframe used to demonstrate the testRetestAlpha coefficient function. 6 | } 7 | \description{ 8 | This dataset contains the true scores of 250 participants on some variable, and 10 items of a scale administered twice (at t0 and at t1). 9 | } 10 | \usage{data(testRetestSimData)} 11 | \format{ 12 | A data frame with 250 observations on the following 21 variables. 13 | \describe{ 14 | \item{\code{trueScore}}{The true scores} 15 | \item{\code{t0_item1}}{Score on item 1 at test} 16 | \item{\code{t0_item2}}{Score on item 2 at test} 17 | \item{\code{t0_item3}}{Score on item 3 at test} 18 | \item{\code{t0_item4}}{Score on item 4 at test} 19 | \item{\code{t0_item5}}{Score on item 5 at test} 20 | \item{\code{t0_item6}}{Score on item 6 at test} 21 | \item{\code{t0_item7}}{Score on item 7 at test} 22 | \item{\code{t0_item8}}{Score on item 8 at test} 23 | \item{\code{t0_item9}}{Score on item 9 at test} 24 | \item{\code{t0_item10}}{Score on item 10 at test} 25 | \item{\code{t1_item1}}{Score on item 1 at retest} 26 | \item{\code{t1_item2}}{Score on item 2 at retest} 27 | \item{\code{t1_item3}}{Score on item 3 at retest} 28 | \item{\code{t1_item4}}{Score on item 4 at retest} 29 | \item{\code{t1_item5}}{Score on item 5 at retest} 30 | \item{\code{t1_item6}}{Score on item 6 at retest} 31 | \item{\code{t1_item7}}{Score on item 7 at retest} 32 | \item{\code{t1_item8}}{Score on item 8 at retest} 33 | \item{\code{t1_item9}}{Score on item 9 at retest} 34 | \item{\code{t1_item10}}{Score on item 10 at retest} 35 | } 36 | } 37 | \details{ 38 | This dataset was generated with the code in the reliabilityTest.r test script. 
39 | } 40 | \author{ 41 | Gjalt-Jorn Peters 42 | 43 | Maintainer: Gjalt-Jorn Peters 44 | } 45 | \examples{ 46 | data(testRetestSimData); 47 | head(testRetestSimData); 48 | hist(testRetestSimData$t0_item1); 49 | cor(testRetestSimData); 50 | } 51 | \keyword{datasets} 52 | -------------------------------------------------------------------------------- /man/ggBoxplot.Rd: -------------------------------------------------------------------------------- 1 | \name{ggBoxplot} 2 | \alias{ggBoxplot} 3 | \title{ 4 | Box plot using ggplot 5 | } 6 | \description{ 7 | This function provides a simple interface to create a \code{\link{ggplot}} box plot, organising different boxplots by levels of a factor is desired, and showing row numbers of outliers. 8 | } 9 | \usage{ 10 | ggBoxplot(dat, y = NULL, x = NULL, 11 | labelOutliers = TRUE, 12 | outlierColor = "red", 13 | theme = theme_bw(), ...) 14 | } 15 | %- maybe also 'usage' for other objects documented here. 16 | \arguments{ 17 | \item{dat}{ 18 | Either a vector of values (to display in the box plot) or a dataframe containing variables to display in the box plot. 19 | } 20 | \item{y}{ 21 | If \code{dat} is a dataframe, this is the name of the variable to make the box plot of. 22 | } 23 | \item{x}{ 24 | If \code{dat} is a dataframe, this is the name of the variable (normally a factor) to place on the X axis. Separate box plots will be generate for each level of this variable. 25 | } 26 | \item{labelOutliers}{ 27 | Whether or not to label outliers. 28 | } 29 | \item{outlierColor}{ 30 | If labeling outliers, this is the color to use. 31 | } 32 | \item{theme}{ 33 | The theme to use for the box plot. 34 | } 35 | \item{\dots}{ 36 | Any additional arguments will be passed to \code{\link{geom_boxplot}}. 37 | } 38 | } 39 | \details{ 40 | This function is based on JasonAizkalns' answer to a question on Stack Exchange (Cross Validated; see \url{http://stackoverflow.com/questions/33524669/labeling-outliers-of-boxplots-in-r}). 41 | } 42 | \value{ 43 | A \code{\link{ggplot}} plot is returned. 44 | } 45 | \author{ 46 | Jason Aizkalns; implemented in this package (and tweaked a bit) by Gjalt-Jorn Peters. 47 | 48 | Maintainer: Gjalt-Jorn Peters 49 | } 50 | \seealso{ 51 | \code{\link{geom_boxplot}} 52 | } 53 | \examples{ 54 | ### A box plot for miles per gallon in the mtcars dataset: 55 | ggBoxplot(mtcars$mpg); 56 | 57 | ### And separate for each level of 'cyl' (number of cylinder): 58 | ggBoxplot(mtcars, y='mpg', x='cyl'); 59 | } 60 | \keyword{ hplot } 61 | -------------------------------------------------------------------------------- /man/scatterMatrix.Rd: -------------------------------------------------------------------------------- 1 | \name{scatterMatrix} 2 | \alias{scatterMatrix} 3 | \title{ 4 | scatterMatrix 5 | } 6 | \description{ 7 | scatterMatrix produced a matrix with jittered scatterplots, histograms, and correlation coefficients. 8 | } 9 | \usage{ 10 | scatterMatrix(dat, items=NULL, plotSize=180, sizeMultiplier = 1, 11 | axisLabels = "none", powerHist=TRUE, ...) 12 | } 13 | \arguments{ 14 | \item{dat}{ 15 | A dataframe containing the items in the scale. All variables in this 16 | dataframe will be used if items is NULL. 17 | } 18 | \item{items}{ 19 | If not NULL, this should be a character vector with the names of the 20 | variables in the dataframe that represent items in the scale. 21 | } 22 | \item{plotSize}{ 23 | Size of the final plot in millimeters. 
24 | } 25 | \item{sizeMultiplier}{ 26 | Allows more flexible control over the size of the plot elements 27 | } 28 | \item{axisLabels}{ 29 | Passed to ggpairs function to set axisLabels. 30 | } 31 | \item{powerHist}{ 32 | Whether to use the default ggpairs histogram on the diagonal of the scattermatrix, or whether to use the powerHist version. 33 | } 34 | \item{...}{ 35 | Additional arguments are passed on to powerHist. 36 | } 37 | } 38 | \value{ 39 | 40 | An object with the input and several output variables. Most notably: 41 | \item{output$scatterMatrix}{A scattermatrix with histograms on the diagonal and correlation coefficients in the upper right half.} 42 | } 43 | \author{ 44 | Gjalt-Jorn Peters 45 | 46 | Maintainer: Gjalt-Jorn Peters 47 | } 48 | \examples{ 49 | ### Note: the 'not run' is simply because running takes a lot of time, 50 | ### but these examples are all safe to run! 51 | \dontrun{ 52 | 53 | ### Generate a datafile to use 54 | exampleData <- data.frame(item1=rnorm(100)); 55 | exampleData$item2 <- exampleData$item1+rnorm(100); 56 | exampleData$item3 <- exampleData$item1+rnorm(100); 57 | exampleData$item4 <- exampleData$item2+rnorm(100); 58 | exampleData$item5 <- exampleData$item2+rnorm(100); 59 | 60 | ### Use all items 61 | scatterMatrix(dat=exampleData); 62 | } 63 | } 64 | \keyword{ utilities } 65 | \keyword{ univar } -------------------------------------------------------------------------------- /man/paginatedAsymmetricalScatterMatrix.Rd: -------------------------------------------------------------------------------- 1 | \name{paginatedAsymmetricalScatterMatrix} 2 | \alias{paginatedAsymmetricalScatterMatrix} 3 | %- Also NEED an '\alias' for EACH other topic documented here. 4 | \title{ 5 | paginatedAsymmetricalScatterMatrix 6 | } 7 | \description{ 8 | A function that generates a series of asymmetricalScatterMatrices, so that they 9 | can be printed or included in PDFs. 10 | } 11 | \usage{ 12 | paginatedAsymmetricalScatterMatrix(dat, cols, rows, maxRows = 5, ...) 13 | } 14 | %- maybe also 'usage' for other objects documented here. 15 | \arguments{ 16 | \item{dat}{ 17 | The dataframe containing the variables specified in \code{cols} and \code{rows}. 18 | } 19 | \item{cols}{ 20 | The names of the variables to use for the columns. 21 | } 22 | \item{rows}{ 23 | The names of the variables to use for the rows. 24 | } 25 | \item{maxRows}{ 26 | The maximum number of rows on one 'page' (i.e. in one \code{\link{asymmetricalScatterMatrix}}). 27 | 28 | } 29 | \item{\dots}{ 30 | Extra arguments to pass on to each \code{\link{asymmetricalScatterMatrix}} call. 31 | } 32 | } 33 | \value{ 34 | An object containing the asymmetricalScatterMatrices in a list: 35 | \item{input}{Input values.} 36 | \item{intermediate}{Some values/objects generated in the process.} 37 | \item{output}{A list containing the object 'scatterMatrices', which is a list of the generated scatterMatrices.} 38 | } 39 | \author{ 40 | Gjalt-Jorn Peters 41 | 42 | Maintainer: Gjalt-Jorn Peters 43 | } 44 | \seealso{ 45 | \code{\link{asymmetricalScatterMatrix}} 46 | } 47 | \examples{ 48 | \dontrun{ 49 | ### (Not run by default because it's quite timeconsuming.) 
50 | tmp <- paginatedAsymmetricalScatterMatrix(infert, cols=c("parity"), 51 | rows=c("induced", "case", 52 | "spontaneous", "age", 53 | "pooled.stratum"), 54 | maxRows = 3, 55 | showCorrelations="top-right"); 56 | tmp$output$scatterMatrices[[1]]; 57 | } 58 | } 59 | \keyword{ misc } 60 | -------------------------------------------------------------------------------- /R/removeExceptionalValues.R: -------------------------------------------------------------------------------- 1 | #' removeExceptionalValues 2 | #' 3 | #' A function to replace exceptional values with NA. This can be used to 4 | #' quickly remove impossible values, for example, when participants entered 5 | #' their age as 344. 6 | #' 7 | #' Note that exceptional values may be errors (e.g. participants accidently 8 | #' pressed a key twice, or during data entry, something went wrong), but they 9 | #' may also be indicative of participants who did not seriously participate in 10 | #' the study. Therefore, it is advised to first use 11 | #' \code{\link{exceptionalScores}} to look for patterns where participants 12 | #' enter many exceptional scores. 13 | #' 14 | #' @param dat The dataframe containing the items to inspect. 15 | #' @param items The items to inspect. 16 | #' @param exception How rare a value must be to be considered exceptional (and 17 | #' replaced by NA). 18 | #' @param silent Can be used to suppress messages. 19 | #' @param stringsAsFactors Whether to convert strings to factors when creating 20 | #' a dataframe from lapply output. 21 | #' @return The dataframe, with exceptional values replaced by NA. 22 | #' @author Gjalt-Jorn Peters 23 | #' 24 | #' Maintainer: Gjalt-Jorn Peters 25 | #' @seealso \code{\link{exceptionalScores}} 26 | #' @keywords utilities 27 | #' @examples 28 | #' 29 | #' removeExceptionalValues(mtcars, exception=.1); 30 | #' 31 | #' @export removeExceptionalValues 32 | removeExceptionalValues <- function(dat, items=NULL, exception=.005, 33 | silent=FALSE, stringsAsFactors=FALSE) { 34 | if (is.data.frame(dat)) { 35 | if (is.null(items)) { 36 | items <- names(dat); 37 | if (!silent) { 38 | cat("No items specified: extracting all variable names in dataframe.\n"); 39 | } 40 | } 41 | return(data.frame(lapply(dat, function(x) { 42 | if (is.numeric(x)) { 43 | return(ifelse(exceptionalScore(x, prob = exception), NA, x)); 44 | } else { 45 | return(x); 46 | } 47 | }), stringsAsFactors=stringsAsFactors)); 48 | } else { 49 | return(ifelse(exceptionalScore(dat, prob = exception), NA, dat)); 50 | } 51 | } 52 | -------------------------------------------------------------------------------- /DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: userfriendlyscience 2 | Type: Package 3 | Title: Quantitative Analysis Made Accessible 4 | Version: 0.7.2 5 | Date: 2018-09-25 6 | Authors@R: c(person("Gjalt-Jorn", "Peters", 7 | email = "gjalt-jorn@userfriendlyscience.com", 8 | role = c("aut", "cre", "ctb")), 9 | person("Peter", "Verboon", 10 | role = c("ctb")), 11 | person("James", "Green", 12 | role = c("ctb"))) 13 | Maintainer: Gjalt-Jorn Peters 14 | License: GPL (>= 3) 15 | Description: Contains a number of functions that serve 16 | two goals. First, to make R more accessible to people migrating 17 | from SPSS by adding a number of functions that behave roughly like 18 | their SPSS equivalents (also see ). Second, 19 | to make a number of slightly more advanced functions more user 20 | friendly to relatively novice users. 
The package also conveniently 21 | houses a number of additional functions that are intended to 22 | increase the quality of methodology and statistics in psychology, 23 | not by offering technical solutions, but by shifting perspectives, 24 | for example towards reasoning based on sampling distributions as 25 | opposed to on point estimates. 26 | URL: http://userfriendlyscience.com 27 | BugReports: https://github.com/matherion/userfriendlyscience/issues 28 | LazyData: true 29 | Imports: BiasedUrn, 30 | car, 31 | data.tree, 32 | DiagrammeR, 33 | diptest, 34 | digest, 35 | GGally, 36 | ggplot2, 37 | ggrepel, 38 | ggridges, 39 | gridExtra, 40 | GPArotation, 41 | gtable, 42 | knitr, 43 | lavaan, 44 | lme4, 45 | MASS, 46 | MBESS, 47 | minpack.lm, 48 | pander, 49 | plyr, 50 | psych, 51 | pwr, 52 | RColorBrewer, 53 | rio, 54 | scales, 55 | SCRT, 56 | SuppDists, 57 | ufs (>= 0.0.1), 58 | viridis, 59 | XML, 60 | xtable 61 | Suggests: multcompView 62 | -------------------------------------------------------------------------------- /man/didacticPlot.Rd: -------------------------------------------------------------------------------- 1 | \name{didacticPlot} 2 | \alias{didacticPlot} 3 | \alias{didacticPlotTheme} 4 | \title{ 5 | didacticPlot 6 | } 7 | \description{ 8 | didacticPlot is useful for making ggplot2 plots of distributions of t, F, Chi^2, and Pearson r, showing a given value, and shading the arie covering the more extreme values. didacticPlotTheme is the basic theme. 9 | } 10 | \usage{ 11 | didacticPlot(foundValue, statistic, df1, df2 = NULL, 12 | granularity = 1000, xLim = NULL, yLab = NULL, 13 | lineCol = "red", lineSize=1, 14 | surfaceCol = "red", textMarginFactor = 20, 15 | sided="two") 16 | didacticPlotTheme(base_size = 14, base_family = "") 17 | } 18 | \arguments{ 19 | \item{foundValue}{ 20 | The value to indicate (the 'found' value). 21 | } 22 | \item{statistic}{ 23 | One of "r", "t", "f" or "chisq". 24 | } 25 | \item{df1, df2}{ 26 | The degrees of freedom; only use df1 for the r, t and chi^2 test; for the F-test, use df1 for the degrees of freedom of the denominator and df2 for the degrees of freedom of the numerator. 27 | } 28 | \item{granularity}{ 29 | Steps to use for x-axis. 30 | } 31 | \item{xLim}{ 32 | Vector; minimum and maximum values on x axis. 33 | } 34 | \item{yLab}{ 35 | Label on y axis. 36 | } 37 | \item{lineCol}{ 38 | Colour of density line. 39 | } 40 | \item{lineSize}{ 41 | Size of density line. 42 | } 43 | \item{surfaceCol}{ 44 | Colour of coloured surface area. 45 | } 46 | \item{textMarginFactor}{ 47 | Used to calculate how close to the vertical line text labels should appear. 48 | } 49 | \item{sided}{ 50 | Whether to make a plot for a 2-sided or 1-sided test. 51 | } 52 | \item{base_size, base_family}{ 53 | Passed on to the grey ggplot theme. 54 | } 55 | } 56 | \value{ 57 | didacticPlot returns an object that contains the plot in the $plot element. 
58 | } 59 | \examples{ 60 | didacticPlot(1, statistic='chisq', df1=2); 61 | 62 | didacticPlot(1, statistic='t', df1=40); 63 | 64 | didacticPlot(2.02, statistic='t', df1=40, textMarginFactor=25); 65 | 66 | ### Two sample t-test for n1 = n2 = 250, showing 67 | ### p-value of 5% 68 | # a<-didacticPlot(1.96, statistic='t', df1=498); 69 | 70 | } 71 | \keyword{ utilities } -------------------------------------------------------------------------------- /man/freq.Rd: -------------------------------------------------------------------------------- 1 | \name{freq} 2 | \alias{freq} 3 | \alias{Frequency} 4 | \alias{frequencies} 5 | %- Also NEED an '\alias' for EACH other topic documented here. 6 | \title{ 7 | Frequency tables 8 | } 9 | \description{ 10 | Function to show frequencies in a manner similar to what SPSS' "FREQUENCIES" command does. Note that \code{frequency} is an alias for \code{freq}. 11 | } 12 | \usage{ 13 | freq(vector, digits = 1, nsmall=1, transposed=FALSE, 14 | round=1, plot=FALSE, plotTheme = theme_bw()) 15 | frequencies(..., digits = 1, nsmall = 1, 16 | transposed = FALSE, round = 1, 17 | plot = FALSE, plotTheme = theme_bw()) 18 | } 19 | %- maybe also 'usage' for other objects documented here. 20 | \arguments{ 21 | \item{vector}{ 22 | A vector of values to compute frequencies for. 23 | } 24 | \item{digits}{ 25 | Minimum number of significant digits to show in result. 26 | } 27 | \item{nsmall}{ 28 | Minimum number of digits after the decimal point to show in the result. 29 | } 30 | \item{transposed}{ 31 | Whether to transpose the results when printing them (this can be useful for blind users). 32 | } 33 | \item{round}{ 34 | Number of digits to round the results to (can be used in conjunction with digits to determine format of results). 35 | } 36 | \item{plot}{ 37 | If true, a histogram is shown of the variable. 38 | } 39 | \item{plotTheme}{ 40 | The ggplot2 theme to use. 41 | } 42 | \item{\dots}{ 43 | The variables of which to provide frequencies 44 | } 45 | } 46 | \value{ 47 | 48 | An object with several elements, the most notable of which is: 49 | \item{dat}{A dataframe with the frequencies} 50 | 51 | For \code{frequencies}, these objects are in a list of their own. 52 | 53 | } 54 | \examples{ 55 | 56 | ### Create factor vector 57 | ourFactor <- factor(mtcars$gear, levels=c(3,4,5), 58 | labels=c("three", "four", "five")); 59 | ### Add some missing values 60 | factorWithMissings <- ourFactor; 61 | factorWithMissings[10] <- factorWithMissings[20] <- NA; 62 | 63 | ### Show frequencies 64 | freq(ourFactor); 65 | freq(factorWithMissings); 66 | 67 | ### ... Or for all of them at one 68 | frequencies(ourFactor, factorWithMissings); 69 | 70 | } 71 | \keyword{ univar } 72 | -------------------------------------------------------------------------------- /man/Singh.Rd: -------------------------------------------------------------------------------- 1 | \name{Singh} 2 | \alias{Singh} 3 | \docType{data} 4 | \title{ 5 | Verbal and physical aggression scores from Singh et al. (2007) 6 | } 7 | \description{ 8 | This is a dataset originally described in Singh et al. (2007), and digitized by Rumen Manolov using plot digitizer software and used to illustrate a number of single case design analysis approaches in Manolov & Moeyaert (2016). It is also used by Verboon & Peters (2017) to illustrate the \code{\link{piecewiseRegr}} and the \code{\link{genlog}} functions. 9 | } 10 | \usage{data("Singh")} 11 | \format{ 12 | A data frame with 56 observations on the following 6 variables. 
13 | \describe{ 14 | \item{\code{tier}}{A numeric subject identifier.} 15 | \item{\code{id}}{A character subject identifier (i.e. a name).} 16 | \item{\code{time}}{An index of the measurement moment.} 17 | \item{\code{phase}}{A dummy variable indicating the phase of the experiment: 0 means that treatment has not yet started, 1 means that treatment has started.} 18 | \item{\code{score_physical}}{The subjects' scores on physical aggression.} 19 | \item{\code{score_verbal}}{The subjects' scores on verbal aggression.} 20 | } 21 | } 22 | \source{ 23 | See Rumen Manolov's Open Science Framework repository at \url{https://osf.io/t6ws6} for the tutorial and the original dataset. 24 | } 25 | \references{ 26 | Singh, N. N., Lancioni, G. E., Winton, A. S., Adkins, A. D., Wahler, R. G., Sabaawi, M., & Singh, J. (2007). Individuals with mental illness can control their aggressive behavior through mindfulness training. \emph{Behavior Modification, 31}(3), 313-328. http://doi.org/10.1177/0145445506293585 27 | 28 | Manolov, R., & Moeyaert, M. (2017). How Can Single-Case Data Be Analyzed? Software Resources, Tutorial, and Reflections on Analysis. \emph{Behavior Modification, 41}(2), 179-228. http://doi.org/10.1177/0145445516664307 29 | 30 | Verboon, P. & Peters, G.-J. Y. (2017) Applying the generalised logistic model in SCD to deal with ceiling effects. \emph{PsyArXiv} http://INSERTLINK 31 | } 32 | \seealso{ 33 | \code{\link{piecewiseRegr}} and \code{\link{genlog}} both contain examples using this dataset. 34 | } 35 | \examples{ 36 | ### To load the data, use: 37 | data(Singh); 38 | } 39 | \keyword{datasets} 40 | -------------------------------------------------------------------------------- /R/erDataSeq.R: -------------------------------------------------------------------------------- 1 | erDataSeq <- function(er = NULL, threshold = NULL, mean = NULL, sd = NULL, 2 | eventIfHigher = TRUE, 3 | pRange = c(.000001, .99999), xStep=.01) { 4 | 5 | if (is.null(er) && is.null(threshold)) { 6 | stop("Provide either the control event rate (er; a proportion, ", 7 | "a number between 0 and 1) or the cut-off value that determines ", 8 | "an 'event' on the same scale as mean and sd."); 9 | } 10 | 11 | if (is.null(er)) { 12 | ### Determine er from threshold 13 | if (is.null(mean) || is.null(sd)) { 14 | stop("When I need to derive the er from the threshold value, ", 15 | "you must also provide me with the mean and the standard ", 16 | "deviation!"); 17 | } 18 | er <- convert.threshold.to.er(threshold = threshold, 19 | mean = mean, 20 | sd = sd, 21 | eventIfHigher = eventIfHigher); 22 | } else if (is.null(threshold)) { 23 | if (is.null(mean) && is.null(sd)) { 24 | mean <- 0; 25 | sd <- 1; 26 | } else if (is.null(mean)) { 27 | stop("If providing an event rate (er) and a standard deviation, you must also provide a mean value!"); 28 | } else if (is.null(sd)) { 29 | stop("If providing an event rate (er) and a mean value, you must also provide a standard deviation!"); 30 | } 31 | threshold <- convert.er.to.threshold(er, 32 | mean = mean, 33 | sd = sd, 34 | eventIfHigher = eventIfHigher); 35 | } 36 | 37 | ### Get range from where to where to generate values 38 | xRange <- c(qnorm(min(pRange), mean=mean, sd=sd), 39 | qnorm(max(pRange), mean=mean, sd=sd)); 40 | 41 | res <- data.frame(x = seq(from=xRange[1], to=xRange[2], by=xStep)); 42 | res$density <- dnorm(res$x, mean=mean, sd=sd); 43 | 44 | attr(res, 'er') <- er; 45 | attr(res, 'threshold') <- threshold; 46 | attr(res, 'mean') <- mean; 47 | attr(res, 'eventIfHigher') <- 
eventIfHigher; 48 | attr(res, 'sd') <- sd; 49 | 50 | class(res) <- c('erDataSeq', class(res)); 51 | 52 | return(res); 53 | 54 | } 55 | -------------------------------------------------------------------------------- /R/detStructComputeScales.R: -------------------------------------------------------------------------------- 1 | detStructComputeScales <- function(determinantStructure, 2 | dat, 3 | append = TRUE, 4 | separator = "_") { 5 | 6 | if (!("determinantStructure" %in% class(determinantStructure))) { 7 | stop("The first argument must be a determint structure object!"); 8 | } 9 | 10 | if (!("data.frame" %in% class(dat))) { 11 | stop("The first argument must be a dataframe!"); 12 | } 13 | 14 | ### Get behavior regex 15 | ### Get all behaviorRegExes that are set (should only be one) 16 | behaviorRegEx <- determinantStructure$Get('behaviorRegEx', 17 | traversal='level', 18 | filterFun=function(x) return(!is.null(x$behaviorRegEx))); 19 | 20 | ### Remove any duplicates and select the first one in case there are more 21 | behaviorRegEx <- unique(behaviorRegEx); 22 | 23 | if (length(behaviorRegEx) > 1) { 24 | warning("The determinant structure you specified has more than one behavior regular expression defined. Only using the first one, '", 25 | behaviorRegEx[1], "'."); 26 | } 27 | 28 | behaviorRegEx <- behaviorRegEx[1]; 29 | 30 | ### Get all variables names of all 'product halves' 31 | scalables <- determinantStructure$Get("varNames", traversal='level', 32 | filterFun=function(x) { 33 | return(x$type == 'determinantVar'); 34 | }, simplify=FALSE); 35 | 36 | ### Remove superfluous level in between 37 | scalables <- lapply(scalables, unlist); 38 | 39 | ### Add behavior before variable names 40 | names(scalables) <- paste0(behaviorRegEx, separator, names(scalables)); 41 | 42 | ### Add new variable names to determinant structure 43 | determinantStructure$Set(scaleVarName = names(scalables), 44 | filterFun=function(x) { 45 | return(x$type == 'determinantVar'); 46 | }); 47 | 48 | dat <- makeScales(dat, scalables); 49 | 50 | if (append) { 51 | return(dat); 52 | } else { 53 | return(dat[, names(scalables)]); 54 | } 55 | 56 | } 57 | -------------------------------------------------------------------------------- /man/pwr.confIntR.Rd: -------------------------------------------------------------------------------- 1 | \name{pwr.confIntR} 2 | \alias{pwr.confIntR} 3 | \title{ 4 | Determine required sample size for a given confidence interval width for Pearson's r 5 | } 6 | \description{ 7 | This function computes how many participants you need if you want to achieve a confidence interval of a given width. This is useful when you do a study and you are interested in how strongly two variables are associated. 8 | } 9 | \usage{ 10 | pwr.confIntR(r, w = 0.1, conf.level = 0.95) 11 | } 12 | \arguments{ 13 | \item{r}{ 14 | The correlation you expect to find (confidence intervals for a given level of confidence get narrower as the correlation coefficient increases). 15 | } 16 | \item{w}{ 17 | The required half-width (or margin of error) of the confidence interval. 18 | } 19 | \item{conf.level}{ 20 | The level of confidence. 21 | } 22 | } 23 | \value{ 24 | The required sample size, or a vector or matrix of sample sizes if multiple correlation coefficients or required (half-)widths were supplied. The row and column names specify the \code{r} and \code{w} values to which the sample size in each cell corresponds. The confidence level is set as attribute to the resulting vector or matrix. 
25 | } 26 | \references{ 27 | Bonett, D. G., Wright, T. A. (2000). Sample size requirements for estimating Pearson, Kendall and Spearman correlations. \emph{Psychometrika, 65}, 23-28. 28 | 29 | Bonett, D. G. (2014). CIcorr.R and sizeCIcorr.R http://people.ucsc.edu/~dgbonett/psyc181.html 30 | 31 | Moinester, M., & Gottfried, R. (2014). Sample size estimation for correlations with pre-specified confidence interval. \emph{The Quantitative Methods of Psychology, 10}(2), 124-130. http://www.tqmp.org/RegularArticles/vol10-2/p124/p124.pdf 32 | 33 | Peters, G. J. Y. & Crutzen, R. (forthcoming) An easy and foolproof method for establishing how effective an intervention or behavior change method is: required sample size for accurate parameter estimation in health psychology. 34 | } 35 | \author{ 36 | Douglas Bonett (UC Santa Cruz, United States), with minor edits by Murray Moinester (Tel Aviv University, Israel) and Gjalt-Jorn Peters (Open University of the Netherlands, the Netherlands). 37 | 38 | Maintainer: Gjalt-Jorn Peters 39 | } 40 | \seealso{ 41 | \code{\link{pwr.confIntR}} 42 | } 43 | \examples{ 44 | pwr.confIntR(c(.4, .6, .8), w=c(.1, .2)); 45 | } 46 | \keyword{ htest } 47 | -------------------------------------------------------------------------------- /man/confIntV.Rd: -------------------------------------------------------------------------------- 1 | \name{confIntV} 2 | \alias{confIntV} 3 | \alias{cramersV} 4 | \alias{crossTab} 5 | \title{ 6 | crossTab, confIntV and cramersV 7 | } 8 | \description{ 9 | These functions compute the point estimate and confidence interval for 10 | Cramer's V. The crossTab function also shows a crosstable. 11 | } 12 | \usage{ 13 | crossTab(x, y=NULL, conf.level=.95, 14 | digits=2, pValueDigits=3, ...) 15 | cramersV(x, y = NULL, digits=2) 16 | confIntV(x, y = NULL, conf.level=.95, 17 | samples = 500, digits=2, 18 | method=c('bootstrap', 'fisher'), 19 | storeBootstrappingData = FALSE) 20 | } 21 | \arguments{ 22 | \item{x}{ 23 | Either a crosstable to analyse, or one of two vectors to use to generate 24 | that crosstable. The vector should be a factor, i.e. a categorical 25 | variable identified as such by the 'factor' class). 26 | } 27 | \item{y}{ 28 | If x is a crosstable, y can (and should) be empty. If x is a vector, y 29 | must also be a vector. 30 | } 31 | \item{digits}{ 32 | Minimum number of digits after the decimal point to show in the result. 33 | } 34 | \item{pValueDigits}{ 35 | Minimum number of digits after the decimal point to show in the Chi 36 | Square p value in the result. 37 | } 38 | \item{conf.level}{ 39 | Level of confidence for the confidence interval. 40 | } 41 | \item{samples}{ 42 | Number of samples to generate when bootstrapping. 43 | } 44 | \item{method}{ 45 | Whether to use Fisher's Z or bootstrapping to compute the confidence 46 | interval. 47 | } 48 | \item{storeBootstrappingData}{ 49 | Whether to store (or discard) the data generating during the bootstrapping 50 | procedure. 51 | } 52 | \item{...}{ 53 | Extra arguments to \code{crossTab} are passed on to \code{confIntV}. 54 | } 55 | } 56 | \value{ 57 | 58 | The cramersV and confIntV functions return either a point estimate or 59 | a confidence interval for Cramer's V, an effect size to describe the 60 | association between two categorical variables. The crossTab function is 61 | just a wrapper around confIntV. 
62 | 63 | } 64 | \examples{ 65 | 66 | crossTab(infert$education, infert$induced, samples=50); 67 | 68 | ### Get confidence interval for Cramer's V 69 | ### Note that by using 'table', and so removing the raw data, inhibits 70 | ### bootstrapping, which could otherwise take a while. 71 | confIntV(table(infert$education, infert$induced)); 72 | 73 | } 74 | \keyword{ bivar } 75 | -------------------------------------------------------------------------------- /man/omegaSqDist.Rd: -------------------------------------------------------------------------------- 1 | \name{omegaSqDist} 2 | \alias{domegaSq} 3 | \alias{pomegaSq} 4 | \alias{qomegaSq} 5 | \alias{romegaSq} 6 | \title{ 7 | The distribution of Omega Squared 8 | } 9 | \description{ 10 | These functions use some conversion to and from the \emph{F} distribution to provide the Omega Squared distribution. 11 | } 12 | \usage{ 13 | domegaSq(x, df1, df2, populationOmegaSq = 0) 14 | pomegaSq(q, df1, df2, populationOmegaSq = 0, lower.tail = TRUE) 15 | qomegaSq(p, df1, df2, populationOmegaSq = 0, lower.tail = TRUE) 16 | romegaSq(n, df1, df2, populationOmegaSq = 0) 17 | 18 | } 19 | \arguments{ 20 | \item{x, q}{ 21 | Vector of quantiles, or, in other words, the value(s) of Omega Squared. 22 | } 23 | \item{p}{ 24 | Vector of probabilites (\emph{p}-values). 25 | } 26 | \item{df1, df2}{ 27 | Degrees of freedom for the numerator and the denominator, respectively. 28 | } 29 | \item{n}{ 30 | Desired number of Omega Squared values. 31 | } 32 | \item{populationOmegaSq}{ 33 | The value of Omega Squared in the population; this determines the center of the Omega Squared distribution. This has not been implemented yet in this version of \code{userfriendlyscience}. If anybody has the inverse of \code{\link{convert.ncf.to.omegasq}} for me, I'll happily integrate this. 34 | } 35 | \item{lower.tail}{ 36 | logical; if TRUE (default), probabilities are the likelihood of finding an Omega Squared smaller than the specified value; otherwise, the likelihood of finding an Omega Squared larger than the specified value. 37 | } 38 | } 39 | \details{ 40 | The functions use \code{\link{convert.omegasq.to.f}} and \code{\link{convert.f.to.omegasq}} to provide the Omega Squared distribution. 41 | } 42 | \value{ 43 | \code{domegaSq} gives the density, \code{pomegaSq} gives the distribution function, \code{qomegaSq} gives the quantile function, and \code{romegaSq} generates random deviates. 
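For instance (a sketch of the idea rather than the literal implementation), the
distribution function can be thought of as
\code{pomegaSq(q, df1, df2) = pf(convert.omegasq.to.f(q, df1, df2), df1, df2)},
with the other three functions obtained through the same conversion.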
44 | 45 | } 46 | \author{ 47 | Gjalt-Jorn Peters 48 | 49 | Maintainer: Gjalt-Jorn Peters 50 | } 51 | \seealso{ 52 | \code{\link{convert.omegasq.to.f}}, \code{\link{convert.f.to.omegasq}}, \code{\link{df}}, \code{\link{pf}}, \code{\link{qf}}, \code{\link{rf}} 53 | } 54 | \examples{ 55 | ### Generate 10 random Omega Squared values 56 | romegaSq(10, 66, 3); 57 | 58 | ### Probability of findings an Omega Squared 59 | ### value smaller than .06 if it's 0 in the population 60 | pomegaSq(.06, 66, 3); 61 | 62 | } 63 | \keyword{ univar } 64 | -------------------------------------------------------------------------------- /R/varsToDiamondPlotDf.R: -------------------------------------------------------------------------------- 1 | varsToDiamondPlotDf <- function(dat, items = NULL, labels = NULL, 2 | decreasing=NULL, 3 | conf.level=.95) { 4 | 5 | if (is.null(items)) { 6 | items <- names(dat); 7 | } else if (is.numeric(items)) { 8 | items <- names(dat)[items]; 9 | } 10 | if (is.null(labels)) labels <- items; 11 | 12 | # resDf <- data.frame(t(sapply(dat[, items, drop=FALSE], 13 | # function(x) { 14 | # x <- na.omit(x); 15 | # ci <- meanConfInt(x, conf.level=conf.level)$output$ci; 16 | # return(data.frame(lo = ci[1], mean = mean(x), hi = ci[2])); 17 | # }))); 18 | 19 | miniDat <- dat[, items, drop=FALSE]; 20 | notNumericVectors <- 21 | items[which(!unlist(lapply(miniDat, is.numeric)))]; 22 | if (length(notNumericVectors) > 0) { 23 | stop("Not all items are numeric (", 24 | ufs::vecTxtQ(notNumericVectors), 25 | " are not)."); 26 | } 27 | ### To fix error with mean 28 | resDf <- 29 | matrix(unlist(lapply(miniDat, 30 | function(x) { 31 | x <- na.omit(x); 32 | ci <- meanConfInt(x, conf.level=conf.level)$output$ci; 33 | return(c(ci[1], mean(x), ci[2])); 34 | })), 35 | byrow=TRUE, ncol=3); 36 | resDf <- as.data.frame(resDf); 37 | names(resDf) <- c('lo', 'mean', 'hi'); 38 | 39 | resDf$label <- labels; 40 | resDf$rownr <- 1:nrow(resDf); 41 | resDf$constant <- 1; 42 | 43 | if (!is.null(decreasing)) { 44 | ### Invert 'decreasing' because ggplot plots the lowest/first value first (near the origin). 45 | ### So a decreasing sort would normally result in higher means being displayed LOWER in 46 | ### the plot, which is counter-intuitive, hence the inversion. 
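### For example (purely illustrative): with means of c(2, 5, 3) and
### decreasing=TRUE, the call becomes order(..., decreasing=FALSE), which
### returns c(1, 3, 2); the rows are thus arranged from lowest to highest
### mean, so the highest mean ends up at the top of the plot.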
47 | sortedByMean <- order(unlist(resDf$mean), decreasing=!decreasing); 48 | resDf <- resDf[sortedByMean, ]; 49 | labels <- labels[sortedByMean]; 50 | } else { 51 | ### sortedByMean is used later on to organise the raw data; therefore, this should 52 | ### reflect the order of the variables on the Y axis regardless of whether they're 53 | ### reorganised 54 | sortedByMean <- 1:length(labels); 55 | } 56 | 57 | ### Return this vector as attribute to use in meansDiamondPlot 58 | attr(resDf, 'sortedByMean') <- sortedByMean; 59 | 60 | return(resDf); 61 | 62 | } 63 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | # R for travis: see documentation at https://docs.travis-ci.com/user/languages/r 2 | 3 | language: R 4 | #sudo: false 5 | cache: packages 6 | 7 | # R for travis: see documentation at https://docs.travis-ci.com/user/languages/r 8 | 9 | ### yaml file for ggstatsplot, see 10 | ### https://github.com/wibeasley/ggstatsplot/blob/cb454e637995fb381c9d2565f461e582ab59b9b6/.travis.yml 11 | 12 | #language: R 13 | #cache: packages 14 | #latex: false 15 | #fortran: true 16 | 17 | # Note: http test servers are flaky and we want to minimze false positives 18 | # so we skip examples and vignettes (but not unit tests) on OS X. 19 | 20 | matrix: 21 | include: 22 | - os: linux 23 | dist: trusty 24 | sudo: required 25 | 26 | r_binary_packages: 27 | - stringi 28 | - magrittr 29 | - curl 30 | - jsonlite 31 | - Rcpp 32 | - bindrcpp 33 | - RcppEigen 34 | - rpf 35 | - openssl 36 | - rlang 37 | - igraph 38 | - utf8 39 | - gss 40 | - haven 41 | - XML 42 | - data.table 43 | - matrixStats 44 | - rgl 45 | - dplyr 46 | - purrr 47 | - tidyr 48 | - readr 49 | - minqa 50 | - mvtnorm 51 | - nloptr 52 | - SparseM 53 | - lme4 54 | - httpuv 55 | - markdown 56 | - OpenMx 57 | - sem 58 | - readxl 59 | - openxlsx 60 | - pander 61 | - minpack.lm 62 | - StanHeaders 63 | 64 | # "These installs will be faster than source installs, but may not always be the most recent version" 65 | # https://docs.travis-ci.com/user/languages/r/#Additional-Dependency-Fields 66 | 67 | # precise is getting decommissioned on Travis, so better to leave it out 68 | # - os: linux 69 | # dist: trusty 70 | # sudo: false 71 | # env: R_CODECOV = true 72 | # r_build_args: '--no-build-vignettes' 73 | # r_check_args: '--ignore-vignettes --no-examples' 74 | - os: osx 75 | osx_image: xcode9.2 76 | - os: osx 77 | osx_image: xcode7.3 78 | r_build_args: '--no-build-vignettes' 79 | r_check_args: '--ignore-vignettes --no-examples' 80 | 81 | addons: 82 | apt: 83 | packages: 84 | - libcurl4-openssl-dev 85 | - libxml2-dev 86 | - libgsl0-dev 87 | 88 | repos: 89 | CRAN: https://cloud.r-project.org 90 | ropensci: http://packages.ropensci.org 91 | -------------------------------------------------------------------------------- /man/setCaptionNumbering.Rd: -------------------------------------------------------------------------------- 1 | \name{setCaptionNumbering} 2 | \alias{setCaptionNumbering} 3 | \title{ 4 | Convenience function for numbered captions in knitr (and so, RMarkdown) 5 | } 6 | \description{ 7 | This function makes it easy to tell knitr (and so RMarkdown) to use numbered captions of any type. 
8 | } 9 | \usage{ 10 | setCaptionNumbering(captionName = "tab.cap", 11 | prefix = ":Table \%s: ", 12 | suffix = "", 13 | captionBefore = FALSE, 14 | romanNumeralSetting = "counter_roman", 15 | optionName = paste0("setCaptionNumbering_", captionName), 16 | resetCounterTo = 1) 17 | } 18 | %- maybe also 'usage' for other objects documented here. 19 | \arguments{ 20 | \item{captionName}{ 21 | The name of the caption; this is used both as unique identifier for the counter, and to set the caption text (included between the prefix and suffix) in the chunk options. 22 | } 23 | \item{prefix}{ 24 | The text to add as prefix before the actual caption; this will typically include '\%s' which will be replaced by the number of this caption. 25 | } 26 | \item{suffix}{ 27 | The text to add as suffix after the actual caption; this can also include '\%s' which will be replaced by the number of this caption. Together with the \code{prefix}, this can also be used to enclose the caption in html. 28 | } 29 | \item{captionBefore}{ 30 | Whether the caption should appear before or after the relevant chunk output. 31 | } 32 | \item{romanNumeralSetting}{ 33 | The name of the option (should be retrievable with \code{\link{getOption}}) where it's configured whether to use Roman (TRUE) or Arabic (FALSE) numerals. FALSE is assumed if this option isn't set. 34 | } 35 | \item{optionName}{ 36 | The name of the option to use to retrieve and set the counter. This can be used, for example, to have multiple caption types use the same counter. 37 | } 38 | \item{resetCounterTo}{ 39 | If not \code{NULL} and numeric, the counter will start at this number. 40 | } 41 | } 42 | \value{ 43 | This function returns nothing, but instead sets the appropriate \code{\link{knit_hooks}}. Or rather, just one hook. 44 | } 45 | \author{ 46 | Gjalt-Jorn Peters 47 | 48 | Maintainer: Gjalt-Jorn Peters 49 | } 50 | \examples{ 51 | \dontrun{ 52 | setCaptionNumbering(captionName='tab.cap', 53 | prefix = ":Table \%s: "); 54 | } 55 | } 56 | \keyword{ utils } 57 | -------------------------------------------------------------------------------- /R/curfnfinder.R: -------------------------------------------------------------------------------- 1 | ### Written by Nick Sabbe, http://stackoverflow.com/questions/7307987/logging-current-function-name 2 | 3 | 4 | 5 | #' Function to find the name of the calling function 6 | #' 7 | #' This function finds and returns the name of the function calling it. This 8 | #' can be useful, for example, when generating functions algorithmically. 9 | #' 10 | #' This function was written by Nick Sabbe for his package \code{addendum}. He 11 | #' posted it on Stack Exchange at 12 | #' \url{http://stackoverflow.com/questions/7307987/logging-current-function-name} 13 | #' and I included it here with his permission. 14 | #' 15 | #' @param skipframes Number of frames to skip; useful when called from an 16 | #' anonymous function. 17 | #' @param skipnames A regular expression specifying which substrings to delete. 18 | #' @param retIfNone What to return when called from outside a function. 19 | #' @param retStack Whether to return the entire stack or just one function. 20 | #' @param extraPrefPerLevel Extra prefixes to return for each level of the 21 | #' function. 22 | #' @return The current function.
23 | #' @author Nick Sabbe (Arteveldehogeschool) 24 | #' 25 | #' Maintainer: Gjalt-Jorn Peters 26 | #' @keywords utility 27 | #' @examples 28 | #' 29 | #' functionA <- functionB <- function() { 30 | #' curFn <- curfnfinder(); 31 | #' if (curFn == 'functionA') { 32 | #' cat('Doing something\n'); 33 | #' } else { 34 | #' cat('Doing something else\n'); 35 | #' } 36 | #' cat('Doing something generic.'); 37 | #' } 38 | #' functionA(); 39 | #' functionB(); 40 | #' 41 | #' @export curfnfinder 42 | curfnfinder <- function(skipframes=0, 43 | skipnames="(FUN)|(.+apply)|(replicate)", 44 | retIfNone="Not in function", 45 | retStack=FALSE, 46 | extraPrefPerLevel="\t") { 47 | prefix <- sapply(3 + skipframes+1:sys.nframe(), function(i) { 48 | currv<-sys.call(sys.parent(n=i))[[1]] 49 | return(currv) 50 | }); 51 | prefix[grep(skipnames, prefix)] <- NULL; 52 | prefix <- gsub("function \\(.*", "do.call", prefix); 53 | if(length(prefix)==0) { 54 | return(retIfNone); 55 | } 56 | else if(retStack) { 57 | return(paste(rev(prefix), collapse = "|")); 58 | } 59 | else { 60 | res <- as.character(unlist(prefix[1])); 61 | if (length(prefix) > 1) { 62 | res <- paste(paste(rep(extraPrefPerLevel, length(prefix) - 1), collapse=""), res, sep=""); 63 | } 64 | return(res); 65 | } 66 | } 67 | -------------------------------------------------------------------------------- /man/exceptionalScore.Rd: -------------------------------------------------------------------------------- 1 | \name{exceptionalScore} 2 | \alias{exceptionalScore} 3 | %- Also NEED an '\alias' for EACH other topic documented here. 4 | \title{ 5 | exceptionalScore 6 | } 7 | \description{ 8 | This function can be used to detect exceptionally high or low scores in a vector. 9 | } 10 | \usage{ 11 | exceptionalScore(x, prob = 0.025, both = TRUE, silent = FALSE, 12 | quantileCorrection = 1e-04, quantileType = 8) 13 | } 14 | %- maybe also 'usage' for other objects documented here. 15 | \arguments{ 16 | \item{x}{ 17 | Vector in which to detect exceptional scores. 18 | } 19 | \item{prob}{ 20 | Probability that a score is exceptionally positive or negative; i.e. scores 21 | with a quantile lower than \code{prob} or higher than 1-\code{prob} are 22 | considered exceptional (if both is TRUE, at least). So, note that a \code{prob} 23 | of .025 means that if both=TRUE, the most exceptional 5\% of the values is 24 | marked as such. 25 | } 26 | \item{both}{ 27 | Whether to consider values exceptional if they're below \code{prob} as well 28 | as above 1-\code{prob}, or whether to only consider values exceptional if 29 | they're below \code{prob} if \code{prob} is < .5, or above \code{prob} if 30 | \code{prob} > .5. 31 | } 32 | \item{silent}{ 33 | Can be used to suppress messages. 34 | } 35 | \item{quantileCorrection}{ 36 | By how much to correct the computed quantiles; this is used because when 37 | a distribution is very right-skewed, the lowest quantile is the lowest 38 | value, which is then also the mode; without subtracting a correction, almost 39 | all values would be marked as 'exceptional'. 40 | } 41 | \item{quantileType}{ 42 | The algorithm used to compute the quantiles; see \code{\link{quantile}}. 43 | } 44 | } 45 | \details{ 46 | Note that of course, by definition, \code{prob} or \code{2*prob} percent of the 47 | values is exceptional, so it is usually not a wise idea to remove scores based 48 | on their 'exceptionalness'.
Instead, use \code{\link{exceptionalScores}}, 49 | which calls this function, to see how often participants answered 50 | exceptionally, and remove them based on that. 51 | } 52 | \value{ 53 | A logical vector, indicating for each value in the supplied vector whether it 54 | is exceptional. 55 | } 56 | \author{ 57 | Gjalt-Jorn Peters 58 | 59 | Maintainer: Gjalt-Jorn Peters 60 | } 61 | \seealso{ 62 | \code{\link{quantile}}, \code{\link{exceptionalScores}} 63 | } 64 | \examples{ 65 | exceptionalScore(c(1,1,2,2,2,3,3,3,4,4,4,5,5,5,5,6,6,7,8,20), prob=.05); 66 | } 67 | \keyword{ univariate } 68 | -------------------------------------------------------------------------------- /man/exceptionalScores.Rd: -------------------------------------------------------------------------------- 1 | \name{exceptionalScores} 2 | \alias{exceptionalScores} 3 | %- Also NEED an '\alias' for EACH other topic documented here. 4 | \title{ 5 | exceptionalScores 6 | } 7 | \description{ 8 | A function to detect participants that consistently respond exceptionally. 9 | } 10 | \usage{ 11 | exceptionalScores(dat, items = NULL, exception = 0.025, totalOnly = TRUE, 12 | append = TRUE, both = TRUE, silent = FALSE, 13 | suffix = "_isExceptional", totalVarName = "exceptionalScores") 14 | } 15 | %- maybe also 'usage' for other objects documented here. 16 | \arguments{ 17 | \item{dat}{ 18 | The dataframe containing the variables to inspect, or the vector to inspect 19 | (but for vectors, \code{\link{exceptionalScore}} might be more useful). 20 | } 21 | \item{items}{ 22 | The names of the variables to inspect. 23 | } 24 | \item{exception}{ 25 | When an item will be considered exceptional, passed on as \code{prob} to 26 | \code{\link{exceptionalScore}}. 27 | } 28 | \item{totalOnly}{ 29 | Whether to return only the number of exceptional scores for each row in the 30 | dataframe, or for each inspected item, which values are exceptional. 31 | } 32 | \item{append}{ 33 | Whether to return the supplied dataframe with the new variable(s) appended 34 | (if TRUE), or whether to only return the new variable(s) (if FALSE). 35 | } 36 | \item{both}{ 37 | Whether to look for both low and high exceptional scores (TRUE) or not (FALSE; 38 | see \code{\link{exceptionalScore}}). 39 | } 40 | \item{silent}{ 41 | Can be used to suppress messages. 42 | } 43 | \item{suffix}{ 44 | If not returning the total number of exceptional values, for each inspected 45 | variable, a new variable is returned indicating which values are exceptional. 46 | The text string is appended to each original variable name to create the new 47 | variable names. 48 | } 49 | \item{totalVarName}{ 50 | If returning only the total number of exceptional values, and appending these 51 | to the provided dataset, this text string is used as variable name. 52 | } 53 | } 54 | \value{ 55 | Either a vector containing the number of exceptional values, a dataset 56 | containing, for each inspected variable, which values are exceptional, or the 57 | provided dataset where either the total or the exceptional values for each 58 | variable are appended. 
59 | } 60 | \author{ 61 | Gjalt-Jorn Peters 62 | 63 | Maintainer: Gjalt-Jorn Peters 64 | } 65 | 66 | \seealso{ 67 | \code{\link{exceptionalScore}} 68 | } 69 | \examples{ 70 | exceptionalScores(mtcars) 71 | } 72 | -------------------------------------------------------------------------------- /R/invertItems.R: -------------------------------------------------------------------------------- 1 | #' invertItems 2 | #' 3 | #' Inverts items (as in, in a questionnaire), by calling 4 | #' \code{\link{invertItem}} on all relevant items. 5 | #' 6 | #' 7 | #' @param dat The dataframe containing the variables to invert. 8 | #' @param items The names or indices of the variables to invert. If not 9 | #' supplied (i.e. NULL), all variables in the dataframe will be inverted. 10 | #' @param \dots Arguments (parameters) passed on to data.frame when recreating 11 | #' that after having used lapply. 12 | #' @return The dataframe with the specified items inverted. 13 | #' @author Gjalt-Jorn Peters 14 | #' 15 | #' Maintainer: Gjalt-Jorn Peters 16 | #' @seealso \code{\link{invertItem}} 17 | #' @keywords univariate 18 | #' @examples 19 | #' 20 | #' invertItems(mtcars, c('cyl')); 21 | #' 22 | #' @export invertItems 23 | invertItems <- function(dat, items = NULL, ...) { 24 | if (is.null(items)) { 25 | items <- names(dat); 26 | } else if ((!is.character(items)) && (!is.numeric(items))) { 27 | stop("Argument 'items' is not a character vector or numeric vector ", 28 | "(but instead of type ", typeof(items), ")."); 29 | } 30 | usedDat <- dat[, items, drop=FALSE]; 31 | 32 | ### Previous inversions 33 | prevInv <- lapply(dat[, items], attr, 'inverted'); 34 | ### Replace NULL with FALSE 35 | prevInv <- lapply(prevInv, function(x) ifelse(is.null(x), FALSE, x)); 36 | ### Warn if one or more items were already inverted 37 | if (sum(unlist(prevInv)) > 0) { 38 | alreadyInverted <- names(prevInv)[unlist(prevInv)]; 39 | warning("Variables (columns) ", vecTxt(alreadyInverted, useQuote='"'), 40 | " have already been inverted! ", 41 | "Set ignorePreviousInversion to TRUE to override this check ", 42 | "and invert the vector anyway."); 43 | usedDat <- usedDat[, !(names(usedDat) %in% alreadyInverted), drop=FALSE]; 44 | } 45 | 46 | ### Convert all factors to numeric vectors 47 | usedDat <- massConvertToNumeric(usedDat); 48 | 49 | ### Check whether any non-numeric vectors remain 50 | invalidVectors <- lapply(usedDat, is.numeric); 51 | if (FALSE %in% unlist(invalidVectors)) { 52 | invalidVectors <- names(invalidVectors)[!unlist(invalidVectors)]; 53 | warning("Variables (columns) ", vecTxt(invalidVectors, useQuote='"'), 54 | " have a type other than numeric or factor! Ignoring these."); 55 | usedDat <- usedDat[, !(names(usedDat) %in% invalidVectors), drop=FALSE]; 56 | } 57 | 58 | items <- names(usedDat); 59 | 60 | dat[, items] <- data.frame(lapply(usedDat, invertItem), ...); 61 | return(dat); 62 | } 63 | -------------------------------------------------------------------------------- /R/knitFig.R: -------------------------------------------------------------------------------- 1 | #' Easily knit a custom figure fragment 2 | #' 3 | #' This function was written to make it easy to knit figures with different, or 4 | #' dynamically generated, widths and heights (and captions) in the same chunk 5 | #' when working with R Markdown. 6 | #' 7 | #' 8 | #' @param plotToDraw The plot to draw, e.g. a \code{\link{ggplot}} plot. 9 | #' @param template A character value with the \code{\link{knit_expand}} 10 | #' template to use.
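#'   (If this is \code{NULL}, which is the case when the \code{ufs.knitFig.template}
#'   option is not set, a built-in default template is used: a \code{knitr} chunk
#'   that is expanded with the \code{chunkName}, \code{figWidth}, \code{figHeight}
#'   and \code{figCaption} values passed to this function; see the function body below.)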
11 | #' @param figWidth The width to set for the figure (in inches). 12 | #' @param figHeight The height to set for the figure (in inches). 13 | #' @param figCaption The caption to set for the figure. 14 | #' @param chunkName Optionally, the name for the chunk. To avoid problems 15 | #' because multiple chunks have the name "\code{unnamed-chunk-1}", if no chunk 16 | #' name is provided, \code{\link{digest}} is used to generate an MD5-hash from 17 | #' \code{\link{Sys.time}}. 18 | #' @param \dots Any additional arguments are passed on to 19 | #' \code{\link{knit_expand}}. 20 | #' @return This function returns nothing, but uses \code{\link{knit_expand}} 21 | #' and \code{\link{knit}} to \code{\link{cat}} the result. 22 | #' @author Gjalt-Jorn Peters 23 | #' 24 | #' Maintainer: Gjalt-Jorn Peters 25 | #' @seealso \code{\link{knit_expand}} and \code{\link{knit}} 26 | #' @keywords utilities 27 | #' @examples 28 | #' 29 | #' \dontrun{ 30 | #' knitFig(ggProportionPlot(mtcars$cyl)) 31 | #' } 32 | #' 33 | #' @export knitFig 34 | knitFig <- function(plotToDraw, 35 | template = getOption("ufs.knitFig.template", NULL), 36 | figWidth = getOption("ufs.knitFig.figWidth", 16 / 2.54), 37 | figHeight = getOption("ufs.knitFig.figHeight", 16 / 2.54), 38 | figCaption = "A plot.", 39 | chunkName = NULL, 40 | ...) { 41 | if (is.null(template)) { 42 | template <- "\n\n```{r {{chunkName}}, fig.height={{figHeight}}, fig.width={{figWidth}}, fig.cap='{{figCaption}}', echo=FALSE, cache=FALSE, message=FALSE, results='asis' } 43 | grid.newpage(); 44 | grid.draw(tmpPlotStorage); 45 | ```\n\n"; 46 | } 47 | assign('tmpPlotStorage', plotToDraw); 48 | if (is.null(chunkName)) { 49 | chunkName <- digest(Sys.time()); 50 | } 51 | cat(knit(text = knit_expand(text = template, 52 | figWidth = figWidth, 53 | figHeight = figHeight, 54 | figCaption = figCaption, 55 | chunkName = chunkName, 56 | ...), 57 | quiet = TRUE)); 58 | } 59 | -------------------------------------------------------------------------------- /R/examineBy.R: -------------------------------------------------------------------------------- 1 | examineBy <- function(..., by=NULL, stem=TRUE, plots=TRUE, 2 | extremeValues = 5, descr.include=NULL, 3 | qqCI=TRUE, conf.level=.95) { 4 | 5 | if (is.null(by)) { 6 | stop("You have to specify a 'by' argument. If you don't want to ", 7 | "order descriptives organised by another variable, use 'examine'."); 8 | } 9 | 10 | if (length(list(...)) == 1) { 11 | dat <- list(...)[[1]]; 12 | if (is.data.frame(dat)) { 13 | varNames <- names(dat); 14 | } else { 15 | varNames <- unlist(as.list(substitute(list(...)))[-1]); 16 | } 17 | } else { 18 | if (length(unique(unlist(lapply(list(...), length)))) != 1) { 19 | stop("The vectors that were provided has unequal lengths ", 20 | "(specifically, ", vecTxt(lapply(list(...), length)), ")."); 21 | } 22 | dat <- list(...); 23 | varNames <- unlist(as.list(substitute(list(...)))[-1]); 24 | } 25 | 26 | dat <- as.data.frame(dat); 27 | names(dat) <- extractVarName(varNames); 28 | 29 | res <- dlply(dat, as.quoted(~by), examine, 30 | stem=stem, plots=plots, 31 | extremeValues=extremeValues, 32 | descr.include=descr.include, 33 | qqCI=qqCI, conf.level=conf.level); 34 | 35 | class(res) <- 'examineBy'; 36 | 37 | return(res); 38 | 39 | } 40 | 41 | print.examineBy <- function(x, ...) 
{ 42 | 43 | for (examineObjects in 1:length(x)) { 44 | cat0(repStr("#", 60), "\n"); 45 | cat0(extractVarName(names(x)[examineObjects]), "\n"); 46 | cat0(repStr("#", 60), "\n\n"); 47 | print(x[[examineObjects]]); 48 | } 49 | 50 | } 51 | 52 | pander.examineBy <- function(x, headerPrefix = "", 53 | headerStyle = "**", 54 | secondaryHeaderPrefix = "", 55 | secondaryHeaderStyle="*", 56 | tertairyHeaderPrefix = "--> ", 57 | tertairyHeaderStyle="", 58 | separator = paste0("\n\n", repStr("-", 10), "\n\n"), 59 | ...) { 60 | 61 | for (examineObjects in 1:length(x)) { 62 | cat("\n"); 63 | if (examineObjects > 1) 64 | cat0(separator); 65 | cat0(headerPrefix, headerStyle, 66 | extractVarName(names(x)[examineObjects]), 67 | headerStyle); 68 | pander(x[[examineObjects]], 69 | headerPrefix=secondaryHeaderPrefix, 70 | headerStyle=secondaryHeaderStyle, 71 | secondaryHeaderPrefix=tertairyHeaderPrefix, 72 | secondaryHeaderStyle=tertairyHeaderStyle); 73 | } 74 | 75 | } 76 | -------------------------------------------------------------------------------- /man/showPearsonPower.Rd: -------------------------------------------------------------------------------- 1 | \name{showPearsonPower} 2 | \alias{showPearsonPower} 3 | \title{ 4 | Visualisation of the power of a Pearson correlation test 5 | } 6 | \description{ 7 | This function is useful when conducting power analyses for a Pearson correlation. It draws the sampling distribution of Pearson's \emph{r} assuming a null hypothesis value of \emph{r} and assuming the hypothetical population value. The probability of making a Type 1 error is also illustrated. 8 | } 9 | \usage{ 10 | showPearsonPower(n = 100, rho = 0.3, rNull = 0, 11 | distLabels = c("Null Hypothesis", "Population"), 12 | rhoColor = "green", rhoFill = "green", 13 | rhoAlpha = 0.1, rhoLineSize = 1, 14 | rNullColor = "blue", rNullFill = "blue", 15 | rNullAlpha = 0.1, rNullLineSize = 1, 16 | type2Color = "red", type2Fill = "red", 17 | type2Alpha = 0.1, type2LineSize = 0, 18 | theme = dlvTheme(), alpha = 0.05, digits = 3) 19 | } 20 | %- maybe also 'usage' for other objects documented here. 21 | \arguments{ 22 | \item{n}{ 23 | The number of participants. 24 | } 25 | \item{rho}{ 26 | The value of the correlation coefficient in the population. 27 | } 28 | \item{rNull}{ 29 | The value of the correlation coefficient according to the null hypothesis. 30 | } 31 | \item{distLabels}{ 32 | Labels for the two distributions; the first one is the null hypothesis distribution, the second one the alternative distribution. 33 | } 34 | \item{rhoColor, rNullColor, type2Color}{ 35 | The border colors of the distributions and the region used to illustrate the Type 2 error probability. 36 | } 37 | \item{rhoFill, rNullFill, type2Fill}{ 38 | The fill colors of the distributions and the region used to illustrate the Type 2 error probability. 39 | } 40 | \item{rhoAlpha, rNullAlpha, type2Alpha}{ 41 | The alpha (transparency) of the distributions and the region used to illustrate the Type 2 error probability. 42 | } 43 | \item{rhoLineSize, rNullLineSize, type2LineSize}{ 44 | The line thicknesses of the distributions and the region used to illustrate the Type 2 error probability. 45 | } 46 | \item{theme}{ 47 | The theme to use. 48 | } 49 | \item{alpha}{ 50 | The significance level (alpha) of the null hypothesis test. 51 | } 52 | \item{digits}{ 53 | The number of digits to round to. 54 | } 55 | } 56 | \value{ 57 | A \code{\link{ggplot}} plot is returned.
58 | } 59 | \author{ 60 | Gjalt-Jorn Peters 61 | 62 | Maintainer: Gjalt-Jorn Peters 63 | } 64 | \seealso{ 65 | \code{\link{didacticPlot}} 66 | } 67 | \examples{ 68 | \dontrun{ 69 | showPearsonPower(); 70 | } 71 | } 72 | \keyword{ hplot } 73 | -------------------------------------------------------------------------------- /man/examine.Rd: -------------------------------------------------------------------------------- 1 | \name{examine} 2 | \alias{examine} 3 | \alias{examineBy} 4 | \title{ 5 | Examine one or more variables 6 | } 7 | \description{ 8 | These functions are one of many R functions enabling users to assess variable descriptives. They have been developed to mimic SPSS' 'EXAMINE' syntax command ('Explore' in the menu) as closely as possible to ease the transition for new R users and facilitate teaching courses where both programs are taught alongside each other. 9 | } 10 | \usage{ 11 | examine(..., stem = TRUE, plots = TRUE, 12 | extremeValues = 5, descr.include = NULL, 13 | qqCI = TRUE, conf.level = 0.95) 14 | examineBy(..., by=NULL, stem = TRUE, plots = TRUE, 15 | extremeValues = 5, descr.include=NULL, 16 | qqCI = TRUE, conf.level=.95) 17 | } 18 | \arguments{ 19 | \item{\dots}{ 20 | The first argument is a list of variables to provide descriptives for. Because these are the first arguments, the other arguments must be named explicitly so R does not confuse them for something that should be part of the dots. 21 | } 22 | \item{by}{ 23 | A variable by which to split the dataset before calling \code{\link{examine}}. This can be used to show the descriptives separate by levels of a factor. 24 | } 25 | \item{stem}{ 26 | Whether to display a stem and leaf plot. 27 | } 28 | \item{plots}{ 29 | Whether to display the plots generated by the \code{\link{dataShape}} function. 30 | } 31 | \item{extremeValues}{ 32 | How many extreme values to show at either end (the highest and lowest values). When set to FALSE (or 0), no extreme values are shown. 33 | } 34 | \item{qqCI}{ 35 | Whether to display confidence intervals in the QQ-plot. 36 | } 37 | \item{descr.include}{ 38 | Which descriptives to include; see \code{\link{descr}} for more information. 39 | } 40 | \item{conf.level}{ 41 | The level of confidence of the confidence interval. 42 | } 43 | } 44 | \details{ 45 | This function basically just calls the \code{\link{descr}} function, optionally supplemented with calls to \code{\link{stem}}, \code{\link{dataShape}}. 46 | } 47 | \value{ 48 | A list that is displayed when printed. 49 | } 50 | \author{ 51 | Gjalt-Jorn Peters 52 | 53 | Maintainer: Gjalt-Jorn Peters 54 | } 55 | 56 | \seealso{ 57 | \code{\link{descr}}, \code{\link{dataShape}}, \code{\link{stem}} 58 | } 59 | \examples{ 60 | ### Look at the miles per gallon descriptives: 61 | examine(mtcars$mpg, stem=FALSE, plots=FALSE); 62 | 63 | ### Separate for the different number of cylinders: 64 | examineBy(mtcars$mpg, by=mtcars$cyl, 65 | stem=FALSE, plots=FALSE, 66 | extremeValues=FALSE, 67 | descr.include=c('central tendency', 'spread')); 68 | 69 | } 70 | \keyword{ univar } 71 | -------------------------------------------------------------------------------- /R/multiVarFreq.R: -------------------------------------------------------------------------------- 1 | #' Generate a table collapsing frequencies of multiple variables 2 | #' 3 | #' This function can be used to efficiently combine the frequencies of 4 | #' variables with the same possible values. 
The frequencies are collapsed into 5 | #' a table with the variable names as row names and the possible values as 6 | #' column (variable) names. 7 | #' 8 | #' 9 | #' @param data The dataframe containing the variables. 10 | #' @param items The variable names. 11 | #' @param labels Labels can be provided which will be set as row names when 12 | #' provided. 13 | #' @param sortByMean Whether to sort the rows by mean value for each variable 14 | #' (only sensible if the possible values are numeric). 15 | #' @return The resulting dataframe, but with class 'multiVarFreq' prepended to 16 | #' allow pretty printing. 17 | #' @author Gjalt-Jorn Peters 18 | #' 19 | #' Maintainer: Gjalt-Jorn Peters 20 | #' @seealso \code{\link{table}}, \code{\link{freq}} 21 | #' @keywords utilities 22 | #' @examples 23 | #' 24 | #' multiVarFreq(mtcars, c('gear', 'carb')); 25 | #' 26 | #' @export multiVarFreq 27 | multiVarFreq <- function(data, 28 | items = NULL, 29 | labels = NULL, 30 | sortByMean = TRUE) { 31 | 32 | if (is.null(items)) { 33 | items <- names(data); 34 | } 35 | 36 | if (!all(items %in% names(data))) { 37 | stop("You specified items that do not exist in the data you provided (specifically, ", 38 | vecTxtQ(items[!items %in% names(data)]), ")."); 39 | } 40 | 41 | if (sortByMean && length(items) > 1) { 42 | tmpVarOrder <- order(colMeans(data[, items], 43 | na.rm=TRUE), 44 | decreasing=FALSE); 45 | } else { 46 | tmpVarOrder <- 1:length(items); 47 | } 48 | 49 | if (is.null(labels)) { 50 | labels <- items; 51 | } 52 | 53 | res <- do.call(rbind.fill, 54 | lapply(data[, items], 55 | function(x) 56 | return(as.data.frame(t(as.matrix(table(x))))) 57 | )); 58 | 59 | rownames(res) <- labels; 60 | 61 | res <- res[tmpVarOrder, ]; 62 | 63 | if (all(grepl('\\d+', colnames(res)))) { 64 | res <- res[, order(as.numeric(colnames(res)))]; 65 | } 66 | 67 | class(res) <- c('multiVarFreq', class(res)); 68 | 69 | return(res); 70 | 71 | } 72 | 73 | print.multiVarFreq <- function(x, ...) { 74 | class(x) <- 'data.frame'; 75 | x <- as.matrix(x); 76 | print(x, na.print="", ...); 77 | } 78 | 79 | pander.multiVarFreq <- function(x, ...) { 80 | class(x) <- 'data.frame'; 81 | cat("\n\n"); 82 | pander(x, missing = ""); 83 | cat("\n\n"); 84 | } 85 | -------------------------------------------------------------------------------- /docs/jquery.sticky-kit.min.js: -------------------------------------------------------------------------------- 1 | /* 2 | Sticky-kit v1.1.2 | WTFPL | Leaf Corcoran 2015 | http://leafo.net 3 | */ 4 | (function(){var b,f;b=this.jQuery||window.jQuery;f=b(window);b.fn.stick_in_parent=function(d){var A,w,J,n,B,K,p,q,k,E,t;null==d&&(d={});t=d.sticky_class;B=d.inner_scrolling;E=d.recalc_every;k=d.parent;q=d.offset_top;p=d.spacer;w=d.bottoming;null==q&&(q=0);null==k&&(k=void 0);null==B&&(B=!0);null==t&&(t="is_stuck");A=b(document);null==w&&(w=!0);J=function(a,d,n,C,F,u,r,G){var v,H,m,D,I,c,g,x,y,z,h,l;if(!a.data("sticky_kit")){a.data("sticky_kit",!0);I=A.height();g=a.parent();null!=k&&(g=g.closest(k)); 5 | if(!g.length)throw"failed to find stick parent";v=m=!1;(h=null!=p?p&&a.closest(p):b("
"))&&h.css("position",a.css("position"));x=function(){var c,f,e;if(!G&&(I=A.height(),c=parseInt(g.css("border-top-width"),10),f=parseInt(g.css("padding-top"),10),d=parseInt(g.css("padding-bottom"),10),n=g.offset().top+c+f,C=g.height(),m&&(v=m=!1,null==p&&(a.insertAfter(h),h.detach()),a.css({position:"",top:"",width:"",bottom:""}).removeClass(t),e=!0),F=a.offset().top-(parseInt(a.css("margin-top"),10)||0)-q, 6 | u=a.outerHeight(!0),r=a.css("float"),h&&h.css({width:a.outerWidth(!0),height:u,display:a.css("display"),"vertical-align":a.css("vertical-align"),"float":r}),e))return l()};x();if(u!==C)return D=void 0,c=q,z=E,l=function(){var b,l,e,k;if(!G&&(e=!1,null!=z&&(--z,0>=z&&(z=E,x(),e=!0)),e||A.height()===I||x(),e=f.scrollTop(),null!=D&&(l=e-D),D=e,m?(w&&(k=e+u+c>C+n,v&&!k&&(v=!1,a.css({position:"fixed",bottom:"",top:c}).trigger("sticky_kit:unbottom"))),eb&&!v&&(c-=l,c=Math.max(b-u,c),c=Math.min(q,c),m&&a.css({top:c+"px"})))):e>F&&(m=!0,b={position:"fixed",top:c},b.width="border-box"===a.css("box-sizing")?a.outerWidth()+"px":a.width()+"px",a.css(b).addClass(t),null==p&&(a.after(h),"left"!==r&&"right"!==r||h.append(a)),a.trigger("sticky_kit:stick")),m&&w&&(null==k&&(k=e+u+c>C+n),!v&&k)))return v=!0,"static"===g.css("position")&&g.css({position:"relative"}), 8 | a.css({position:"absolute",bottom:d,top:"auto"}).trigger("sticky_kit:bottom")},y=function(){x();return l()},H=function(){G=!0;f.off("touchmove",l);f.off("scroll",l);f.off("resize",y);b(document.body).off("sticky_kit:recalc",y);a.off("sticky_kit:detach",H);a.removeData("sticky_kit");a.css({position:"",bottom:"",top:"",width:""});g.position("position","");if(m)return null==p&&("left"!==r&&"right"!==r||a.insertAfter(h),h.remove()),a.removeClass(t)},f.on("touchmove",l),f.on("scroll",l),f.on("resize", 9 | y),b(document.body).on("sticky_kit:recalc",y),a.on("sticky_kit:detach",H),setTimeout(l,0)}};n=0;for(K=this.length;n .5. 22 | #' @param silent Can be used to suppress messages. 23 | #' @param quantileCorrection By how much to correct the computed quantiles; 24 | #' this is used because when a distribution is very right-skewed, the lowest 25 | #' quantile is the lowest value, which is then also the mode; without 26 | #' subtracting a correction, almost all values would be marked as 27 | #' 'exceptional'. 28 | #' @param quantileType The algorithm used to compute the quantiles; see 29 | #' \code{\link{quantile}}. 30 | #' @return A logical vector, indicating for each value in the supplied vector 31 | #' whether it is exceptional. 
32 | #' @author Gjalt-Jorn Peters 33 | #' 34 | #' Maintainer: Gjalt-Jorn Peters 35 | #' @seealso \code{\link{quantile}}, \code{\link{exceptionalScores}} 36 | #' @keywords univariate 37 | #' @examples 38 | #' 39 | #' exceptionalScore(c(1,1,2,2,2,3,3,3,4,4,4,5,5,5,5,6,6,7,8,20), prob=.05); 40 | #' 41 | #' @export exceptionalScore 42 | exceptionalScore <- function(x, prob=.025, both=TRUE, silent=FALSE, 43 | quantileCorrection = .0001, quantileType = 8) { 44 | 45 | belowLower <- x < (quantile(x, probs=min(c(prob, 1-prob)), na.rm=TRUE, type=quantileType) - quantileCorrection); 46 | aboveUpper <- x > (quantile(x, probs=max(c(prob, 1-prob)), na.rm=TRUE, type=quantileType) + quantileCorrection); 47 | if (both) { 48 | return(belowLower | aboveUpper); 49 | } else { 50 | if (prob < .5) { 51 | return(belowLower); 52 | } else { 53 | return(aboveUpper); 54 | } 55 | } 56 | } 57 | -------------------------------------------------------------------------------- /man/itemInspection.Rd: -------------------------------------------------------------------------------- 1 | \name{itemInspection} 2 | \alias{itemInspection} 3 | %- Also NEED an '\alias' for EACH other topic documented here. 4 | \title{ 5 | itemInspection 6 | } 7 | \description{ 8 | Function to generate a PDF with four panels per page, showing some basic item characteristics. 9 | } 10 | \usage{ 11 | itemInspection(dat, items, 12 | docTitle = "Scale inspection", docAuthor = "Author", 13 | pdfLaTexPath, rnwPath, filename="itemInspection", 14 | convertFactors = TRUE, digits=4) 15 | } 16 | %- maybe also 'usage' for other objects documented here. 17 | \arguments{ 18 | \item{dat}{ 19 | Dataframe containing the items of the relevant scale 20 | } 21 | \item{items}{ 22 | Either a character vector with the itemnames, or, if the items are organised in scales, a list of character vectors with the items in each scale. 23 | } 24 | \item{docTitle}{ 25 | Title to use when generating the PDF. 26 | } 27 | \item{docAuthor}{ 28 | Author(s) to include when generating the PDF. 29 | } 30 | \item{pdfLaTexPath}{ 31 | The path to PdfLaTex. This file is part of a LaTeX installation that creates a pdf out of a .tex file. 32 | 33 | In Windows, you can download (portable) MikTex from 34 | http://miktex.org/portable. You then decide yourself 35 | where to install MikTex; pdflatex will end up in a 36 | subfolder 'miktex\\bin', so if you installed MikTex 37 | in, for example, 'C:\\Program Files\\MikTex', the total 38 | path becomes 'C:\\Program Files\\MikTex\\miktex\\bin'. Note 39 | that R uses slashes instead of backslashes to separate 40 | folders, so in this example, pdfLaTexPath should be 41 | 'C:/Program Files/MikTex/miktex/bin' 42 | 43 | In MacOS, you can install MacTex from http://tug.org/mactex/ 44 | By default, pdflatex ends up in folder '/user/texbin', which 45 | is what pdfLaTexPath should be in that default case. 46 | 47 | In Ubuntu, you can install TexLive base by using your package 48 | manager to install texlive-latex-base, or using the terminal: 49 | 'sudo apt-get install texlive-latex-base' 50 | In ubuntu, by default pdflatex ends un in folder '/usr/bin', 51 | which is what pdfLaTexPath should be in that default case. 52 | } 53 | \item{rnwPath}{ 54 | The path where the temporary files and the resulting PDF should be stored. 55 | } 56 | \item{filename}{ 57 | The filename to use to save the pdf. 58 | } 59 | \item{convertFactors}{ 60 | Whether to convert factors to numeric vectors for the analysis. 
61 | } 62 | \item{digits}{ 63 | The number of digits to use in the tables. 64 | } 65 | } 66 | \value{ 67 | 68 | This function returns nothing; it just generates a PDF. 69 | 70 | } 71 | \examples{ 72 | 73 | \dontrun{ 74 | itemInspection(mtcars, items=c('disp', 'hp', 'drat'), pdfLaTexPath="valid/path/here"); 75 | } 76 | 77 | } 78 | \keyword{ utilities } 79 | -------------------------------------------------------------------------------- /man/descr.Rd: -------------------------------------------------------------------------------- 1 | \name{descr} 2 | \alias{descr} 3 | \alias{descriptives} 4 | %- Also NEED an '\alias' for EACH other topic documented here. 5 | \title{ 6 | descr (or descriptives) 7 | } 8 | \description{ 9 | This function provides a number of descriptives about your data, similar to 10 | what SPSS's DESCRIPTIVES (often called with DESCR) does. 11 | } 12 | \usage{ 13 | descr(x, digits = 4, errorOnFactor = FALSE, 14 | include = c("central tendency", "spread", "range", 15 | "distribution shape", "sample size"), 16 | maxModes = 1, 17 | t = FALSE, conf.level=.95, 18 | quantileType = 2); 19 | } 20 | %- maybe also 'usage' for other objects documented here. 21 | \arguments{ 22 | \item{x}{ 23 | The vector for which to return descriptives. 24 | } 25 | \item{digits}{ 26 | The number of digits to round the results to when showing them. 27 | } 28 | \item{errorOnFactor}{ 29 | Whether to show an error when the vector is a factor, or just show the 30 | frequencies instead. 31 | } 32 | \item{include}{ 33 | Which elements to include when showing the results. 34 | } 35 | \item{maxModes}{ 36 | Maximum number of modes to display: displays "multi" if more than this number of modes is found. 37 | } 38 | \item{t}{ 39 | Whether to transpose the dataframes when printing them to the screen (this is easier for users relying on screen readers). 40 | } 41 | \item{conf.level}{ 42 | Confidence of confidence interval around the mean in the central tendency measures. 43 | } 44 | \item{quantileType}{ 45 | The type of quantiles to be used to compute the interquartile range (IQR). See \code{\link{quantile}} for more information. 46 | } 47 | } 48 | \details{ 49 | Note that R (of course) has many similar functions, such as \code{\link{summary}}, 50 | or \code{\link{describe}} in the excellent \code{\link{psych}} package. 51 | 52 | The Hartigans' Dip Test may be unfamiliar to users; it is a measure of uni- vs. multimodality, computed by \code{\link{dip.test}} from the \code{diptest} package. Depending on the sample size, values over .025 can be seen as mildly indicative of multimodality, while values over .05 probably warrant closer inspection (the p-value can be obtained using \code{\link{dip.test}}; also see Table 1 of Hartigan & Hartigan (1985) for an indication as to critical values). 53 | } 54 | \value{ 55 | A list of dataframes with the requested values. 56 | } 57 | \references{ 58 | Hartigan, J. A.; Hartigan, P. M. The Dip Test of Unimodality. Ann. Statist. 13 (1985), no. 1, 70--84. doi:10.1214/aos/1176346577. http://projecteuclid.org/euclid.aos/1176346577.
59 | } 60 | \author{ 61 | Gjalt-Jorn Peters 62 | 63 | Maintainer: Gjalt-Jorn Peters 64 | } 65 | \seealso{ 66 | \code{\link{summary}}, \code{\link{describe}} 67 | } 68 | \examples{ 69 | descr(mtcars$mpg); 70 | } 71 | \keyword{ univariate } 72 | -------------------------------------------------------------------------------- /R/associationsToDiamondPlotDf.R: -------------------------------------------------------------------------------- 1 | associationsToDiamondPlotDf <- function(dat, covariates, criterion, 2 | labels = NULL, 3 | decreasing=NULL, 4 | conf.level=.95, 5 | esMetric = 'r') { 6 | 7 | if (is.null(labels)) labels <- covariates; 8 | 9 | assocMatrix <- associationMatrix(dat, x=covariates, y=criterion); 10 | 11 | resDf <- data.frame(lo = as.numeric(assocMatrix$output$raw$ci.lo), 12 | es = as.numeric(assocMatrix$output$raw$es), 13 | hi = as.numeric(assocMatrix$output$raw$ci.hi)); 14 | 15 | if (esMetric == 'r') { 16 | resDf <- data.frame(matrix(sapply(1:length(covariates), function(i) { 17 | if (assocMatrix$output$raw$esType[i] == 'd') { 18 | return(convert.d.to.r(resDf[i, ])); 19 | } else if ((assocMatrix$output$raw$esType[i] == 'etasq') || 20 | (assocMatrix$output$raw$esType[i] == 'omegasq')) { 21 | return(sqrt(resDf[i, ])); 22 | } else { 23 | return(resDf[i, ]); 24 | } 25 | }), ncol=3, byrow=TRUE)); 26 | } else if (esMetric == 'd' | esMetric == 'g') { 27 | resDf <- data.frame(matrix(sapply(1:length(covariates), function(i) { 28 | if (assocMatrix$output$raw$esType[i] == 'r' | assocMatrix$output$raw$esType[i] == 'v') { 29 | return(convert.r.to.d(resDf[i, ])); 30 | } else if ((assocMatrix$output$raw$esType[i] == 'etasq') || 31 | (assocMatrix$output$raw$esType[i] == 'omegasq')) { 32 | return(convert.r.to.d(sqrt(resDf[i, ]))); 33 | } else { 34 | return(resDf[i, ]); 35 | } 36 | }), ncol=3, byrow=TRUE)); 37 | } else { 38 | stop("No other effect size metrics implemented yet!"); 39 | } 40 | 41 | names(resDf) <- c('lo', 'es', 'hi'); 42 | resDf$label <- labels; 43 | resDf$rownr <- 1:nrow(resDf); 44 | resDf$constant <- 1; 45 | 46 | if (!is.null(decreasing)) { 47 | ### Invert 'decreasing' because ggplot plots the lowest/first value first (near the origin). 48 | ### So a decreasing sort would normally result in higher means being displayed LOWER in 49 | ### the plot, which is counter-intuitive, hence the inversion. 50 | sortedByMean <- order(unlist(resDf$es), decreasing=!decreasing); 51 | resDf <- resDf[sortedByMean, ]; 52 | labels <- labels[sortedByMean]; 53 | } else { 54 | ### sortedByMean is used later on to organise the raw data; therefore, this should 55 | ### reflect the order of the variables on the Y axis regardless of whether they're 56 | ### reorganised 57 | sortedByMean <- 1:length(labels); 58 | } 59 | 60 | ### Return this vector as attribute to use in meansDiamondPlot 61 | attr(resDf, 'sortedByMean') <- sortedByMean; 62 | 63 | return(resDf); 64 | 65 | } 66 | -------------------------------------------------------------------------------- /man/setFigCapNumbering.Rd: -------------------------------------------------------------------------------- 1 | \name{setFigCapNumbering} 2 | \alias{setFigCapNumbering} 3 | \alias{setTabCapNumbering} 4 | \title{ 5 | Automatic caption numbering knitr hooks for figures and tables 6 | } 7 | \description{ 8 | These function implement ideas by Max Gordon and DeanK (see Details) to add \code{\link{knitr}} hooks to automate the numbering of figures and tables when generating R Markdown documents. 
9 | } 10 | \usage{ 11 | setFigCapNumbering(captionName = "fig.cap", 12 | figure_counter_str = "Figure \%s: ", 13 | figureClass = "", imgClass = "", 14 | figureInlineStyle = c("display:block"), 15 | imgInlineStyle = NULL, 16 | optionName = paste0("setCaptionNumbering_", captionName), 17 | resetCounterTo = 1) 18 | setTabCapNumbering(table_counter_str = ":Table \%s: ", 19 | resetCounterTo = 1) 20 | } 21 | \arguments{ 22 | \item{captionName}{ 23 | The name of the caption, used in the \code{\link{knitr}} chunk options to provide the caption text. 24 | } 25 | \item{figure_counter_str, table_counter_str}{ 26 | The string in which to add the number of the figure or table. The text '\%s' will be replaced by the number. 27 | } 28 | \item{figureClass}{ 29 | Optionally, a css class to pass to the HTML element that surrounds the . 30 | } 31 | \item{imgClass}{ 32 | Optionall, a css class to pass to the HTML element. 33 | } 34 | \item{figureInlineStyle}{ 35 | Any css style to pass to the figure element directly ('inline'). 36 | } 37 | \item{imgInlineStyle}{ 38 | Any css style to pass to the image element directly ('inline'). 39 | } 40 | \item{optionName}{ 41 | The name of the option to use to retrieve and set the counter. This can be used, for example, to have multiple caption types use the same counter. 42 | } 43 | \item{resetCounterTo}{ 44 | If not \code{NULL} and numeric, the counter will start at this number. 45 | } 46 | } 47 | \details{ 48 | The figure caption function is basically the one designed by Max Gordon (see \url{http://gforge.se/2014/01/fast-track-publishing-using-knitr-part-iii/}. 49 | 50 | The table caption function is an implementation of the ideas of DeanK (see \url{http://stackoverflow.com/questions/15258233/using-table-caption-on-r-markdown-file-using-knitr-to-use-in-pandoc-to-convert-t}) combined with Max Gordon's function. 51 | } 52 | \value{ 53 | Nothing is returned; the correct hooks are configured for \code{\link{knitr}}. 54 | } 55 | \author{ 56 | Max Gordon (setFigCapNumbering) and DeanK (setTabCapNumbering); implemented by Gjalt-Jorn Peters 57 | 58 | Maintainer: Gjalt-Jorn Peters 59 | } 60 | \seealso{ 61 | \code{\link{knitr}} 62 | } 63 | \examples{ 64 | \dontrun{ 65 | setFigCapNumbering("This is figure number \%s, with caption text: "); 66 | } 67 | } 68 | \keyword{ utils } 69 | -------------------------------------------------------------------------------- /man/confIntR.Rd: -------------------------------------------------------------------------------- 1 | \name{confIntR} 2 | \alias{confIntR} 3 | \title{ 4 | A function to compute a correlation's confidence interval 5 | } 6 | \description{ 7 | This function computes the confidence interval for a given correlation and its sample size. This is useful to obtain confidence intervals for correlations reported in papers when informing power analyses. 8 | } 9 | \usage{ 10 | confIntR(r, N, conf.level = 0.95, plot = FALSE) 11 | } 12 | \arguments{ 13 | \item{r}{ 14 | The observed correlation coefficient. 15 | } 16 | \item{N}{ 17 | The sample size of the sample where the correlation was computed. 18 | } 19 | \item{conf.level}{ 20 | The desired confidence level of the confidence interval. 21 | } 22 | \item{plot}{ 23 | Whether to show a plot of the hypothesized sampling distribution 24 | (assuming the sample value happens to be the population value) of 25 | Pearson's \emph{r}. 26 | } 27 | } 28 | \value{ 29 | The confidence interval(s) in a matrix with two columns. The left column contains the lower bound, the right column the upper bound. 
The \code{\link{rownames}} are the observed correlations, and the \code{\link{colnames}} are 'lo' and 'hi'. The confidence level and sample size are stored as attributes. The results are returned like this to make it easy to access single correlation coefficients from the resulting object (see the examples). 30 | 31 | } 32 | \references{ 33 | Bonett, D. G., Wright, T. A. (2000). Sample size requirements for estimating Pearson, Kendall and Spearman correlations. \emph{Psychometrika, 65}, 23-28. 34 | 35 | Bonett, D. G. (2014). CIcorr.R and sizeCIcorr.R http://people.ucsc.edu/~dgbonett/psyc181.html 36 | 37 | Moinester, M., & Gottfried, R. (2014). Sample size estimation for correlations with pre-specified confidence interval. \emph{The Quantitative Methods of Psychology, 10}(2), 124-130. http://www.tqmp.org/RegularArticles/vol10-2/p124/p124.pdf 38 | 39 | Peters, G. J. Y. & Crutzen, R. (forthcoming) An easy and foolproof method for establishing how effective an intervention or behavior change method is: required sample size for accurate parameter estimation in health psychology. 40 | } 41 | \author{ 42 | Douglas Bonett (UC Santa Cruz, United States), with minor edits by Murray Moinester (Tel Aviv University, Israel) and Gjalt-Jorn Peters (Open University of the Netherlands, the Netherlands). 43 | 44 | Maintainer: Gjalt-Jorn Peters 45 | } 46 | \seealso{ 47 | \code{\link{confIntR}} 48 | } 49 | \examples{ 50 | 51 | ### To request confidence intervals for one correlation 52 | confIntR(.3, 100); 53 | 54 | ### The lower bound of a single correlation 55 | confIntR(.3, 100)[1]; 56 | 57 | ### To request confidence intervals for multiple correlations: 58 | confIntR(c(.1, .3, .5), 250); 59 | 60 | ### The upper bound of the correlation of .5: 61 | confIntR(c(.1, .3, .5), 250)['0.5', 'hi']; 62 | 63 | } 64 | \keyword{ htest } 65 | -------------------------------------------------------------------------------- /man/RsqDist.Rd: -------------------------------------------------------------------------------- 1 | \name{RsqDist} 2 | \alias{dRsq} 3 | \alias{pRsq} 4 | \alias{qRsq} 5 | \alias{rRsq} 6 | \title{ 7 | The distribution of R squared (as obtained in a regression analysis) 8 | } 9 | \description{ 10 | These functions use the beta distribution to provide the R Squared distribution. 11 | } 12 | \usage{ 13 | dRsq(x, nPredictors, sampleSize, populationRsq = 0) 14 | pRsq(q, nPredictors, sampleSize, populationRsq = 0, lower.tail = TRUE) 15 | qRsq(p, nPredictors, sampleSize, populationRsq = 0, lower.tail = TRUE) 16 | rRsq(n, nPredictors, sampleSize, populationRsq = 0) 17 | 18 | } 19 | \arguments{ 20 | \item{x, q}{ 21 | Vector of quantiles, or, in other words, the value(s) of R Squared. 22 | } 23 | \item{p}{ 24 | Vector of probabilites (\emph{p}-values). 25 | } 26 | \item{nPredictors}{ 27 | The number of predictors. 28 | } 29 | \item{sampleSize}{ 30 | The sample size. 31 | } 32 | \item{n}{ 33 | The number of R Squared values to generate. 34 | } 35 | \item{populationRsq}{ 36 | The value of R Squared in the population; this determines the center of the R Squared distribution. This has not been implemented yet in this version of \code{userfriendlyscience}. If anybody knows how to do this and lets me know, I'll happily integrate this of course. 37 | } 38 | \item{lower.tail}{ 39 | logical; if TRUE (default), probabilities are the likelihood of finding an R Squared smaller than the specified value; otherwise, the likelihood of finding an R Squared larger than the specified value. 
40 | } 41 | } 42 | \details{ 43 | The functions use the beta distribution (see \code{\link{dbeta}} and related functions) to provide the R Squared distribution. 44 | } 45 | \value{ 46 | \code{dRsq} gives the density, \code{pRsq} gives the distribution function, \code{qRsq} gives the quantile function, and \code{rRsq} generates random deviates. 47 | 48 | } 49 | \note{ 50 | These functions are based on the Stack Exchange (Cross Validated) post at \url{http://stats.stackexchange.com/questions/130069/what-is-the-distribution-of-r2-in-linear-regression-under-the-null-hypothesis}. Thus, the credits go to Alecos Papadopoulos, who provided the answer that was used to write these functions. 51 | } 52 | \author{ 53 | Gjalt-Jorn Peters (based on a CrossValidated answer by Alecos Papadopoulos) 54 | 55 | Maintainer: Gjalt-Jorn Peters 56 | } 57 | \seealso{ 58 | \code{\link{dbeta}}, \code{\link{pbeta}}, \code{\link{qbeta}}, \code{\link{rbeta}} 59 | } 60 | \examples{ 61 | ### Generate 10 random R Squared values 62 | ### with 2 predictors and 100 participants 63 | rRsq(10, 2, 100); 64 | 65 | ### Probability of finding an R Squared of 66 | ### .15 with 4 predictors and 100 participants 67 | pRsq(.15, 4, 100, lower.tail = FALSE); 68 | 69 | ### Probability of finding an R Squared of 70 | ### .15 with 15 predictors and 100 participants 71 | pRsq(.15, 15, 100, lower.tail=FALSE); 72 | 73 | } 74 | \keyword{ univar } 75 | -------------------------------------------------------------------------------- /R/OmegasqDist.R: -------------------------------------------------------------------------------- 1 | # domegaSq <- function(x, df1, df2, populationOmegaSq = 0) { 2 | # if (populationOmegaSq != 0) { 3 | # cat0("Noncentrality parameters not implemented yet, sorry!\n"); 4 | # } 5 | # ### Return density for given omega squared 6 | # return(df(convert.omegasq.to.f(x, df1, df2), df1, df2)); 7 | # } 8 | 9 | pomegaSq <- function(q, df1, df2, populationOmegaSq = 0, lower.tail=TRUE) { 10 | if (populationOmegaSq != 0) { 11 | cat0("Noncentrality parameters not implemented yet, sorry!\n"); 12 | } 13 | ### Return p-value for given omega squared 14 | return( pf(convert.omegasq.to.f(q, df1, df2), df1, df2, lower.tail=lower.tail) ); 15 | } 16 | 17 | qomegaSq <- function(p, df1, df2, populationOmegaSq = 0, lower.tail=TRUE) { 18 | if (populationOmegaSq != 0) { 19 | cat0("Noncentrality parameters not implemented yet, sorry!\n"); 20 | } 21 | ### Return omega squared for given p-value 22 | return(convert.f.to.omegasq(qf(p, df1, df2, lower.tail=lower.tail), df1, df2)); 23 | } 24 | 25 | romegaSq <- function(n, df1, df2, populationOmegaSq = 0) { 26 | if (populationOmegaSq != 0) { 27 | cat0("Noncentrality parameters not implemented yet, sorry!\n"); 28 | } 29 | ### Return random omega squared value(s) 30 | return(convert.f.to.omegasq(rf(n, df1, df2), df1, df2)); 31 | } 32 | 33 | domegaSq <- function(x, df1, df2, populationOmegaSq = 0) { 34 | return(df(convert.omegasq.to.f(x, df1, df2), df1, df2, 35 | ncp = convert.omegasq.to.f(populationOmegaSq, df1, df2))); 36 | } 37 | 38 | # pomegaSq <- function(q, df1, df2, populationOmegaSq = 0, lower.tail=TRUE) { 39 | # if (populationOmegaSq != 0) { 40 | # cat0("Noncentrality parameters not implemented yet, sorry!\n"); 41 | # } 42 | # ### Return p-value for given omega squared 43 | # return( pf(convert.omegasq.to.f(q, df1, df2), df1, df2, 44 | # lower.tail=lower.tail, 45 | # ncp = convert.omegasq.to.f(populationOmegaSq, df1, df2)) ); 46 | # } 47 | # 48 | # qomegaSq <- function(p, df1, df2,
populationOmegaSq = 0, lower.tail=TRUE) { 49 | # if (populationOmegaSq != 0) { 50 | # cat0("Noncentrality parameters not implemented yet, sorry!\n"); 51 | # } 52 | # ### Return omega squared for given p-value 53 | # return(convert.f.to.omegasq(qf(p, df1, df2, 54 | # lower.tail=lower.tail, 55 | # ncp = convert.omegasq.to.f(populationOmegaSq, df1, df2)), 56 | # df1, df2)); 57 | # } 58 | # 59 | # romegaSq <- function(n, df1, df2, populationOmegaSq = 0) { 60 | # if (populationOmegaSq != 0) { 61 | # cat0("Noncentrality parameters not implemented yet, sorry!\n"); 62 | # } 63 | # ### Return random omega squared value(s) 64 | # return(convert.f.to.omegasq(rf(n, df1, df2, 65 | # ncp = convert.omegasq.to.f(populationOmegaSq, df1, df2)), 66 | # df1, df2)); 67 | # } 68 | 69 | -------------------------------------------------------------------------------- /R/adTest_adapted_from_Fbasics.R: -------------------------------------------------------------------------------- 1 | 2 | # setClass("fHTEST", list(call = structure("call", package = "methods"), 3 | # data = structure("list", package = "methods"), test = structure("list", package = "methods"), 4 | # title = structure("character", package = "methods"), description = structure("character", package = "methods"))); 5 | 6 | # new("classRepresentation", slots = list(call = structure("call", package = "methods"), 7 | # data = structure("list", package = "methods"), test = structure("list", package = "methods"), 8 | # title = structure("character", package = "methods"), description = structure("character", package = "methods")), 9 | # contains = list(), virtual = FALSE, prototype = , 10 | # validity = NULL, access = list(), className = structure("fHTEST", package = "fBasics"), 11 | # package = "fBasics", subclasses = list(), versionKey = , 12 | # sealed = FALSE) 13 | 14 | # adTest_adapted_from_Fbasics <- function (x, title = NULL, description = NULL) { 15 | # DNAME = deparse(substitute(x)) 16 | # if (class(x) == "fREG") 17 | # x = residuals(x) 18 | # x = as.vector(x) 19 | # call = match.call() 20 | # x = sort(x) 21 | # n = length(x) 22 | # if (n < 8) 23 | # stop("sample size must be greater than 7") 24 | # var.x <- var(x) 25 | # if (var.x > 0) { 26 | # p = pnorm((x - mean(x))/sqrt(var.x)) 27 | # h = (2 * seq(1:n) - 1) * (log(p) + log(1 - rev(p))) 28 | # h = h[is.finite(h)] 29 | # n = length(h) 30 | # A = -n - mean(h) 31 | # AA = (1 + 0.75/n + 2.25/n^2) * A 32 | # if (AA < 0.2) { 33 | # PVAL = 1 - exp(-13.436 + 101.14 * AA - 223.73 * AA^2) 34 | # } 35 | # else if (AA < 0.34) { 36 | # PVAL = 1 - exp(-8.318 + 42.796 * AA - 59.938 * AA^2) 37 | # } 38 | # else if (AA < 0.6) { 39 | # PVAL = exp(0.9177 - 4.279 * AA - 1.38 * AA^2) 40 | # } 41 | # else { 42 | # PVAL = exp(1.2937 - 5.709 * AA + 0.0186 * AA^2) 43 | # } 44 | # if (PVAL > 1) { 45 | # PVAL = 1 46 | # W = NA 47 | # } 48 | # } 49 | # else { 50 | # A <- Inf 51 | # PVAL <- 0 52 | # } 53 | # names(PVAL) = "" 54 | # test = list(statistic = c(A = A), p.value = PVAL, method = "Anderson - Darling Normality Test", 55 | # data.name = DNAME) 56 | # if (is.null(title)) 57 | # title = "Anderson - Darling Normality Test" 58 | # if (is.null(description)) 59 | # description = paste(as.character(date()), "by user:", Sys.getenv("USERNAME")); 60 | # #description() 61 | # 62 | # # Making it work as regular R function sans class 63 | # # ans = new("fHTEST", call = call, data = list(x = x), test = test, 64 | # # title = as.character(title), description = as.character(description)) 65 | # return(test); 66 | # } 67 | 
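### The block above is kept commented out in the package. As a minimal,
### self-contained sketch (an illustration, not an exported function of this
### package; the name 'adStatisticSketch' is made up), the core Anderson-Darling
### computation it implements looks like this:
adStatisticSketch <- function(x) {
  x <- sort(as.vector(x));
  n <- length(x);
  if (n < 8) stop("sample size must be greater than 7");
  ### Standardize and take the normal CDF of each ordered value
  p <- pnorm((x - mean(x)) / sqrt(var(x)));
  ### Anderson-Darling sum, dropping non-finite terms as the code above does
  h <- (2 * seq_len(n) - 1) * (log(p) + log(1 - rev(p)));
  h <- h[is.finite(h)];
  n <- length(h);
  A <- -n - mean(h);
  ### Small-sample adjusted statistic, as in the commented-out code above
  AA <- (1 + 0.75 / n + 2.25 / n^2) * A;
  return(c(A = A, A.adjusted = AA));
}
### Example (assuming a random normal sample): adStatisticSketch(rnorm(100));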
--------------------------------------------------------------------------------