├── .Rbuildignore ├── .github └── FUNDING.yml ├── .gitignore ├── .travis.yml ├── DESCRIPTION ├── LICENSE.md ├── NAMESPACE ├── R ├── build.q.set.R ├── centroid.R ├── export.pqmethod.R ├── export.qm.R ├── import.easyhtml.R ├── import.htmlq.R ├── import.pqmethod.R ├── import.q.concourse.R ├── import.q.feedback.R ├── import.q.sorts.R ├── loa.and.flags.R ├── make.cards.R ├── make.distribution.R ├── plot.QmethodRes.R ├── print.QmethodRes.R ├── q.fnames.R ├── qbstep.R ├── qdc.R ├── qdc.zsc.R ├── qfcharact.R ├── qflag.R ├── qfsi.R ├── qindtest.R ├── qmb.plot.R ├── qmb.summary.R ├── qmboots.R ├── qmethod.R ├── qpcrustes.R ├── qzscores.R ├── runInterface.R ├── summary.QmethodRes.R └── zzz.R ├── README.md ├── data ├── importexample.RData └── lipset.rda ├── docs ├── Advanced-analysis.md ├── Contribute.md ├── Cookbook.md ├── Data-management.md ├── Development.md ├── GUI-old.md ├── GUI.md ├── Plots.md ├── Reporting.md ├── Sample-plot.md ├── _config.yml ├── _includes │ └── navigation.html ├── _layouts │ └── default.html ├── _sass │ └── jekyll-theme-minimal.scss └── index.md ├── inst ├── CITATION ├── cardtemplates │ └── AveryZweckformC32010.Rnw ├── extdata │ └── importexample │ │ ├── feedback │ │ └── after │ │ │ └── JohnDoe.csv │ │ ├── qsorts │ │ ├── after │ │ │ ├── JaneDoe.csv │ │ │ └── JohnDoe.csv │ │ └── before │ │ │ ├── JaneDoe.csv │ │ │ └── JohnDoe.csv │ │ └── sample │ │ ├── concourse │ │ ├── english │ │ │ ├── life-with-q.tex │ │ │ ├── q-uprising.tex │ │ │ ├── r-dominance.tex │ │ │ ├── small-village.tex │ │ │ └── video.tex │ │ ├── german │ │ │ ├── life-with-q.tex │ │ │ ├── q-uprising.tex │ │ │ ├── r-dominance.tex │ │ │ ├── small-village.tex │ │ │ └── video.tex │ │ └── ids.csv │ │ └── sampling-structure.csv └── shiny-examples │ └── qmethod-gui │ ├── rsconnect │ └── shinyapps.io │ │ └── azabala │ │ └── qmethod-gui.dcf │ ├── server.R │ └── ui.R └── man ├── build.q.set.Rd ├── centroid.Rd ├── export.pqmethod.Rd ├── export.qm.Rd ├── import.easyhtmlq.Rd ├── import.htmlq.Rd ├── import.pqmethod.Rd ├── import.q.concourse.Rd ├── import.q.feedback.Rd ├── import.q.sorts.Rd ├── importexample.Rd ├── lipset.Rd ├── loa.and.flags.Rd ├── make.cards.Rd ├── make.distribution.Rd ├── plot.QmethodRes.Rd ├── print.QmethodRes.Rd ├── q.fnames.Rd ├── qbstep.Rd ├── qdc.Rd ├── qdc.zsc.Rd ├── qfcharact.Rd ├── qflag.Rd ├── qfsi.Rd ├── qindtest.Rd ├── qmb.plot.Rd ├── qmb.summary.Rd ├── qmboots.Rd ├── qmethod-package.Rd ├── qmethod.Rd ├── qpcrustes.Rd ├── qzscores.Rd ├── runInterface.Rd └── summary.QmethodRes.Rd /.Rbuildignore: -------------------------------------------------------------------------------- 1 | ^.*\.Rproj$ 2 | ^\.Rproj\.user$ 3 | .git 4 | .gitignore 5 | LICENSE.md 6 | README.md 7 | CONTRIBUTING.md 8 | .DS_Store 9 | .travis.yml 10 | ^\docs 11 | -------------------------------------------------------------------------------- /.github/FUNDING.yml: -------------------------------------------------------------------------------- 1 | # These are supported funding model platforms 2 | 3 | github: # Replace with up to 4 GitHub Sponsors-enabled usernames e.g., [user1, user2] 4 | patreon: # Replace with a single Patreon username 5 | open_collective: # Replace with a single Open Collective username 6 | ko_fi: # Replace with a single Ko-fi username 7 | tidelift: # Replace with a single Tidelift platform-name/package-name e.g., npm/babel 8 | community_bridge: # Replace with a single Community Bridge project-name e.g., cloud-foundry 9 | liberapay: # Replace with a single Liberapay username 10 | issuehunt: # Replace with a single 
IssueHunt username 11 | otechie: # Replace with a single Otechie username 12 | custom: ['https://www.paypal.com/donate?hosted_button_id=GCMM9PTXPHNT8'] 13 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | gitHeadInfo.gin 2 | R/.Rapp.history 3 | *.DS_Store 4 | .Rproj.user 5 | .Rhistory 6 | qmethod.Rproj 7 | ~DESCRIPTION 8 | docs/ 9 | -------------------------------------------------------------------------------- /.travis.yml: -------------------------------------------------------------------------------- 1 | language: r 2 | 3 | # Don't fail just for warnings (though these need to be fixed for CRAN) 4 | warnings_are_errors: false 5 | 6 | sudo: required 7 | 8 | r_check_args: --no-examples 9 | 10 | notifications: 11 | email: false 12 | 13 | r_github_packages: 14 | - jimhester/covr 15 | - aiorazabala/qmethod 16 | 17 | after_success: 18 | - Rscript -e 'library(covr);codecov()' 19 | -------------------------------------------------------------------------------- /DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: qmethod 2 | Version: 1.8.4 3 | Date: 2023-03-23 4 | Title: Analysis of Subjective Perspectives Using Q Methodology 5 | Authors@R: c(person(given="Aiora", family="Zabala", comment=c("Main author", ORCID = "0000-0001-8534-3325"), email="aiora.zabala@gmail.com", role=c("aut", "cre")), 6 | person(given="Maximilian", family="Held", comment="Author of additional data management functions", email="maximilian.held83@gmail.com", role=c("aut")), 7 | person(given="Frans", family="Hermans", comment="Author of centroid extraction function", email="Hermans@iamo.de", role=c("aut")) 8 | ) 9 | Description: Analysis of Q methodology, used to identify distinct perspectives existing within a group. 10 | This methodology is used across social, health and environmental sciences to understand diversity of attitudes, discourses, or decision-making styles (for more information, see ). 11 | A single function runs the full analysis. Each step can be run separately using the corresponding functions: for automatic flagging of Q-sorts (manual flagging is optional), for statement scores, for distinguishing and consensus statements, and for general characteristics of the factors. 12 | The package allows to choose either principal components or centroid factor extraction, manual or automatic flagging, a number of mathematical methods for rotation (or none), and a number of correlation coefficients for the initial correlation matrix, among many other options. 13 | Additional functions are available to import and export data (from raw *.CSV, 'HTMLQ' and 'FlashQ' *.CSV, 'PQMethod' *.DAT and 'easy-htmlq' *.JSON files), to print and plot, to import raw data from individual *.CSV files, and to make printable cards. 14 | The package also offers functions to print Q cards and to generate Q distributions for study administration. 15 | See further details in the package documentation, and in the web pages below, which include a cookbook, guidelines for more advanced analysis (how to perform manual flagging or change the sign of factors), data management, and a graphical user interface (GUI) for online and offline use. 
16 | License: GPL (>= 2) 17 | Imports: 18 | methods, 19 | psych, 20 | tools, 21 | digest, 22 | knitr, 23 | xtable 24 | Suggests: 25 | shiny, 26 | rjson, 27 | MCMCpack 28 | LazyData: true 29 | Repository: CRAN 30 | URL: https://github.com/aiorazabala/qmethod, http://aiorazabala.github.io/qmethod/ 31 | BugReports: https://github.com/aiorazabala/qmethod/issues 32 | -------------------------------------------------------------------------------- /NAMESPACE: -------------------------------------------------------------------------------- 1 | import("methods", 2 | "psych", 3 | "digest", 4 | "tools", 5 | "xtable", 6 | "knitr") 7 | export("qmethod", 8 | "qzscores", 9 | "qflag", 10 | "qfcharact", 11 | "q.fnames", 12 | "qdc", 13 | "qdc.zsc", 14 | "import.pqmethod", 15 | "export.pqmethod", 16 | "import.htmlq", 17 | "import.easyhtmlq", 18 | "build.q.set", 19 | "import.q.concourse", 20 | "import.q.sorts", 21 | "import.q.feedback", 22 | "make.cards", 23 | "export.qm", 24 | "make.cards", 25 | "make.distribution", 26 | "qmboots", 27 | "qmb.summary", 28 | "qmb.plot", 29 | "qindtest", 30 | "qpcrustes", 31 | "qfsi", 32 | "qbstep", 33 | "loa.and.flags", 34 | "centroid", 35 | "runInterface") 36 | importFrom("grDevices", "grey", "rainbow", "gray") 37 | importFrom("graphics", "abline", "axis", "dotchart", "points", "legend", "mtext", "segments") 38 | importFrom("stats", "cor", "dnorm", "na.omit", "qnorm", "quantile", "sd", "var", "varimax") 39 | importFrom("utils", "capture.output", "combn", "read.csv", "read.csv2", "read.delim", "read.fwf", "packageVersion") 40 | S3method("plot", 41 | "QmethodRes") 42 | S3method("summary", 43 | "QmethodRes") 44 | S3method("print", 45 | "QmethodRes") 46 | 47 | -------------------------------------------------------------------------------- /R/build.q.set.R: -------------------------------------------------------------------------------- 1 | build.q.set <- function(q.concourse, q.sample, q.distribution) { 2 | q.sample <- as.character(q.sample) # just to be safe 3 | 4 | # Validate input ============================================================= 5 | if (!is.matrix(q.concourse)) { 6 | stop("The input specified for q.concourse is not a matrix.") 7 | } 8 | if (!is.vector(q.distribution)) { 9 | stop("The input specified for q.distribution is not a vector.") 10 | } 11 | if (!is.vector(q.sample)) { 12 | stop("The input specified for q.sample is not a vector.") 13 | } 14 | if (length(q.sample) != sum(q.distribution)) { # test if sums are equal 15 | stop( 16 | paste( 17 | "There are", 18 | length(q.sample), 19 | "items in your q-sample, but", 20 | sum(q.distribution), 21 | "entries expected in the q-distribution", 22 | sep=" " 23 | ) 24 | ) 25 | } 26 | missing.in.concourse <- !q.sample %in% rownames(q.concourse) 27 | if (any(missing.in.concourse)) { # if any missing, stop 28 | stop( 29 | paste( 30 | "There are item handles in your sample not defined in the concourse:", 31 | q.sample[missing.in.concourse], 32 | sep=" " 33 | ) 34 | ) 35 | } 36 | 37 | # Subset the concourse ================================================= 38 | q.set <- q.concourse[q.sample,] # only add sampled rows from concourse 39 | q.set <- as.matrix(q.set) 40 | message(paste("Built a q.set of", nrow(q.set), "items.")) 41 | return(q.set) 42 | } 43 | -------------------------------------------------------------------------------- /R/centroid.R: -------------------------------------------------------------------------------- 1 | #Frans Hermans, January 2021 2 | #based on Brown 1980: Political Subjectivity, pages 
208-224. 3 | 4 | 5 | centroid <- function (tmat, nfactors = 7, spc = 10^-5) 6 | #tmat is a correlation matrix 7 | #nfactors is number of components to extract. Warning: extracting more components than respondents is allowed! 8 | #spc is the threshold to accept factor results (in Brown this is set to 0.02) 9 | { 10 | if (isSymmetric(tmat) == F) 11 | stop("Input matrix should be a symmetrical correlation matrix.") 12 | if (nfactors > nrow(tmat)) warning("The number of components to extract is larger than the number of respondents.") 13 | compmat <- matrix(data = NA, nrow = nrow(tmat), ncol = nfactors, dimnames = list (rownames(tmat), LETTERS[1:nfactors])) 14 | for (i in 1:nfactors) 15 | { 16 | diag(tmat) <- 0 17 | refvec <- NULL 18 | while(all(colSums(tmat) >0) ==F) #maximize positive manifold 19 | { 20 | oo <-which(min(colSums(tmat)) == colSums(tmat)) 21 | vec <- tmat[oo,] *-1 22 | tmat[oo,] <-vec 23 | tmat[,oo] <-vec 24 | refvec <- append(refvec, oo) 25 | } 26 | rmean <-colSums(tmat) / (nrow(tmat)-1) 27 | t1 <- rmean + colSums(tmat) 28 | f1 <- t1/sqrt(sum(t1)) 29 | while(all(abs(rmean-f1^2) 1) { #if more than 1 cond 49 | paste( 50 | "/", 51 | cond, # add condition to path 52 | "/", 53 | sep = "" 54 | ) 55 | }, # otherwise, keep path simple 56 | "/", 57 | part, 58 | ".csv", 59 | sep = "" 60 | ) 61 | path <- normalizePath(path, mustWork = FALSE) # just to be safe 62 | if (file.exists(path)) { # not everyone comments 63 | current.feedback <- read.csv( 64 | path, 65 | header = TRUE, # these do have names 66 | stringsAsFactors = FALSE, # would only add confusion 67 | row.names = 1, 68 | colClasses = c("character","character","logical"), 69 | na.strings = "" # empty cells become NAs 70 | ) 71 | if (ncol(current.feedback) > 1) { # if a drop correction column is included 72 | current.feedback <- current.feedback[!(current.feedback[,2]),] # drop corrections 73 | } 74 | for (id in rownames(current.feedback)) { # loops over ids 75 | if (any(lookup.table == id)) { # do we know the id in the current feedback? 76 | row <- which(lookup.table == id, arr.ind=TRUE)[,1] # where is it in the table? 77 | handle <- rownames(lookup.table)[row] # what is the short handle? 78 | # Gathering data into array 79 | if (cond == "only.one") { 80 | q.feedback[handle,part] <- current.feedback[id,1] 81 | } else { 82 | q.feedback[handle,part,cond] <- current.feedback[id,1] 83 | } 84 | 85 | } else { 86 | warning( 87 | paste( 88 | "Feedback in", 89 | path, 90 | "under id", 91 | id, 92 | "is not defined as per manual.lookup and was ignored.", 93 | "Check whether you defined manual.lookup argument as intended." 
94 | ) 95 | ) 96 | } 97 | } 98 | } 99 | } 100 | } 101 | return(q.feedback) 102 | } 103 | -------------------------------------------------------------------------------- /R/loa.and.flags.R: -------------------------------------------------------------------------------- 1 | loa.and.flags <- function(results, nload=FALSE){ 2 | nfactors <- results$brief$nfactors 3 | loa <- round(results$loa, digits=2) 4 | fla <- as.data.frame(results$flagged) 5 | names(fla) <- paste0("fg", 1:ncol(fla)) 6 | for (i in 1:nfactors) fla[[i]] <- as.character(fla[[i]]) 7 | for (i in 1:nfactors) fla[which(fla[[i]]=="FALSE"),i ] <- "" 8 | for (i in 1:nfactors) fla[which(fla[[i]]=="TRUE"),i ] <- "*" 9 | flagqs <- cbind(loa, fla) 10 | pos <- vector() 11 | for(i in 1:nfactors) pos <- c(pos, i+nfactors, i) 12 | flagqs <- flagqs[pos] 13 | if (nload) { 14 | cat("\nNumber of Q-sorts flagged for each factor:\n") 15 | print(results$f_char$characteristics["nload"]) 16 | } 17 | cat("\n") 18 | return(flagqs) 19 | } 20 | -------------------------------------------------------------------------------- /R/make.cards.R: -------------------------------------------------------------------------------- 1 | make.cards <- function(q.set, study.language=NULL, paper.format = "AveryZweckformC32010.Rnw", output.pdf = TRUE, manual.lookup = NULL, wording.font.size = NULL, file.name = "QCards", babel.language=NULL) { 2 | 3 | # Input validation also check more below 4 | if (!is.matrix(q.set)) { 5 | stop("The q.set specified is not a matrix.") 6 | } 7 | if (!is.null(study.language)) { 8 | if (!(study.language %in% colnames(q.set))) 9 | { 10 | stop("The specified study language to be printed is not available in the q.set.") 11 | } 12 | } 13 | available.formats <- list.files( 14 | path = paste( 15 | path.package("qmethod"), # where is the package? 16 | "/cardtemplates/", 17 | sep = "" 18 | ), 19 | no.. 
= TRUE # no dotfiles 20 | ) 21 | if (!paper.format %in% available.formats) { 22 | stop("The paper.format specified is not available.") 23 | } 24 | if (!is.logical(output.pdf)) { 25 | stop("The argument output.pdf has not been specified logically.") 26 | } 27 | if (!is.null(manual.lookup) & !is.matrix(manual.lookup)) { 28 | stop("The manual.lookup specified is not a matrix.") 29 | } 30 | if (is.null(study.language)) { # if there no languages 31 | study.language <- 1 # just look in column 1 32 | } 33 | if (!is.character((file.name))) { # if filename not character 34 | stop("The specified filename is invalid.") 35 | } 36 | if (!is.null(babel.language) & !is.character((babel.language))) { # if filename not character 37 | stop("The specified babel language is invalid.") 38 | } 39 | # Read in items ============================================================= 40 | q.set.print <- as.data.frame( # read in complete q.set, all translations 41 | x = q.set[,study.language] 42 | ) 43 | colnames(q.set.print) <- "full wording" 44 | # Create lookup table (same as in import.q.feedback and import.q.sorts!)===== 45 | if (is.null(manual.lookup)) { # in case there is no manual lookup 46 | lookup.table <- apply( # replace every language field with its hash 47 | X = q.set, 48 | MARGIN = c(1,2), 49 | digest, 50 | algo = "crc32", 51 | serialize = FALSE 52 | ) 53 | } else { # in case of manually entered lookup table 54 | lookup.table <- manual.lookup # just assign it 55 | } 56 | if (any(duplicated(lookup.table))) { # test lookup table 57 | stop ("There are duplicate IDs in the lookup table.") 58 | } 59 | 60 | # Add ids to q.set.print ==================================================== 61 | q.set.print$id <- NA # set up empty id 62 | for (handle in rownames(q.set.print)) { # loop over all ids in q.set 63 | if (is.null(manual.lookup)) { # for automatic hashing 64 | q.set.print[handle,"id"] <- lookup.table[handle,study.language] 65 | } else { 66 | q.set.print[handle,"id"] <- lookup.table[handle] # plug in id as row 67 | } 68 | 69 | } 70 | path <- paste( # assign path to template 71 | path.package("qmethod"), # where is the package? 72 | # remember, "inst" is not in path, because stuff from inst get put in root of package! 
73 | "/cardtemplates/", 74 | paper.format, # hopefully will have more templates in the future 75 | sep = "" 76 | ) 77 | wording.font.size <- wording.font.size # dumb, but otherwise R complains about unused argument 78 | if (output.pdf == TRUE) { 79 | return( 80 | knit2pdf( 81 | input = path 82 | ,output = paste(file.name,".tex",sep="") 83 | ) 84 | ) 85 | } else { 86 | return( 87 | knit( 88 | input = path 89 | ,output = paste(file.name,".tex",sep="") 90 | ) 91 | ) 92 | } 93 | } 94 | -------------------------------------------------------------------------------- /R/make.distribution.R: -------------------------------------------------------------------------------- 1 | make.distribution <- function(nstat, max.bin = 5) { 2 | 3 | # Input validation 4 | if (!is.vector(nstat)) { 5 | stop("nstat is not a vector.") 6 | } 7 | if (!is.vector(max.bin)) { 8 | stop("max.bin is not a vector.") 9 | } 10 | if (!(mode(nstat) == "numeric" && nstat > 0 && nstat %% 1 == 0)) { 11 | stop("The nstat specified is not a positive integer.") 12 | } 13 | if (!(mode(max.bin) == "numeric" && max.bin > 0 && max.bin %% 1 == 0)) { 14 | stop("The max.bin specified is not a positive integer.") 15 | } 16 | if ((2 * max.bin + 1) > nstat) { 17 | stop("The nstat specified is too small for the chosen max.bin.") 18 | } 19 | 20 | nbins <- (2 * max.bin) + 1 # make sure that nbins is uneven 21 | # this +1 ensures that there will always be a 0 bin. 22 | range <- qnorm(1 / nstat) 23 | # what's the cutoff for 1/n? 24 | # This the farthest point under the normal distribution where you'd still expect a card, given nstat 25 | distribution <- dnorm(seq((-range), range, length = nbins)) / sum(dnorm(seq(-range, range, length = nbins))) * nstat 26 | distribution <- round(distribution) 27 | if (sum(distribution) != nstat) { 28 | warning( 29 | paste( 30 | "Could not fit", 31 | nstat, 32 | "items neatly under a standard normal distribution.", 33 | "You need", 34 | sum(distribution), 35 | "items for good fit instead.", 36 | "Accept that or try again with a different max.bin." 37 | ) 38 | ) 39 | } 40 | return(distribution) 41 | } 42 | -------------------------------------------------------------------------------- /R/plot.QmethodRes.R: -------------------------------------------------------------------------------- 1 | plot.QmethodRes <- function(x, 2 | xlab='z-scores', ylab='statements', 3 | pchlist = NULL, colours = NULL, 4 | fnames = NULL, legend = TRUE, 5 | dist = TRUE, pchlist.fill = NULL, 6 | leg.pos="bottomright", xlim= NULL, 7 | sort.items=T, factors = NULL, ...) { 8 | if (!is.null(factors) & dist) { 9 | warning("Interpret with care. 10 | 11 | Only a subset of all the factors is plotted (argument 'factors'), and filled markers indicate distinguishing statements (argument 'dist = TRUE'). Significant differences are calculated with respect to all the factors in the object of results (not only those factors visible).") 12 | } 13 | if (is.null(factors)) { 14 | factors <- c(1:x$brief$nfactors) 15 | } else if (max(factors) > max(c(1:x$brief$nfactors))) { 16 | warning("The numbers of factors provided are beyond the number of factors in the object of results. 
The default factors will be plotted.") 17 | factors <- c(1:x$brief$nfactors) 18 | } 19 | dfr <- data.frame(x$zsc) 20 | # For the rare case where one factor didn't have flags 21 | if (sum(is.na(colSums(x$zsc)))>0) { 22 | dfr <- data.frame(x$zsc[,!is.na(colSums(x$zsc))]) 23 | factors <- 1:ncol(dfr) 24 | } 25 | lowlim <- floor(min(dfr[[1]])) 26 | highlim <- ceiling(max(dfr)) 27 | if (is.null(xlim)) { 28 | xlimits <- c(lowlim, highlim) 29 | } else xlimits = xlim 30 | if (is.null(pchlist)) { 31 | pchlist <- c(1, 2, 0, 5, 6, 16, 17, 15, 18, 21, 24, 23, 22, 3, 4, 7, 8, 9) 32 | pchlist.fill <- c(16, 17, 15, 23, 25, 16, 17, 15, 18, 21, 24, 23, 22, 3, 4, 7, 8, 9) 33 | } 34 | nfactors <- length(dfr) 35 | # Sorting of items in y axis 36 | sta.order <- 1:nrow(dfr) 37 | if (is.numeric(sort.items)) { 38 | if (length(sort.items) == nrow(dfr)) sta.order <- sort.items 39 | if (length(sort.items) != nrow(dfr)) warning("The number of elements in the vector to sort the items ('sort.items') does not equal the number of items. Items will not be sorted in the plot.") 40 | } else { 41 | if (is.character(sort.items)) { 42 | if (sort.items == "consensus.top") sta.order <- order(apply(dfr, 1, sd), decreasing=T) 43 | if (sort.items != "consensus.top") warning("The argument 'sort.items' does not correspond with any allowed value. See help pages for details. Items will not be sorted in the plot.") 44 | } else { 45 | if (sort.items == F) { 46 | sta.order <- 1:nrow(dfr) 47 | } else { 48 | if (sort.items == T) sta.order <- order(apply(dfr, 1, sd)) 49 | }} 50 | } 51 | dfr <- dfr[sta.order, ] 52 | # Whether to show distinguishing statements 53 | if (dist) { 54 | pts <- qdc.zsc(x) 55 | pts <- pts[sta.order, ] 56 | } 57 | if (is.null(colours)) colours <- rainbow(length(dfr)) 58 | if (is.null(fnames) & colnames(dfr)[1] == "zsc_f1") fnames <- paste0("Factor ", factors) 59 | if (is.null(fnames) & colnames(dfr)[1] != "zsc_f1") fnames <- colnames(dfr) 60 | dotchart(dfr[[factors[1]]], lcolor=grey(0.4), 61 | xlim=xlimits, 62 | ylab=ylab, xlab=xlab, axis=NULL, 63 | pch=pchlist[[1]], color=colours[[1]], ...) 64 | if(length(factors) > 1) { 65 | for (i in 2:length(factors)){ 66 | points(x=dfr[[factors[i]]], 1:length(dfr[[factors[i]]]), pch = pchlist[i], type = "p", col=colours[[i]], bg=colours[[i]], ...) 67 | } 68 | } 69 | if (dist) { 70 | for (i in 1:length(factors)){ 71 | points(x=pts[,factors[i]], 1:length(pts[,factors[i]]), pch = pchlist.fill[i], type = "p", col=colours[[i]], bg=colours[[i]], ...) 72 | } 73 | } 74 | axis(side=2, at=1:nrow(dfr), 75 | labels=rownames(dfr), 76 | las=1, tick=F, line=-0.5, ...) 77 | abline(v=seq(from=min(xlimits), to=max(xlimits), by=0.5), col=grey(0.6), lty=3) 78 | if (legend) { 79 | if (dist) { 80 | pch.leg = pchlist.fill[1:length(factors)] 81 | } else pch.leg <- pchlist[1:length(factors)] 82 | legend(leg.pos, 83 | legend=fnames, 84 | col=colours[1:length(factors)], 85 | pch=pch.leg, 86 | pt.bg=colours[1:length(factors)], 87 | bty="n") 88 | } 89 | } 90 | 91 | -------------------------------------------------------------------------------- /R/print.QmethodRes.R: -------------------------------------------------------------------------------- 1 | print.QmethodRes <- function(x, length=10, digits=2, ...) 
{ 2 | old.dig <- getOption("digits") 3 | options(digits=digits) 4 | nn <- c("Summary", "Original data", "Q-sort factor loadings", "Flagged Q-sorts", "Statement z-scores", "Statement factor scores", "Factor characteristics", "Distinguishing and consensus statements") 5 | names(nn) <- c("brief", "dataset", "loa", "flagged", "zsc", "zsc_n", "f_char", "qdc") 6 | ll <- length(x) 7 | nl <- nn[1:ll] 8 | dimsorts <- min(length, x$brief$nqsorts) 9 | dimstats <- min(length, x$brief$nstat) 10 | cat(x$brief$info, sep="\n") 11 | cat("\n") 12 | cat(nl[2], ":\n") 13 | print(x$dataset[1:dimstats, 1:dimsorts]) 14 | if (dimstats < x$brief$nstat) cat(" (...) See item '...$dataset' for the full data.\n") 15 | nxt <- c("loa", "flagged") 16 | for (i in nxt) { 17 | cat("\n") 18 | cat(nl[i], ":\n") 19 | print(x[[i]][1:dimsorts, ]) 20 | if (dimsorts < x$brief$nqsorts) cat(" (...) See item '...$", i, "' for the full data.\n", sep="") 21 | } 22 | 23 | cat("\n") 24 | cat(nl["zsc"], ":\n") 25 | print(round(x[["zsc"]][1:dimstats, ], digits=2)) 26 | if (dimstats < x$brief$nstat) cat(" (...) See item '...$", "zsc", "' for the full data.\n", sep="") 27 | 28 | cat("\n") 29 | cat(nl["zsc_n"], ":\n") 30 | print(x[["zsc_n"]][1:dimstats, ]) 31 | if (dimstats < x$brief$nstat) cat(" (...) See item '...$", "zsc_n", "' for the full data.\n", sep="") 32 | 33 | 34 | 35 | cat("\n", nl[7], ":\n", sep="") 36 | fcl <- c(" General factor characteristics:", " Correlation between factor z-scores:", " Standard error of differences between factors:") 37 | for (i in 1:length(x$f_char)) { 38 | cat(fcl[[i]], "\n") 39 | print(round(x$f_char[[i]], digits=2)) 40 | cat("\n") 41 | } 42 | if (ll == 8) { 43 | cat(nl[8], ":\n") 44 | print(x$qdc[1:dimstats, ]) 45 | if (dimstats < x$brief$nstat) cat(" (...) See item '...$qdc' for the full data.\n") 46 | } 47 | options(digits=old.dig) 48 | invisible(x) 49 | } 50 | -------------------------------------------------------------------------------- /R/q.fnames.R: -------------------------------------------------------------------------------- 1 | q.fnames <- function(results, fnames) { 2 | # Error checks 3 | if (!is(results, "QmethodRes")) stop("The object provided is not of class 'QmethodRes'") 4 | comb <- array(sapply(fnames, function(x) substring(x,1,1))) 5 | nos <- 0:9 6 | if(sum(comb %in% nos) > 0) stop("The names should not begin with a number") 7 | if (length(fnames) != results$brief$nfactors) stop(paste0("The names provided (", length(fnames), ") do not match the number of factors in the results (", results$brief$nfactors, ")")) 8 | if (max(nchar(fnames)) > 50) stop("The names provided are longer than 50 characters.") 9 | 10 | # Change factor names for meaningful names 11 | q.objects <- c("loa", "flagged", "zsc", "zsc_n") 12 | for (i in q.objects) colnames(results[[i]]) <- fnames 13 | # Factor characteristics 14 | rownames(results[[7]]$characteristics) <- fnames 15 | dimnames(results[[7]]$cor_zsc) <- list(fnames, fnames) 16 | dimnames(results[[7]]$sd_dif) <- list(fnames, fnames) 17 | return(results) 18 | } -------------------------------------------------------------------------------- /R/qbstep.R: -------------------------------------------------------------------------------- 1 | qbstep <- function(subdata, subtarget, indet, nfactors, nqsorts, nstat, 2 | qmts=qmts, qmts_log=qmts_log, rotation="unknown", 3 | flagged=flagged, cor.method="pearson", ...) { 4 | #-------------------------------------------------------------------- 5 | # 1. 
Generate matrix of factor loadings 6 | cor.data <- cor(subdata, method=cor.method) 7 | if (rotation=="unknown") rotation <- "none" 8 | loa <- as.data.frame(unclass(principal(cor.data, nfactors=nfactors, rotate=rotation, ...)$loadings)) 9 | 10 | # Note (2015.12.17): the original line run 'principal()' directly: 11 | # loa <- as.data.frame(unclass(principal(subdata, rotate="none", nfactors=nfactors)$loa)) 12 | # However (funny enough!) principal() blocks the console when the data introduced are a square matrix (e.g. 30 observations and 30 statements); producing the correlation table first avoids that bug. 13 | #-------------------------------------------------------------------- 14 | # 2. Apply solutions for indeterminacy issue of PCA bootstrap 15 | if (indet == "none") { 16 | #loa <- as.data.frame(PCA(subdata, graph=FALSE)$var$coord[,c(1:nfactors)]) 17 | loa <- as.matrix(unclass(varimax(as.matrix(loa))[[1]])) 18 | } 19 | if (indet == "procrustes") { 20 | #loa <- as.data.frame(PCA(subdata, graph=FALSE)$var$coord[,c(1:nfactors)]) 21 | #caution: selecting rotation ="varimax" here implies that both varimax and Procrustes are used one on top of the other, and probably just one or the other should be used. For the qindtest though, the selected rotation is used 22 | procrustes <- qpcrustes(loa=loa, target=subtarget, nfactors=nfactors) 23 | loa <- procrustes 24 | } 25 | if (indet == "qindtest" | indet == "both") { 26 | loa <- as.matrix(unclass(varimax(as.matrix(loa))[[1]])) 27 | qindeterminacy <- qindtest(loa=loa, target=subtarget, 28 | nfactors=nfactors) 29 | loa <- as.data.frame(qindeterminacy[[1]]) 30 | if (indet == "both") { 31 | loa <- qpcrustes(loa=loa, target=subtarget, nfactors=nfactors) 32 | } 33 | } 34 | #-------------------------------------------------------------------- 35 | # 3. Calculate z-scores and factor scores with the indeterminacy corrected factor loadings 'loa' 36 | flagged <- qflag(nstat=nstat, loa=loa) 37 | qstep <- qzscores(subdata, nfactors=nfactors, 38 | flagged=flagged, loa=loa, ...) 39 | #-------------------------------------------------------------------- 40 | # 4. 
Export necessary results 41 | step_res <- list() 42 | step_res$flagged <- list() 43 | step_res$zsc <- list() 44 | step_res$loadings <- list() 45 | 46 | qstep$flagged <- as.data.frame(qstep$flagged) 47 | qstep$zsc <- as.data.frame(qstep$zsc) 48 | qstep$loa <- as.data.frame(qstep$loa) 49 | 50 | n <- 1 51 | while (n <= nfactors) { 52 | # Flagged q sorts 53 | step_res$flagged[n] <- qstep$flagged[n] #to append in qmbr[[n]][[1]] 54 | # z-scores 55 | step_res$zsc[n] <- qstep$zsc[n] #to append in qmbr[[n]][[2]] 56 | # Factor loadings 57 | step_res$loadings[n] <- qstep$loa[n] #to append in qmbr[[n]][[3]] 58 | n <- n + 1 59 | } 60 | if (indet == "qindtest" | indet == "both") { 61 | qindt_log <- qindeterminacy[[2]] 62 | qindt <- qindeterminacy[[3]] 63 | # Test results (logical) 64 | step_res$torder_res_log <- qindt[1] #to append in qmts[1] 65 | step_res$tsign_res_log <- qindt[2] #to append in qmts[2] 66 | # Reports of solution implementation 67 | step_res$torder_res_report <- qindt_log[1] #to append in qmts_log[1] 68 | step_res$tsign_res_report <- qindt_log[2] #to append in qmts_log[2] 69 | } 70 | return(step_res) 71 | } -------------------------------------------------------------------------------- /R/qdc.R: -------------------------------------------------------------------------------- 1 | qdc <- function(dataset, nfactors, zsc, sed) { 2 | zsc <- as.data.frame(zsc) 3 | sed <- as.data.frame(sed) 4 | if (sum(is.na(colSums(zsc)))>0) warning("Q analysis: Comparisons for distinguishing and consensus statements exclude the factor(s) for which there were no flags.") 5 | if (sum(is.na(colSums(zsc)))>0 & !is.na(sum(zsc[,nfactors]))) stop("In addition, the factor without flags is not the last one. The distinguishing and consensus analysis cannot continue. You may run the full analysis manually: cor(db), then extraction and rotation, and qzscores(), skipping qdc().") 6 | # Exclude the factor that has no flags 7 | nfactors <- nfactors-sum(is.na(colSums(zsc))>0) 8 | if (nfactors==1) { 9 | qdc.res <- "Q analysis: Only one factor selected. No distinguishing and consensus statements will be calculated." 
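  # The significance cut-offs used in the 'else' branch below are fixed two-tailed
  # z-values (1.960, 2.576, 3.291 and 4.8916) multiplied by the standard error of
  # differences (SED) of each pair of factors. A minimal sketch of where those
  # multipliers come from, using a hypothetical SED of 0.30 (not run):
  if (FALSE) {
    sed.example <- 0.30                    # hypothetical SED between two factors
    sed.example * qnorm(1 - 0.05 / 2)      # ~0.59, cut-off for p < .05  (1.960 * SED)
    sed.example * qnorm(1 - 0.01 / 2)      # ~0.77, cut-off for p < .01  (2.576 * SED)
    sed.example * qnorm(1 - 0.001 / 2)     # ~0.99, cut-off for p < .001 (3.291 * SED)
    sed.example * qnorm(1 - 0.000001 / 2)  # ~1.47, cut-off for p < .000001 (4.8916 * SED)
  }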
10 | warning(qdc.res) 11 | } else { 12 | # Distinguishing and consensus statements 13 | # create data frame 14 | comparisons <- combn(nfactors, 2, simplify=F) 15 | comp <- vector() 16 | for (i in 1:length(comparisons)) { 17 | comp <- append(comp, paste("f", comparisons[[i]], collapse="_", sep=""), after = length(comp)) 18 | } 19 | qdc1 <- data.frame(matrix(data=as.numeric(NA), ncol=length(comp), nrow=nrow(dataset), dimnames=list(row.names(dataset), comp))) 20 | # differences in zsc between factors 21 | for (n in 1:length(comp)) { 22 | first <- colnames(zsc)[grep(paste0("f", comparisons[[n]][1], "$"), 23 | colnames(zsc))] 24 | second <- colnames(zsc)[grep(paste0("f", comparisons[[n]][2], "$"), 25 | colnames(zsc))] 26 | qdc1[n] <- zsc[,first] - zsc[,second] 27 | } 28 | qdc2 <- as.data.frame(qdc1) 29 | # significant differences 30 | for (n in 1:length(comp)) { 31 | # find the threshold for the pair of factors 32 | sed <- data.frame(sed) 33 | first <- names(sed)[grep(paste0("f", comparisons[[n]][1], "$"), 34 | names(sed))] 35 | second <- names(sed)[grep(paste0("f", comparisons[[n]][2], "$"), 36 | names(sed))] 37 | sedth.000001 <- sed[first, second]*4.8916 # t-test values obtainable from qnorm(p = 0.000001) %>% round(4) (fix as per issue #362 on github) 38 | sedth.001 <- sed[first, second]*3.291 39 | sedth.01 <- sed[first, second]*2.576 40 | sedth.05 <- sed[first, second]*1.960 # differences are significant when > 2.58*SED for p < .01, or the same value rounded upwards (Brown, 1980, pp.245) 41 | qdc2[which(abs(qdc1[[n]]) <= sedth.05), n] <- "" 42 | qdc2[which(abs(qdc1[[n]]) > sedth.05), n] <- "*" 43 | qdc2[which(abs(qdc1[[n]]) > sedth.01), n] <- "**" 44 | qdc2[which(abs(qdc1[[n]]) > sedth.001), n] <- "***" 45 | qdc2[which(abs(qdc1[[n]]) > sedth.000001), n] <- "6*" 46 | } 47 | names(qdc2) <- paste0("sig_",names(qdc2)) 48 | qdc2$dist.and.cons <- as.character(apply(qdc2, 1, function(x) sum(x!="")==0)) 49 | qdc2[which(qdc2$dist.and.cons == T), "dist.and.cons"] <- "Consensus" 50 | if (nfactors == 2) { 51 | qdc2[which(qdc2$dist.and.cons != "Consensus"), "dist.and.cons"] <- "Distinguishing" 52 | } 53 | if (nfactors > 2) { 54 | qdc2[which(qdc2$dist.and.cons != "Consensus"), "dist.and.cons"] <- "" 55 | for (i in 1:nfactors) { 56 | varsin <- names(qdc2)[grep(i, names(qdc2))] 57 | varsout <- names(qdc2)[-grep(i, names(qdc2))] 58 | varsout <- varsout[-which(varsout=="dist.and.cons")] 59 | for (s in 1:nrow(qdc2)) { 60 | if (sum(qdc2[s, varsin] != "") == length(varsin) & sum(qdc2[s, varsout] != "") == 0) qdc2[s, "dist.and.cons"] <- paste0("Distinguishes f",i, " only") else if (sum(qdc2[s, c(varsin, varsout)] != "") == length(qdc1)) qdc2[s, "dist.and.cons"] <- "Distinguishes all" else if (sum(qdc2[s, varsin] != "") == length(varsin) & sum(qdc2[s, varsout] != "") != 0 & sum(qdc2[s, c(varsin, varsout)] != "") != length(qdc1)) qdc2[s, "dist.and.cons"] <- paste0(qdc2[s, "dist.and.cons"], "Distinguishes f",i, " ", collapse="") 61 | } 62 | #The above loop assigns these values in the column dist.and.cons, according to the following rules: 63 | # -- "Distinguishes f* only" when the differences of f* with all other factors are significant, AND all other differences are not. 64 | # -- "Distinguishes all" when all differences are significant. 65 | # -- "Distinguishes f*" when the differences of f* and all other factors are significant, AND some (but not all) of the other differences are significant. 66 | # -- "" leaves empty those which do not fullfil any of the above conditions, i.e. 
are not consensus neither are clearly distinguishing any factor. 67 | } 68 | } 69 | qdc.res <- cbind(qdc1, qdc2) 70 | ord <- rep(1:length(qdc1), each=2) 71 | ord[which(1:(length(qdc1)*2) %% 2 == 0)] <- ord[which(1:(length(qdc1)*2) %% 2 == 0)] + length(qdc1) 72 | qdc.res <- qdc.res[c(length(qdc.res), ord)] 73 | } 74 | return(qdc.res) 75 | } 76 | -------------------------------------------------------------------------------- /R/qdc.zsc.R: -------------------------------------------------------------------------------- 1 | qdc.zsc <- function(results) { 2 | # Extract the scores of distinguishing statements from an object of class QmethodRes 3 | nstat <- results$brief$nstat 4 | nfactors <- results$brief$nfactors 5 | zsc <- results$zsc 6 | if (sum(is.na(colSums(zsc)))>0) { 7 | zsc <- results$zsc[,!is.na(colSums(zsc))>0] 8 | nfactors <- sum(!is.na(colSums(zsc))) 9 | } 10 | dac <- results$qdc$dist.and.cons 11 | names(dac) <- rownames(results$qdc) 12 | qdc.zsc <- matrix(NA, nrow=nrow(zsc), ncol=ncol(zsc), dimnames=dimnames(zsc)) 13 | 14 | # Find which are the distinguishing statements 15 | for (i in 1:nfactors) { 16 | qdc.zsc[grep(paste0("all|f", i), dac), i] <- zsc[grep(paste0("all|f", i), dac), i] 17 | } 18 | if (nfactors==2) { 19 | for (i in 1:nfactors) { 20 | qdc.zsc[grep("Distinguishing", dac), i] <- zsc[grep("Distinguishing", dac), i] 21 | } 22 | } 23 | return(qdc.zsc) 24 | } -------------------------------------------------------------------------------- /R/qfcharact.R: -------------------------------------------------------------------------------- 1 | qfcharact <- function(loa, flagged, zsc, nfactors, av_rel_coef=0.8) { 2 | nqsorts <- nrow(loa) 3 | loa_sq <- loa^2 4 | #select FLAGGED Q sorts 5 | floa <- flagged*loa 6 | #number of loading q-sorts 7 | nload <- colSums(flagged) 8 | #Eigenvalues 9 | eigenvals <- colSums(loa_sq) 10 | #Total explained variance 11 | expl_var <- 100*(eigenvals/nqsorts) 12 | #Reliability 13 | reliability <- av_rel_coef*nload/(1+(nload-1)*av_rel_coef) 14 | #Standard Error of Factor Scores 15 | se_fscores <- apply(zsc, 2, sd)*sqrt(1-reliability) 16 | #FACTOR MATRIXES 17 | #correlation among factors 18 | f_cor <- cor(zsc) 19 | #SE of differences 20 | sed <- matrix(data = NA, nrow = nfactors, ncol = nfactors) 21 | colnames(sed) <- paste("f", 1:nfactors, sep="") 22 | row.names(sed) <- paste("f", 1:nfactors, sep="") 23 | f <- 1 24 | while (f <= ncol(floa)) { 25 | g <- 1 26 | while (g <= ncol(floa)) { 27 | sed[f,g] <- sqrt(se_fscores[[f]]^2 + se_fscores[[g]]^2) 28 | g <- g+1 29 | } 30 | f <- f+1 31 | } 32 | #Bind all together 33 | f_char <- list() 34 | f_char[[1]] <- data.frame(cbind(av_rel_coef, nload, eigenvals, expl_var, reliability, se_fscores)) 35 | row.names(f_char[[1]]) <- paste("f",1:ncol(loa), sep="") 36 | f_char[[2]] <- f_cor 37 | f_char[[3]] <- sed 38 | names(f_char) <- cbind("characteristics", "cor_zsc", "sd_dif") 39 | #cbind("Average reliability coefficient, Number of loading Q-sorts, Eigenvalues, Percentage of explained variance, Composite reliability, Standard error of factor scores", "Correlation coefficients between factors z-scores", "Standard errors of differences") 40 | return(f_char) 41 | } -------------------------------------------------------------------------------- /R/qflag.R: -------------------------------------------------------------------------------- 1 | #flags Q sorts automatically according to the given loadings matrix 2 | qflag <- function(loa=loa, nstat) { 3 | # calculate number of Q sorts and number of statements 4 | nqsorts <- 
nrow(loa) 5 | #FLAGGING CRITERIA: 6 | # -- 1) qsorts which factor loading is higher than the threshold for pval >0.95, and 7 | # -- 2) qsorts which square loading is higher than the sum of square loadings of the same q-sort in all other factors 8 | thold.05 <- 1.96/sqrt(nstat) 9 | loa_sq <- loa^2 10 | flagged <- matrix(data=F, nrow=nqsorts, ncol=ncol(loa)) 11 | f <- 1 12 | while (f <= ncol(loa)) { 13 | n <- 1 14 | while (n <= nqsorts) { 15 | flagged[n,f] <- loa_sq[n,f] > (rowSums(loa_sq)[[n]]-loa_sq[n,f]) & abs(loa[n,f]) > thold.05 16 | n <- n+1 17 | } 18 | f <- f+1 19 | } 20 | flagged <- as.matrix(flagged) 21 | colnames(flagged) <- paste("flag_f",1:ncol(loa), sep="") 22 | row.names(flagged) <- row.names(loa) 23 | # Checks to recommend manual inspection 24 | # Negative loading and flagged 25 | if (sum(as.matrix(loa)[which(flagged)] < 0) > 0) warning("One or more Q-sorts with negative loadings are flagged through the automatic pre-flagging. This is not necessarily an issue, but double check the flags manually, e.g. using the function 'loa.and.flags()'.") 26 | # A Q-sort flagged in more than one 27 | if (sum(apply(flagged, 1, sum) > 1) > 0) warning("One or more Q-sorts is flagged for two or more factors through the automatic pre-flagging. This is not necessarily an issue, but double check the flags manually, e.g. using the function 'loa.and.flags()'.") 28 | return(flagged) 29 | } -------------------------------------------------------------------------------- /R/qfsi.R: -------------------------------------------------------------------------------- 1 | qfsi <- function(nfactors, nstat, qscores, zsc_bn, qm) { 2 | warning("Note that this index is uncommon in Q methodology publications.") 3 | #calculate FACTOR STABILITY INDEX 4 | mx <- sum(abs(qscores))*2 #maximum possible position changes 5 | #qm is the original analysis results 6 | fsi <- data.frame(FSindex=c(1:nfactors), NFSindex=c(1:nfactors)) 7 | f <- 1 8 | while (f <= nfactors) { 9 | fsi[f,1] <- sum(abs(qm[[6]][f]-zsc_bn[f]))/nstat 10 | fsi[f,2] <- fsi[f,1]/mx 11 | f <- f+1 12 | } 13 | return(fsi) 14 | } -------------------------------------------------------------------------------- /R/qindtest.R: -------------------------------------------------------------------------------- 1 | qindtest <- function(loa, target, nfactors) { 2 | # Create data frame to log the results 3 | qindt <- as.data.frame(matrix(NA, nrow=nfactors, ncol=2)) 4 | qindt_log <- as.data.frame(matrix("ok", nrow=1, ncol=2)) 5 | qindt_log[[1]] <- as.character(qindt_log[[1]]) 6 | qindt_log[[2]] <- as.character(qindt_log[[2]]) 7 | #qindt[1] <- as.logical(qindt[1]) 8 | #qindt[2] <- as.logical(qindt[2]) 9 | dimnames(qindt) <- list(paste("f",c(1:nfactors), sep=""), c("torder", "tsign")) 10 | dimnames(qindt_log) <- list(c("log"), c("torder_log", "tsign_log")) 11 | #==================================================== 12 | # 1. Check for ORDER SWAP - REFLECTION 13 | #==================================================== 14 | #Is the absolute value of diagonal coefficients larger than non-diagonal coefficients for the same factor? 
(if FALSE, then test is negative, OK) 15 | corloa <- as.data.frame(cor(loa, target)) 16 | for (i in 1:nfactors) { 17 | factors <- c(1:nfactors) 18 | otherf <- c(factors[-i]) 19 | #The actual test: the 'sum' is because this returns as many responses as 'other' factors, to which the given factor is compared, if one single (or more) is higher, then it is TRUE that the given factor is incorrectly positioned 20 | qindt[i,1] <- isTRUE(sum(abs(corloa[i,i]) < abs(corloa[otherf,i])) != 0) 21 | } 22 | if (sum(qindt[,1]) == 0) { 23 | qindt_log[1,1] <- "OK: No ORDER swap issues" 24 | warning(qindt_log[1,1]) 25 | } else { 26 | #.................................................. 27 | # Add *solution* for ORDER SWAP: reorder factors according to highest coefficients 28 | loa_orig <- as.matrix(loa) 29 | # select, for each factor, the factor that has highest correlation with the target factor 30 | swap <- list() 31 | for (j in 1:nfactors) { 32 | maxcor <- max(abs(corloa[,j])) 33 | swap <- paste(swap,which(abs(corloa[,j]) == maxcor), sep=" ") # info piece 34 | loa[,j] <- loa_orig[,which(abs(corloa[,j]) == maxcor)] 35 | } 36 | #.................................................. 37 | loa <- as.matrix(loa) 38 | #check that no factor has been chosen twice 39 | corloa2 <- cor(loa) 40 | diag(corloa2) <- 0 41 | if (max(abs(corloa2)) > 0.999) { 42 | #!!! ehem, try format(corloa2, digits=16) and see that all correlations == 1 out of the diagonal are 0.9period instead of 1, which was causing an error! 43 | qindt_log[1,1] <- "ERROR in ORDER swap: at least one factor in the resample is best match for two or more factors in the target" 44 | print(qindt_log[1,1]) 45 | loa <- as.data.frame(loa_orig) 46 | } else { 47 | qindt_log[1,1] <- paste("OK - Factors reordered: ",swap[1], sep="") # (1) in the unlikely case in which the value of two or more coefficients are the same and highest, not selecting the first value would give an error---this step should actually be discarded 48 | if (length(swap) > 1) warning("The reordering in this iteration may be incorrect.") 49 | 50 | print(qindt_log[1,1]) 51 | swap <- NULL 52 | loa_orig <- NULL 53 | } 54 | } 55 | #==================================================== 56 | # 2. Check for SIGN SWAP - INVERSION 57 | #==================================================== 58 | #Are all diagonal coefficients positive? (if FALSE, then test is negative, OK) 59 | corloa <- as.data.frame(cor(loa, target)) #remake the correlation matrix again in case that factors were reordered 60 | for (i in 1:nfactors) { 61 | qindt[i,2] <- corloa[i,i] < 0 62 | } 63 | if (sum(qindt[,2]) == 0) { 64 | qindt_log[1,2] <- "OK: No SIGN swap issues" 65 | warning(qindt_log[1,2]) 66 | loa <- as.matrix(loa) 67 | } else { 68 | #.................................................. 69 | # Add *solution* for SIGN SWAP: switch sign of all factor loadings 70 | loa_orig <- as.data.frame(loa) 71 | swapfactors <- which(qindt[,2] == TRUE) 72 | loa <- as.matrix(loa) 73 | loa[ , swapfactors] <- -loa[ , swapfactors] 74 | qindt_log[1,2] <- paste("OK: SIGN swap in factors ", paste(as.character(swapfactors), collapse=", "), sep="") 75 | warning(qindt_log[1,2]) 76 | #.................................................. 
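  # A minimal sketch of the sign-swap correction applied above, on a made-up
  # 3 x 2 loadings matrix and target (values are hypothetical; not run):
  if (FALSE) {
    loa.demo <- matrix(c(0.7, 0.6, 0.5, -0.4, -0.5, -0.6), ncol = 2)
    target.demo <- matrix(c(0.7, 0.6, 0.5, 0.4, 0.5, 0.6), ncol = 2)
    diag(cor(loa.demo, target.demo)) < 0   # TRUE for factor 2: inverted relative to the target
    loa.demo[, 2] <- -loa.demo[, 2]        # reflecting the column restores the orientation
  }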
77 | } 78 | qindeterminacy <- list() 79 | qindeterminacy[[1]] <- loa 80 | qindeterminacy[[2]] <- qindt_log 81 | qindeterminacy[[3]] <- qindt 82 | return(qindeterminacy) 83 | } 84 | -------------------------------------------------------------------------------- /R/qmb.plot.R: -------------------------------------------------------------------------------- 1 | qmb.plot <- function(qmbsum, type=c("zsc", "loa"), nfactors, cex = 0.7, cex.leg=0.8, errbar.col= "black", lwd=1, lty=1, vertdist = 0.2, limits=NULL, r.names=NA, sort=c("none", "difference", "sd"), sbset=NULL, leg.pos="topleft", bty = "n", plot.std = TRUE, pch= NULL, col=NULL, grid.col="gray", ...) { 2 | if(type[[1]] == "loa") { 3 | boloa <- qmbsum[[1]] 4 | db <- boloa[ ,c(grep("loa", names(boloa)), grep("SE", names(boloa)), grep("std", names(boloa)))] 5 | item <- "Q-sort" 6 | values <- "Factor loading" 7 | if(is.null(limits)) limits <- c(-1.0,1.0) 8 | } 9 | 10 | if(type[[1]] == "zsc") { 11 | boloa <- qmbsum[[2]] 12 | db <- boloa[ ,c(grep("zsc.bts", names(boloa)), grep("SE", names(boloa)), grep("std", names(boloa)))] 13 | item <- "Statement" 14 | values <- "z-score" 15 | if(is.null(limits)) { 16 | zscs <- grep("zsc.bts", names(db)) 17 | SEs <- grep("SE", names(db)) 18 | lms.down <- db[,zscs] - db[,SEs] 19 | lms.up <- db[,zscs] + db[,SEs] 20 | limits <- c(floor(min(lms.down)), ceiling(max(lms.up))) 21 | } 22 | } 23 | if(is.numeric(sbset)) db <- db[c(1:min(nrow(db), sbset)), ] 24 | nitems <- nrow(db) 25 | if(length(r.names) == nrow(db)) rownames(db) <- r.names 26 | 27 | if(sort[1] == "sd") { 28 | sds <- apply(db[,(1+nfactors):(2*nfactors)], 1, sum) 29 | db <- db[order(sds), ] 30 | } 31 | if(sort[1] == "difference") { 32 | sds <- abs(apply(db[,(1:nfactors)], 1, sd)) 33 | db <- db[order(sds), ] 34 | } 35 | 36 | #Plotting parameters 37 | db$position <- c(1:nitems) 38 | if(is.null(col)) { 39 | colegend=c(rep("black", nfactors), rep("white", 3)) 40 | dot.col=rep("black", nfactors) 41 | } else { 42 | colegend=c(col[1:nfactors], rep("white", 3)) 43 | dot.col=col[1:nfactors] 44 | } 45 | if(is.null(pch)) pich=array(c(20, 15, 17, 18)) else pich=pch[1:nfactors] 46 | if(is.null(pch)) { 47 | pitx=array(c(21, 22, 24, 23)) 48 | } else if(plot.std) { 49 | if (length(pch) >= 2*nfactors) { 50 | pitx=pch[(nfactors+1):(2*nfactors+1)] 51 | } else stop("The vector of symbols provided in 'pch' needs to be at least twice the length of the number of factors, in order to contain (a) a set of symbols for the bootstrap values and (b) a different set of symbols for the standard values.") 52 | } 53 | i=1 54 | # Plot: 55 | dotchart(db[,i], labels = rownames(db), pch=pich[i], 56 | xlim=limits, 57 | xlab=values, lcolor="white", 58 | lwd = lwd, cex=cex, color=dot.col[i], ...) 59 | mtext(item, side=2, line=1.5, cex=cex, ...) 60 | # Error bars: 61 | segments(x0=db[,i], y0=db[,"position"], 62 | x1=db[,i]+db[,nfactors+i], 63 | y1=db[,"position"], lwd = lwd, lty = lty, col = errbar.col, cex=cex, ...) 64 | segments(x0=db[,i], y0=db[,"position"], 65 | x1=db[,i]-db[,nfactors+i], 66 | y1=db[,"position"], lwd = lwd, lty = lty, col = errbar.col, cex=cex, ...) 67 | # Replot points, for them to be on top of error bars: 68 | points(x=db[,i], db[,"position"]+(vertdist*(i-1)), 69 | pch = pich[i], type = "p", lwd = lwd, 70 | cex=cex, col=dot.col[i], ...) 71 | if(plot.std) { 72 | points(x=db[,(2*nfactors)+i], db[,"position"], pch = pitx[i], 73 | type = "p", lwd = lwd, cex=cex, col=dot.col[i], ...) 
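  # Column layout of 'db' assumed by the indexing above, sketched for a hypothetical
  # run with nfactors = 3 (bootstrap values first, then SEs, then standard values; not run):
  if (FALSE) {
    nfactors.demo <- 3
    i.demo <- 2
    i.demo                        # column 2: bootstrap value for factor 2
    nfactors.demo + i.demo        # column 5: its SE, used as the error-bar half-width
    2 * nfactors.demo + i.demo    # column 8: the standard (non-bootstrap) value
  }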
74 | } 75 | # Plot 2nd and subsequent factors 76 | for (i in 2:nfactors) { 77 | # Error bars: 78 | segments(x0=db[,i], y0=db[,"position"]+(vertdist*(i-1)), 79 | x1=db[,i]+db[,nfactors+i], 80 | y1=db[,"position"]+(vertdist*(i-1)), lwd = lwd, 81 | lty = lty, col = errbar.col, cex=cex, ...) 82 | segments(x0=db[,i], y0=db[,"position"]+(vertdist*(i-1)), 83 | x1=db[,i]-db[,nfactors+i], 84 | y1=db[,"position"]+(vertdist*(i-1)), lwd = lwd, 85 | lty = lty, col = errbar.col, cex=cex, ...) 86 | # Points: 87 | points(x=db[,i], db[,"position"]+(vertdist*(i-1)), 88 | pch = pich[i], type = "p", lwd = lwd, 89 | cex=cex, col=dot.col[i], ...) 90 | if(plot.std) { 91 | points(x=db[,(2*nfactors)+i], 92 | db[,"position"]+(vertdist*(i-1)), 93 | pch = pitx[i], type = "p", lwd = lwd, 94 | cex=cex, col=dot.col[i], ...) 95 | } 96 | } 97 | abline(v=seq(floor(limits[1]), ceiling(limits[2]), 0.5), col=grid.col, lty="dotted", lwd = lwd, ...) 98 | abline(h=c(0.7:(nitems+0.7)), col=grid.col, lty="dotted", lwd = lwd, ...) 99 | if(plot.std) leg.length <- 1:(nfactors+2) else leg.length <- 1:nfactors 100 | legend(leg.pos, legend=c(paste0("Factor ", 1:nfactors), "Empty symbol: standard", "Filled symbol: bootstrap")[leg.length], pch=c(pich[1:nfactors], 0, 0)[leg.length], cex=cex.leg*cex, pt.cex=cex, col=colegend, bg=NA, bty=bty, ...) 101 | } -------------------------------------------------------------------------------- /R/qmb.summary.R: -------------------------------------------------------------------------------- 1 | qmb.summary <- function(qmboots) { 2 | # Basic info from the analysis 3 | nfactors <- qmboots$orig.res$brief$nfactors 4 | nstat <- qmboots$orig.res$brief$nstat 5 | nqsorts <- qmboots$orig.res$brief$nqsorts 6 | 7 | #------------------------------------------------------- 8 | # Gather results of Q-sorts 9 | obj.loa <- as.array(paste0("qmboots$loa.stats$factor", 1:nfactors)) 10 | 11 | loa.std <- qmboots$orig.res$loa 12 | loa.bts <- apply(obj.loa, 1, 13 | function(x) eval(parse(text=paste0(x, "[,c('mean','sd')]")))) 14 | loa.frq <- apply(obj.loa, 1, 15 | function(x) eval(parse(text=paste0(x, "[,c('flag_freq')]")))) 16 | # Give appropriate column names 17 | dimnames(loa.frq) <- list(rownames(loa.bts[[1]]), paste0("flag.freq", 1:nfactors)) 18 | colnames(loa.std) <- paste0("f", 1:nfactors, ".std") 19 | for (i in 1:nfactors) { 20 | colnames(loa.bts[[i]]) <- paste0("f", i, c(".loa", ".SE")) 21 | } 22 | 23 | # Reorder rows in standard results (bootstrap reorders Q-sorts alphab.) 
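  # A minimal sketch of this realignment, with made-up Q-sort names (not run):
  if (FALSE) {
    std.demo <- data.frame(f1.std = c(0.52, 0.71), row.names = c("P02", "P01"))
    boot.order <- c("P01", "P02")           # hypothetical alphabetical order from the bootstrap tables
    std.demo[boot.order, , drop = FALSE]    # standard loadings reindexed to match that order
  }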
24 | loa.std <- loa.std[rownames(loa.frq),] 25 | 26 | # Calculate estimate of bias 27 | loa.bts.est <- apply(obj.loa, 1, 28 | function(x) eval(parse(text=paste0(x, "[,'mean']")))) 29 | loa.bias <- loa.std - loa.bts.est 30 | names(loa.bias) <- paste0("f", 1:nfactors, ".bias") 31 | 32 | # Bind together 33 | qs <- data.frame(loa.std, do.call("cbind", loa.bts), loa.frq, loa.bias) 34 | 35 | #------------------------------------------------------- 36 | # Gather results of statements 37 | obj.zsc <- as.array(paste0("qmboots$'zscore-stats'$factor", 1:nfactors)) 38 | 39 | zsc.std <- qmboots$orig.res$zsc 40 | zsc.bts <- apply(obj.zsc, 1, 41 | function(x) eval(parse(text=paste0(x, "[,c('mean','sd')]")))) 42 | # Appropriate column names 43 | colnames(zsc.std) <- paste0("f", 1:nfactors, ".zsc.std") 44 | for (i in 1:nfactors) { 45 | colnames(zsc.bts[[i]]) <- paste0("f", i, c(".zsc.bts", ".SE")) 46 | } 47 | 48 | # And factor scores 49 | zscn.std <- qmboots$orig.res$zsc_n 50 | zscn.bts <- qmboots$'zscore-stats'$'Bootstraped factor scores' 51 | colnames(zscn.bts) <- paste0("fsc.bts.", 1:nfactors) 52 | 53 | # Calculate estimate of bias for z-scores 54 | zsc.bts.est <- apply(obj.zsc, 1, 55 | function(x) eval(parse(text=paste0(x, "[,'mean']")))) 56 | zsc.bias <- zsc.std - zsc.bts.est 57 | names(zsc.bias) <- paste0("f", 1:nfactors, ".bias") 58 | 59 | # Calculate estimate of bias for factor scores 60 | zscn.bias <- zscn.std - zscn.bts 61 | names(zscn.bias) <- paste0("f", 1:nfactors, ".fsc.bias") 62 | 63 | # Bind together 64 | st <- data.frame(zsc.std, do.call("cbind", zsc.bts), zsc.bias, 65 | zscn.std, zscn.bts, zscn.bias) 66 | qmb <- list(qs, st) 67 | names(qmb) <- c("qsorts", "statements") 68 | return(qmb) 69 | } -------------------------------------------------------------------------------- /R/qmethod.R: -------------------------------------------------------------------------------- 1 | qmethod <- function(dataset, nfactors, extraction="PCA", rotation="varimax", forced=TRUE, distribution=NULL, cor.method="pearson", silent=FALSE, spc= 10^-5, ...) { 2 | # calculate number of Q sorts and number of statements 3 | nstat <- nrow(dataset) 4 | nqsorts <- ncol(dataset) 5 | #threshold for significant values at p-value=.01 and p-value=.05 6 | thold.01 <- 2.58/sqrt(nstat) 7 | thold.05 <- 1.96/sqrt(nstat) 8 | #check that the input data is correct 9 | #if (nqsorts!=ncol(dataset)) stop("Q method input: The number of Q sorts introduced does not match with the number of columns of the data frame or matrix") else if (nstat!=nrow(dataset)) stop("Q method input: The number of statements introduced does not match with the number of rows of the data frame or matrix.") else 10 | 11 | # Validation checks 12 | if (nstat < 2) stop("Q method input: The data frame or matrix entered has less than two statements") 13 | if (nqsorts < 2) stop("Q method input: The data frame or matrix entered has less than two Q-sorts") 14 | if (!is.numeric(as.matrix(dataset)) & !is.integer(as.matrix(dataset))) stop("Q method input: The data frame or matrix entered has non-numerical values.") 15 | if (forced) { 16 | qscores <- sort(dataset[,1], decreasing=FALSE) 17 | if (sum(apply(dataset, 2, function(x) sort(x) != qscores)) > 0) stop("Q method input: The argument 'forced' is set as 'TRUE', but your data contains one or more Q-sorts that do not to follow the same distribution. 
18 | For details on how to solve this error, see 'help(qmethod)', including Note.") 19 | } 20 | if (!forced) { 21 | if (is.null(distribution)) stop("Q method input: The argument 'forced' is set as 'FALSE', but no distribution has been provided in the argument 'distribution'. 22 | For details on how to solve this error or specify the argument 'distribution', see 'help(qmethod)', including Note number 2.") 23 | if (length(distribution) != nrow(dataset)) stop("Q method input: The length of the distribution provided does not match the number of statements.") 24 | if (!is.numeric(distribution) & !is.integer(distribution)) stop("Q method input: The distribution provided contains non-numerical values.") 25 | } 26 | if (length(unique(colnames(dataset))) != nqsorts) stop("Q method input: one or more Q-sort names are duplicated. Please change the names of the dataset by using colnames().") 27 | if (rotation != "varimax") warning("Note that the rotation method selected is not standard in Q methodology publications.") # See discussion at https://github.com/aiorazabala/qmethod/issues/95 28 | uncommon.rotations <- c("quartimax", "bentlerT", "geominT", "targetT", "bifactor", "TargetT", "equamax", "varimin", "specialT", "Promax", "promax", "cluster", "biquartimin", "specialQ", "oblimin", "simplimax") 29 | # Run the analysis 30 | cor.data <- cor(dataset, method=cor.method) 31 | if(extraction == "PCA") { 32 | loa <- unclass(principal(cor.data, nfactors=nfactors, rotate=rotation, ...)$loadings) #PCA from {psych} for factor loadings 33 | } 34 | if(extraction == "centroid") { 35 | loa.unr <- unclass(centroid(tmat=cor.data, nfactors=nfactors, spc)) 36 | if(rotation == "none") { 37 | loa <- loa.unr 38 | } else if(rotation == "varimax") { 39 | loa <- unclass(varimax(loa.unr[,1:nfactors])$loadings) 40 | } else if(rotation %in% uncommon.rotations) { 41 | stop("You have selected a rotation method that is not implemented for 'centroid' extraction within the 'qmethod()' wrapper. To use uncommon rotations with 'centroid' extraction, please run the 'centroid()' function manually. 
The help page 'help(centroid)' indicates how to run the full analysis step-by-step.") 42 | } 43 | } 44 | colnames(loa) <- paste0("f", 1:ncol(loa)) 45 | # The following depends on the qmethod functions: qflag, qzscores, qfcharact, qdc 46 | flagged <- qflag(loa=loa, nstat=nstat) 47 | qmethodresults <- qzscores(dataset, nfactors, flagged=flagged, loa=loa, forced=forced, distribution=distribution) 48 | if(extraction == "PCA") qmethodresults$brief$extraction <- extraction 49 | if(extraction == "centroid") qmethodresults$brief$extraction <- paste0(extraction, " (threshold = ", spc, ")") 50 | qmethodresults$brief$rotation <- rotation 51 | qmethodresults$brief$flagging <- "automatic" 52 | qmethodresults$brief$cor.method <- cor.method 53 | qmethodresults$brief$pkg.version <- packageVersion('qmethod') 54 | qmethodresults$brief$info <- c("Q-method analysis.", 55 | paste0("Finished on: ", 56 | qmethodresults$brief$date), 57 | paste0("'qmethod' package version: ", 58 | qmethodresults$brief$pkg.version), 59 | paste0("Original data: ", 60 | qmethodresults$brief$nstat, 61 | " statements, ", 62 | qmethodresults$brief$nqsorts, " Q-sorts"), 63 | paste0("Forced distribution: ", 64 | qmethodresults$brief$distro), 65 | paste0("Number of factors: ", 66 | qmethodresults$brief$nfactors), 67 | paste0("Extraction: ", 68 | qmethodresults$brief$extraction), 69 | paste0("Rotation: ", 70 | qmethodresults$brief$rotation), 71 | paste0("Flagging: ", 72 | qmethodresults$brief$flagging), 73 | paste0("Correlation coefficient: ", 74 | qmethodresults$brief$cor.method)) 75 | qmethodresults[[8]] <- qdc(dataset, nfactors, zsc=qmethodresults$zsc, 76 | sed=qmethodresults$f_char$sd_dif) 77 | names(qmethodresults)[8] <- "qdc" 78 | if (silent== FALSE) cat(qmethodresults$brief$info, sep="\n") 79 | return(qmethodresults) 80 | } -------------------------------------------------------------------------------- /R/qpcrustes.R: -------------------------------------------------------------------------------- 1 | #Procrustes rotation for each bootstrap step, uses procrustes() function from MCMCpack 2 | qpcrustes <- function(loa, target, nfactors) { 3 | if (!requireNamespace("MCMCpack", quietly = TRUE)) { 4 | stop("Package \"MCMCpack\" needed for this function to work. Please install it.", call. = FALSE) 5 | } 6 | prox <- as.matrix(loa) 7 | prores <- MCMCpack::procrustes(prox, target) 8 | loarot <- as.data.frame(prores[1]) 9 | warning("The procrustes rotation is not working currently due to an issue in package dependency from 'MCMCpack' and 'graph'. If you'd like to try this: 1) see the code for the function 'qpcrustes' and 2) uncomment lines 4 and 5 and comment line 6. You will need to install and load the package 'MCMCpack' to run this function") 10 | loarot <- as.data.frame(prox) 11 | colnames(loarot) <- paste("loarot_f", 1:nfactors, sep="") 12 | return(loarot) 13 | } 14 | -------------------------------------------------------------------------------- /R/qzscores.R: -------------------------------------------------------------------------------- 1 | #calculates final z-scores and factor scores, and extracts main results for Q method 2 | qzscores <- function(dataset, nfactors, loa, flagged, forced = TRUE, distribution = NULL) { 3 | # Validation checks 4 | if (0 %in% colSums(flagged)) warning("Q analysis: One or more of the factors extracted have no flagged Q-sorts and no statement calculations can be made on that specific factor. 
5 | Inspect the 'loa' and 'flagged' tables carefully to see if you missed any flag.") 6 | # calculate number of Q sorts and number of statements 7 | nstat <- nrow(dataset) 8 | nqsorts <- ncol(dataset) 9 | #A. select FLAGGED Q sorts 10 | floa <- flagged*loa #as.data.frame(loa); floa[which(!flagged, arr.ind=T)] <- 0 # the latter does not work in old versions of R 11 | #B. calculate FACTOR WEIGHTS for each Q sort, in a new matrix -needs to be a data.frame to perform variable calculations 12 | fwe <- as.data.frame(apply(floa, 2, function(x) x/(1-x^2))) 13 | #C. calculate Z-SCORES for each sentence and factor 14 | #-- new matrix for wsubm*ssubmn (original matrix * q sort factor weight), and transpose 15 | wraw_all <- list() 16 | n <- 1 17 | for (i in fwe) { 18 | wraw_all[[n]] <- t(t(dataset)*i) 19 | names(wraw_all[[n]]) <- paste("wraw_",n,sep="") 20 | wraw_all[[n]] <- as.data.frame(wraw_all[[n]]) 21 | n <- n+1 22 | } 23 | #-- sums, average and stdev for each statement 24 | zsc_sum <- data.frame(cbind(1:nstat)) 25 | zsc_mea <- data.frame(cbind(1:nstat)) 26 | zsc_std <- data.frame(cbind(1:nstat)) 27 | row.names(zsc_sum) <- row.names(dataset) 28 | row.names(zsc_mea) <- row.names(dataset) 29 | row.names(zsc_std) <- row.names(dataset) 30 | n <- 1 31 | while (n <= ncol(floa)) { 32 | zsc_sum[,n] <- rowSums(wraw_all[[n]]) 33 | zsc_mea[,n] <- mean(rowSums(wraw_all[[n]])) 34 | zsc_std[,n] <- sd(rowSums(wraw_all[[n]])) 35 | n <- n+1 36 | } 37 | colnames(zsc_sum) <- paste("z_sum_",c(1:ncol(floa)),sep="") 38 | colnames(zsc_mea) <- paste("z_mea_",c(1:ncol(floa)),sep="") 39 | colnames(zsc_std) <- paste("z_std_",c(1:ncol(floa)),sep="") 40 | #-- z-scores for each statement 41 | zsc <- matrix(NA, ncol=nfactors, nrow=nstat) 42 | row.names(zsc) <- row.names(dataset) 43 | n <- 1 44 | while (n <= ncol(floa)) { 45 | if(sum(flagged[,n]) == 0) {} else {zsc[,n] <- (zsc_sum[,n]-zsc_mea[,n])/zsc_std[,n]} 46 | n <- n+1 47 | } 48 | colnames(zsc) <- paste("zsc_f",c(1:ncol(floa)),sep="") 49 | #D. FACTOR SCORES: rounded z-scores 50 | if (forced) { 51 | qscores <- sort(dataset[,1], decreasing=FALSE) 52 | if (sum(apply(dataset, 2, function(x) sort(x) != qscores)) > 0) stop("Q method input: The argument 'forced' is set as 'TRUE', but your data contains one or more Q-sorts that do not to follow the same distribution. 
53 | For details on how to solve this error, see 'help(qmethod)', including Note.") 54 | } 55 | if (!forced) { 56 | if (is.null(distribution)) stop("Q method input: The argument 'forced' is set as 'FALSE', but no distribution has been provided in the argument 'distribution'.") 57 | if (length(distribution) != nrow(dataset)) stop("Q method input: The length of the distribution provided does not match the number of statements.") 58 | if (!is.numeric(distribution) & !is.integer(distribution)) stop("Q method input: The distribution provided contains non-numerical values.") 59 | qscores <- sort(distribution, decreasing=FALSE) 60 | } 61 | zsc_n <- as.matrix(zsc) 62 | f <- 1 63 | while (f <= ncol(floa)) { 64 | if (length(unique(zsc[,f])) == length(zsc[,f])) { 65 | zsc_n[,f] <- qscores[rank(zsc[,f])] 66 | } else { 67 | zsc_n[,f] <- qscores[rank(zsc[,f])] 68 | # statements with identical z-score 69 | izsc <- which(round(rank(zsc[,f])) != rank(zsc[,f])) 70 | uizsc <- unique(zsc[izsc,f]) 71 | for (g in uizsc) { 72 | izscn <- which(zsc[,f] == g) 73 | zsc_n[izscn,f] <- min(zsc_n[izscn,f]) 74 | } 75 | } 76 | if (sum(!is.na(zsc[,f])) == 0) zsc_n[,f] <- rep(NA, length(zsc_n[,f])) 77 | f <- f+1 78 | } 79 | colnames(zsc_n) <- paste("fsc_f",c(1:ncol(floa)),sep="") 80 | #E. FACTOR CHARACTERISTICS 81 | f_char <- qfcharact(loa, flagged, zsc, nfactors) 82 | #F. FINAL OUTPUTS 83 | brief <- list() 84 | brief$date <- date() 85 | brief$pkg.version <- packageVersion('qmethod') 86 | brief$nstat <- nstat 87 | brief$nqsorts <- nqsorts 88 | brief$distro <- forced 89 | brief$nfactors <- nfactors 90 | brief$extraction <- "Unknown: loadings were provided separately." 91 | brief$rotation <- "Unknown: loadings were provided separately." 92 | brief$cor.method <- "Unknown: loadings were provided separately." 93 | brief$info <- c("Q-method z-scores.", 94 | paste0("Finished on: ", brief$date), 95 | paste0("'qmethod' package version: ", brief$pkg.version), 96 | paste0("Original data: ", brief$nstat, " statements, ", brief$nqsorts, " Q-sorts"), 97 | paste0("Forced distribution: ", brief$distro), 98 | paste0("Number of factors: ", brief$nfactors), 99 | paste0("Extraction: ", brief$extraction), 100 | paste0("Rotation: ", brief$rotation), 101 | paste0("Flagging: Unknown: flagged Q-sorts were provided separately."), 102 | paste0("Correlation coefficient: ", brief$cor.method)) 103 | # brief <- paste0("z-scores calculated on ", date(), ". Original data: ", nstat, " statements, ", nqsorts, " Q-sorts. Number of factors: ",nfactors,".") 104 | qmethodresults <- list() 105 | qmethodresults[[1]] <- brief 106 | qmethodresults[[2]] <- dataset 107 | qmethodresults[[3]] <- loa 108 | qmethodresults[[4]] <- flagged 109 | qmethodresults[[5]] <- zsc 110 | qmethodresults[[6]] <- zsc_n 111 | qmethodresults[[7]] <- f_char 112 | names(qmethodresults) <- c("brief", "dataset", "loa", "flagged", "zsc", "zsc_n", "f_char") 113 | class(qmethodresults) <- "QmethodRes" 114 | return(qmethodresults) 115 | } -------------------------------------------------------------------------------- /R/runInterface.R: -------------------------------------------------------------------------------- 1 | #' @export 2 | runInterface <- function() { 3 | if (!requireNamespace("shiny", quietly = TRUE)) { 4 | stop("Package \"shiny\" needed for this function to work. Please install it.", call. = FALSE) 5 | } 6 | appDir <- system.file("shiny-examples", "qmethod-gui", package = "qmethod") 7 | if (appDir == "") { 8 | stop("Could not find example directory. 
Try re-installing `qmethod`.", call. = FALSE) 9 | } 10 | 11 | shiny::runApp(appDir, display.mode = "normal") 12 | } 13 | -------------------------------------------------------------------------------- /R/summary.QmethodRes.R: -------------------------------------------------------------------------------- 1 | summary.QmethodRes <- function(object, ...) { 2 | cat(object$brief$info, sep="\n") 3 | cat("\nFactor scores\n") 4 | print(object[[6]], quote=FALSE) 5 | fch <- t(object[[7]][[1]]) 6 | rownames(fch) <- c( 7 | "Average reliability coefficient", 8 | "Number of loading Q-sorts", 9 | "Eigenvalues", 10 | "Percentage of explained variance", 11 | "Composite reliability", 12 | "Standard error of factor scores") 13 | print(fch, quote=FALSE, digits=2) 14 | } -------------------------------------------------------------------------------- /R/zzz.R: -------------------------------------------------------------------------------- 1 | .onAttach <- function(...) { 2 | packageStartupMessage(" 3 | This is 'qmethod' v.", packageVersion("qmethod"),". 4 | 5 | Please cite as: 6 | Zabala, A. (2014) qmethod: A Package to Explore Human Perspectives Using Q Methodology. The R Journal, 6(2):163-173. 7 | ") 8 | 9 | } 10 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | [![CRAN_Status_Badge](http://www.r-pkg.org/badges/version/qmethod)](http://cran.r-project.org/web/packages/qmethod) 2 | [![cran 3 | checks](https://badges.cranchecks.info/worst/qmethod.svg)](https://cranchecks.info/pkgs/qmethod) 4 | [![Build Status](https://travis-ci.org/aiorazabala/qmethod.svg)](https://travis-ci.org/aiorazabala/qmethod) 5 | [![CRAN RStudio mirror downloads](http://cranlogs.r-pkg.org/badges/qmethod)](http://cran.r-project.org/web/packages/qmethod/index.html) 6 | [![DOI](https://zenodo.org/badge/DOI/10.5281/zenodo.593190.svg)](https://doi.org/10.5281/zenodo.593190) 7 | [![Donate](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.com/donate?hosted_button_id=GCMM9PTXPHNT8) 8 | 9 | 10 | 11 | qmethod 12 | ======= 13 | This R package performs the analysis of Q methodology data. See [more details in the website](http://aiorazabala.github.io/qmethod/) and a [visual demo here](http://aiorazabala.github.io/qmethod/GUI). 14 | 15 | [Q](http://qmethod.org/about) is a methodology to study the distinct perspectives existing within a group on a topic of interest. It is used across social, health, and environmental studies. 16 | 17 | You can install the stable version from [CRAN](http://cran.r-project.org/web/packages/qmethod/index.html): 18 | 19 | ```{r} 20 | install.packages('qmethod') 21 | ``` 22 | 23 | To install the latest (not fully tested) version from [github](https://github.com/aiorazabala/qmethod): 24 | 25 | ```{r} 26 | library(devtools) 27 | install_github("aiorazabala/qmethod") 28 | ``` 29 | 30 | To contribute, check out the [guidelines](http://aiorazabala.github.io/qmethod/contribute). 
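Once installed from either source, a minimal run on the bundled `lipset` example data looks like this (a quick sketch based on the sample code used elsewhere in these docs; adjust `nfactors` and the dataset to your own study):

```{r}
library(qmethod)

# Example dataset shipped with the package
data(lipset)

# Full analysis: 3 factors, PCA extraction, varimax rotation
results <- qmethod(lipset[[1]], nfactors = 3,
                   extraction = "PCA", rotation = "varimax")

summary(results)  # factor scores and main factor characteristics
plot(results)     # z-scores; filled symbols mark distinguishing statements
```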
31 | 32 | If you find the package useful, consider [supporting it with a donation.](https://www.paypal.com/donate?hosted_button_id=GCMM9PTXPHNT8) 33 | 34 | -------------------------------------------------------------------------------- /data/importexample.RData: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiorazabala/qmethod/5e69a465dc67227d95dddacb64d802ed08b81afb/data/importexample.RData -------------------------------------------------------------------------------- /data/lipset.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/aiorazabala/qmethod/5e69a465dc67227d95dddacb64d802ed08b81afb/data/lipset.rda -------------------------------------------------------------------------------- /docs/Advanced-analysis.md: -------------------------------------------------------------------------------- 1 | ## Advanced analysis 2 | 3 | The function `qmethod()` runs the full analysis with the default loadings and with automatic flagging. 4 | 5 | To run more advanced analysis and assess the results at each step, you can run the analysis function by function. For **example, to change the automatic flags manually (manual flagging), invert or manipulate Q-sort loadings**, continue reading. 6 | 7 | The following is sample code to run the analysis function by function. To adapt it to your data, replace `lipset[[1]]` with your dataset, and adjust the value of `factors`. 8 | ### 1. Load your data and the package 9 | ```r 10 | data(lipset) # Sample data 11 | library(qmethod) 12 | ``` 13 | ### 2. Calculate (and inspect) Q-sort factor loadings 14 | ```r 15 | # Set the number of factors to extract and rotate 16 | factors <- 3 17 | # The following runs Q analysis and keeps only 18 | # the default factor loadings only: 19 | mloa <- qmethod(lipset[[1]], 20 | nfactors = factors, 21 | extraction = "PCA", # Also "centroid" 22 | rotation = "varimax", # Also "none" 23 | forced = TRUE)$loa 24 | mloa # Inspect the loadings 25 | ``` 26 | ### 3. Manipulate 27 | 28 | #### Invert loadings if necessary 29 | 30 | This example inverts the sign of the first factor. To invert other factors, replace 1 with the number of the factor to invert. 31 | 32 | ```r 33 | mloa[1] <- -mloa[1] 34 | ``` 35 | 36 | #### Manual flagging (either A or B) 37 | 38 | Perform an automatic pre-flagging and inspect. 39 | 40 | ```r 41 | # Automatic flagging: 42 | mflagged <- qflag(loa = mloa, nstat = 33) 43 | # Inspect the automatic flags: 44 | mflagged 45 | ``` 46 | Note: For an easier inspection of flags, see how to print the loadings next to the flags [in Step 8 of the Cookbook](http://aiorazabala.github.io/qmethod/Cookbook#8-explore-the-factor-loadings). 47 | 48 | ##### A. Modify any individual flag 49 | 50 | This example eliminates the flag for Q-sort FR9 in factor 3. 51 | 52 | ```r 53 | mflagged["FR9", 3] <- FALSE 54 | ``` 55 | 56 | ##### B. Generate a completely new set of flags 57 | ```r 58 | # Create a vector of flags for each factor 59 | # ('TRUE' for flagged Q-sorts): 60 | flags1 <- c(FALSE, FALSE, TRUE, TRUE, TRUE, FALSE, FALSE, FALSE, FALSE) 61 | flags2 <- c( TRUE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, FALSE) 62 | flags3 <- c(FALSE, FALSE, FALSE, FALSE, FALSE, TRUE, TRUE, FALSE, FALSE) 63 | 64 | # Bind the vectors together: 65 | mflagged <- data.frame(flags1, flags2, flags3) 66 | 67 | # Set the Q-sort names (not necessary, but useful): 68 | rownames(mflagged) <- rownames(mloa) 69 | ``` 70 | ### 4. 
Run Q analysis with the modified loadings and/or flags
71 | ```r
72 | results <- qzscores(lipset[[1]],
73 | nfactors = factors,
74 | forced = TRUE,
75 | flagged = mflagged, # Modified flags
76 | loa = mloa) # Modified loadings
77 | results # See your results
78 | ```
79 |
80 | ### 5. Calculate distinguishing and consensus statements
81 | ```r
82 | results[[8]] <- qdc(lipset[[1]],
83 | nfactors = factors,
84 | zsc = results[[5]],
85 | sed = as.data.frame(results[[7]][[3]]))
86 | names(results)[8] <- "qdc"
87 | results # See your results
88 | ```
89 |
--------------------------------------------------------------------------------
/docs/Contribute.md:
--------------------------------------------------------------------------------
1 | ## Contributing
2 |
3 |
4 | **Contributions** to developing the package are **most welcome**.
5 | Great to have you here. [`qmethod`](https://github.com/aiorazabala/qmethod) was created by [Aiora Zabala](http://www.landecon.cam.ac.uk/directory/aiora-zabala), with contributions by [Max Held](http://www.maxheld.de) and [Frans Hermans](https://www.researchgate.net/profile/Frans-Hermans-3). Some friendly suggestions for contributing:
6 |
7 | ### Learn More & Join the Conversation
8 |
9 | Communication on developing this package happens right here on **our [GitHub issues](https://github.com/aiorazabala/qmethod/issues)** and by email.
10 | If you have any suggestions, you can raise them here as a new issue, send us an email, or both – that way, we can keep everyone in the loop and have a public record.
11 | If you can, *browse the existing issues first* to avoid duplication.
12 |
13 | To learn more about the current state of development:
14 |
15 | - Browse the [existing issues](https://github.com/aiorazabala/qmethod/issues) and join the conversation by commenting on or [adding new issues](https://github.com/aiorazabala/qmethod/issues/new).
16 | - Read the [package reference manual](http://cran.r-project.org/web/packages/qmethod/qmethod.pdf) to familiarise yourself with how `qmethod` works (can also be found under `/man/`).
17 | - Read the [`qmethod` website](./) for some additional documentation.
18 |
19 |
20 | ### Beta-test New Features
21 |
22 | `qmethod` is under development, and we're keen to have more people beta-test new features. [Here](./Development) is how.
23 |
24 |
25 | ### Add New Features
26 |
27 | There's so much more to do, and we're excited for new additions. We're roughly following the [GitHub Flow](https://guides.github.com/introduction/flow/) development model:
28 |
29 | 1. **Read and log [issues](https://github.com/aiorazabala/qmethod/issues)**, so that we know what everyone is up to and interested in.
30 | - Create an issue *before* you start to work on *anything* so we can avoid duplicate efforts.
31 | - Assign the `backlog` milestone if it's not happening anytime soon.
32 | - Assign yourself as an assignee if you are actively working on it (so as to avoid duplicate efforts).
33 | 2. Collaborators: **Create forks**, such that other collaborators can work (and mess up) in their own sandbox.
34 | - *Remember to pull in upstream changes* from `aiorazabala/qmethod/master` frequently, so as to stay up to date and minimize merge conflicts ([here's how](https://help.github.com/articles/syncing-a-fork/)).
35 | 3. **Create "feature-branches"**
36 | - Create a branch off of your (forked) `master` for the feature to be added.
37 | - If you can, keep feature-branches focused on relatively few, *well*, features, and keep them separate from bugfixes.
38 | - Creating "feature-branches" may seem cumbersome, but it pays off with transparent pull requests (see below).
39 | 4. **Put up a pull-request** ([here's how](https://help.github.com/articles/using-pull-requests/))
40 | - *create the feature-branch early*, and name it with the prefix `WIP` (for work-in-progress) so that other people can provide early feedback and know what's being worked on.
41 | - Remove the `WIP`-prefix once the work is done.
42 | At this stage, a feature should be *fully documented* and *tested* (passing `R CMD check` on all platforms).
43 | Notice that the Travis CI setup included here on GitHub only tests on Linux machines; @maxheld83 can test on OS X and [`win-builder`](http://win-builder.r-project.org/) on Windows, if you do not have these platforms yourself.
44 | - @aiorazabala, as the creator of `qmethod`, will then review the changes and accept the pull request if possible.
45 | Once accepted, the pull request is merged into @aiorazabala's `master`.
46 | 5. **Shipping**: Periodically, whenever significant work has been done, @aiorazabala drafts a release (as per [#121](https://github.com/aiorazabala/qmethod/issues/121)) from `master`, essentially just marking some point in the history of the package as `x.x.x`, and sends it off to CRAN.
47 |
48 |
49 | ### Testing
50 |
51 | Testing is an important part of quality software development, especially for scientific software, where users rely on the accuracy and reproducibility of results.
52 |
53 | Most essentially, any changes **should pass the tests for submitting packages to CRAN**. This involves running the following in your command line:
54 | `R CMD check --as-cran qmethod_1.4.0.tar.gz`
55 | and running the package through [WinBuilder](http://win-builder.r-project.org/), or
56 | `R-devel CMD check --as-cran qmethod_1.4.0.tar.gz`,
57 | ...and getting **no errors** (or solving whatever may arise).
58 | If your changes induce any change in the 'OK' status in the [CRAN package checks for qmethod](https://cran.r-project.org/web/checks/check_results_qmethod.html), the maintainer(s) will poke you to fix it.
59 |
60 |
61 | To learn more about testing, consider Hadley Wickham's [`testthat` package](https://cran.r-project.org/package=testthat) (which is what we're using here) and his book chapter [on testing](http://r-pkgs.had.co.nz/tests.html).
62 |
63 | It is advised that new functions and changes *come with* appropriate tests:
64 |
65 | - For internal **consistency**. For example, `q.mrot.choose`, the interactive rotation function, should always produce a rotation matrix of `rank == nfactors` (pseudo-code; the number of factors should be the same as the rank of the rotation matrix).
66 | - Where applicable, new functions and changes should test against **old versions** (`>= 1.2.0`) *known* to be validated by Aiora against `PQMethod` (see [Zabala 2014](http://journal.r-project.org/archive/2014-2/zabala.pdf)).
67 | - Whenever possible, new functions and changes should test against **(published) results** *known* to be true, using publicly available data.
68 |
69 | In addition to such tests for new functions, missing tests for old functions would also be very welcome.
70 |
71 | ### Help pages
72 |
73 | We give a lot of importance to the usability of the package. However fantastic a function you've developed, it won't matter if it is not easy to use. For this, we believe that **good documentation is essential**. Please make sure that all new functions are well documented in the help pages.
Some general points to observe when writing the help: 74 | - Write full sentences (with noun and verb). 75 | - Be consistent in the use of terms, e.g. do not write Q-Methodology and Q methodology in the same page, or CSV and *.csv, but stick to a single form. If in doubt, check the conventions used in the existing help pages, e.g. those for `qmethod()` 76 | - Think that the user might not necessarily be familiar with all the terms: define everything necessary or link to pages that give more details. 77 | - Don't be afraid of repeating: better to be in excess than to miss an explanation. 78 | 79 | 80 | ### Git(Hub) 81 | 82 | If you're unfamiliar with Git(Hub), it's probably worth spending some time learning these tools and conventions first. 83 | 84 | Here are some great places to start: 85 | 86 | - [GitHub help](https://help.github.com), especially [using pull requests](https://help.github.com/articles/using-pull-requests/) 87 | - [GitHub Guides](https://help.github.com/articles/using-pull-requests/), especially [GitHub Flow](https://guides.github.com/introduction/flow) and [Mastering Issues](https://guides.github.com/features/issues) 88 | - [Official Git Documentation](https://git-scm.com/doc) 89 | -------------------------------------------------------------------------------- /docs/Development.md: -------------------------------------------------------------------------------- 1 | ## Development 2 | 3 | `qmethod` is under continued development, and fixes or additions are [released on GitHub](https://github.com/aiorazabala/qmethod/releases) and published on [CRAN](https://cran.r-project.org/web/packages/qmethod/index.html). 4 | 5 | To join development, please consider our [guidelines](./Contribute). 6 | 7 | You are invited to **beta-test new features**. To do so: 8 | 9 | 1. Explore the [issues](https://github.com/aiorazabala/qmethod/issues), [milestones](https://github.com/aiorazabala/qmethod/milestones) and open [pull requests](https://github.com/aiorazabala/qmethod/pulls) to find a [**feature branch**](https://guides.github.com/introduction/flow/) you are interested in. 10 | (Make sure to familiarize yourself with (Git)Hub first.) 11 | 2. Using the [`devtools`](https://cran.r-project.org/package=devtools) package, you can install such a feature branch or forked version *directly from GitHub*, using the appropriate `repo` (or `pull`) and `ref` (branch, commit or tag). 12 | (To learn how to install R, read the [cookbook](./Cookbook)). 13 | For example, to install Max's current fork, including additional plots, manual rotation etc from PR [97](https://github.com/aiorazabala/qmethod/pull/97) you would run: 14 | 15 | ```r 16 | install.packages("devtools") # if you don't have it yet 17 | library(devtools) 18 | install_github(repo = "maxheld83/qmethod", ref = "master") 19 | library(qmethod) 20 | ``` 21 | 22 | 3. You can now use this feature branch or forked version of `qmethod`. 23 | 24 | 25 | Please note: 26 | 27 | - If you have any suggestions or find any bugs, please report them on our [issue tracker](https://github.com/aiorazabala/qmethod/issues), and refer to the version (and commit) you were using, ideally with a reproducible example of the problem. 28 | - **Do not use these beta versions for mission-critical work or publications. There may be bugs.** 29 | - Remember to stay up to date by frequently re-running the above command. 
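When reporting an issue as described in the notes above, the exact version you are running can be looked up from within R, for example (a small sketch using standard base R utilities):

```r
packageVersion("qmethod")  # installed package version
sessionInfo()              # R version, platform and loaded packages
```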
30 | 31 | 32 | To return to your previous, "safe" version of `qmethod` from CRAN, run: 33 | 34 | ```r 35 | remove.packages("qmethod") 36 | install.packages("qmethod") 37 | library("qmethod") 38 | ``` 39 | -------------------------------------------------------------------------------- /docs/GUI-old.md: -------------------------------------------------------------------------------- 1 | ## Graphical User Interface: install manually (deprecated after `qmethod` v1.8) 2 | 3 | These were two ways to install the GUI in `qmethod` versions <1.8, before the GUI was integrated into the package. It's left here for reference. 4 | 5 | #### A. Run this script in R 6 | 7 | ```{r} 8 | source('http://aiorazabala.net/qmethod-gui/qmethod-gui-install.R') 9 | ``` 10 | The graphical interface will open in your web browser. 11 | 12 | This code does the following: it creates a folder named ```qmethod-gui```, downloads the two files for the visual interface (```server.R``` and ```ui.R```), installs the two R packages required for the visual interface (```qmethod``` and [shiny](http://cran.r-project.org/web/packages/shiny/index.html)), loads the two packages, and finally runs the interface, which will open in your web browser. 13 | 14 | Once installed, to run it again follow these steps: 15 | * Open R 16 | * Copy and paste the code below (replace ```'C://Mypath'``` with the location of the folder ```qmethod-gui``` on your computer, e.g. if the folder in your computer is *C://My Documents/qmethod-gui/*, use ```setwd("C://My Documents")``` ). 17 | ```{r} 18 | setwd("C://Mypath") # 1. Set the location of the folder 'qmethod-gui' 19 | library(shiny) # 2. Load the package 20 | runApp("qmethod-gui") # 3. Run the application 21 | ``` 22 | 23 | *** 24 | #### B. Do the above steps manually 25 | 1. Create a folder, e.g. ```qmethod-gui``` 26 | 2. Download these two files in the above folder: 27 | [server.R](http://aiorazabala.net/qmethod-gui/server.R) and 28 | [ui.R](http://aiorazabala.net/qmethod-gui/ui.R) 29 | 3. Load the required library and run the application: 30 | ```{r} 31 | library(shiny) 32 | runApp("qmethod-gui") 33 | ``` 34 | Within quotations in ```runApp("qmethod-gui")```, put the name of the folder created in step 1 (i.e. where you downloaded the files ```'ui.R'``` and ```'server.R'```). This folder can take any name, e.g. ```"myqmethod-gui"```. 35 | 36 | *** 37 | -------------------------------------------------------------------------------- /docs/GUI.md: -------------------------------------------------------------------------------- 1 | ## Graphical User Interface 2 | 3 | If you are not familiar with R, there is a Graphical User Interface (GUI) that can be used either online or offline. The [online version](https://azabala.shinyapps.io/qmethod-gui/) does not require installation. The offline version requires installing R and the package first (see instructions below). 4 | 5 | [![](http://aiorazabala.net/qmethod-gui/Qmethod_Shiny_GUI2.png)](https://azabala.shinyapps.io/qmethod-gui/) 6 | 7 | Note that the GUI offers only limited functionality from all what's available using the full package (See 'Technical notes' in the GUI). 8 | ### A. [Online version (link to external site)](https://azabala.shinyapps.io/qmethod-gui/) 9 | ### B. Install the offline version: 10 | 11 | 1. Install [R](https://cran.r-project.org/) and the package [qmethod](https://cran.r-project.org/web/packages/qmethod/index.html) in your computer, following steps 2 and 3 in the [Cookbook](./Cookbook). This needs to be done only once. 
12 | 13 | 2. Open R. Run the package and the GUI, by copying & pasting this code: 14 | ```{r} 15 | library(qmethod) 16 | runInterface() 17 | ``` 18 | 19 | You might need to also install the `shiny` package that provides the interface, e.g. `install.packages("shiny")`. 20 | 21 | *** 22 | Are you looking for the manual way of installing the GUI? (for `qmethod` versions <1.8), [see the old instructions here](./GUI-old) (deprecated). 23 | -------------------------------------------------------------------------------- /docs/Plots.md: -------------------------------------------------------------------------------- 1 | ## Plots 2 | 3 | This is an example of what you can do with this package. 4 | 5 | The plot below allows the viewer to quickly identify which are the most distinguishing statements (at the top, e.g. statements 16 and 33) and which are of consensus (at the bottom, e.g. statements 20 and 17). 6 | 7 | It shows, all in one: the z-scores for each statement and factor, the statements of consensus (bottom), the statements over which factors disagree most (top), and the distinguishing statements for each factor (filled markers). 8 | 9 | 10 | ![](http://aiorazabala.net/wp-content/uploads/2017/01/qplot-2.png) 11 | 12 | Distinguishing statements are those which z-score is significantly different across factors, in other words, those statements which markers in the plot are very separated. 13 | 14 | A statement can distinguish all factors, where all markers are very separated (e.g. statement 33) or it can distinguish one factor from the rest (e.g. the marker for statement 16 in factor 3 is very separated from the markers of the other two factors, which cluster together on the left hand side). 15 | 16 | 17 | Sample code for this image: 18 | 19 | ```r 20 | # Load data 21 | data(lipset) 22 | # Perform the Q analysis (extract with PCA, 23 | # rotate three factors using varimax) 24 | results <- qmethod(lipset[[1]], nfactors = 3, 25 | rotation = "varimax") 26 | plot(results) 27 | 28 | # Create a title for the plot 29 | title <- "Sample data (lipset). Filled symbols indicate distinguishing statements. 30 | Statements are ordered from most distinguishing (top) to most consensus (bottom)" 31 | 32 | # Plot into a PNG file 33 | png("qplot.png", 700, 600, family="Open Sans") 34 | plot(results, main = title, 35 | # Specify colours for markers 36 | colours = c("#e41a1c", "#377eb8", "#4daf4a"), 37 | # Specify range of x axis 38 | xlim=c(-2.5, 2.5), 39 | # Fine tune the font size 40 | cex=1.1, cex.axis=0.8, cex.main=1, 41 | # Print the legend at the bottom left 42 | leg.pos="bottomleft") 43 | dev.off() 44 | ``` 45 | -------------------------------------------------------------------------------- /docs/Reporting.md: -------------------------------------------------------------------------------- 1 | ## Reporting 2 | 3 | Comprehensive reporting of a research study is important for transparency and reproducibility. 4 | 5 | The figure below illustrates the process of a Q study. The elements with dashes indicate important research decisions and can be used as a checklist for standardised reporting (image from [Zabala et al 2018](https://conbio.onlinelibrary.wiley.com/doi/full/10.1111/cobi.13123)). 6 | 7 | 8 | [![Research process of Q methodology](https://www.ncbi.nlm.nih.gov/pmc/articles/instance/6849601/bin/COBI-32-1185-g001.jpg)](https://conbio.onlinelibrary.wiley.com/doi/full/10.1111/cobi.13123) 9 | 10 | 11 | The key analytical decisions are shown more in detail below, and indicated with numbers. 
These analytical decisions are further explained [in this paper](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0148087#sec006) (image from [Zabala & Pascual 2016]( https://doi.org/10.1371/journal.pone.0148087)). 12 | 13 | 14 | 15 | 16 | 17 | 18 | -------------------------------------------------------------------------------- /docs/Sample-plot.md: -------------------------------------------------------------------------------- 1 | ## Sample plot of Q data 2 | 3 | This is an example of what you can do with this package. 4 | 5 | The plot below allows the viewer to quickly identify which are the most distinguishing statements (at the top, e.g. statements 16 and 33) and which are of consensus (at the bottom, e.g. statements 20 and 17). 6 | 7 | It shows, all in one: the z-scores for each statement and factor, the statements of consensus (bottom), the statements over which factors disagree most (top), and the distinguishing statements for each factor (filled markers). 8 | 9 | 10 | ![](http://aiorazabala.net/wp-content/uploads/2017/01/qplot-2.png) 11 | 12 | Distinguishing statements are those which z-score is significantly different across factors, in other words, those statements which markers in the plot are very separated. 13 | 14 | A statement can distinguish all factors, where all markers are very separated (e.g. statement 33) or it can distinguish one factor from the rest (e.g. the marker for statement 16 in factor 3 is very separated from the markers of the other two factors, which cluster together on the left hand side). 15 | 16 | 17 | Sample code for this image: 18 | 19 | ```r 20 | # Load data 21 | data(lipset) 22 | # Perform the Q analysis (extract with PCA, 23 | # rotate three factors using varimax) 24 | results <- qmethod(lipset[[1]], nfactors = 3, 25 | rotation = "varimax") 26 | plot(results) 27 | 28 | # Create a title for the plot 29 | title <- "Sample data (lipset). Filled symbols indicate distinguishing statements. 30 | Statements are ordered from most distinguishing (top) to most consensus (bottom)" 31 | 32 | # Plot into a PNG file 33 | png("qplot.png", 700, 600, family="Open Sans") 34 | plot(results, main = title, 35 | # Specify colours for markers 36 | colours = c("#e41a1c", "#377eb8", "#4daf4a"), 37 | # Specify range of x axis 38 | xlim=c(-2.5, 2.5), 39 | # Fine tune the font size 40 | cex=1.1, cex.axis=0.8, cex.main=1, 41 | # Print the legend at the bottom left 42 | leg.pos="bottomleft") 43 | dev.off() 44 | ``` 45 | -------------------------------------------------------------------------------- /docs/_config.yml: -------------------------------------------------------------------------------- 1 | theme: jekyll-theme-minimal -------------------------------------------------------------------------------- /docs/_includes/navigation.html: -------------------------------------------------------------------------------- 1 | 14 | Would you like to send the author an enquiry about Q methodology? 15 | Read this first. 16 |
17 | -------------------------------------------------------------------------------- /docs/_layouts/default.html: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | {% seo %} 9 | 10 | 13 | 14 | 15 |
16 |
17 |

{{ site.title | default: site.github.repository_name }}

18 | 19 | {% if site.logo %} 20 | Logo 21 | {% endif %} 22 | 23 |

{{ site.description | default: site.github.project_tagline }}

24 | 25 | {% include navigation.html %} 26 | 27 | {% if site.github.is_project_page %} 28 |

View the Project on GitHub {{ site.github.repository_nwo }}

29 | {% endif %} 30 | 31 | {% if site.github.is_user_page %} 32 |

View My GitHub Profile

33 | {% endif %} 34 | 35 | {% if site.show_downloads %} 36 | 41 | {% endif %} 42 |
43 | 44 | 45 |
46 | 47 | {{ content }} 48 | 49 |
50 | 56 |
57 | 58 | {% if site.google_analytics %} 59 | 67 | {% endif %} 68 | 69 | 70 | 71 | -------------------------------------------------------------------------------- /docs/_sass/jekyll-theme-minimal.scss: -------------------------------------------------------------------------------- 1 | @import "fonts"; 2 | @import "rouge-github"; 3 | 4 | body { 5 | background-color: #fff; 6 | padding:50px; 7 | font: 14px/1.5 "Noto Sans", "Helvetica Neue", Helvetica, Arial, sans-serif; 8 | color:#727272; 9 | font-weight:400; 10 | } 11 | 12 | h1, h2, h3, h4, h5, h6 { 13 | color:#222; 14 | margin:0 0 20px; 15 | } 16 | 17 | p, ul, ol, table, pre, dl { 18 | margin:0 0 20px; 19 | } 20 | 21 | h1, h2, h3 { 22 | line-height:1.1; 23 | } 24 | 25 | h1 { 26 | font-size:28px; 27 | } 28 | 29 | h2 { 30 | color:#393939; 31 | } 32 | 33 | h3, h4, h5, h6 { 34 | color:#494949; 35 | } 36 | 37 | a { 38 | color:#267CB9; 39 | text-decoration:none; 40 | } 41 | 42 | a:hover, a:focus { 43 | color:#069; 44 | font-weight: bold; 45 | } 46 | 47 | a small { 48 | font-size:11px; 49 | color:#777; 50 | margin-top:-0.3em; 51 | display:block; 52 | } 53 | 54 | a:hover small { 55 | color:#777; 56 | } 57 | 58 | .wrapper { 59 | width:860px; 60 | margin:0 auto; 61 | } 62 | 63 | blockquote { 64 | border-left:1px solid #e5e5e5; 65 | margin:0; 66 | padding:0 0 0 20px; 67 | font-style:italic; 68 | } 69 | 70 | code, pre { 71 | font-family:Monaco, Bitstream Vera Sans Mono, Lucida Console, Terminal, Consolas, Liberation Mono, DejaVu Sans Mono, Courier New, monospace; 72 | color:#333; 73 | } 74 | 75 | pre { 76 | padding:8px 15px; 77 | background: #f8f8f8; 78 | border-radius:5px; 79 | border:1px solid #e5e5e5; 80 | overflow-x: auto; 81 | } 82 | 83 | table { 84 | width:100%; 85 | border-collapse:collapse; 86 | } 87 | 88 | th, td { 89 | text-align:left; 90 | padding:5px 10px; 91 | border-bottom:1px solid #e5e5e5; 92 | } 93 | 94 | dt { 95 | color:#444; 96 | font-weight:700; 97 | } 98 | 99 | th { 100 | color:#444; 101 | } 102 | 103 | img { 104 | max-width:100%; 105 | } 106 | 107 | header { 108 | width:270px; 109 | float:left; 110 | position:fixed; 111 | -webkit-font-smoothing:subpixel-antialiased; 112 | } 113 | 114 | ul.downloads { 115 | list-style:none; 116 | height:40px; 117 | padding:0; 118 | background: #f4f4f4; 119 | border-radius:5px; 120 | border:1px solid #e0e0e0; 121 | width:270px; 122 | } 123 | 124 | .downloads li { 125 | width:89px; 126 | float:left; 127 | border-right:1px solid #e0e0e0; 128 | height:40px; 129 | } 130 | 131 | .downloads li:first-child a { 132 | border-radius:5px 0 0 5px; 133 | } 134 | 135 | .downloads li:last-child a { 136 | border-radius:0 5px 5px 0; 137 | } 138 | 139 | .downloads a { 140 | line-height:1; 141 | font-size:11px; 142 | color:#676767; 143 | display:block; 144 | text-align:center; 145 | padding-top:6px; 146 | height:34px; 147 | } 148 | 149 | .downloads a:hover, .downloads a:focus { 150 | color:#675C5C; 151 | font-weight:bold; 152 | } 153 | 154 | .downloads ul a:active { 155 | background-color:#f0f0f0; 156 | } 157 | 158 | strong { 159 | color:#222; 160 | font-weight:700; 161 | } 162 | 163 | .downloads li + li + li { 164 | border-right:none; 165 | width:89px; 166 | } 167 | 168 | .downloads a strong { 169 | font-size:14px; 170 | display:block; 171 | color:#222; 172 | } 173 | 174 | section { 175 | width:500px; 176 | float:right; 177 | padding-bottom:50px; 178 | } 179 | 180 | small { 181 | font-size:11px; 182 | } 183 | 184 | hr { 185 | border:0; 186 | background:#e5e5e5; 187 | height:1px; 188 | margin:0 0 20px; 189 | } 190 | 
191 | footer { 192 | width:270px; 193 | float:left; 194 | position:fixed; 195 | bottom:50px; 196 | -webkit-font-smoothing:subpixel-antialiased; 197 | } 198 | 199 | @media print, screen and (max-width: 960px) { 200 | 201 | div.wrapper { 202 | width:auto; 203 | margin:0; 204 | } 205 | 206 | header, section, footer { 207 | float:none; 208 | position:static; 209 | width:auto; 210 | } 211 | 212 | header { 213 | padding-right:320px; 214 | } 215 | 216 | section { 217 | border:1px solid #e5e5e5; 218 | border-width:1px 0; 219 | padding:20px 0; 220 | margin:0 0 20px; 221 | } 222 | 223 | header a small { 224 | display:inline; 225 | } 226 | 227 | header ul { 228 | position:absolute; 229 | right:50px; 230 | top:52px; 231 | } 232 | } 233 | 234 | @media print, screen and (max-width: 720px) { 235 | body { 236 | word-wrap:break-word; 237 | } 238 | 239 | header { 240 | padding:0; 241 | } 242 | 243 | header ul, header p.view { 244 | position:static; 245 | } 246 | 247 | pre, code { 248 | word-wrap:normal; 249 | } 250 | } 251 | 252 | @media print, screen and (max-width: 480px) { 253 | body { 254 | padding:15px; 255 | } 256 | 257 | .downloads { 258 | width:99%; 259 | } 260 | 261 | .downloads li, .downloads li + li + li { 262 | width:33%; 263 | } 264 | } 265 | 266 | @media print { 267 | body { 268 | padding:0.4in; 269 | font-size:12pt; 270 | color:#444; 271 | } 272 | } 273 | 274 | /*Extras - AZ*/ 275 | 276 | ul.navigation { 277 | padding: 10px 10px 10px 15px; 278 | border-radius: 5px; 279 | border: 1px solid #e5e5e5; 280 | font-size: smaller; 281 | list-style-type: none; 282 | } 283 | ul.navigation ul { 284 | list-style-type: none; 285 | margin-bottom:0; 286 | } 287 | h1 a { 288 | text-shadow: 5px 5px 15px #267CB9, 0px 0px 19px #000; 289 | font-size: 1.5em; 290 | color: #ffffff; 291 | font-style: italic; 292 | } 293 | h1 a:hover { 294 | color: #ffffff; 295 | transition: 0.3s; 296 | transform: scale(1.1)translate(-50%, -50%); 297 | text-shadow: 3px 3px 3px #000, -5px -5px 19px #444, 5px 5px 20px #267CB9, 5px 5px 15px #267CB9; 298 | } 299 | -------------------------------------------------------------------------------- /docs/index.md: -------------------------------------------------------------------------------- 1 | ## _qmethod_, an R package to analyse Q methodology data 2 | 3 | Q is a methodology to study the distinct perspectives existing within a group of people, on a topic of interest. 4 | It is used across disciplines. See further about the method in [http://qmethod.org/about](http://qmethod.org/about). 5 | 6 | --- 7 | > **_New to R? ▶ [Jump directly to the graphical interface](https://azabala.shinyapps.io/qmethod-gui/)._** 8 | --- 9 | 10 | ### Overview 11 | 12 | This package performs the analysis of Q methodology data. Data can be imported from a range of formats, and results can be explored and exported in multiple ways. See a [graphical interface](./GUI) with the basic functionality and [an example of what you can do with this package](./Sample-plot). 13 | 14 | The package provides all the options for standard Q analysis, such as different extraction methods (principal components analysis and centroid factor extraction), rotation methods (none or varimax), and both forced and non-forced distributions. Manual flagging can be easily run using R code [(see an example)](./Advanced-analysis#a-modify-any-individual-flag). Additional options include different correlation coefficients for the initial correlation matrix (Pearson, Spearman and Kendall) and other mathematical rotations. 
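As a quick illustration of how these options map onto code, a call might look like the following (a sketch only: the argument values are examples, `mydata` is a placeholder for your own matrix of statements by Q-sorts, and `help(qmethod)` documents the full set of arguments):

```r
library(qmethod)
data(lipset)  # example dataset included in the package

# PCA extraction, varimax rotation, Spearman correlations for the
# initial correlation matrix, forced distribution (taken from the data):
results <- qmethod(lipset[[1]], nfactors = 3,
                   extraction = "PCA",
                   rotation   = "varimax",
                   cor.method = "spearman")

# For a non-forced study, set forced = FALSE and pass the distribution
# explicitly, one value per statement (illustrative values for 9 statements):
# results <- qmethod(mydata, nfactors = 3, forced = FALSE,
#                    distribution = c(-2, -1, -1, 0, 0, 0, 1, 1, 2))
```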
15 |
16 | A single function runs the full analysis ([`qmethod()`](https://www.rdocumentation.org/packages/qmethod/versions/1.5.5/topics/qmethod)). Each step can also be run separately using the corresponding functions for correlation matrix, automatic flagging, statement scores, distinguishing and consensus statements, and general factor characteristics.
17 |
18 | Additional functions are available to import data from raw `.CSV`, ['HTMLQ'](https://github.com/aproxima/htmlq) and 'FlashQ' `.CSV` files, ['PQMethod'](http://schmolck.org/qmethod/) `.DAT` files and ['easy-htmlq'](https://github.com/shawnbanasick/easy-htmlq) `.JSON` files; to [plot](./plot) and summarise Q results; to import raw data from individual and multilingual `.CSV` files; to make printable cards; and to perform [bootstrapping](https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0148087).
19 |
20 | For full details about what you can do, see the [package reference manual](http://cran.r-project.org/web/packages/qmethod/qmethod.pdf).
21 |
22 |
23 |
24 | ### Resources
25 |
26 | Here are further links to learn more about the software and about conducting Q methodology (more references in the [package reference manual](http://cran.r-project.org/web/packages/qmethod/qmethod.pdf)):
27 |
28 | * A [cookbook of _qmethod_](./Cookbook).
29 | * The [package in CRAN, the R repository](http://cran.r-project.org/web/packages/qmethod/index.html).
30 | * The [package reference manual](http://cran.r-project.org/web/packages/qmethod/qmethod.pdf).
31 | * The latest code of the package is on [this Github site](https://github.com/aiorazabala/qmethod).
32 | * An introduction to the package, usage, and validation in [Zabala (2014)](http://journal.r-project.org/archive/2014-2/zabala.pdf).
33 | * For those not familiar with R, see a [simple graphical user interface](./GUI) to use either online (without installation) or offline.
34 | * Introduction to Q methodology (slides of a graduate course): [session 1](http://aiorazabala.net/learnQ/Qmethod_AZ_slides_S2.pdf) and [session 2](http://aiorazabala.net/learnQ/Qmethod_AZ_slides_S2.pdf), including a demo of online Q-sorting using [HtmlQ](https://github.com/aproxima/htmlq).
35 | * Recommendations to [report Q studies](./Reporting), for transparency and to facilitate evidence review and reproducibility.
36 | * Suggested best practices in [data management](./Data-management) for Q studies.
37 | * A [video demonstrating the use of "qmethod" to analyse Q sort data](https://www.youtube.com/watch?v=Cxm6U1L88uU) (by [Dr. Kimberlee Everson](https://www.wku.edu/ealr/staff/kimberlee_everson))
38 |
39 |
40 | ***
41 |
42 | ### Development
43 |
44 | The package has been created and is maintained by [Aiora Zabala](http://aiorazabala.net), with contributions from [Max Held](http://www.maxheld.de/) and [Frans Hermans](https://www.researchgate.net/profile/Frans-Hermans-3).
45 | Further contributions are most welcome. To do so, please read the [guidelines](./Contribute), post your suggestions on the [issue tracker](https://github.com/aiorazabala/qmethod/issues), or email the maintainer.
46 |
47 | The package is free and open source. It has been thoroughly [tested and validated](http://journal.r-project.org/archive/2014-2/zabala.pdf). If you use it, please [cite it](https://cran.r-project.org/web/packages/qmethod/citation.html) in your work.
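The citation entry stored in the package (`inst/CITATION`) can also be retrieved from within R:

```r
citation("qmethod")            # preferred citation for the package
toBibtex(citation("qmethod"))  # the same entry as BibTeX, for LaTeX users
```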
48 | 49 | Pathway of priority developments, should resources allow [(also a record of some of the previous work is here)](https://github.com/aiorazabala/qmethod/issues?q=is%3Aissue): 50 | 51 | - [ ] Video explaining how to interpret the Q plot [#373](https://github.com/aiorazabala/qmethod/issues/373) 52 | - [ ] Shiny GUI & usability 53 | [#375](https://github.com/aiorazabala/qmethod/issues/375), 54 | [#374](https://github.com/aiorazabala/qmethod/issues/374), 55 | [#369](https://github.com/aiorazabala/qmethod/issues/369), 56 | [#366](https://github.com/aiorazabala/qmethod/issues/366), 57 | ~[#287](https://github.com/aiorazabala/qmethod/issues/287)~, 58 | ~[#262](https://github.com/aiorazabala/qmethod/issues/262)~, 59 | [#217](https://github.com/aiorazabala/qmethod/issues/217), 60 | [#195](https://github.com/aiorazabala/qmethod/issues/195), 61 | ~[#81](https://github.com/aiorazabala/qmethod/issues/81)~, 62 | [#23](https://github.com/aiorazabala/qmethod/issues/23) 63 | - [ ] Compatibility with online Q-sort collection using [HTMLQ](https://github.com/aproxima/htmlq) and [easy-HTMLQ](https://github.com/shawnbanasick/easy-htmlq) (also FlashQ): 64 | - [X] Import Q-sorts, [#368](https://github.com/aiorazabala/qmethod/issues/368), 65 | - [ ] Import Q-set [#370](https://github.com/aiorazabala/qmethod/issues/370), P-set data (done). 66 | 67 | If you find the package and these resources useful, consider [supporting maintenance and further enhancements:](https://www.paypal.com/donate?hosted_button_id=GCMM9PTXPHNT8) 68 | 69 | [![Donate](https://img.shields.io/badge/Donate-PayPal-green.svg)](https://www.paypal.com/donate?hosted_button_id=GCMM9PTXPHNT8) 70 | -------------------------------------------------------------------------------- /inst/CITATION: -------------------------------------------------------------------------------- 1 | citHeader("To cite 'qmethod' in publications, please use:") 2 | 3 | 4 | bibentry( 5 | bibtype = "Article", 6 | title = "{qmethod}: A Package to Explore Human Perspectives Using Q Methodology", 7 | author = c(person("Aiora", "Zabala")), 8 | year = 2014, 9 | journal = "The R Journal", 10 | year = 2014, 11 | volume = 6, 12 | number = 2, 13 | pages = "163--173", 14 | url = "https://journal.r-project.org/archive/2014-2/zabala.pdf", 15 | textVersion = "Aiora Zabala. qmethod: A Package to Explore Human Perspectives Using Q Methodology. The R Journal, 6(2):163-173, Dec 2014." 16 | ) 17 | 18 | -------------------------------------------------------------------------------- /inst/cardtemplates/AveryZweckformC32010.Rnw: -------------------------------------------------------------------------------- 1 | % this createsactual paper business cards for q items as follows 2 | % sized 85*84mm (business cards) 3 | % 10 per a4 page 4 | % on Avery Zweckform C32010 5 | \documentclass[a4paper,12pt]{article} % because, why not? 6 | \usepackage{longtable} % for multi-page tables 7 | \usepackage{array} % for center, bottom vertical alignment in tables 8 | \usepackage{calc} % to easily identify dimensions etc. 
9 | \usepackage[T1]{fontenc} % read in eur sim 10 | \usepackage[utf8]{inputenc} % allow accents etc in inputs 11 | <>= 12 | if(!is.null(babel.language)) { 13 | cat( 14 | "\\usepackage[", 15 | babel.language, # for international support 16 | "]{babel}", 17 | sep="" 18 | ) 19 | } 20 | @ 21 | \usepackage[ 22 | a4paper, 23 | height=54mm*5, % 5 cards on top of one another 24 | width=85mm*2+10mm, % 2 cards abreast, 10mm aisle 25 | vcentering, % should be vertically centered 26 | hcentering % should be horizontaly centered 27 | ] 28 | {geometry 29 | } 30 | \begin{document} 31 | \pagestyle{empty} 32 | \setlength{\tabcolsep}{5mm} % for margins and "aisle" margin between cards, fix by trial and error 33 | \newcolumntype{H}{>{\centering\arraybackslash\Huge\ttfamily}m} % need this because font size should be different for ID 34 | <>= 35 | q.set.print$empty <- c("\\rule[-27mm]{0mm}{54mm}") # creates empty column with a white height line to make sure rows have some height 36 | print.xtable( 37 | xtable( 38 | x = q.set.print[c("id","empty","full wording")], 39 | type = "latex", 40 | align = c( 41 | "m{0mm}", # this is just for the rownames, isn't actually printed 42 | "H{72mm}", # this is for the hash or id, must be a little shorter to fit 43 | "m{0mm}", # empty column in the middle with stretcher vertical line, width is given by colsep in the above (no need for extra width here) - this adds up to 1cm anyway 44 | "m{75mm}" # the real deal with the full item 45 | ) 46 | ), # smaller than cards for margin, 47 | tabular.environment = "longtable", 48 | latex.environment = "center", 49 | table.placement = "p", 50 | floating = FALSE, 51 | include.rownames = FALSE, 52 | include.colnames = FALSE, 53 | comment = FALSE, 54 | #if(!is.null(wording.font.size)) { 55 | # paste(size = wording.font.size) 56 | #} 57 | size = wording.font.size, 58 | sanitize.text.function = identity, 59 | hline.after = NULL 60 | ) 61 | @ 62 | \end{document} 63 | -------------------------------------------------------------------------------- /inst/extdata/importexample/feedback/after/JohnDoe.csv: -------------------------------------------------------------------------------- 1 | item_id,item_feedback,correction 2 | i01,"I don't like Asterix and Obelix",FALSE 3 | i02,"There is a typo here!",TRUE 4 | -------------------------------------------------------------------------------- /inst/extdata/importexample/qsorts/after/JaneDoe.csv: -------------------------------------------------------------------------------- 1 | ,i01, 2 | i02,i03,i04 3 | -1,0,1 -------------------------------------------------------------------------------- /inst/extdata/importexample/qsorts/after/JohnDoe.csv: -------------------------------------------------------------------------------- 1 | ,i01, 2 | i02,i03,i04 3 | -1,0,1 -------------------------------------------------------------------------------- /inst/extdata/importexample/qsorts/before/JaneDoe.csv: -------------------------------------------------------------------------------- 1 | ,i01, 2 | i02,i03,i04 3 | -1,0,1 -------------------------------------------------------------------------------- /inst/extdata/importexample/qsorts/before/JohnDoe.csv: -------------------------------------------------------------------------------- 1 | ,i01, 2 | i02,i03,i04 3 | -1,0,1 -------------------------------------------------------------------------------- /inst/extdata/importexample/sample/concourse/english/life-with-q.tex: -------------------------------------------------------------------------------- 1 | And life is 
not easy for the R-legionaries who bother to read the works of Stephenson and Brown, for these posit actual Q logics of inquiry. -------------------------------------------------------------------------------- /inst/extdata/importexample/sample/concourse/english/q-uprising.tex: -------------------------------------------------------------------------------- 1 | All of the social sciences? 2 | Well, not entirely \ldots . -------------------------------------------------------------------------------- /inst/extdata/importexample/sample/concourse/english/r-dominance.tex: -------------------------------------------------------------------------------- 1 | The year is 2014 AD. 2 | The empirical social sciences are entirely occupied by survey research. -------------------------------------------------------------------------------- /inst/extdata/importexample/sample/concourse/english/small-village.tex: -------------------------------------------------------------------------------- 1 | One small community of indomitable Q-methodologists still holds out against the survey-wiedling bean counters of social change. -------------------------------------------------------------------------------- /inst/extdata/importexample/sample/concourse/english/video.tex: -------------------------------------------------------------------------------- 1 | check out this video: https://www.youtube.com/watch?v=k61ETJK31ao -------------------------------------------------------------------------------- /inst/extdata/importexample/sample/concourse/german/life-with-q.tex: -------------------------------------------------------------------------------- 1 | Und das Leben ist nicht leicht fuer die R-Legionaere, die sich die Muehe machen Stephenson und Brown zu lesen, denn diese stellen eigene Q-Forschungslogiken auf. -------------------------------------------------------------------------------- /inst/extdata/importexample/sample/concourse/german/q-uprising.tex: -------------------------------------------------------------------------------- 1 | Die ganzen Sozialwissenschaften \ldots ? 2 | Nein! -------------------------------------------------------------------------------- /inst/extdata/importexample/sample/concourse/german/r-dominance.tex: -------------------------------------------------------------------------------- 1 | Wir befinden uns im Jahre 2014 nach Christus. 2 | Die empirischen Sozialwissenschaften sind vollstaendig besetzt von der Umfrageforschung. -------------------------------------------------------------------------------- /inst/extdata/importexample/sample/concourse/german/small-village.tex: -------------------------------------------------------------------------------- 1 | Ein von unbeugsamen Q-Methodologen bewohntes Dorf hoert nicht auf Widerstand zu leisten gegen die Fragebogen-wedelnden Erbsenzaehler. 
-------------------------------------------------------------------------------- /inst/extdata/importexample/sample/concourse/german/video.tex: -------------------------------------------------------------------------------- 1 | schaut mal das video an: https://www.youtube.com/watch?v=k61ETJK31ao -------------------------------------------------------------------------------- /inst/extdata/importexample/sample/concourse/ids.csv: -------------------------------------------------------------------------------- 1 | ID,handle 2 | i01,r-dominance 3 | i02,q-uprising 4 | i03,small-village 5 | i04,life-with-q 6 | i05,video 7 | -------------------------------------------------------------------------------- /inst/extdata/importexample/sample/sampling-structure.csv: -------------------------------------------------------------------------------- 1 | handle 2 | life-with-q 3 | q-uprising 4 | r-dominance 5 | small-village 6 | -------------------------------------------------------------------------------- /inst/shiny-examples/qmethod-gui/rsconnect/shinyapps.io/azabala/qmethod-gui.dcf: -------------------------------------------------------------------------------- 1 | name: qmethod-gui 2 | title: qmethod-gui 3 | username: 4 | account: azabala 5 | server: shinyapps.io 6 | hostUrl: https://api.shinyapps.io/v1 7 | appId: 33162 8 | bundleId: 7947984 9 | url: https://azabala.shinyapps.io/qmethod-gui/ 10 | asMultiple: FALSE 11 | asStatic: FALSE 12 | -------------------------------------------------------------------------------- /inst/shiny-examples/qmethod-gui/server.R: -------------------------------------------------------------------------------- 1 | library(shiny) 2 | library(qmethod) 3 | library(knitr) 4 | data(lipset) 5 | 6 | 7 | shinyServer(function(input, output) { 8 | 9 | data <- reactive({ 10 | inFile <- input$file1 11 | if (input$statnames) read.csv(inFile$datapath, header=input$header, sep=input$sep, quote=input$quote, row.names=1) else read.csv(inFile$datapath, header=input$header, sep=input$sep, quote=input$quote) 12 | }) 13 | 14 | output$contents <- renderTable({ 15 | # input$file1 will be NULL initially. After the user selects 16 | # and uploads a file, it will be a data frame with 'name', 17 | # 'size', 'type', and 'datapath' columns. The 'datapath' 18 | # column will contain the local filenames where the data can 19 | # be found. 
20 | inFile <- input$file1 21 | if (is.null(inFile)) 22 | return(NULL) 23 | data() 24 | }) 25 | 26 | output$rawinfo <- renderText({ 27 | inFile <- input$file1 28 | if (is.null(inFile)) 29 | return(NULL) 30 | raw <- data() 31 | paste0("The loaded data have ", nrow(raw)," statements and ", ncol(raw)," Q-sorts")}) 32 | 33 | output$codeUpload <- renderText({ 34 | inFile <- input$file1 35 | if (is.null(inFile)) 36 | return("Customised code will display here when some data is uploaded in step 1 above.") 37 | paste0("library(qmethod) 38 | mydata <- read.csv('", input$file1[[1]], "', 39 | header = ", input$header, ", sep = '", input$sep, "', quote = '", input$quote, "')") 40 | }) 41 | 42 | output$codeQmethod <- renderText({ 43 | inFile <- input$file1 44 | if (is.null(inFile)) 45 | return("Customised code will display here when some data is uploaded in step 1 above.") 46 | paste0("results <- qmethod(mydata, nfactors = ", input$nfactors, ", extraction = ", 47 | input$extraction, ", rotation = ", input$rotation, ")") 48 | }) 49 | 50 | output$codeSave <- renderText({ 51 | inFile <- input$file1 52 | if (is.null(inFile)) 53 | return("Customised code will display here when some data is uploaded in step 1 above.") 54 | paste0("save(results, 'qm_results.RData')") 55 | }) 56 | 57 | output$codeReport <- renderText({ 58 | inFile <- input$file1 59 | if (is.null(inFile)) 60 | return("Customised code will display here when some data is uploaded in step 1 above.") 61 | paste0("export.qm(results, 'qm_report.txt', style='PQMethod')") 62 | }) 63 | 64 | output$codePlot <- renderText({ 65 | inFile <- input$file1 66 | if (is.null(inFile)) 67 | return("Customised code will display here when some data is uploaded in step 1 above.") 68 | paste0("plot(results, sub='Plot of statement z-scores (filled points: distinguishing items)') 69 | 70 | abline(v=0, col='grey')") 71 | }) 72 | 73 | output$qmPlot <- renderPlot({ 74 | inFile <- input$file1 75 | if (is.null(input$file1)) 76 | return(NULL) 77 | results <- qmethod(data(), nfactors=input$nfactors, rotation=input$rotation, extraction=input$extraction) 78 | par(mai=c(1,input$maileft,0,0), omi=c(0,0,0,0)) 79 | plot(results, sub="Plot of statement z-scores", ylab="") 80 | abline(v=0, col="grey") 81 | }) 82 | 83 | output$summary <- renderPrint({ 84 | inFile <- input$file1 85 | if (is.null(input$file1)) 86 | return("Summary of results will display here when some data is uploaded in step 1 above.") 87 | results <- qmethod(data(), nfactors=input$nfactors, rotation=input$rotation, extraction=input$extraction) 88 | cat("\nFactor scores\n") 89 | print(results[[6]]) 90 | }) 91 | 92 | output$fullResults <- renderPrint({ 93 | inFile <- input$file1 94 | if (is.null(input$file1)) 95 | return("Summary of results will display here when some data is uploaded in step 1 above.") 96 | results <- qmethod(data(), nfactors=input$nfactors, rotation=input$rotation, extraction=input$extraction, silent=T) 97 | print(results, length=max(dim(results[[2]]))) 98 | }) 99 | 100 | output$flaggedqsorts <- renderPrint({ 101 | inFile <- input$file1 102 | if (is.null(input$file1)) 103 | return("Flagged Q-sorts will display here when some data is uploaded in step 1 above.") 104 | results2 <- qmethod(data(), nfactors=input$nfactors2, rotation=input$rotation2, extraction=input$extraction2) 105 | flagqs <- loa.and.flags(results2) 106 | cat("\nNumber of Q-sorts flagged for each factor:\n") 107 | print(results2[[7]][[1]]["nload"]) 108 | cat("\n") 109 | print(flagqs) 110 | }) 111 | 112 | output$factorsel <- renderPrint({ 
113 | inFile <- input$file1 114 | if (is.null(input$file1)) 115 | return("Information to select the number of factors will display here when some data is uploaded in step 1 above.") 116 | results3 <- qmethod(data(), nfactors=input$nfactors3, rotation=input$rotation3, extraction=input$extraction3)[[7]] 117 | cat("\nGeneral factor characteristics:\n") 118 | print(round(results3[[1]], digits=2)) 119 | cat("\nTotal variance explained: ") 120 | tve <- round(sum(results3[[1]]$expl_var), digits=2) 121 | cat(tve, "%") 122 | cat("\n\nCorrelation between factor z-scores:\n") 123 | print(round(results3[[2]], digits=2)) 124 | }) 125 | 126 | output$screePlot <- renderPlot({ 127 | inFile <- input$file1 128 | if (is.null(input$file1)) 129 | return(NULL) 130 | screeplot(prcomp(data()), type="lines", main="Screeplot of unrotated PCA components") 131 | abline(h=1, col="grey") 132 | }) 133 | }) -------------------------------------------------------------------------------- /inst/shiny-examples/qmethod-gui/ui.R: -------------------------------------------------------------------------------- 1 | library(shiny) 2 | 3 | shinyUI(fluidPage( 4 | # Application title 5 | HTML("

Demo: 'qmethod' package to analyse Q methodology data in R

"), 6 | HTML("
"), 7 | sidebarLayout( 8 | sidebarPanel( 9 | HTML("

See more information about the package, a cookbook, and a paper on its implementation and validation.

"), 10 | HTML("

Technical notes: 1) This interface performs the analysis only on Q studies with a 'forced' distribution. To run the analysis with non-forced distributions, see the documentation. 2) Q-sorts are flagged automatically. You can explore this flagging below. Manual flagging is possible using R beyond this simplified interface. Questions & comments (including how to improve this GUI) to aiora [dot] zabala (at) gmail [dot] com

") 11 | ), 12 | 13 | mainPanel( 14 | HTML("

This is a graphical user interface (GUI) of the R package 'qmethod', with basic functionality.

"), 15 | HTML("

Follow these steps to analyse Q methodology data:

16 | 17 |
    18 | 19 |
  1. Upload your data from a *.csv file.
  2. 20 | 21 |
  3. Select the extraction method ('PCA' or 'centroid'), the rotation method ('varimax' is most common), and the number of factors to extract.
  4. 22 | 23 |
24 | 25 |

Full results are displayed at the end. To run the same analysis directly in R and use the full package functionality, see the code created below in Run the analysis directly in R. To run this interface locally on your computer, Install the offline version.

26 | 27 | "), 28 | 29 | HTML("

Advanced: 1) explore the flagging of Q-sorts resulting from different numbers of factors below, or 2) see information to aid the selection of the number of factors below.

") 30 | ) 31 | ), 32 | 33 | HTML("
"), 34 | h3('Step 1. Upload your data'), 35 | 36 | sidebarLayout( 37 | sidebarPanel( 38 | fileInput('file1', 'Choose CSV File', 39 | accept=c('text/csv', 40 | 'text/comma-separated-values,text/plain', 41 | '.csv')), 42 | HTML("

See an example of the format of the dataset here.

"), 43 | tags$hr(), 44 | checkboxInput('header', 'Header', TRUE), 45 | checkboxInput('statnames', 'Statement names in 1st column', FALSE), 46 | radioButtons('sep', 'Separator', 47 | c(Comma=',', 48 | Semicolon=';', 49 | Tab='\t'), 50 | ','), 51 | radioButtons('quote', 'Quote', 52 | c(None='', 53 | 'Double Quote'='"', 54 | 'Single Quote'="'"), 55 | '"') 56 | ), 57 | 58 | mainPanel( 59 | textOutput('rawinfo'), 60 | tableOutput('contents') 61 | ) 62 | ), 63 | 64 | HTML("
"), 65 | 66 | h3('Step 2. Select the extraction and rotation methods and the number of factors'), 67 | 68 | sidebarLayout( 69 | sidebarPanel( 70 | sliderInput("nfactors", label = "Number of factors:", 71 | min = 2, max = 7, value = 3, step = 1), 72 | selectInput("extraction", label = "Extraction:", 73 | choices = c("PCA", "centroid"), selected = "PCA"), 74 | selectInput("rotation", label = "Rotation:", 75 | choices = c("none", "varimax", "quartimax", "promax", "oblimin", "simplimax", "cluster"), selected = "varimax"), 76 | sliderInput("maileft", label = "Item margin:", 77 | min = 0.8, max = 2.8, value = 1, step = 0.2), 78 | ), 79 | 80 | mainPanel( 81 | plotOutput("qmPlot"), 82 | verbatimTextOutput("summary") 83 | ) 84 | ), 85 | 86 | HTML("
"), 87 | 88 | HTML("

Run the analysis directly in R

"), 89 | 90 | sidebarLayout( 91 | sidebarPanel( 92 | HTML("To run the above analysis in R and explore the results further, copy and paste the code on the right. (You need to install R AND install the 'qmethod' package in your computer first if you have not done so already).") 93 | ), 94 | mainPanel( 95 | p("1. Load the package and upload your data:"), 96 | verbatimTextOutput("codeUpload"), 97 | p("2. Run Q analysis:"), 98 | verbatimTextOutput("codeQmethod"), 99 | p("3. Plot the results:"), 100 | verbatimTextOutput("codePlot"), 101 | p("4. Save your data in R format:"), 102 | verbatimTextOutput("codeSave"), 103 | p("5. Export the full report of results in plain text:"), 104 | verbatimTextOutput("codeReport") 105 | ) 106 | ), 107 | 108 | HTML("
"), 109 | HTML("

Advanced: Explore the flagged Q-sorts

"), 110 | p("The table indicates with an '*' those Q-sorts flagged for each factor, using the automatic method. Change the number of factors in the slider below, to see how flaggings vary."), 111 | 112 | sidebarLayout( 113 | sidebarPanel( 114 | sliderInput("nfactors2", label = "Number of factors:", min = 2, max = 7, value = 3, step = 1), 115 | selectInput("extraction2", label = "Extraction:", 116 | choices = c("PCA", "centroid"), selected = "PCA"), 117 | selectInput("rotation2", label = "Rotation:", 118 | choices = c("none", "varimax", "quartimax", "promax", "oblimin", "simplimax", "cluster"), selected = "varimax") 119 | ), 120 | 121 | mainPanel( 122 | verbatimTextOutput("flaggedqsorts") 123 | ) 124 | ), 125 | 126 | HTML("
"), 127 | HTML("

Advanced: Explore how many factors to extract

"), 128 | 129 | sidebarLayout( 130 | sidebarPanel( 131 | sliderInput("nfactors3", label = "Number of factors:", 132 | min = 2, max = 7, value = 3, step = 1), 133 | selectInput("extraction3", label = "Extraction:", 134 | choices = c("PCA", "centroid"), selected = "PCA"), 135 | selectInput("rotation3", label = "Rotation:", 136 | choices = c("none", "varimax", "quartimax", "promax", "oblimin", "simplimax", "cluster"), selected = "varimax") 137 | ), 138 | 139 | mainPanel( 140 | verbatimTextOutput("factorsel"), 141 | plotOutput("screePlot") 142 | ) 143 | ), 144 | 145 | HTML("
"), 146 | HTML("

Full results

"), 147 | 148 | fluidRow( 149 | verbatimTextOutput("fullResults") 150 | ) 151 | )) 152 | -------------------------------------------------------------------------------- /man/build.q.set.Rd: -------------------------------------------------------------------------------- 1 | \name{build.q.set} 2 | \alias{build.q.set} 3 | \title{Q methodology: sample a Q set from a concourse} 4 | \description{ 5 | Subsets a concourse of items into a sample of selected items. 6 | Returns a dataframe with handles as row names, and languages (if applicable) as columns. 7 | } 8 | \usage{ 9 | build.q.set(q.concourse, q.sample, q.distribution) 10 | } 11 | 12 | \arguments{ 13 | \item{q.concourse}{ 14 | A matrix with handles as row names, (optional) languages as columns, and full item wordings in cells as produced by \code{\link{import.q.concourse}}. 15 | } 16 | \item{q.sample}{ 17 | A character vector of handles (such as \kbd{q-is-great}). 18 | The items identified by the handles will be sampled. 19 | } 20 | \item{q.distribution}{ 21 | The chosen Q distribution as a vector of integers, such as \code{c(1,3,1)}. 22 | } 23 | } 24 | \details{ 25 | Q studies are carried out letting participants rank a \emph{sample} of statements (items), collectively referred to as the \emph{Q set}. 26 | These Q sets are drawn (by some sampling strategy) from a \emph{concourse}, or universe of items. 27 | This function subsets the concourse generated by \code{\link{import.q.concourse}}, based on a vector of handles provided, and returns it as \code{q.set}. 28 | 29 | The function implements a number of tests on the validity and consistency of inputs. 30 | 31 | If you are not familiar with the terminology of item \emph{handle}, \emph{ID} and \emph{wording} or the file structure expected for import functions, please read the respective sections in the documentation for \link{qmethod-package} first or consider the package \href{http://aiorazabala.github.io/qmethod/Data-management}{website}. 32 | } 33 | 34 | \value{ 35 | Returns a matrix with handles as row names, languages (if applicable) as column names and full item wordings in cells. 36 | } 37 | 38 | \note{ 39 | This function currently does \emph{not} actually \emph{draw} a sample, but merely builds the Q set from a \emph{given} sample. 40 | 41 | This function currently requires input in the argument \code{q.distribution}, but it only checks for the sum, so if you are working with a distribution-free study that still has a fixed number of items, you can just enter a vector of length 1 with your total sum of items. 
42 | } 43 | 44 | \author{Maximilian Held} 45 | 46 | \seealso{ 47 | \code{\link{import.q.concourse}}, 48 | \code{\link{import.q.feedback}}, 49 | \code{\link{import.q.sorts}}, 50 | \code{\link{make.cards}} 51 | } 52 | 53 | \examples{ 54 | # Build a Q Set from a concourse and a sample 55 | data(importexample) 56 | q.set <- build.q.set( 57 | q.concourse = importexample$q.concourse, # as created by import.q.concourse 58 | q.sample = c("life-with-q","q-uprising","r-dominance","small-village"), 59 | # add vector with items to be selected from concourse 60 | # q.sample is ideally read in from a separate *.CSV file 61 | q.distribution = c(1,2,1) # very simple distribution 62 | ) 63 | } 64 | -------------------------------------------------------------------------------- /man/centroid.Rd: -------------------------------------------------------------------------------- 1 | \name{centroid} 2 | \alias{centroid} 3 | \title{Q methodology: centroid extraction} 4 | \description{Extracts factors/ components using the centroid approach as an alternative to Principal Components Analysis.} 5 | \usage{centroid(tmat, nfactors = 7, spc)} 6 | 7 | \arguments{ 8 | \item{tmat}{a correlation matrix between Q-sorts.} 9 | \item{nfactors}{number of factors/ components to extract. Defaults to 7.} 10 | \item{spc}{the threshold to accept factor results, set to 0.00001 by default (in Brown 1980, this is set to 0.02).} 11 | } 12 | 13 | \details{This function implements the centroid method for the extraction of factors, an alternative to Principal Components that can be used in Q methodology. The calculations are based on Brown (1980; below). 14 | 15 | The function is called from within \code{\link{qmethod}} where the argument \code{extraction} is set to \code{centroid}. 16 | 17 | This function can be used independently when conducting each step of the analysis separately, preceded by a correlation between Q-sorts and followed by the rotation of factors/ components (see below), calculation of z-scores, etc. 18 | } 19 | \value{ 20 | Returns a matrix with Q-sorts as rows, and factors as columns. 21 | } 22 | \references{Brown, S. R., 1980 \emph{Political subjectivity: Applications of Q methodology in political science}, New Haven, CT: Yale University Press, pages 208-224. 23 | 24 | See further references on the methodology in \code{\link{qmethod-package}}.} 25 | 26 | \note{This is a function used within \code{\link{qmethod}}. 
Rarely to be used independently.} 27 | 28 | \author{Frans Hermans} 29 | 30 | \examples{ 31 | #### Example 32 | require('qmethod') 33 | require("psych") 34 | 35 | # Load data 36 | data("lipset") 37 | lip <- lipset[[1]] 38 | 39 | # Correlation matrix 40 | corlip <- cor(lip) 41 | 42 | # Centroid extraction 43 | lipcent <- centroid(corlip) 44 | lipcent 45 | 46 | 47 | ## To finalise the full analysis, continue with the following steps 48 | # Rotation (in this example, varimax over 3 factors) 49 | vmax <- varimax(lipcent[,1:3]) 50 | 51 | # Automatic pre-flagging of Q-sorts 52 | flags <- qflag(unclass(vmax$loadings), nstat = 33) 53 | 54 | # Calculate z-scores and general characteristics 55 | results <- qzscores(lip, 3, loa=vmax$loadings, flagged=flags) 56 | summary(results) 57 | 58 | # Consensus and distinguishing statements 59 | results$qdc <- qdc(lip, 3, zsc=results$zsc, sed=results$f_char$sd_dif) 60 | 61 | plot(results) 62 | 63 | ## All of the above can be done with: 64 | results2 <- qmethod(lip, 3, extraction="centroid") 65 | } -------------------------------------------------------------------------------- /man/export.pqmethod.Rd: -------------------------------------------------------------------------------- 1 | \name{export.pqmethod} 2 | \alias{export.pqmethod} 3 | \title{Q methodology: export to PQMethod *.DAT and *.STA files} 4 | \description{Exports Q data to *.DAT and *.STA files readable in PQMethod software.} 5 | \usage{export.pqmethod(dataset, study.name, 6 | study.description, col.range, 7 | filename='Q_data_forPQmethod', 8 | left.zeros, right.zeros, statements)} 9 | 10 | \arguments{ 11 | \item{dataset}{a matrix or data frame with Q data: Q-sorts as columns and statements as rows. The names of the columns will be used as Q-sort IDs in the *.DAT file.} 12 | \item{study.name}{a string with a short name of the study. No space characters are allowed.} 13 | \item{study.description}{a string with a one-sentence description of the study.} 14 | \item{col.range}{a two-element numerical vector with the values at the two extremes of the Q distribution (e.g. \code{c(-4, 4)}).} 15 | \item{filename}{a filename. The extension *.DAT is added automatically.} 16 | \item{left.zeros}{number of zeros before the distribution, in the second line of the *.DAT file.} 17 | \item{right.zeros}{number of zeros after the distribution, in the second line of the *.DAT file.} 18 | \item{statements}{a matrix with statements, one in each row.} 19 | } 20 | \details{ 21 | Exports the raw data of a Q methodology study into the native format used in PQMethod. Returns a message with some basic information about the data. 22 | 23 | Note that \bold{no checks are made on the data}, such as whether there are duplicated or non-forced Q-sorts. 24 | 25 | This function is not applicable to non-forced distributions. 26 | 27 | } 28 | 29 | \references{Schmolck, Peter, 2014. \emph{PQMethod Software}, Available at: \url{http://schmolck.org/qmethod/} 30 | 31 | File descriptions in \emph{PQMethod Manual}: \url{http://schmolck.org/qmethod/pqmanual.htm#appdxa}} 32 | 33 | \author{Aiora Zabala} 34 | 35 | \note{This function is experimental. 
Use with caution and verify that the output is as desired.} 36 | 37 | \examples{ 38 | # data(lipset) 39 | # db <- lipset[[1]] 40 | # export.pqmethod(dataset = db, 41 | # study.name = 'mystudy', 42 | # study.description = 'great study', 43 | # col.range = c(-4, 4), 44 | # filename = 'mystudy', 45 | # statements=lipset[[2]]) 46 | } 47 | -------------------------------------------------------------------------------- /man/export.qm.Rd: -------------------------------------------------------------------------------- 1 | \name{export.qm} 2 | \alias{export.qm} 3 | \title{Q Methodology: export results to a plain text document} 4 | \description{Exports an object of class \code{QmethodRes} to a plain text file (*.TXT). All the objects within the list resulting from \code{\link{qmethod}} are exported as they are. This is intended for interpretation rather than for further analysis.} 5 | \usage{export.qm(qmobject, file, style= c("R", "PQMethod"))} 6 | 7 | \arguments{ 8 | \item{qmobject}{an object of Q methodology results, obtained from the function \code{\link{qmethod}}.} 9 | \item{file}{the file name. Note that in some operating systems, the file name will need an extension *.TXT so that other software opens it.} 10 | \item{style}{the structure and formatting of the results in the exported document. Defaults to \code{"R"} where the \code{qmobject} will be written as is. Option \code{"PQMethod"} provides an output with similar structure and elements as those provided by PQMethod software in the *.LIS files (see details of *.LIS files in the References below). Note that the latter creates a much longer document.} 11 | } 12 | 13 | \references{Schmolck. \emph{PQMethod Software (Version 2.35)}, 2014. \url{http://schmolck.org/qmethod/} 14 | 15 | File descriptions in \emph{PQMethod Manual}: \url{http://schmolck.org/qmethod/pqmanual.htm#appdxa} 16 | } 17 | 18 | \author{Aiora Zabala} -------------------------------------------------------------------------------- /man/import.easyhtmlq.Rd: -------------------------------------------------------------------------------- 1 | \name{import.easyhtmlq} 2 | \alias{import.easyhtmlq} 3 | \title{Q methodology: import data from easy-HTMLQ} 4 | \description{Imports data from *.JSON files created with easy-HTMLQ software for Q-sort administration.} 5 | \usage{import.easyhtmlq(filename, ...)} 6 | 7 | \arguments{ 8 | \item{filename}{a file with extension *.JSON (see full description of the file below in References).} 9 | \item{...}{further arguments to be passed to \code{\link[utils]{read.csv2}}.} 10 | } 11 | \details{ 12 | Extracts the raw data of a Q methodology study from the native format saved in \emph{easy-HTMLQ}. Returns a list with two objects. 13 | 14 | The first object contains a data frame with items as rows and Q-sorts as columns, ready to be used in \code{\link{qmethod}}. It sets the Q-sort names to the values in the column 'uid' or else in 'sid'. 15 | 16 | The second object contains the additional data collected. Columns \code{npos}, \code{nneu} and \code{nneg} have the number of items allocated to the groups of 'positive', 'neutral', and 'negative' respectively. Columns which name start with \code{comment*} and \code{form*} contain further information introduced by the respondent. Columns which name start with \code{dur*} contain the time that the respondent spent in each screen. Column \code{datetime} contains the data stamp when the Q-sort was submitted. 17 | } 18 | 19 | \author{Aiora Zabala} 20 | 21 | \references{Banasick, Shawn, 2021. 
\emph{easy-htmlq}, Available at: \url{https://github.com/shawnbanasick/easy-htmlq}, based on Oschlies, Johannes and Killing, Marvin, 2015. \emph{HTMLQ}, Available at: \url{https://github.com/aproxima/htmlq} 22 | } -------------------------------------------------------------------------------- /man/import.htmlq.Rd: -------------------------------------------------------------------------------- 1 | \name{import.htmlq} 2 | \alias{import.htmlq} 3 | \title{Q methodology: import data from HTMLQ and FlashQ} 4 | \description{Imports data from *.CSV files created with HTMLQ or FlashQ softwares for Q-sort administration.} 5 | \usage{import.htmlq(filename, ...)} 6 | 7 | \arguments{ 8 | \item{filename}{a file with extension *.CSV, separated by ";" as done by default in HTMLQ (see full description of the file below in References).} 9 | \item{...}{further arguments to be passed to \code{\link[utils]{read.csv2}}.} 10 | } 11 | \details{ 12 | Extracts the raw data of a Q methodology study from the native format saved in both \emph{FlashQ} and \emph{HTMLQ}. Returns a list with two objects. 13 | 14 | The first object contains a data frame with items as rows and Q-sorts as columns, ready to be used in \code{\link{qmethod}}. It sets the Q-sort names to the values in the column 'uid' or else in 'sid'. 15 | 16 | The second object contains the additional data collected. Columns \code{npos}, \code{nneu} and \code{nneg} have the number of items allocated to the groups of 'positive', 'neutral', and 'negative' respectively. Columns which name start with \code{comment*} and \code{form*} contain further information introduced by the respondent. Columns which name start with \code{dur*} contain the time that the respondent spent in each screen. Column \code{datetime} contains the data stamp when the Q-sort was responded. 17 | } 18 | 19 | \author{Aiora Zabala} 20 | 21 | \references{Hackert, Christian and Braehler, Gernot, 2007. \emph{FlashQ}, Used to be available at: http://www.hackert.biz/flashq, but offline as tested on Feb 2021. 22 | 23 | Oschlies, Johannes and Killing, Marvin, 2015. \emph{HTMLQ}, Available at: \url{https://github.com/aproxima/htmlq} 24 | } -------------------------------------------------------------------------------- /man/import.pqmethod.Rd: -------------------------------------------------------------------------------- 1 | \name{import.pqmethod} 2 | \alias{import.pqmethod} 3 | \title{Q methodology: import PQMethod *.DAT files} 4 | \description{Imports data from *.DAT files created in PQMethod software.} 5 | \usage{import.pqmethod(file, ...)} 6 | 7 | \arguments{ 8 | \item{file}{a file with extension *.DAT (see full description of the file below in References).} 9 | \item{...}{further arguments to be passed to \code{\link[utils]{read.table}} and \code{\link[utils]{read.fwf}}.} 10 | } 11 | \details{ 12 | Extracts the raw data of a Q methodology study from the native format used in PQMethod. Returns a data frame with statements as rows and Q-sorts as columns. 13 | 14 | If the following error occurs: \code{"invalid multibyte string"}, a possible solution is to either set the right file-encoding in the argument \code{fileEncoding} or inspect the file for uncommon characters (see details in \code{\link[utils]{read.table}}). 15 | } 16 | 17 | \references{Schmolck, Peter, 2014. 
\emph{PQMethod Software}, Available at: \url{http://schmolck.org/qmethod/} 18 | 19 | File descriptions in \emph{PQMethod Manual}: \url{http://schmolck.org/qmethod/pqmanual.htm#appdxa}} 20 | 21 | \author{Aiora Zabala} -------------------------------------------------------------------------------- /man/import.q.concourse.Rd: -------------------------------------------------------------------------------- 1 | \name{import.q.concourse} 2 | \alias{import.q.concourse} 3 | \title{Q methodology: import concourse of Q items} 4 | \description{ 5 | Imports a full set of items (statements in a concourse) from a directory of *.TEX files (one file per item), including possible translations in separate folders. 6 | } 7 | \usage{ 8 | import.q.concourse(q.concourse.dir, languages = NULL) 9 | } 10 | 11 | \arguments{ 12 | \item{q.concourse.dir}{ 13 | A directory of \emph{individual} item wordings in *.TEX files with handles as filenames (e.g. \kbd{happy-feeling.tex}). 14 | If \code{languages} are specified, the directory should contain one folder per language, with all full item wordings as individual *.TEX files in \emph{each} language folder. 15 | Items should have the \emph{same} file name across all languages (e.g. \kbd{happy-feeling.tex}). 16 | Directories end with a trailing slash, such as \code{study/q-sample/q-concourse/}. 17 | } 18 | \item{languages}{ 19 | A character vector of languages, same as folders within \code{q.concourse.dir}. 20 | If the concourse is monolingual, leave empty. Defaults to \code{NULL}. 21 | } 22 | } 23 | \details{ 24 | Q studies are conducted by asking participants (or a P set) to rank order a \emph{sample} (or Q Set) of items, drawn from a universe (or concourse) of items, based on some sampling strategy. 25 | A concourse is, simply put, \emph{the sum of all things people could say about a subject matter}. 26 | 27 | It is helpful to keep the \emph{entire} concourse readily available, so as to draw samples from it. 28 | 29 | For some studies, it is necessary to have the complete items available in several languages. 30 | 31 | This function simply imports all full item wordings and assigns a \emph{handle} for the item, based on the filename (see \link{qmethod-package}). 32 | These filenames should be short and meaningful to the researcher. 33 | 34 | Individual items as *.TEX files should include minimal markup, and no trailing whitespace or empty newlines. 35 | If you do not need any additional formatting, you can just save plain text files (*.TXT) with the extension *.TEX. 36 | There is no need to know \href{https://www.latex-project.org/}{LaTeX}. 37 | 38 | Returns error if items are not available in all translations. 39 | 40 | Defaults to monolingual variant. 41 | 42 | If you are not familiar with the terminology of Q item \emph{handle}, \emph{ID} and \emph{wording} or the file structure expected for import functions, please read the respective sections in the documentation for \link{qmethod-package} first or consider the package \href{http://aiorazabala.github.io/qmethod/Data-management}{website}. 43 | } 44 | \value{ 45 | Returns a character matrix with handles as row names, languages (if applicable) as columns and full item wording per language in cells. 
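For example (a minimal sketch, assuming the concourse was imported with the \code{english} and \code{german} folders of the import example; adjust the indexing to your own handles and languages):

\preformatted{
dim(q.concourse)                       # one row per handle, one column per language
q.concourse["life-with-q", "english"]  # full wording of one item in one language
}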
46 | } 47 | 48 | \author{Maximilian Held} 49 | 50 | \seealso{ 51 | \code{\link{build.q.set}}, 52 | \code{\link{import.q.feedback}}, 53 | \code{\link{import.q.sorts}}, 54 | \code{\link{make.cards}} 55 | } 56 | 57 | \examples{ 58 | ## Import a full q concourse from 'importexample' dataset 59 | path.concourse <- paste( # this part is only for the example! 60 | path.package("qmethod"), # just to make sure, use absolute path 61 | # import example files are in root/extdata of package 62 | "/extdata/importexample/sample/concourse/", # location of concourse 63 | sep = "" 64 | ) 65 | q.concourse <- import.q.concourse( # import concourse 66 | q.concourse.dir = path.concourse, # insert your applicable path here 67 | languages = c("english","german") # choose your languages from path here 68 | ) 69 | 70 | } 71 | -------------------------------------------------------------------------------- /man/import.q.feedback.Rd: -------------------------------------------------------------------------------- 1 | \name{import.q.feedback} 2 | \alias{import.q.feedback} 3 | \title{Q methodology: imports feedback on Q items} 4 | \description{ 5 | Turns raw item feedback (in *.CSV files) into a verified array or matrix. 6 | } 7 | \usage{ 8 | import.q.feedback(q.feedback.dir, q.sorts, q.set, manual.lookup = NULL) 9 | } 10 | 11 | \arguments{ 12 | \item{q.feedback.dir}{ 13 | A relative path to a directory structure where: 14 | \itemize{ 15 | \item (optional) folders are \code{conditions} (such as \kbd{before} and \kbd{after}), if there is more than one condition. 16 | Conditions are inferred from the specified \code{q.sorts}. 17 | If there are no conditions, there should be no folders. 18 | \item filenames of *.CSV are participant names (might be given pseudonyms). 19 | \item *.CSV files within folders contain raw feedback, beginning with an arbitrary header line (ignored), and the following columns, starting from the left: 20 | \enumerate{ 21 | \item An ID, either as an automatic hash or manually specified (see \code{\link{qmethod-package}}), as specified per the \code{manual.lookup} option of \code{\link{make.cards}}. 22 | Each ID only occurs once. 23 | \item The full feedback in plain text, enclosed in quotes. 24 | \item Optionally, a logical indicator whether current line should be ignored (in which case it should be set to \code{TRUE}). 25 | If there is no such column, all feedback will be imported. 26 | } 27 | } 28 | } 29 | \item{q.sorts}{ 30 | A matrix or array with handles as row names, participant as column names, (optional) conditions as 3rd dimension and cells as Q-sort ranks, as produced by \code{\link{import.q.sorts}}. 31 | } 32 | \item{q.set}{ 33 | A matrix with handles as row names, languages (if applicable) in columns, as produced by \code{\link{build.q.set}}. 34 | } 35 | \item{manual.lookup}{ 36 | A matrix with handles as row names, and IDs (such as \kbd{"sta121"}, as printed on the Q-cards by \code{\link{make.cards}}) in any of the columns. 37 | Defaults to \code{NULL} in which case items IDs are expected to be item wording hashes, as produced by \code{\link{make.cards}}. 38 | } 39 | } 40 | 41 | \details{ 42 | 43 | Participants in Q studies are often invited to provide open-ended feedback on items, giving researchers additional information on participants' viewpoints. 
44 | This feedback is conveniently entered in a spreadsheet editor (2nd column), where each line of feedback corresponds to an item ID (1st column) 45 | An additional, optional (3rd) column indicates whether the current line should be ignored (\code{TRUE}), as may be the case for privacy reasons or when the feedback is merely a correction of a typographic error. 46 | If no such 3rd column is included, all feedback will be imported. 47 | 48 | The automatic summary of full item wordings, technically known as \emph{hashing}, proceeds internally by passing the full item wording to the \code{\link[digest]{digest}} function of the package \pkg{digest} (with arguments set to \code{algo = crc32, serialize = FALSE}.) 49 | 50 | After an (arbitrary) header line, a *.CSV file may look like this:\cr 51 | \samp{sta001,"This q-item sounds like r-research to me!",FALSE}, indicating that it should \emph{not} be ignored (\kbd{FALSE}). 52 | 53 | If you are not familiar with the terminology of item \emph{handle}, \emph{ID} and \emph{wording} or the file structure expected for import functions, please read the respective sections in the documentation for \link{qmethod-package} first or consider the package \href{http://aiorazabala.github.io/qmethod/Data-management}{website}. 54 | } 55 | 56 | \value{ 57 | Returns a matrix or array (if there is more than one condition) with handles as row names, people as column names, (optional) conditions as 3rd dimension name and item feedback in cells. 58 | The return parallels the output from \code{\link{import.q.sorts}}, but with feedback as array cells, rather than Q-sort ranks. 59 | } 60 | 61 | \author{Maximilian Held} 62 | 63 | \seealso{ 64 | \code{\link{import.q.concourse}}, 65 | \code{\link{import.q.sorts}}, 66 | \code{\link{build.q.set}}, 67 | \code{\link{make.cards}}, 68 | \code{\link{qmethod}} 69 | } 70 | 71 | \examples{ 72 | data(importexample) 73 | path.feedback <- paste( # this part is only for the example! 74 | path.package("qmethod"), # just to make sure, use absolute path 75 | # import example files are in root/extdata of package: 76 | "/extdata/importexample/feedback/", # location of sorts 77 | sep = "" 78 | ) 79 | q.feedback <- import.q.feedback( # now import the feedback 80 | q.feedback.dir = path.feedback, # add your path here 81 | q.sorts = importexample$q.sorts, 82 | q.set = importexample$q.set, # as produced by build.q.set 83 | manual.lookup = matrix( # ideally empty for automatic hashing, or read in from *.CSV 84 | c("i01","i02","i03","i04"), 85 | ncol = 1, 86 | nrow = 4, 87 | dimnames = list(c("r-dominance","q-uprising","small-village","life-with-q"),"ID") 88 | ) 89 | ) 90 | } 91 | -------------------------------------------------------------------------------- /man/importexample.Rd: -------------------------------------------------------------------------------- 1 | \name{importexample} 2 | \docType{data} 3 | \alias{importexample} 4 | \title{Import Example} 5 | \description{ 6 | A minimum working example (MWE) to test the functions \code{\link{import.q.concourse}}, \code{\link{build.q.set}}, \code{\link{import.q.sorts}}, \code{\link{import.q.feedback}} and \code{\link{make.cards}}. 7 | The example is too small to run an actual Q analysis. 8 | To test out a real study with the same data structure, go to: \url{https://github.com/maxheld83/keyneson}. 
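A quick way to inspect the bundled object (a sketch; element names such as \code{q.concourse}, \code{q.set} and \code{q.sorts} are those used in the examples of the functions above):

\preformatted{
data(importexample)
names(importexample)               # e.g. q.concourse, q.set, q.sorts
str(importexample, max.level = 1)  # overview of the example objects
}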
9 | } 10 | \usage{importexample} 11 | \format{ 12 | \code{importexample} is included as a directory in \code{qmethod} package root folder, including subdirectories as documented in the package documentation, and on the package \href{http://aiorazabala.github.io/qmethod/Data-management}{website}. 13 | Importexample is \emph{also} partly included as a ready-made RData datafile in the folder \code{qmethod/data} so that (cumulative) function examples can run. 14 | } 15 | \source{None.} 16 | \keyword{datasets} 17 | -------------------------------------------------------------------------------- /man/lipset.Rd: -------------------------------------------------------------------------------- 1 | \name{lipset} 2 | \docType{data} 3 | \alias{lipset} 4 | \title{\emph{Lipset} (1963) Q methodology dataset} 5 | \description{Dataset about \emph{The Value Patterns of Democracy} based on Lipset (1963) to illustrate the \pkg{qmethod} package.} 6 | \usage{lipset} 7 | \format{A list with two objects. A data frame with 9 Q sorts sorting 33 statements and a data frame with the text corresponding to the statements.} 8 | \source{Brown, S. R., 1980. \emph{Political subjectivity: Applications of Q methodology in political science}, New Haven, CT: Yale University Press. 9 | 10 | Lipset, S. M., 1963. The value patterns of democracy: A case study in comparative analysis. \emph{American Sociological Review}, 28, 515-531. 11 | } 12 | \keyword{datasets} 13 | -------------------------------------------------------------------------------- /man/loa.and.flags.Rd: -------------------------------------------------------------------------------- 1 | \name{loa.and.flags} 2 | \alias{loa.and.flags} 3 | \title{Q methodology: show factor loadings next to flags} 4 | \description{Prints a table with factor loadings and flagged Q-sorts are indicated with a star.} 5 | \usage{loa.and.flags(results, nload = FALSE)} 6 | 7 | \arguments{ 8 | \item{results}{an object of Q method results.} 9 | \item{nload}{logical; print number of flagged Q-sorts.} 10 | } 11 | \details{ 12 | Simple function to explore the Q-sorts that are automatically pre-flagged, using the standard criteria implemented in function \code{\link{qflag}} 13 | } 14 | 15 | \author{Aiora Zabala} 16 | 17 | \examples{ 18 | data(lipset) 19 | results <- qmethod(lipset[[1]], nfactors = 3, rotation = "varimax") 20 | loa.and.flags(results) 21 | } -------------------------------------------------------------------------------- /man/make.cards.Rd: -------------------------------------------------------------------------------- 1 | \name{make.cards} 2 | \alias{make.cards} 3 | \title{Q methodology: produce printable cards for Q study with ID and full item wording} 4 | \description{ 5 | Creates cards for administering a Q study. 6 | Full item wordings are printed on the front of business cards and item IDs on the back. 7 | } 8 | \usage{ 9 | make.cards( 10 | q.set, 11 | study.language = NULL, 12 | paper.format = "AveryZweckformC32010.Rnw", 13 | output.pdf = TRUE, 14 | manual.lookup = NULL, 15 | wording.font.size = NULL, 16 | file.name = "QCards", 17 | babel.language = NULL 18 | ) 19 | } 20 | 21 | \arguments{ 22 | \item{q.set}{ 23 | A matrix with handles as row names ("\code{q-is-great}", for example), languages (if applicable) in columns, as produced by \code{\link{build.q.set}}. 24 | } 25 | \item{study.language}{ 26 | A character vector of length 1. 
27 | Must be one of the languages from the column names in the specified \code{q.set} (which will be the same as the respective Q concourse object). 28 | Defaults to \code{NULL}, in which case the first column from \code{q.set} is selected. 29 | } 30 | \item{paper.format}{ 31 | A character vector of length 1, choosing among available templates of business card sheets. 32 | Defaults to the only currently available \code{"AveryZweckformC32010.Rnw"}. 33 | Must include file extension of template. 34 | } 35 | \item{output.pdf}{ 36 | Logical. 37 | If \code{TRUE}, function invokes \code{knit2pdf} to create a PDF in the workspace. 38 | If \code{FALSE}, function invokes \code{knit} to return only a *.TEX in the workspace, may be preferable if no \href{https://www.latex-project.org/}{LaTeX} installation is available on the used computer. 39 | Defaults to \code{TRUE}. 40 | } 41 | \item{manual.lookup}{ 42 | A matrix with handles (same as in \code{\link{build.q.set}}, \code{\link{import.q.concourse}}) as row names, and arbitrary, unique identifying strings in any of the columns as also expected in \code{\link{import.q.sorts}} and \code{\link{import.q.feedback}}. 43 | Defaults to \code{NULL} in which case items are automatically identified by full item hashes, as also detected by \code{\link{import.q.sorts}} and \code{\link{import.q.feedback}}. 44 | } 45 | \item{wording.font.size}{ 46 | A character vector of length 1 to set the font size of the full item wording on the cards. 47 | Defaults to \code{NULL} in which case the default font size 12pt is used. 48 | Only \href{https://en.wikibooks.org/wiki/LaTeX/Fonts#Sizing_text}{standard LaTeX font sizes} are allowed, from \code{\\tiny} to \code{\\Huge}. 49 | } 50 | \item{file.name}{ 51 | A character vector of length 1 to set the output file path relative to the working directory \emph{without file extension}. 52 | Defaults to \code{QCards}. 53 | } 54 | \item{babel.language}{ 55 | A character vector of length 1 to set the babel language for appropriate hyphenation, special letters and other international support as provided by the \href{https://www.ctan.org/pkg/babel/}{babel LaTeX package}. 56 | Only available babel options are permissible. 57 | Defaults to \code{NULL}, in which case babel is never called. 58 | Changing \code{babel.language} between function calls can occasionally leave inconsistent LaTeX temp files, which may trip up compilation. 59 | Please re-run the function once again or clean up temp files (in the working directory) in that case. 60 | } 61 | } 62 | \details{ 63 | 64 | Preparing cards with full items and IDs quickly becomes cumbersome if a study is done several times or if items change frequently. 65 | Participants require well-printed, well-designed cards for their sorting task, ideally on heavier paper. 66 | Cards should include shorthand, unique identifiers to simplify later data entry. 67 | 68 | This function prepares a properly typeset *.PDF (or *.TEX source), where items are printed on readily-available business card templates, from which individual cards can be easily broken out. 69 | 70 | The function prints the full item wording on the \emph{right} column of any page, and the identifier (ID) on the \emph{left} column. 71 | If templates are duplex printed with the same page on the front and back, and in proper orientation, the front of each card includes the full wording, and the back its unique identifier (ID). 
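As a sketch of the \code{manual.lookup} option (the IDs below are purely illustrative; automatic hash IDs, described next, are the default):

\preformatted{
data(importexample)
lookup <- matrix(
  c("i01", "i02", "i03", "i04"),
  ncol = 1,
  dimnames = list(c("r-dominance", "q-uprising", "small-village", "life-with-q"), "ID")
)
make.cards(importexample$q.set, manual.lookup = lookup,
           output.pdf = FALSE, file.name = tempfile())
}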
72 | 73 | Identifiers (ID) entered manually or automatically hashed from full wordings are also expected in the import functions \code{\link{import.q.sorts}} and \code{\link{import.q.feedback}}. 74 | The automatic summary of full item wordings, technically known as \emph{hashing}, proceeds internally by passing the full item wording to the \code{\link[digest]{digest}} function of the package \pkg{digest} (with arguments set to 75 | 76 | \code{algo = crc32, serialize = FALSE}.) 77 | 78 | The function proceeds internally by preparing a dataframe with full item wordings and identifiers (ID), and then invokes a prepared \code{*.RNW} template included with this package, which in turn includes a \pkg{knitr} chunk, which in turn calls \pkg{xtable} to return a neatly layed-out multi-page table. 79 | 80 | If you are not familiar with the terminology of item \emph{handle}, \emph{ID} and \emph{wording} or the file structure expected for import functions, please read the respective sections in the documentation for \link{qmethod-package} first or consider the package \href{http://aiorazabala.github.io/qmethod/Data-management}{website}. 81 | } 82 | 83 | \value{ 84 | Writes a *.PDF file or its source *.TEX file to the working directory ready for printout. 85 | } 86 | 87 | \note{ 88 | Hashed identification has not been widely tested in Q studies and should be used with great care and only for extra convenience. 89 | When using hash identification, researchers should be careful to record the precise item wordings at the time of hashing for the printed Q-cards, preferably with a version control system. 90 | Researchers should also record the complete Q-sorts of participants in an \emph{unhashed} form, such as a picture of the completed sort in full wordings, in case problems with the hashing arise. 91 | 92 | When \code{output.pdf = TRUE}, the function will sometimes fail with the error message\cr \code{"Running 'texi2dvi' on ... failed"}. 93 | This is not a bug with the function, but simply indicates that the path to pdflatex is not available in the current R environment. 94 | To fix this issue, compile the resulting *.TEX manually, use RStudio or try \href{https://www.r-bloggers.com/2012/12/building-r-packages-missing-path-to-pdflatex/}{this fix}. 95 | 96 | This function does \emph{not} automatically scale the font size to fit the given card size. 97 | Instead, users will have to proceed by trial and error, using a \code{wording.font.size} that works for their longest item. 98 | The default value should work for most Q items. 99 | 100 | This function currently only works for Avery Zweckform C32010 templates, designed in\cr \code{/cardtemplates/AveryZweckformC32010.Rnw}. 101 | If you would like support for other templates, check out / chip in here: \url{https://github.com/aiorazabala/qmethod/issues/34}. 
102 | } 103 | 104 | \author{Maximilian Held} 105 | 106 | \seealso{ 107 | \code{\link{build.q.set}}, 108 | \code{\link{import.q.feedback}}, 109 | \code{\link{import.q.sorts}}, 110 | \code{\link{import.q.concourse}} 111 | } 112 | 113 | \examples{ 114 | ## Make cards from importexample 115 | data(importexample) 116 | # use your own output file name or leave NULL for `file.name` 117 | # tempfile() is used here to avoid leaving files behind example code runs 118 | make.cards(importexample$q.set, output.pdf = FALSE, file.name = tempfile()) 119 | } 120 | -------------------------------------------------------------------------------- /man/make.distribution.Rd: -------------------------------------------------------------------------------- 1 | \name{make.distribution} 2 | \alias{make.distribution} 3 | \title{Q methodology: create Q normal distribution} 4 | \description{ 5 | Creates a distribution close to a standard normal distribution given a number of statements and a maximum Q sort rank. 6 | } 7 | \usage{ 8 | make.distribution( 9 | nstat, 10 | max.bin = 5 11 | ) 12 | } 13 | 14 | \arguments{ 15 | \item{nstat}{ 16 | Number of desired statements in a Q sample for a given study. 17 | Must be a positive integer, vector of length 1. 18 | } 19 | \item{max.bin}{ 20 | Maximum positive value to be entered by participants. 21 | Must be a positive integer, vector of length 1. 22 | Defaults to frequent value \code{5}. 23 | } 24 | } 25 | \details{ 26 | Participants in Q studies are frequently asked to sort Q items under a quasi-normal distribution. 27 | This function generates such a Q distribution, given a number of statements \code{nstat} and a desired maximum positive value \code{max.bin} in the Q distribution. 28 | 29 | The function always returns an \emph{uneven} number of columns, ensuring that there is always a modal column at the zero mark. 30 | 31 | Not every combination of \code{nstat} and \code{max.bin} can be neatly fit under a standard normal distribution, in which case the function returns a vector of unequal length to the specified \code{nstat}. 32 | The function will issue a warning in that case. 33 | Researchers can either accept the different \code{nstat}, or try again with a different \code{max.bin}. 34 | } 35 | 36 | \value{ 37 | Returns a vector of positive integers (column heights), of the length \code{max.column * 2 + 1}. 38 | An object of this kind is expected in \code{\link{import.q.sorts}}, \code{\link{build.q.set}} and other convenience functions. 
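For example (a small sketch of the checks described above; the warning behaviour depends on the combination chosen):

\preformatted{
q.distribution <- make.distribution(nstat = 33, max.bin = 4)
q.distribution          # column heights for ranks -4 to +4
sum(q.distribution)     # total number of items; 33 unless a warning was issued
length(q.distribution)  # always uneven: max.bin * 2 + 1 = 9
}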
39 | } 40 | 41 | \author{Maximilian Held} 42 | 43 | \seealso{ 44 | \code{\link{build.q.set}}, 45 | \code{\link{import.q.sorts}} 46 | } 47 | 48 | \examples{ 49 | ## Make Q distribution 50 | make.distribution(nstat=76, max.bin=7) 51 | } 52 | -------------------------------------------------------------------------------- /man/plot.QmethodRes.Rd: -------------------------------------------------------------------------------- 1 | \name{plot.QmethodRes} 2 | \alias{plot.QmethodRes} 3 | \title{Q Method: plot for statement z-scores} 4 | \description{Takes an object of class \code{QmethodRes} resulting from \code{\link{qmethod}} and makes a dot-chart with the z-scores for statements and all factors.} 5 | \usage{ 6 | \method{plot}{QmethodRes}(x, xlab = 'z-scores', ylab = 'statements', 7 | pchlist = NULL, colours = NULL, 8 | fnames = NULL, legend = TRUE, 9 | dist = TRUE, pchlist.fill = NULL, 10 | leg.pos="bottomright", xlim= NULL, 11 | sort.items=T, factors = NULL, 12 | ...)} 13 | \arguments{ 14 | \item{x}{results object returned by \code{\link{qmethod}}.} 15 | \item{xlab}{label for x axis. Defaults to 'z-scores'.} 16 | \item{ylab}{label for y axis. Defaults to 'statements'.} 17 | \item{pchlist}{array of \code{pch} symbols to be used in plotting the points for each factor. Defaults to a pre-defined set of symbols.} 18 | \item{colours}{array of colours to be used when plotting the points for each perspective. Defaults to a pre-defined set of colours based on the \code{\link[grDevices]{rainbow}} palette.} 19 | \item{fnames}{names for factors to be used in the legend. In results where factor names have not been changed (using, e.g. \code{\link{q.fnames}}) it defaults to \code{'Factor 1'}, \code{'Factor 2'}, etc.} 20 | \item{legend}{logical; if \code{FALSE}, no \code{legend} will be drawn.} 21 | \item{dist}{Logical. Should distinguishing statements be indicated in the plot dots? If \code{TRUE}, then the z-score values that are distinguishing for a given statement and factor are represented with a different (filled) symbol.} 22 | \item{pchlist.fill}{List of symbols to indicate distinguishing statements. By default, this is set to \code{NULL}, which provides a set of symbols that coincides with those in \code{pchlist}, but filled.} 23 | \item{leg.pos}{Position of the legend.} 24 | \item{xlim}{Limits for the x axis, given as a vector of two numbers. If this is not provided, the limits are calculated from the sample.} 25 | \item{sort.items}{Whether and how the items are sorted in the vertical axis. Defaults to \code{TRUE}, which sorts the items according to the standard deviation of their z-scores for all factors; items of most disagreement are placed at the top. To invert this order (items of most disagreement at the bottom), set this argument to "consensus.top" A value of \code{FALSE} will not sort the items, and these are displayed in the same order as in the raw data. A numerical vector can also be provided in order to sort the statements manually: the vector needs to have the same length as the number of items, and provides the order in which the items are to be ordered.} 26 | \item{factors}{The factors to plot. Defaults to \code{NULL}, which plots all the factors in the object \code{x} in the order given. To print a subset of these factors or to print them in a different order, provide a numeric vector here with the factors and the order desired, e.g. \code{c(2,1)}}. 
27 | \item{...}{other arguments for \code{\link[graphics]{plot}}.} 28 | 29 | } 30 | 31 | \author{Aiora Zabala} 32 | 33 | \seealso{ 34 | \code{\link[graphics]{dotchart}} and \code{\link[graphics]{points}}. 35 | } 36 | \examples{ 37 | data(lipset) 38 | results <- qmethod(lipset[[1]], nfactors = 3, rotation = "varimax") 39 | title <- "Q method z-scores, lipset dataset" 40 | subtitle <- paste0("Three factors, PCA, varimax. Printed on ", 41 | Sys.Date()) 42 | plot(results, main = title, sub = subtitle) 43 | 44 | # Order the items in a different way 45 | plot(results, main = title, sub = subtitle, 46 | sort.items = c(rev(1:nrow(results$zsc)))) 47 | } 48 | \keyword{plot} 49 | 50 | \note{ 51 | The names of items to be plotted are taken from the matrix \code{x$zsc}. 52 | 53 | To change these names, change the row names in that matrix first, e.g.: 54 | \code{rownames(x$zsc) <- vector.of.item.names}. 55 | 56 | If the margin width is not enough to read the items, specify \code{par(mai=...)} first. See \code{\link[graphics]{par}} for details. 57 | } 58 | 59 | \references{ 60 | This specific dotchart visualisation of Q results implemented in \code{\link{plot.QmethodRes}} was first developed and introduced in this R package, in preparation for the study published in \href{https://www.sciencedirect.com/science/article/abs/pii/S0921800916302683}{Zabala et al. (2017)}. 61 | 62 | \itemize{ 63 | \item Zabala, A., 2014. qmethod: A Package to Explore Human Perspectives Using Q Methodology. \emph{The R Journal}, 6(2):163-173.\cr Available from: \url{https://journal.r-project.org/archive/2014-2/zabala.pdf}. 64 | \item Zabala, A., Pascual, U. and Garcia-Barrios, L. 2017. Payments for Pioneers? Revisiting the Role of External Rewards for Sustainable Innovation under Heterogeneous Motivations. \emph{Ecological Economics}, 135:234-245.\cr Available from: \url{https://www.sciencedirect.com/science/article/pii/S0921800916302683/}. 65 | } 66 | 67 | } -------------------------------------------------------------------------------- /man/print.QmethodRes.Rd: -------------------------------------------------------------------------------- 1 | \name{print.QmethodRes} 2 | \alias{print.QmethodRes} 3 | \title{Q Method: print method for results} 4 | \description{Takes an object \code{QmethodRes} resulting from \code{\link{qmethod}} and prints it in a synthetic way.} 5 | \usage{ 6 | \method{print}{QmethodRes}(x, length = 10, digits = 2, ...)} 7 | \arguments{ 8 | \item{x}{an object of class \code{QmethodRes}.} 9 | \item{length}{maximum number of rows to print from the data frames within \code{QmethodRes}. Defaults to 10. 
Set to \code{NULL} to see the full results.} 10 | \item{digits}{minimum number of significant digits, see \code{\link[base]{print.default}.}} 11 | \item{...}{further arguments passed to or from other methods.} 12 | } 13 | 14 | \author{Aiora Zabala} 15 | 16 | \examples{ 17 | data(lipset) 18 | results <- qmethod(lipset[[1]], nfactors = 3, rotation = "varimax") 19 | print(results, length = 5, digits = 1) 20 | } 21 | \keyword{print} -------------------------------------------------------------------------------- /man/q.fnames.Rd: -------------------------------------------------------------------------------- 1 | \name{q.fnames} 2 | \alias{q.fnames} 3 | \title{Change factor names in the results of Q methodology analysis} 4 | \description{This function replaces the automatic names created in an object of Q method results returned by \code{\link{qmethod}}.} 5 | \usage{ 6 | q.fnames(results, fnames)} 7 | 8 | \arguments{ 9 | \item{results}{an object of class \code{QmethodRes}.} 10 | \item{fnames}{a vector with the names of the factors. The number of names provided has to match the number of factors extracted in the object \code{results}. The names cannot begin with a number. A limit of 50 characters is set, to avoid excessively wide columns. Names should ideally contain no spaces or symbols that are used for other purposes in R (e.g. '-', '+', '/' , ). However '.' are fine.} 11 | } 12 | 13 | \value{ 14 | Returns the object \code{results} of class \code{QmethodRes}, with the new factor names. 15 | } 16 | 17 | \author{Aiora Zabala} 18 | 19 | \seealso{ 20 | \code{\link{qmethod}} 21 | } 22 | \examples{ 23 | data(lipset) 24 | results <- qmethod(lipset[[1]], nfactors = 3, rotation = "varimax") 25 | factor.names <- c("good", "bad", "ugly") 26 | results.renamed <- q.fnames(results, fnames = factor.names) 27 | results.renamed #shows all results 28 | } 29 | -------------------------------------------------------------------------------- /man/qbstep.Rd: -------------------------------------------------------------------------------- 1 | \name{qbstep} 2 | \alias{qbstep} 3 | \title{Q Methodology: Single step for the bootstrap} 4 | \description{Bootstraping of Q methodology using PCA.} 5 | \usage{qbstep(subdata, subtarget, indet, 6 | nfactors, nqsorts, nstat, 7 | qmts = qmts, qmts_log = qmts_log, 8 | rotation = "unknown", 9 | flagged = flagged, cor.method="pearson", ...)} 10 | 11 | \arguments{ 12 | \item{subdata}{resampled dataset of Q-sorts.} 13 | \item{subtarget}{target matrix, adapted to match the rows of the resampled dataset.} 14 | \item{indet}{method to solve the double indeterminacy issue when bootstrapping Principal Components Analysis (PCA). \code{"procrustes"} for procrustes rotation from \pkg{MCMCpack}, \code{"qindtest"} for simple solution valid for at least up to 3 factors extracted (see references), \code{"both"} for a \emph{\code{"qindtest"}} and a \emph{\code{"procrustes"}} rotation, or \code{"none"} for no solution. The latter is not recommended because it introduces inflated variability. If \code{"none"} is selected, each replication is rotated using the method set in \code{rotation}.} 15 | \item{nfactors}{number of factors in the study.} 16 | \item{nqsorts}{number of Q-sorts in the study.} 17 | \item{nstat}{number of statements in the study.} 18 | \item{qmts}{data frame with two rows and at least one column. This is automatically created when this function is called from \code{\link{qmboots}}(see \emph{Note} below).} 19 | \item{qmts_log}{data frame with two rows and at least one column. 
This is automatically created when this function is called from \code{\link{qmboots}}(see \emph{Note} below).} 20 | \item{rotation}{rotation method, defaults to \code{"none"}.} 21 | \item{flagged}{matrix or data frame of \code{nqsorts} rows and \code{nfactors} columns, with \code{TRUE} values for the Q-sorts that are flagged. Automatic flagging can be aplied using \code{\link{qflag}}. Manual flagging can be done by providing a logical matrix with \code{nqsorts} rows and \code{nfactors} columns to the argument \code{flagged}.} 22 | \item{cor.method}{character string indicating which correlation coefficient is to be computed, to be passed on to the function \code{\link[stats]{cor}}: \code{"pearson"} (default), \code{"kendall"}, or \code{"spearman"}. } 23 | \item{...}{other arguments to be passed on to \code{\link{qzscores}} or to \code{\link[psych]{principal}}.} 24 | } 25 | 26 | \value{ 27 | \item{step_res}{summary of the analysis.} 28 | } 29 | 30 | \details{ 31 | This function performs a single step within a bootstrap of Q methodology data. It takes one resample, performs the Q method analysis, checks for indeterminacy issues, and corrects them if necessary by calling the function \code{\link{qindtest}} or \code{\link{qpcrustes}}. 32 | } 33 | 34 | \author{Aiora Zabala} 35 | 36 | \note{This function is called within the function \code{\link{qmboots}}. Not intended to be used separately.} 37 | 38 | \seealso{ 39 | \code{\link{qmethod}} and \code{\link{qmboots}} in this package. 40 | } 41 | 42 | \references{Zabala, Pascual (2016) Bootstrapping Q Methodology to Improve the Understanding of Human Perspectives. PLoS ONE 11(2): e0148087.} 43 | 44 | \keyword{multivariate} 45 | \keyword{Q methodology} 46 | \keyword{bootstrapping} 47 | \keyword{PCA} 48 | -------------------------------------------------------------------------------- /man/qdc.Rd: -------------------------------------------------------------------------------- 1 | \name{qdc} 2 | \alias{qdc} 3 | \title{Q methodology: distinguishing and consensus statements} 4 | \description{Indicates the distinguishing and consensus statements. It does so by comparing the z-scores between each pair factors.} 5 | \usage{qdc(dataset, nfactors, zsc, sed)} 6 | 7 | \arguments{ 8 | \item{dataset}{a matrix or a dataframe containing original data, with statements as rows, Q sorts as columns, and grid column values in each cell.} 9 | \item{nfactors}{number of factors extracted.} 10 | \item{zsc}{a matrix or a dataframe with the factor z-scores for statements resulting from \code{\link{qzscores}}.} 11 | \item{sed}{a matrix or a dataframe with the standard error of differences resulting from \code{\link{qfcharact}}.} 12 | } 13 | 14 | \details{ 15 | Finds the distinguishing and consensus statements, based on the absolute differences between factor z-scores being larger than the standard error of differences (SED, calculated in \code{\link{qfcharact}}) for a given pair of factors. 16 | 17 | Returns a single data frame with the differences in z-scores between each pair of factors and the variable \code{dist.and.cons}, indicating whether each statement is distinguishing or consensus and for which factor(s) it is distinguishing. These are the possible categories in the \code{dist.and.cons} variable: 18 | \itemize{ 19 | \item Where all the comparisons between each pair of factors are significantly different at p-value < .05 the statement is labelled as \code{"Distinguishes all"}. 
20 | \item Where the comparisons of a given factor with all other factors are significant at p-value < .05, and comparisons between all other factors are not significant, the statement is labeled as \code{"Distinguishes f*"}. 21 | \item Where none of the comparisons are significantly different, the statement is labeled as \code{"Consensus"}. 22 | \item Statements that have category \code{""} (empty) are not distinguishing for any of the factors in particularly. They distinguish one or more pairs of factors and the star indications may be inspected to understand their role. 23 | } 24 | 25 | Significant differences at p-values: 26 | \itemize{ 27 | \item {p >= 0.05 <- \code{""} (i.e. nothing)} 28 | \item {p < 0.05 <- \code{"*"}} 29 | \item {p < 0.01 <- \code{"**"}} 30 | \item {p < 0.001 <- \code{"***"}} 31 | \item {p < 0.000001 <- \code{"6*"}} 32 | } 33 | 34 | } 35 | 36 | \references{Brown, S. R., 1980 \emph{Political subjectivity: Applications of Q methodology in political science}, New Haven, CT: Yale University Press. 37 | 38 | See further references on the methodology in \code{\link{qmethod-package}}.} 39 | 40 | \note{This is a function used within \code{\link{qmethod}}. Rarely to be used independently.} 41 | 42 | \author{Aiora Zabala} 43 | 44 | \examples{ 45 | data(lipset) 46 | results <- qmethod(lipset[[1]], nfactors = 3, rotation = "varimax") 47 | sed <- as.data.frame(results[[7]][[3]]) 48 | zsc <- results[[5]] 49 | qdc(lipset[[1]], nfactors = 3, zsc = zsc, sed = sed) 50 | } -------------------------------------------------------------------------------- /man/qdc.zsc.Rd: -------------------------------------------------------------------------------- 1 | \name{qdc.zsc} 2 | \alias{qdc.zsc} 3 | \title{Q methodology: distinguishing and consensus statements} 4 | \description{Extracts the z-score of distinguishing statements, in order to plot.} 5 | \usage{qdc.zsc(results)} 6 | 7 | \arguments{ 8 | \item{results}{an object of class \code{QmethodRes}.} 9 | } 10 | 11 | \note{This is a function used within \code{\link{plot.QmethodRes}}. Rarely to be used independently.} 12 | 13 | \author{Aiora Zabala} 14 | 15 | \examples{ 16 | data(lipset) 17 | results <- qmethod(lipset[[1]], nfactors = 3, rotation = "varimax") 18 | qdc.zsc(results) 19 | } -------------------------------------------------------------------------------- /man/qfcharact.Rd: -------------------------------------------------------------------------------- 1 | \name{qfcharact} 2 | \alias{qfcharact} 3 | \title{Q methodology: factor characteristics} 4 | \description{Calculates the general factor characteristics: number of flagged Q-sorts, composite reliability, standard errors of factor scores, and comparisons between factors.} 5 | \usage{qfcharact(loa, flagged, zsc, nfactors, av_rel_coef = 0.8)} 6 | 7 | \arguments{ 8 | \item{loa}{matrix or data frame of as many rows as Q-sorts (\code{nqsorts}) and \code{nfactors} columns, with values of factor loadings for Q-sorts, calculated using, e.g., \code{principal(...)$loadings}.} 9 | \item{flagged}{matrix or data frame of type \emph{logical}, indicating which Q-sorts are flagged for each factor. 
Provided manually or automatically using \code{\link{qflag}}.} 10 | \item{zsc}{a data frame with the z-scores for statements, calculated using \code{\link{qzscores}}.} 11 | \item{nfactors}{number of factors extracted.} 12 | \item{av_rel_coef}{average reliability coefficient (the individual variability of a respondent), set by default as 0.8.} 13 | } 14 | 15 | \value{ 16 | Returns a list with three objects: 17 | \item{characteristics}{data frame with the following values for each factor: 18 | \itemize{ 19 | \item{\code{"av_rel_coef"}: average reliability coefficient.} 20 | \item{\code{"nload"}: number of loading Q-sorts.} 21 | \item{\code{"eigenvals"}: eigenvalues.} 22 | \item{\code{"expl_var"}: percentage of explained variance.} 23 | \item{\code{"reliability"}: composite reliability.} 24 | \item{\code{"se_fscores"}: standard error of factor scores (SE).} 25 | } 26 | } 27 | \item{cor_zsc}{matrix of correlation coefficients between factor z-scores.} 28 | \item{sd_dif}{matrix of standard errors of differences (SED).} 29 | } 30 | \references{Brown, S. R., 1980 \emph{Political subjectivity: Applications of Q methodology in political science}, New Haven, CT: Yale University Press. 31 | 32 | See further references on the methodology in \code{\link{qmethod-package}}.} 33 | 34 | \note{This is a function used within \code{\link{qzscores}}. Rarely to be used independently.} 35 | 36 | \author{Aiora Zabala} -------------------------------------------------------------------------------- /man/qflag.Rd: -------------------------------------------------------------------------------- 1 | \name{qflag} 2 | \alias{qflag} 3 | \title{Q methodology: automatic flagging of Q-sorts} 4 | \description{Applies the two standard algorithms to pre-flag Q-sorts automatically, for subsequent calculation of the statement scores.} 5 | \usage{qflag(loa, nstat)} 6 | 7 | \arguments{ 8 | \item{loa}{a Q-sort factor loading matrix obtained, for example from\cr \code{unclass(\link[psych]{principal}(...)$loadings)}, or from \code{\link{qmethod}(...)$loa}.} 9 | \item{nstat}{number of statements in the study.} 10 | } 11 | \details{ 12 | These are the two standard criteria for automatic flagging used in Q method analysis: 13 | \enumerate{ 14 | \item{Q-sorts whose factor loading is higher than the threshold for p-value < 0.05, and} 15 | \item{Q-sorts whose squared loading is higher than the sum of the squared loadings of the same Q-sort on all other factors.} 16 | } 17 | Returns a logical matrix with Q-sorts as rows, and factors as columns. 18 | 19 | The function also runs two checks: whether any flagged Q-sort has a negative loading, and whether any Q-sort is flagged on more than one factor. If either is true, the function returns a warning for the user to inspect the automatic pre-flagging (which should be done in all cases, but particularly in these ones). To conduct manual flagging, see guidelines here: \url{http://aiorazabala.github.io/qmethod/Advanced-analysis} 20 | } 21 | 22 | \references{Brown, S. R., 1980 \emph{Political subjectivity: Applications of Q methodology in political science}, New Haven, CT: Yale University Press. 23 | 24 | Van Exel, J., de Graaf, G., Rietveld, P., 2011. "'I can do perfectly well without a car!'" \emph{Transportation} 38, 383-407 (Page 388, footnote 8). 25 | 26 | See further references on the methodology in \code{\link{qmethod-package}}.} 27 | 28 | \note{This is a function used within \code{\link{qmethod}}.
Rarely to be used independently.} 29 | 30 | \author{Aiora Zabala} 31 | 32 | \examples{ 33 | data(lipset) 34 | library(psych) 35 | loa <- unclass(principal(lipset[[1]], nfactors = 3, 36 | rotate = "varimax")$loadings) 37 | flagged <- qflag(loa = loa, nstat = nrow(lipset[[1]])) 38 | summary(flagged) 39 | 40 | # Remember to manually inspect the automatic pre-flagging: 41 | results <- list(loa = loa, flagged = flagged, brief = list(nfactors = ncol(loa))) 42 | loa.and.flags(results) 43 | 44 | } -------------------------------------------------------------------------------- /man/qfsi.Rd: -------------------------------------------------------------------------------- 1 | \name{qfsi} 2 | \alias{qfsi} 3 | \title{Q Methodology: Factor Stability index} 4 | \description{Calculates a Factor Stability index and a Normalised Factor Stability index for bootstrapped Q method results (experimental).} 5 | \usage{qfsi(nfactors, nstat, qscores, zsc_bn, qm)} 6 | 7 | \arguments{ 8 | \item{nfactors}{number of factors to extract.} 9 | \item{nstat}{number of statements in the study.} 10 | \item{qscores}{all possible factor score values in the Q grid distribution.} 11 | \item{zsc_bn}{bootstrapped factor scores.} 12 | \item{qm}{original Q method results from the \code{\link{qmethod}} function.} 13 | } 14 | \details{ 15 | Applies the Factor Stability index to bootstrapped Q method results. Returns a data frame with two variables and as many rows as factors extracted. The first variable is the raw Factor Stability index. The second variable is the Normalised Factor Stability index, which ranges from 0 to 1. 16 | } 17 | 18 | \author{Aiora Zabala} 19 | 20 | \seealso{ 21 | \code{\link{qmboots}}. 22 | } 23 | \note{IMPORTANT: This function is experimental. Please contact the author for details.} 24 | \examples{ 25 | data(lipset) 26 | boots <- qmboots(lipset[[1]], nfactors=3, nsteps=10, 27 | rotation="varimax", indet="qindtest", 28 | fsi=FALSE) 29 | fsi <- qfsi(nfactors=3, nstat=33, qscores=boots[[6]], 30 | zsc_bn=boots[[1]][[1]], qm=boots[[5]]) 31 | fsi 32 | } 33 | 34 | \keyword{Q methodology} 35 | -------------------------------------------------------------------------------- /man/qindtest.Rd: -------------------------------------------------------------------------------- 1 | \name{qindtest} 2 | \alias{qindtest} 3 | \title{Q Methodology: PCA bootstrap indeterminacy tests} 4 | \description{A simple test of, and implementation of the 'reordering-reflection' solution for, the indeterminacy (alignment) problem that causes factor order swaps and factor sign swaps when bootstrapping Principal Components Analysis (PCA).} 5 | \usage{qindtest(loa, target, nfactors)} 6 | 7 | \arguments{ 8 | \item{loa}{data frame with factor loadings from the subsample analysis.} 9 | \item{target}{data frame with factor loadings from the full sample analysis, excluding Q-sorts that are not present in the bootstrap step.} 10 | \item{nfactors}{number of factors extracted.} 11 | } 12 | 13 | \details{This function tests whether either or both of the indeterminacy issues are present in bootstrapped PCA factor loading values. For testing, it looks at correlation coefficients between the target factor loadings and the bootstrapped factor loadings for each factor. 14 | 15 | First, if \emph{factor swap} is detected (is the absolute value of the diagonal coefficients bigger than that of the non-diagonal coefficients for the same factor?) and it is only between two factors, these are swapped. Afterwards, the test is performed again to ensure that no further swaps are needed.
If the test fails, then the original factor loadings are recovered and the failure is reported. If the need for a factor swap is detected for one factor, or for three or more factors, this is reported and left unresolved, because an algorithm to determine which factors should swap with which has not been implemented. 16 | 17 | Second, \emph{sign swap} is tested for (are all diagonal coefficients positive?). If it is detected, the sign of the factor loadings is reversed. This is not tested again afterwards, as it is taken for granted that swapping the signs resolves the issue.} 18 | \value{ 19 | \item{qindtest}{returns a list with three data frames: the factor loadings of the corrected bootstrap step, the results from the order swap and sign swap tests, and a report of errors.} 20 | } 21 | 22 | \note{This function is called within the function \code{\link{qmboots}}. Not intended to be used separately.} 23 | 24 | \references{Zabala, Pascual (2016) Bootstrapping Q Methodology to Improve the Understanding of Human Perspectives. PLoS ONE 11(2): e0148087. 25 | 26 | See also: 27 | 28 | Timmerman, M.E., Kiers, H.A.L., Smilde, A.K., 2007. Estimating confidence intervals for principal component loadings: a comparison between the bootstrap and asymptotic results. The British Journal of Mathematical and Statistical Psychology 60, 295-314. 29 | 30 | Zhang, G., Preacher, K.J., Luo, S., 2010. Bootstrap Confidence Intervals for Ordinary Least Squares Factor Loadings and Correlations in Exploratory Factor Analysis. Multivariate Behavioral Research 45, 104-134. 31 | } 32 | 33 | \author{Aiora Zabala} 34 | 35 | \examples{ 36 | data(lipset) 37 | nf <- 3 38 | 39 | # 1. Create target matrix 40 | qm <- qmethod(lipset[[1]], nfactors = nf, rotation = "varimax") 41 | 42 | # 2. Resample 43 | qselim <- sample(1:3, 2, replace = FALSE) ## Q-sorts to eliminate 44 | subdata <- lipset[[1]][ , -qselim] 45 | 46 | # 3. Calculate factor loadings with the resample 47 | library(psych) 48 | loa <- as.data.frame(unclass(principal(subdata, 49 | nfactors = nf, rotate = "varimax")$loadings)) 50 | 51 | # 4. Reorder target matrix 52 | target <- as.matrix(as.data.frame(qm[3])) 53 | colnames(target) <- paste0("target_f", 1:nf) 54 | subtarget <- target[c(-qselim),] 55 | 56 | # 5.
Apply test and solution for indeterminacy issue 57 | qindt <- qindtest(loa, subtarget, nf) 58 | qindt 59 | } 60 | 61 | \keyword{PCA} 62 | \keyword{Q methodology} 63 | \keyword{indeterminacy} 64 | \keyword{bootstrapping} -------------------------------------------------------------------------------- /man/qmb.plot.Rd: -------------------------------------------------------------------------------- 1 | \name{qmb.plot} 2 | \alias{qmb.plot} 3 | \title{Q Methodology: Plot of bootstrap results} 4 | \description{Plots the summary of bootstrap results, either z-scores or factor loadings.} 5 | \usage{qmb.plot(qmbsum, type = c("zsc", "loa"), nfactors, 6 | cex = 0.7, cex.leg = 0.8, errbar.col = "black", 7 | lwd = 1, lty = 1, vertdist = 0.2, limits = NULL, 8 | r.names = NA, sort = c("none", "difference", "sd"), 9 | sbset = NULL, leg.pos = "topleft", 10 | bty = "n", plot.std = TRUE, pch= NULL, 11 | col=NULL, grid.col="gray", ...)} 12 | 13 | \arguments{ 14 | \item{qmbsum}{an object with the summary of bootstrap results, as produced by \code{\link{qmb.summary}}.} 15 | \item{type}{the subject to plot, either z-scores of statements or factor loadings of Q-sorts.} 16 | \item{nfactors}{number of factors extracted.} 17 | \item{cex}{a numerical value giving the amount by which plotting text and symbols should be magnified relative to the default (see \code{\link[graphics]{par}}).} 18 | \item{cex.leg}{a numerical value giving the amount by which the legend should be magnified relative to \code{cex}.} 19 | \item{errbar.col}{colour used for the error bars. Defaults to \code{"black"}.} 20 | \item{lwd}{line width (see \code{\link[graphics]{par}}).} 21 | \item{lty}{line type (see \code{\link[graphics]{par}}).} 22 | \item{vertdist}{distance between the values for each factor.} 23 | \item{limits}{axis limits for the numerical values. If set to \code{NULL}, the limits are automatically set as \code{c(-1, 1)} when \code{type = "loa"}, and as the minimum and maximum values of z-scores (including the error bars) when \code{type = "zsc"}.} 24 | \item{r.names}{names of the items to be printed in the axis ticks (either Q-sorts when \code{type = "loa"}, or statements when \code{type = "zsc"}). When the value is \code{NULL}, it defaults to \code{rownames}.} 25 | \item{sort}{ordering of the items in the axis. If set to \code{"none"}, items are ordered by the default order in the dataset. If set to \code{"difference"}, items are ordered according to the variability in the values across factors. If set to \code{"sd"}, items are ordered according to the sum of the errors obtained in the bootstrap.} 26 | \item{sbset}{number of items to be printed. When the value is \code{NULL}, all items are plotted.} 27 | \item{leg.pos}{position of the legend.} 28 | \item{bty}{legend box (see \code{\link[graphics]{legend}}).} 29 | \item{plot.std}{logical value. When set to \code{TRUE} (default), it prints the points for values obtained with the standard (non-bootstrapped) analysis.} 30 | \item{pch}{plotting symbols. Defaults to \code{NULL}, in which case the symbols are selected automatically. If provided, the vector needs to contain at least as many elements as the number of factors. In addition, if the argument \code{plot.std = TRUE} (default), the vector needs to contain at least twice as many elements as factors, in order to provide (a) the plotting symbols for bootstrapped values (the first elements) and (b) the plotting symbols for standard values (the next elements).} 31 | \item{col}{colours for the points.
At least as many elements as the number of factors have to be provided.} 32 | \item{grid.col}{colour of the grid.} 33 | \item{...}{additional arguments to be passed to the functions \code{\link[graphics]{dotchart}}, \code{\link[graphics]{mtext}}, \code{\link[graphics]{segments}}, \code{\link[graphics]{points}}, \code{\link[graphics]{abline}} or \code{\link[graphics]{legend}}.} 34 | } 35 | 36 | \author{Aiora Zabala} 37 | 38 | \references{Zabala, Pascual (2016) Bootstrapping Q Methodology to Improve the Understanding of Human Perspectives. PLoS ONE 11(2): e0148087.} 39 | 40 | \seealso{ 41 | \code{\link{qmethod}}, \code{\link{qmboots}}, \code{\link{qmb.summary}} 42 | } 43 | \examples{ 44 | data(lipset) 45 | boots <- qmboots(lipset[[1]], nfactors = 3, nsteps = 50, 46 | load = "auto", rotation = "varimax", 47 | indet = "qindtest", fsi = TRUE) 48 | 49 | boots.summary <- qmb.summary(boots) 50 | 51 | qmb.plot(boots.summary, 3, type = "loa", sort="difference") 52 | } 53 | \keyword{Q methodology} 54 | \keyword{bootstrapping} 55 | \keyword{plot} 56 | -------------------------------------------------------------------------------- /man/qmb.summary.Rd: -------------------------------------------------------------------------------- 1 | \name{qmb.summary} 2 | \alias{qmb.summary} 3 | \title{Q Methodology: Summary of bootstrap results} 4 | \description{Summarises bootstrap results for Q-sorts and statements into two tables.} 5 | \usage{qmb.summary(qmboots)} 6 | 7 | \arguments{ 8 | \item{qmboots}{an object of bootstrap results, as produced by \code{\link{qmboots}}.} 9 | } 10 | 11 | \value{ 12 | Returns a list with two data frames: 13 | \item{qsorts}{data frame with Q-sorts as rows, and the following columns: the factor loadings from the standard analysis (*.std), the bootstrap (*.loa), the bootstrap SE (*.SE), the frequency of flagging (*.freq*) and the estimate of bias (*.bias).} 14 | \item{statements}{data frame with statements as rows, and the following columns: the z-scores from the standard analysis (*.std), from the bootstrap (*.bts), bootstrap SE (*.SE), estimate of bias of z-scores (*.bias), factor scores from the standard analysis (fsc_f*), from the bootstrap (fsc.bts.*), estimate of bias of factor scores, distinguishing and consensus statements from the standard results (see \code{\link{qdc}}) and from the bootstrap values.} 15 | } 16 | 17 | \author{Aiora Zabala} 18 | 19 | \references{Zabala, Pascual (2016) Bootstrapping Q Methodology to Improve the Understanding of Human Perspectives.
PLoS ONE 11(2): e0148087.} 20 | 21 | \seealso{ 22 | \code{\link{qmethod}}, \code{\link{qmboots}} 23 | } 24 | \examples{ 25 | data(lipset) 26 | boots <- qmboots(lipset[[1]], nfactors = 3, nsteps = 50, 27 | load = "auto", rotation = "varimax", 28 | indet = "qindtest", fsi = TRUE) 29 | 30 | boots.summary <- qmb.summary(boots) 31 | 32 | # First rows of the summary for Q-sorts: 33 | head(boots.summary$qsorts) 34 | 35 | # First rows of the summary for statements: 36 | head(boots.summary$statements) 37 | } 38 | \keyword{Q methodology} 39 | \keyword{bootstrapping} 40 | \keyword{summary} 41 | -------------------------------------------------------------------------------- /man/qmboots.Rd: -------------------------------------------------------------------------------- 1 | \name{qmboots} 2 | \alias{qmboots} 3 | \title{Q Methodology: Bootstrap} 4 | \description{Implementation of the bootstrap to Q methodology using Principal Components Analysis (PCA).} 5 | \usage{qmboots(dataset, nfactors, nsteps, load = "auto", 6 | rotation = "varimax", indet = "qindtest", fsi = TRUE, 7 | forced = T, distribution = NULL, 8 | cor.method="pearson", ...)} 9 | 10 | \arguments{ 11 | \item{dataset}{a matrix or data frame containing original data, with statements as rows, Q-sorts as columns, and Q board column values in each cell.} 12 | \item{nfactors}{number of factors to extract using PCA.} 13 | \item{load}{a matrix of factor loadings to be used as target. If "auto", the target matrix is generated using the rotation indicated ("varimax" by default).} 14 | \item{nsteps}{number of steps (repetitions) for the bootstrapping.} 15 | \item{rotation}{rotation method, set to "varimax" by default. Other rotations from the \pkg{psych} \code{\link[psych]{principal}} function are possible: "none", "varimax", "quartimax", "promax", "oblimin", "simplimax", and "cluster".} 16 | \item{indet}{method to solve the double indeterminacy issue in PCA bootstrapping. \code{"procrustes"} for procrustes rotation, \code{"qindtest"} for a simple solution valid for up to 3 factors extracted, \code{"both"} for a qindtest and a procrustes rotation, or \code{"none"} for no rotation. The latter is not recommended, as it introduces inflated variability. If \code{"none"} is selected, each replication is rotated using varimax.} 17 | \item{fsi}{logical; shall the Factor Stability index be calculated? (experimental index).} 18 | \item{forced}{logical; is the ranking of the items forced to match the distribution? Set to \code{TRUE} if all respondents ranked the items strictly following the distribution scores, in which case the values of the distribution are calculated automatically. Set to \code{FALSE} if respondents had the possibility to rank the items without following the distribution, and the values of the distribution have to be provided as an array in the argument \code{distribution}.} 19 | \item{distribution}{numerical vector; when \code{forced = FALSE}, the distribution has to be provided as a vector of numbers, such as \code{c(-2, -1, -1, 0, 1, 1, 2, 2)}.} 20 | \item{cor.method}{character string indicating which correlation coefficient is to be computed, to be passed on to the function \code{\link[stats]{cor}}: \code{"pearson"} (default), \code{"kendall"}, or \code{"spearman"}.} 21 | \item{...}{other arguments passed on to \code{\link{qmethod}}.} 22 | } 23 | 24 | \value{ 25 | \item{zscore-stats}{summary of the analysis. List of one object, plus as many objects as factors extracted: the bootstrapped factor scores, and the z-score statistics of the bootstrap.
The z-score statistics of interest are \code{mean} (the bootstrap estimate of the z-score), and \code{sd} (the bootstrap estimate of the SE).} 26 | \item{full.bts.res}{full bootstrap results. List with as many objects as factors extracted, each object containing three data frames: \code{flagged}, \code{zsc} and \code{loa}. These data frames have as many columns as bootstrap steps, and contain the results of the analysis of each iteration. See the description of these three data frames in \code{\link{qmethod}}.} 27 | \item{indet.tests}{indeterminacy tests.} 28 | \item{resamples}{index of the Q-sorts selected for each step.} 29 | \item{orig.res}{original results. See details of all the objects in \code{\link{qmethod}}.} 30 | \item{q.array}{array of values in the distribution grid.} 31 | \item{loa.stats}{statistics of factor loadings. List with as many objects as factors extracted, each object containing one data frame with the factor loading statistics of the bootstrap. The factor loading statistics of interest are \code{mean} (the bootstrap estimate of the factor loading), and \code{sd} (the bootstrap estimate of the SE). This table includes \code{flag_freq}, which indicates the frequency with which the given Q-sort was flagged for the given factor.} 32 | \item{q.array}{array of values in the distribution grid.} 33 | \item{fsi}{factor stability index (optional; experimental).} 34 | } 35 | \references{Zabala, Pascual (2016) Bootstrapping Q Methodology to Improve the Understanding of Human Perspectives. PLoS ONE 11(2): e0148087.} 36 | 37 | \author{Aiora Zabala} 38 | 39 | \seealso{ 40 | \code{\link{qmethod}} 41 | } 42 | \examples{ 43 | data(lipset) 44 | boots <- qmboots(lipset[[1]], nfactors = 3, nsteps = 10, load = "auto", 45 | rotation = "varimax", indet = "qindtest", 46 | fsi = TRUE) 47 | boots 48 | boxplot(t(boots[[2]][[1]][[2]]), horizontal = TRUE, 49 | main = "Statement z-score boxplot for the first factor", las = 1) 50 | 51 | # See the table summaries: 52 | qms <- qmb.summary(boots) 53 | round(qms$statements, digits=2) # statements 54 | round(qms$qsorts, digits=2) # Q-sorts 55 | 56 | # A more synthetic visualisation: 57 | # z-scores: 58 | qmb.plot(qms, nfactors=3, type="zsc", sort="difference") 59 | # factor loadings: 60 | qmb.plot(qms, nfactors=3, type="loa", sort="difference") 61 | 62 | 63 | } 64 | \keyword{multivariate} 65 | \keyword{Q methodology} 66 | \keyword{bootstrapping} 67 | \keyword{PCA} 68 | -------------------------------------------------------------------------------- /man/qpcrustes.Rd: -------------------------------------------------------------------------------- 1 | \name{qpcrustes} 2 | \alias{qpcrustes} 3 | \title{Q Methodology: Procrustes rotation of loadings} 4 | \description{This is a wrapper for the \code{procrustes} rotation from \pkg{MCMCpack}, used for bootstrapping Q methodology in the function \code{\link{qmboots}}.} 5 | \usage{qpcrustes(loa, target, nfactors)} 6 | 7 | \arguments{ 8 | \item{loa}{factor loadings from the analysis of a resample.} 9 | \item{target}{factor loadings from the full sample analysis, excluding Q-sorts that are not present in the resample.} 10 | \item{nfactors}{number of factors.} 11 | } 12 | 13 | \details{Returns the factor loadings for the subsample after applying Procrustes rotation to correct the indeterminacy issue. Uses \code{procrustes} from \pkg{MCMCpack}. Used within the function \code{\link{qmboots}}, not intended for independent use. 14 | } 15 | 16 | \references{Zabala, Pascual (2016) Bootstrapping Q Methodology to Improve the Understanding of Human Perspectives.
PLoS ONE 11(2): e0148087.} 17 | 18 | \author{Aiora Zabala} 19 | 20 | \note{This function is called within the function \code{\link{qmboots}}. Not intended to be used separately. The function calls \code{procrustes} from \pkg{MCMCpack}, a package that requires the package \code{graph}. As of April 2016, the package \code{graph} has been moved to Bioconductor, and therefore it needs to be installed manually. If you get errors about missing packages when using this function or \code{\link{qmboots}}, install \code{graph} manually: 21 | \code{source("https://bioconductor.org/biocLite.R") 22 | biocLite("graph")} 23 | } 24 | 25 | \seealso{ 26 | Function \code{procrustes} from \pkg{GPArotation} package. 27 | } 28 | \examples{ 29 | # This example requires installing 'MCMCpack': 30 | data(lipset) 31 | qm <- qmethod(lipset[[1]], nfactors=3, rotation="varimax") 32 | qselim <- sample(1:3, 2, replace=FALSE) ## Q-sorts to eliminate 33 | subdata <- lipset[[1]][ , -qselim] 34 | library(psych) 35 | loa <- as.data.frame(unclass(principal(subdata, 36 | nfactors=3, rotate="varimax")$loadings)) 37 | target <- as.matrix(as.data.frame(qm[3])) 38 | colnames(target) <- paste("target_f", 1:3, sep="") 39 | subtarget <- target[c(-qselim),] 40 | qindt <- qpcrustes(loa, subtarget, 3) 41 | qindt 42 | } 43 | 44 | \keyword{multivariate} 45 | \keyword{Q methodology} 46 | \keyword{Procrustes rotation} 47 | -------------------------------------------------------------------------------- /man/qzscores.Rd: -------------------------------------------------------------------------------- 1 | \name{qzscores} 2 | \alias{qzscores} 3 | \title{Q methodology: z-scores from loadings} 4 | \description{Calculates factor characteristics, z-scores, and factor scores, provided a matrix of loadings and a matrix of (manually or automatically) flagged Q-sorts.} 5 | \usage{qzscores(dataset, nfactors, loa, flagged, forced = TRUE, 6 | distribution = NULL)} 7 | 8 | \arguments{ 9 | \item{dataset}{a matrix or a data frame containing raw data, with statements as rows, Q-sorts as columns, and the column scores in the distribution in each cell.} 10 | \item{nfactors}{number of factors to extract.} 11 | \item{loa}{matrix or data frame of \code{nqsorts} rows and \code{nfactors} columns, with values of factor loadings for Q-sorts, calculated using, e.g., \code{principal(...)$loadings} or \code{\link{centroid}}.} 12 | \item{flagged}{matrix or data frame of \code{nqsorts} rows and \code{nfactors} columns, with \code{TRUE} values for the Q-sorts that are flagged. Automatic flagging can be applied using \code{\link{qflag}}. Manual flagging can be done by providing a logical matrix with \code{nqsorts} rows and \code{nfactors} columns to the argument \code{flagged}.} 13 | \item{forced}{logical; is the distribution of items forced? Set to \code{TRUE} if all respondents ranked the items strictly following the distribution scores, in which case the values of the distribution are calculated automatically. Set to \code{FALSE} if respondents were able to rank the items without following the distribution, in which case the values of the distribution have to be provided as an array in the argument \code{distribution}.} 14 | \item{distribution}{numerical vector; when \code{forced = FALSE}, the distribution has to be provided as a vector of numbers, such as \code{c(-2, -1, -1, 0, 1, 1, 2, 2)}.} 15 | } 16 | 17 | \details{In order to implement manual flagging, use a manually created data frame (or matrix) for \code{flagged}.
See an example of code to perform manual flagging or to manipulate the loadings in \href{http://aiorazabala.github.io/qmethod/Advanced-analysis}{the website}. 18 | 19 | The loadings from \code{principal(...)$loadings} or \code{centroid} can be explored to decide upon flagging. The \code{flagged} data frame (or matrix) should have Q-sorts as rows and factors as columns, with \code{TRUE} values for the flagged Q-sorts.} 20 | \value{ 21 | Returns a list of class \code{QmethodRes}, with seven objects: 22 | \item{brief}{a list with the basic values of the analysis: date (\code{"date"}), number of statements (\code{"nstat"}), number of Q-sorts (\code{"nqsort"}), whether the distribution was 'forced' (\code{"distro"}), number of factors extracted (\code{"nfactors"}), type of extraction (\code{"extraction"}), type of rotation (\code{"rotation"}), method for correlation (\code{"cor.method"}), and a summary of this information for display purposes (\code{"info"}).} 23 | \item{dataset}{original data.} 24 | \item{loa}{factor loadings for Q-sorts.} 25 | \item{flagged}{logical data frame of flagged Q-sorts.} 26 | \item{zsc}{statement z-scores.} 27 | \item{zsc_n}{statement rounded scores, rounded to the values in the first row of the original dataset.} 28 | \item{f_char}{factor characteristics obtained from \code{\link{qfcharact}}.} 29 | } 30 | \references{Brown, S. R., 1980 \emph{Political subjectivity: Applications of Q methodology in political science}, New Haven, CT: Yale University Press. 31 | 32 | See further references on the methodology in \code{\link{qmethod-package}}.} 33 | 34 | \note{This is a function used within \code{\link{qmethod}}. Rarely to be used independently.} 35 | 36 | \author{Aiora Zabala} 37 | 38 | \examples{ 39 | data(lipset) 40 | library(psych) 41 | loa <- unclass(principal(lipset[[1]], 42 | nfactors = 3, rotate = "varimax")$loadings) 43 | flagged <- qflag(nstat = 33, loa = loa) 44 | qmzsc <- qzscores(lipset[[1]], nfactors = 3, flagged = flagged, loa = loa) 45 | qmzsc # Show results 46 | } 47 | -------------------------------------------------------------------------------- /man/runInterface.Rd: -------------------------------------------------------------------------------- 1 | \name{runInterface} 2 | \alias{runInterface} 3 | \title{Q methodology: Graphical User Interface (GUI)} 4 | \description{Launches an interactive interface to run Q methodology analysis using the basic features. 5 | The interface is also \href{https://azabala.shinyapps.io/qmethod-gui/}{available online}.} 6 | 7 | \usage{runInterface()} 8 | 9 | \details{ 10 | This GUI allows the user to conduct a full Q methodology analysis, choosing: 11 | \itemize{ 12 | \item {either PCA or centroid extraction method} 13 | \item {varimax or no rotation method (for PCA and centroid) and other uncommon rotation methods (for PCA)} 14 | \item {from 2 to 7 factors/components.} 15 | } 16 | The GUI conducts the analysis with forced distribution and automatic flagging. See Note. 17 | 18 | The GUI shows the full results from the analysis, and also: 19 | \itemize{ 20 | \item{Plot of z-scores} 21 | \item{Automatically flagged Q-sorts} 22 | \item{Information to explore how many factors to extract (including a screeplot)} 23 | \item{Plot of z-scores} 24 | } 25 | } 26 | 27 | \note{ 28 | This GUI has limited functionality in comparison to the command-line. For full functionality (such as specifying non-forced analysis, manual flagging, and much more), use the command-line directly in the R console.
See, for example, a tutorial for \href{http://aiorazabala.github.io/qmethod/Advanced-analysis}{manual manipulation of Q-sort loadings and/or manual flagging}. 29 | 30 | To run this same analysis directly in R, see the code generated in the GUI in \emph{Run the analysis directly in R}. 31 | } 32 | 33 | \examples{ 34 | ## Only run this example in interactive R sessions 35 | if (interactive()) { 36 | runInterface() 37 | } 38 | } -------------------------------------------------------------------------------- /man/summary.QmethodRes.Rd: -------------------------------------------------------------------------------- 1 | \name{summary.QmethodRes} 2 | \alias{summary.QmethodRes} 3 | \title{Q methodology: summary for class 'QmethodRes'} 4 | \description{Shows a summary of the results of Q methodology from the \code{\link{qmethod}} function: factor scores and factor characteristics.} 5 | \usage{ 6 | \method{summary}{QmethodRes}(object, ...) 7 | } 8 | 9 | \arguments{ 10 | \item{object}{an object of class \code{QmethodRes} created by the \code{\link{qmethod}} function.} 11 | \item{...}{any other argument for the \code{\link{summary}} function.} 12 | } 13 | \value{ 14 | Returns the summary of the analysis: 15 | \itemize{ 16 | \item Statement factor scores, normalised to the values in the first row of the original dataset, and 17 | \item Factor characteristics: Average reliability coefficient, Number of loading Q-sorts, Eigenvalues, Percentage of explained variance, Composite reliability, Standard error of factor scores, Correlation coefficients between factor z-scores, and Standard errors of differences. 18 | } 19 | } 20 | \references{Brown, S. R., 1980 \emph{Political subjectivity: Applications of Q methodology in political science}, New Haven, CT: Yale University Press.} 21 | 22 | \author{Aiora Zabala} 23 | 24 | \seealso{ 25 | \code{\link{qmethod}} in this package 26 | } 27 | \examples{ 28 | data(lipset) 29 | results <- qmethod(lipset[[1]], nfactors = 3, rotation = "varimax") 30 | summary(results) 31 | } --------------------------------------------------------------------------------
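Several of the help pages above (qflag.Rd, qzscores.Rd and runInterface.Rd) refer readers to the package website for manual flagging. The sketch below is an illustrative addition, not part of the package sources: it strings together only calls documented above (qflag, qzscores, summary) into a minimal manual-flagging workflow. The particular Q-sort and factor overridden (row 1, factor 1) are arbitrary assumptions for demonstration; in a real analysis, inspect the loadings before deciding which flags to change.

library(qmethod)
library(psych)
data(lipset)

# Factor loadings from a standard PCA extraction, as in the qflag.Rd example:
loa <- unclass(principal(lipset[[1]], nfactors = 3,
                         rotate = "varimax")$loadings)

# Automatic pre-flagging (qflag.Rd):
flagged <- qflag(loa = loa, nstat = nrow(lipset[[1]]))

# Manual override: un-flag an arbitrary Q-sort on factor 1
# (purely illustrative -- base real decisions on the loadings):
flagged[1, 1] <- FALSE

# Recalculate z-scores and factor characteristics with the manual flags (qzscores.Rd):
res <- qzscores(lipset[[1]], nfactors = 3, loa = loa, flagged = flagged)
summary(res)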