├── .Rbuildignore ├── .github ├── .gitignore ├── dependabot.yml └── workflows │ ├── R-CMD-check.yaml │ ├── build-image.yaml │ ├── make-release.yaml │ ├── pkgdown.yaml │ └── test-coverage.yaml ├── .gitignore ├── CODE_OF_CONDUCT.md ├── CRAN-RELEASE ├── DESCRIPTION ├── Dockerfile ├── LICENSE ├── LICENSE.md ├── NAMESPACE ├── NEWS.md ├── R ├── clean_answers.R ├── collector.R ├── data.R ├── fit_distributions.R ├── generate_weights.R ├── make_handouts.R ├── make_scorecard.R ├── make_slides.R ├── prepare_data.R ├── tidyrisk_question_set.R ├── tidyrisk_response_set.R ├── utils-pipe.R └── utils.R ├── README.Rmd ├── README.md ├── _pkgdown.yml ├── codecov.yml ├── collector.Rproj ├── cran-comments.md ├── data-raw ├── calibration_answers.csv ├── calibration_questions.csv ├── capability_answers.csv ├── domains.csv ├── regenerate_data.R ├── scenario_answers.csv ├── sme_top_domains.csv └── threat_communities.csv ├── data ├── calibration_questions.rda ├── mc_calibration_answers.rda ├── mc_capabilities.rda ├── mc_capability_answers.rda ├── mc_capability_parameters_fitted.rda ├── mc_domains.rda ├── mc_scenario_answers.rda ├── mc_scenario_parameters_fitted.rda ├── mc_scenarios.rda ├── mc_sme_top_domains.rda ├── mc_threat_communities.rda └── mc_threat_parameters_fitted.rda ├── inst ├── WORDLIST ├── css │ └── styles.css ├── img │ └── espresso_machine.jpg ├── interview.Rmd └── templates │ └── template.docx ├── man ├── calibration_questions.Rd ├── check_readability.Rd ├── clean_answers.Rd ├── collector.Rd ├── combine_capability_parameters.Rd ├── combine_lognorm.Rd ├── combine_lognorm_trunc.Rd ├── combine_norm.Rd ├── combine_scenario_parameters.Rd ├── derive_controls.Rd ├── enforce_tidyrisk_question_set.Rd ├── enforce_tidyrisk_response_set.Rd ├── figures │ └── logo.png ├── fit_capabilities.Rd ├── fit_capabilities_geomean.Rd ├── fit_lognorm.Rd ├── fit_lognorm_trunc.Rd ├── fit_norm_trunc.Rd ├── fit_pois.Rd ├── fit_scenarios.Rd ├── fit_scenarios_geomean.Rd ├── fit_threat_communities.Rd ├── 
generate_cost_function.Rd ├── generate_weights.Rd ├── get_smes_domains.Rd ├── is_tidyrisk_question_set.Rd ├── is_tidyrisk_response_set.Rd ├── lognormal_to_normal.Rd ├── make_handouts.Rd ├── make_scorecard.Rd ├── make_slides.Rd ├── mc_calibration_answers.Rd ├── mc_capabilities.Rd ├── mc_capability_answers.Rd ├── mc_capability_parameters_fitted.Rd ├── mc_domains.Rd ├── mc_scenario_answers.Rd ├── mc_scenario_parameters_fitted.Rd ├── mc_scenarios.Rd ├── mc_sme_top_domains.Rd ├── mc_threat_communities.Rd ├── mc_threat_parameters_fitted.Rd ├── normal_to_lognormal.Rd ├── pipe.Rd ├── prepare_data.Rd ├── read_questions.Rd ├── read_responses.Rd ├── tidyrisk_question_set.Rd └── tidyrisk_response_set.Rd ├── scripts └── Makevars ├── tests ├── spelling.R ├── testthat.R └── testthat │ ├── test-clean_answers.R │ ├── test-fit_distributions.R │ ├── test-generate_weights.R │ ├── test-make_handouts.R │ ├── test-make_scorecard.R │ ├── test-make_slides.R │ ├── test-prepare_data.R │ └── test-utils.R └── vignettes ├── .gitignore └── file_structures.Rmd /.Rbuildignore: -------------------------------------------------------------------------------- 1 | ^.*\.Rproj$ 2 | ^\.Rproj\.user$ 3 | ^LICENSE\.md$ 4 | ^CODE_OF_CONDUCT\.md$ 5 | ^docs$ 6 | ^_pkgdown.yml$ 7 | 8 | ^data-raw$ 9 | ^README\.Rmd$ 10 | ^appveyor\.yml$ 11 | ^\.travis\.yml$ 12 | ^codecov\.yml$ 13 | ^cran-comments\.md$ 14 | ^CRAN-RELEASE$ 15 | ^\.github$ 16 | ^scripts$ 17 | ^Dockerfile$ 18 | -------------------------------------------------------------------------------- /.github/.gitignore: -------------------------------------------------------------------------------- 1 | *.html 2 | -------------------------------------------------------------------------------- /.github/dependabot.yml: -------------------------------------------------------------------------------- 1 | version: 2 2 | updates: 3 | # Maintain dependencies for GitHub Actions 4 | - package-ecosystem: "github-actions" 5 | directory: "/" 6 | schedule: 7 | interval: 
"weekly" 8 | day: "sunday" 9 | assignees: 10 | - "davidski" 11 | 12 | -------------------------------------------------------------------------------- /.github/workflows/R-CMD-check.yaml: -------------------------------------------------------------------------------- 1 | # For help debugging build failures open an issue on the RStudio community with the 'github-actions' tag. 2 | # https://community.rstudio.com/new-topic?category=Package%20development&tags=github-actions 3 | on: 4 | push: 5 | branches: 6 | - main 7 | - master 8 | pull_request: 9 | branches: 10 | - main 11 | - master 12 | 13 | name: R-CMD-check 14 | 15 | jobs: 16 | R-CMD-check: 17 | runs-on: macOS-latest 18 | steps: 19 | - uses: actions/checkout@v4 20 | - uses: r-lib/actions/setup-r@v2 21 | - name: Install dependencies 22 | run: | 23 | install.packages(c("remotes", "rcmdcheck")) 24 | remotes::install_deps(dependencies = TRUE) 25 | shell: Rscript {0} 26 | - name: Check 27 | run: rcmdcheck::rcmdcheck(args = "--no-manual", error_on = "error") 28 | shell: Rscript {0} 29 | -------------------------------------------------------------------------------- /.github/workflows/build-image.yaml: -------------------------------------------------------------------------------- 1 | name: Docker Image 2 | 3 | on: 4 | push: 5 | branches: 6 | - master 7 | - main 8 | 9 | jobs: 10 | build: 11 | runs-on: ubuntu-latest 12 | steps: 13 | - name: Checkout 14 | uses: actions/checkout@v4 15 | - name: Docker meta 16 | id: docker_meta 17 | uses: docker/metadata-action@v5 18 | with: 19 | images: ghcr.io/davidski/collector # list of Docker images to use as base name for tags 20 | tags: | # add git short SHA as Docker tag 21 | type=sha 22 | - name: Set up QEMU 23 | uses: docker/setup-qemu-action@v3 24 | - name: Set up Docker Buildx 25 | uses: docker/setup-buildx-action@v3 26 | - name: Login to GitHub Container Registry 27 | uses: docker/login-action@v3 28 | with: 29 | registry: ghcr.io 30 | username: ${{ github.repository_owner }} 
31 | password: ${{ secrets.CR_PAT }} 32 | - name: Build and push 33 | id: docker_build 34 | uses: docker/build-push-action@v5 35 | with: 36 | push: ${{ github.event_name != 'pull_request' }} 37 | labels: ${{ steps.docker_meta.outputs.labels }} 38 | tags: ${{ steps.docker_meta.outputs.tags }} 39 | build-args: | 40 | arg1=value1 41 | arg2=value2 42 | -------------------------------------------------------------------------------- /.github/workflows/make-release.yaml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | # Sequence of patterns matched against refs/tags 4 | tags: 5 | - 'v*' # Push events to matching v*, i.e. v1.0, v20.15.10 6 | 7 | name: Create Release 8 | 9 | jobs: 10 | build: 11 | name: Create Release 12 | runs-on: ubuntu-latest 13 | steps: 14 | - name: Checkout code 15 | uses: actions/checkout@v4 16 | - name: Create Release 17 | id: create_release 18 | uses: softprops/action-gh-release@v1 19 | -------------------------------------------------------------------------------- /.github/workflows/pkgdown.yaml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: 4 | - main 5 | - master 6 | workflow_dispatch: 7 | 8 | name: pkgdown 9 | 10 | permissions: 11 | id-token: write 12 | contents: read 13 | 14 | jobs: 15 | pkgdown: 16 | runs-on: macOS-latest 17 | env: 18 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 19 | steps: 20 | - uses: actions/checkout@v4 21 | 22 | - uses: r-lib/actions/setup-r@v2 23 | 24 | - uses: r-lib/actions/setup-pandoc@v2 25 | 26 | - name: Query dependencies 27 | run: | 28 | install.packages('remotes') 29 | saveRDS(remotes::dev_package_deps(dependencies = TRUE), ".github/depends.Rds", version = 2) 30 | writeLines(sprintf("R-%i.%i", getRversion()$major, getRversion()$minor), ".github/R-version") 31 | shell: Rscript {0} 32 | 33 | - name: Cache R packages 34 | uses: actions/cache@v3 35 | with: 36 | path: ${{ env.R_LIBS_USER }} 37 | key: 
${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1-${{ hashFiles('.github/depends.Rds') }} 38 | restore-keys: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1- 39 | 40 | - name: Install dependencies 41 | run: | 42 | remotes::install_deps(dependencies = TRUE) 43 | install.packages("pkgdown", type = "binary") 44 | shell: Rscript {0} 45 | 46 | - name: Install package 47 | run: R CMD INSTALL . 48 | 49 | - name: Build site 50 | run: | 51 | Rscript -e 'pkgdown::build_site(preview = FALSE)' 52 | 53 | - name: Configure AWS credentials 54 | uses: aws-actions/configure-aws-credentials@v4 55 | with: 56 | role-to-assume: ${{ secrets.AWS_ROLE_ARN }} 57 | role-session-name: ${{ secrets.AWS_SESSION_NAME }} 58 | aws-region: us-west-2 59 | 60 | - name: Copy files to the website with the AWS CLI 61 | run: | 62 | aws s3 cp --recursive docs s3://${{ secrets.DEPLOY_BUCKET }}/ 63 | #aws s3 sync . s3://my-s3-test-website-bucket 64 | 65 | -------------------------------------------------------------------------------- /.github/workflows/test-coverage.yaml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: 4 | - main 5 | - master 6 | pull_request: 7 | branches: 8 | - main 9 | - master 10 | 11 | name: test-coverage 12 | 13 | jobs: 14 | test-coverage: 15 | runs-on: macOS-latest 16 | env: 17 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 18 | steps: 19 | - uses: actions/checkout@v4 20 | 21 | - uses: r-lib/actions/setup-r@v2 22 | 23 | - uses: r-lib/actions/setup-pandoc@v2 24 | 25 | - name: Query dependencies 26 | run: | 27 | install.packages('remotes') 28 | saveRDS(remotes::dev_package_deps(dependencies = TRUE), ".github/depends.Rds", version = 2) 29 | writeLines(sprintf("R-%i.%i", getRversion()$major, getRversion()$minor), ".github/R-version") 30 | shell: Rscript {0} 31 | 32 | - name: Cache R packages 33 | uses: actions/cache@v3 34 | with: 35 | path: ${{ env.R_LIBS_USER }} 36 | key: ${{ runner.os }}-${{ 
hashFiles('.github/R-version') }}-1-${{ hashFiles('.github/depends.Rds') }} 37 | restore-keys: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1- 38 | 39 | - name: Install dependencies 40 | run: | 41 | install.packages(c("remotes")) 42 | remotes::install_deps(dependencies = TRUE) 43 | remotes::install_cran("covr") 44 | shell: Rscript {0} 45 | 46 | - name: Test coverage 47 | run: covr::codecov() 48 | shell: Rscript {0} 49 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .Rproj.user 2 | .Rhistory 3 | .RData 4 | docs 5 | inst/doc 6 | -------------------------------------------------------------------------------- /CODE_OF_CONDUCT.md: -------------------------------------------------------------------------------- 1 | # Contributor Code of Conduct 2 | 3 | As contributors and maintainers of this project, we pledge to respect all people who 4 | contribute through reporting issues, posting feature requests, updating documentation, 5 | submitting pull requests or patches, and other activities. 6 | 7 | We are committed to making participation in this project a harassment-free experience for 8 | everyone, regardless of level of experience, gender, gender identity and expression, 9 | sexual orientation, disability, personal appearance, body size, race, ethnicity, age, or religion. 10 | 11 | Examples of unacceptable behavior by participants include the use of sexual language or 12 | imagery, derogatory comments or personal attacks, trolling, public or private harassment, 13 | insults, or other unprofessional conduct. 14 | 15 | Project maintainers have the right and responsibility to remove, edit, or reject comments, 16 | commits, code, wiki edits, issues, and other contributions that are not aligned to this 17 | Code of Conduct. Project maintainers who do not follow the Code of Conduct may be removed 18 | from the project team. 
19 | 20 | Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by 21 | opening an issue or contacting one or more of the project maintainers. 22 | 23 | This Code of Conduct is adapted from the Contributor Covenant 24 | (http://contributor-covenant.org), version 1.0.0, available at 25 | http://contributor-covenant.org/version/1/0/0/ 26 | -------------------------------------------------------------------------------- /CRAN-RELEASE: -------------------------------------------------------------------------------- 1 | This package was submitted to CRAN on 2020-02-17. 2 | Once it is accepted, delete this file and tag the release (commit be1550cba0). 3 | -------------------------------------------------------------------------------- /DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: collector 2 | Title: Quantified Risk Assessment Data Collection 3 | Version: 0.1.4 4 | Authors@R: person("David", "Severski", email = "davidski@deadheaven.com", role = c("aut", "cre"), comment = c(ORCID ="0000-0001-7867-0459")) 5 | Description: An open source process for collecting quantified data inputs from 6 | subject matter experts. Intended for feeding into an OpenFAIR analysis 7 | using 8 | a tool such as 'evaluator' . 
9 | Depends: R (>= 3.4.0) 10 | License: MIT + file LICENSE 11 | Encoding: UTF-8 12 | LazyData: true 13 | RoxygenNote: 7.1.1 14 | Roxygen: list(markdown = TRUE) 15 | Imports: 16 | EnvStats, 17 | dplyr, 18 | evaluator (>= 0.4.0), 19 | flextable, 20 | ggplot2, 21 | magrittr, 22 | markdown, 23 | patchwork, 24 | purrr, 25 | quanteda.textstats, 26 | readr, 27 | rlang, 28 | rmarkdown, 29 | stringr, 30 | tibble, 31 | tidyr (>= 1.0.0), 32 | officer, 33 | xaringan 34 | Suggests: 35 | spelling, 36 | testthat, 37 | covr, 38 | knitr 39 | SystemRequirements: pandoc 40 | URL: https://collector.tidyrisk.org 41 | BugReports: https://github.com/davidski/collector/issues 42 | Language: en-US 43 | VignetteBuilder: knitr 44 | -------------------------------------------------------------------------------- /Dockerfile: -------------------------------------------------------------------------------- 1 | FROM rocker/tidyverse:4.0.3 as builder 2 | 3 | ARG EVALUATOR_VERSION 4 | ENV BUILD_DATE=2020-11-27 5 | 6 | LABEL org.opencontainers.image.licenses="MIT" \ 7 | org.opencontainers.image.source="https://github.com/davidski/collector" \ 8 | org.opencontainers.image.documentation="https://collector.tidyrisk.org" \ 9 | maintainer="David F. Severski " \ 10 | org.opencontainers.image.authors="David F. Severski " 11 | 12 | # collector doesn't need shiny 13 | #RUN export ADD=shiny && bash /etc/cont-init.d/add 14 | 15 | COPY .
/src/ 16 | WORKDIR /src 17 | 18 | RUN apt-get update \ 19 | && apt-get install -y zlib1g-dev libproj-dev libcairo2-dev libmagick++-dev \ 20 | && install2.r --deps=TRUE remotes \ 21 | && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 22 | 23 | # Silence pragma warnings for RcppEigen 24 | # Reference https://github.com/kaskr/adcomp/issues/277#issuecomment-400191014 25 | COPY /scripts/Makevars /root/.R/Makevars 26 | 27 | RUN apt-get clean \ 28 | && rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* 29 | 30 | VOLUME /data 31 | 32 | EXPOSE 8787 33 | EXPOSE 3838 34 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | YEAR: 2018 2 | COPYRIGHT HOLDER: David F. Severski 3 | -------------------------------------------------------------------------------- /LICENSE.md: -------------------------------------------------------------------------------- 1 | # MIT License 2 | 3 | Copyright (c) 2018 David F. Severski 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /NAMESPACE: -------------------------------------------------------------------------------- 1 | # Generated by roxygen2: do not edit by hand 2 | 3 | S3method(as.data.frame,tidyrisk_question_set) 4 | export("%>%") 5 | export(as.tidyrisk_question_set) 6 | export(as.tidyrisk_response_set) 7 | export(check_readability) 8 | export(clean_answers) 9 | export(combine_capability_parameters) 10 | export(combine_lognorm) 11 | export(combine_lognorm_trunc) 12 | export(combine_norm) 13 | export(combine_scenario_parameters) 14 | export(fit_capabilities) 15 | export(fit_capabilities_geomean) 16 | export(fit_lognorm) 17 | export(fit_lognorm_trunc) 18 | export(fit_norm_trunc) 19 | export(fit_pois) 20 | export(fit_scenarios) 21 | export(fit_scenarios_geomean) 22 | export(fit_threat_communities) 23 | export(generate_cost_function) 24 | export(generate_weights) 25 | export(get_smes_domains) 26 | export(is_tidyrisk_question_set) 27 | export(is_tidyrisk_response_set) 28 | export(lognormal_to_normal) 29 | export(make_bingo) 30 | export(make_handouts) 31 | export(make_scorecard) 32 | export(make_slides) 33 | export(new_tidyrisk_question_set) 34 | export(new_tidyrisk_response_set) 35 | export(normal_to_lognormal) 36 | export(prepare_data) 37 | export(read_questions) 38 | export(read_responses) 39 | export(tidyrisk_question_set) 40 | export(tidyrisk_response_set) 41 | export(validate_tidyrisk_question_set) 42 | import(dplyr) 43 | import(ggplot2) 44 | import(xaringan) 45 | importFrom(EnvStats,geoMean) 46 | importFrom(EnvStats,qlnormTrunc) 47 | importFrom(EnvStats,qnormTrunc) 48 | importFrom(dplyr,"%>%") 49 | 
importFrom(dplyr,arrange) 50 | importFrom(dplyr,bind_cols) 51 | importFrom(dplyr,case_when) 52 | importFrom(dplyr,desc) 53 | importFrom(dplyr,distinct) 54 | importFrom(dplyr,filter) 55 | importFrom(dplyr,group_by) 56 | importFrom(dplyr,group_cols) 57 | importFrom(dplyr,group_vars) 58 | importFrom(dplyr,if_else) 59 | importFrom(dplyr,left_join) 60 | importFrom(dplyr,mutate) 61 | importFrom(dplyr,mutate_at) 62 | importFrom(dplyr,n) 63 | importFrom(dplyr,pull) 64 | importFrom(dplyr,rename) 65 | importFrom(dplyr,row_number) 66 | importFrom(dplyr,sample_n) 67 | importFrom(dplyr,select) 68 | importFrom(dplyr,starts_with) 69 | importFrom(dplyr,summarize) 70 | importFrom(dplyr,summarize_at) 71 | importFrom(dplyr,ungroup) 72 | importFrom(dplyr,vars) 73 | importFrom(evaluator,tidyrisk_scenario) 74 | importFrom(flextable,add_header) 75 | importFrom(flextable,align) 76 | importFrom(flextable,autofit) 77 | importFrom(flextable,body_add_flextable) 78 | importFrom(flextable,merge_h) 79 | importFrom(flextable,regulartable) 80 | importFrom(flextable,set_header_labels) 81 | importFrom(flextable,style) 82 | importFrom(flextable,width) 83 | importFrom(magrittr,"%>%") 84 | importFrom(officer,body_add_break) 85 | importFrom(officer,body_add_par) 86 | importFrom(officer,body_add_toc) 87 | importFrom(officer,body_remove) 88 | importFrom(officer,fp_text) 89 | importFrom(officer,read_docx) 90 | importFrom(patchwork,wrap_plots) 91 | importFrom(purrr,map) 92 | importFrom(purrr,pmap) 93 | importFrom(purrr,quietly) 94 | importFrom(purrr,walk) 95 | importFrom(quanteda.textstats,textstat_readability) 96 | importFrom(readr,col_character) 97 | importFrom(readr,col_date) 98 | importFrom(readr,col_integer) 99 | importFrom(readr,col_logical) 100 | importFrom(readr,col_number) 101 | importFrom(readr,cols) 102 | importFrom(readr,read_csv) 103 | importFrom(rlang,"!!") 104 | importFrom(rlang,.data) 105 | importFrom(rlang,get_expr) 106 | importFrom(rlang,set_names) 107 | importFrom(rmarkdown,render) 108 | 
importFrom(stats,optim) 109 | importFrom(stats,qlnorm) 110 | importFrom(stats,qpois) 111 | importFrom(stringr,str_extract_all) 112 | importFrom(stringr,str_glue) 113 | importFrom(stringr,str_replace_all) 114 | importFrom(stringr,str_split_fixed) 115 | importFrom(stringr,str_wrap) 116 | importFrom(tibble,add_column) 117 | importFrom(tibble,as_tibble) 118 | importFrom(tibble,tibble) 119 | importFrom(tidyr,drop_na) 120 | importFrom(tidyr,gather) 121 | importFrom(tidyr,nest) 122 | importFrom(tidyr,replace_na) 123 | importFrom(tidyr,unnest) 124 | -------------------------------------------------------------------------------- /NEWS.md: -------------------------------------------------------------------------------- 1 | # collector 0.1.4 2 | 3 | * Convert from quanteda to quanteda.textstats 4 | * Convert from ggpubr to patchwork 5 | 6 | # collector 0.1.3 7 | 8 | * Remove timer from interview slides. 9 | * Correct `check_readability()` column errors. 10 | * Update for tidyr 1.0 changes. 11 | 12 | # collector 0.1.2 13 | 14 | * Fix namespace requirement for xaringan. 15 | * Do not run pandoc tests when pandoc is not available. 16 | 17 | # collector 0.1.1 18 | 19 | * Documentation improvements. 20 | * Add system dependency on pandoc. 21 | * Remove inadvertent dependency on R 3.5.0 `tempdir()` syntax. 22 | 23 | # collector 0.1.0 24 | 25 | * Initial release. 26 | -------------------------------------------------------------------------------- /R/clean_answers.R: -------------------------------------------------------------------------------- 1 | #' Clean extreme answers 2 | #' 3 | #' You may wish to apply some sanity checking bounds on the responses from 4 | #' subject matter experts. This function applies a set of predefined 5 | #' transformations to the scenario and capability responses. Review these 6 | #' assumptions carefully before using them in your own analysis. 
7 | #' 8 | #' Make the following assumptions/modifications 9 | #' 10 | #' - minimum capacity is 5% (we've thought about it - 90% CI) 11 | #' - maximum capacity is 95% (we're just about the best - 90% CI) 12 | #' - minimum loss is 1000 dollars (both low and high) 13 | #' - scale all impact into thousands of dollars (make normal 14 | #' decomposition easier, and is in line of the scale of 15 | #' a strategic analysis) 16 | #' - set a minimum frequency of once per 10 years (0.1) 17 | #' 18 | #' @param scenario_answers Scenario answers dataframe. 19 | #' @param capability_answers Capability answers dataframe. 20 | #' 21 | #' @return A list of modified scenarios and capabilities. 22 | #' @export 23 | #' @importFrom dplyr mutate if_else 24 | #' @importFrom rlang .data 25 | #' 26 | #' @examples 27 | #' data(mc_capability_answers) 28 | #' data(mc_scenario_answers) 29 | #' clean_answers(mc_scenario_answers, mc_capability_answers) 30 | clean_answers <- function(scenario_answers, capability_answers) { 31 | cap_ans <- capability_answers %>% 32 | dplyr::mutate(low = dplyr::if_else(.data$low < .05, .05, .data$low), 33 | high = dplyr::if_else(.data$high > .95, .95, .data$high), 34 | high = pmax(.data$high, .data$low)) 35 | 36 | sce_ans <- scenario_answers %>% 37 | # set a floor for minimum impact (both low and high range) 38 | dplyr::mutate(imp_low = dplyr::if_else(.data$imp_low < 1000, 39 | 1000, 40 | .data$imp_low), 41 | imp_high = dplyr::if_else(.data$imp_high < 1000, 42 | 1000, 43 | .data$imp_high)) %>% 44 | dplyr::mutate(freq_low = dplyr::if_else(.data$freq_low == 0, 45 | 0.1, 46 | .data$freq_low), 47 | freq_high = dplyr::if_else(.data$freq_high == 0, 48 | 1, 49 | .data$freq_high)) 50 | list(capabilities = cap_ans, 51 | scenarios = sce_ans) 52 | } 53 | -------------------------------------------------------------------------------- /R/collector.R: -------------------------------------------------------------------------------- 1 | #' \code{collector} package 2 | #' 3 | #' 
Quantified Information Risk Assessment Data Collection 4 | #' 5 | #' See the online documentation located at 6 | #' \href{https://collector.tidyrisk.org/}{https://collector.tidyrisk.org/} 7 | #' 8 | #' @docType package 9 | #' @name collector 10 | #' @importFrom dplyr %>% 11 | NULL 12 | 13 | ## quiets concerns of R CMD check re: the .'s that appear in pipelines 14 | ## technique from Jenny Bryan's googlesheets package 15 | utils::globalVariables(c(".")) 16 | -------------------------------------------------------------------------------- /R/data.R: -------------------------------------------------------------------------------- 1 | #' Calibration questions 2 | #' 3 | #' A dataset of reference trivia questions for calibrating SMEs. 4 | #' 5 | #' @source 6 | #' Common trivia questions drawn from a variety of open source web resources. 7 | #' @format A data frame with 27 rows and 3 variables: 8 | #' \describe{ 9 | #' \item{question}{text of the calibration question} 10 | #' \item{answer}{answer text to the calibration question} 11 | #' \item{calibration_id}{unique identifier for the calibration question} 12 | #' } 13 | "calibration_questions" 14 | 15 | #' MetroCare Hospital Calibration Answers 16 | #' 17 | #' A dataset of SME answers to calibration questions. 18 | #' 19 | #' @source 20 | #' This is hypothetical information. Any similarity to any other 21 | #' entity is completely coincidental. 22 | #' @format A data frame with 50 rows and 5 variables: 23 | #' \describe{ 24 | #' \item{sme}{name of the subject matter expert} 25 | #' \item{calibration_id}{unique identifier of the calibration question} 26 | #' \item{low}{SME's low end estimate} 27 | #' \item{high}{SME's high end estimate} 28 | #' \item{date}{date of answer} 29 | #' } 30 | "mc_calibration_answers" 31 | 32 | #' MetroCare Hospital Capabilities 33 | #' 34 | #' A dataset of program capabilities. 35 | #' 36 | #' @source 37 | #' This is hypothetical information.
Any similarity to any other 38 | #' entity is completely coincidental. 39 | #' @format A data frame with 60 rows and 3 variables: 40 | #' \describe{ 41 | #' \item{capability_id}{unique identifier of the capability} 42 | #' \item{domain_id}{domain associated with the capability} 43 | #' \item{capability}{text description of the capability} 44 | #' } 45 | "mc_capabilities" 46 | 47 | #' MetroCare Hospital Capability Answers 48 | #' 49 | #' A dataset of SME answers to capabilities. 50 | #' 51 | #' @source 52 | #' This is hypothetical information. Any similarity to any other 53 | #' entity is completely coincidental. 54 | #' @format A data frame with 1 rows and 7 variables: 55 | #' \describe{ 56 | #' \item{sme}{name of the SME} 57 | #' \item{capability_id}{identifier of the capability} 58 | #' \item{low}{capability estimate, low} 59 | #' \item{high}{capability estimate, high} 60 | #' \item{date}{date of the answer} 61 | #' } 62 | "mc_capability_answers" 63 | 64 | #' MetroCare Hospital Scenario Answers 65 | #' 66 | #' A dataset of SME answers to scenarios. 67 | #' 68 | #' @source 69 | #' This is hypothetical information. Any similarity to any other 70 | #' entity is completely coincidental. 71 | #' @format A data frame with 1 rows and 7 variables: 72 | #' \describe{ 73 | #' \item{sme}{name of the SME} 74 | #' \item{scenario_id}{identifier of the scenario} 75 | #' \item{freq_low}{frequency estimate, low} 76 | #' \item{freq_high}{frequency estimate, high} 77 | #' \item{imp_low}{impact estimate, low} 78 | #' \item{imp_high}{impact estimate, high} 79 | #' \item{date}{date of the answer} 80 | #' } 81 | "mc_scenario_answers" 82 | 83 | #' MetroCare Hospital Domains 84 | #' 85 | #' A dataset of program domains. 86 | #' 87 | #' @source 88 | #' This is hypothetical information. Any similarity to any other 89 | #' entity is completely coincidental. 
90 | #' @format A data frame with 15 rows and 4 variables: 91 | #' \describe{ 92 | #' \item{domain}{domain title} 93 | #' \item{description}{descriptive text describing the content of the domain} 94 | #' \item{active}{logical flag indicating whether or not the domain is in use} 95 | #' \item{domain_id}{unique domain id} 96 | #' } 97 | "mc_domains" 98 | 99 | #' MetroCare Hospital SME Top Domains 100 | #' 101 | #' A dataset of focus domains per SME. 102 | #' 103 | #' @source 104 | #' This is hypothetical information. Any similarity to any other 105 | #' entity is completely coincidental. 106 | #' @format A data frame with 35 rows and 3 variables: 107 | #' \describe{ 108 | #' \item{sme}{SME name} 109 | #' \item{key}{index of domain} 110 | #' \item{value}{name of domain} 111 | #' } 112 | "mc_sme_top_domains" 113 | 114 | #' MetroCare Hospital Threat Communities 115 | #' 116 | #' A dataset of sample threat communities. 117 | #' 118 | #' @source 119 | #' This is hypothetical information. Any similarity to any other 120 | #' entity is completely coincidental. 121 | #' @format A data frame with 6 rows and 7 variables: 122 | #' \describe{ 123 | #' \item{threat_community}{text title of the threat community} 124 | #' \item{threat_id}{unique identifier} 125 | #' \item{definition}{text description of the threat community} 126 | #' \item{low}{threat communities capability, low end} 127 | #' \item{high}{threat communities capability, high end} 128 | #' \item{category}{type of the threat community} 129 | #' \item{action_type}{action type of the threat community} 130 | #' } 131 | "mc_threat_communities" 132 | 133 | #' MetroCare Risk Scenarios 134 | #' 135 | #' A dataset of sample risk scenarios. 136 | #' 137 | #' @source 138 | #' This is hypothetical information. Any similarity to any other 139 | #' entity is completely coincidental. 
140 | #' @format A data frame with 56 rows and 5 variables: 141 | #' \describe{ 142 | #' \item{scenario_id}{unique identifier} 143 | #' \item{scenario}{scenario description} 144 | #' \item{threat_id}{threat community id} 145 | #' \item{domain_id}{domain id} 146 | #' \item{controls}{comma separated list of control ids} 147 | #' } 148 | "mc_scenarios" 149 | 150 | #' MetroCare Hospital Scenario Parameters (fitted) 151 | #' 152 | #' A dataset of sample fitted scenario parameters. 153 | #' 154 | #' @source 155 | #' This is hypothetical information. Any similarity to any other 156 | #' entity is completely coincidental. 157 | #' @format A data frame with 280 rows and 17 variables: 158 | #' \describe{ 159 | #' \item{sme}{name of the sme providing the response} 160 | #' \item{scenario_id}{unique identifier} 161 | #' \item{date}{date of the response} 162 | #' \item{impact_func}{function to use for impact sampling} 163 | #' \item{impact_meanlog}{impact meanlog} 164 | #' \item{impact_sdlog}{impact standard deviation log} 165 | #' \item{impact_min}{impact minimum} 166 | #' \item{impact_max}{impact maximum} 167 | #' \item{imp_low}{impact estimate, low} 168 | #' \item{imp_high}{impact estimate, high} 169 | #' \item{frequency_func}{function to use for frequency sampling} 170 | #' \item{frequency_meanlog}{frequency meanlog} 171 | #' \item{frequency_sdlog}{frequency standard deviation log} 172 | #' \item{frequency_min}{frequency minimum} 173 | #' \item{frequency_max}{frequency maximum} 174 | #' \item{freq_low}{frequency estimate, low} 175 | #' \item{freq_high}{frequency estimate, high} 176 | #' } 177 | "mc_scenario_parameters_fitted" 178 | 179 | #' MetroCare Hospital Capability Parameters (fitted) 180 | #' 181 | #' A dataset of sample fitted capability parameters. 182 | #' 183 | #' @source 184 | #' This is hypothetical information.
Any similarity to any other 185 | #' entity is completely coincidental. 186 | #' @format A data frame with 300 rows and 10 variables: 187 | #' \describe{ 188 | #' \item{sme}{name of the sme providing the response} 189 | #' \item{capability_id}{unique identifier} 190 | #' \item{date}{text description of the threat community} 191 | #' \item{capability_func}{capability sampling function} 192 | #' \item{capability_mean}{capability mean} 193 | #' \item{capability_sd}{capability standard deviation} 194 | #' \item{capability_min}{capability minimum} 195 | #' \item{capability_max}{capability maximum} 196 | #' \item{low}{threat communities capability, high end} 197 | #' \item{high}{threat communities capability, high end} 198 | #' } 199 | "mc_capability_parameters_fitted" 200 | 201 | #' MetroCare Hospital Threat Parameters (fitted) 202 | #' 203 | #' A dataset of sample fitted threat parameters. 204 | #' 205 | #' @source 206 | #' This is hypothetical information. Any similarity to any other 207 | #' entity is completely coincidental. 
208 | #' @format A data frame with 8 rows and 12 variables: 209 | #' \describe{ 210 | #' \item{action_type}{action type} 211 | #' \item{category}{category} 212 | #' \item{definition}{text description of the threat community} 213 | #' \item{high}{action type of the threat community} 214 | #' \item{low}{type of the threat community} 215 | #' \item{threat_community}{text title of the threat community} 216 | #' \item{threat_func}{sampling function} 217 | #' \item{threat_id}{unique identifier} 218 | #' \item{threat_max}{threat maximum capability} 219 | #' \item{threat_mean}{threat mean capability} 220 | #' \item{threat_sd}{threat capability standard deviation} 221 | #' \item{threat_min}{threat capability minimum} 222 | #' } 223 | "mc_threat_parameters_fitted" 224 | -------------------------------------------------------------------------------- /R/generate_weights.R: -------------------------------------------------------------------------------- 1 | #' Generate a weighting table for SMEs based upon their calibration answers 2 | #' 3 | #' @param questions \code{\link{tidyrisk_question_set}} object. 4 | #' @param responses \code{\link{tidyrisk_response_set}} object 5 | #' @importFrom dplyr mutate_at left_join group_by mutate summarize n case_when arrange vars 6 | #' @importFrom stringr str_extract_all 7 | #' @importFrom purrr map 8 | #' @return A dataframe of SMEs and their numerical weighting. 
#' @export
#'
#' @examples
#' NULL
generate_weights <- function(questions, responses) {

  enforce_tidyrisk_question_set(questions)
  enforce_tidyrisk_response_set(responses)

  # convert string formatted calibration answers to numbers: pull all digit
  # runs (and decimal points) out of each answer, glue them back together,
  # and coerce to numeric
  responses$calibration %>%
    dplyr::mutate_at(dplyr::vars(.data$low, .data$high),
                     ~ stringr::str_extract_all(., "[\\d.]+") %>%
                       purrr::map(~ paste(.x, collapse = "")) %>%
                       as.numeric()) -> dat

  # calculate how many each SME got correct and bucket into a weight tier
  dplyr::left_join(dat, questions$calibration, by = "calibration_id") %>%
    # an answer is correct when the true value falls inside the SME's range;
    # the ifelse(cond, TRUE, FALSE) wrapper was redundant
    dplyr::mutate(correct = .data$low <= .data$answer &
                    .data$answer <= .data$high) %>%
    dplyr::group_by(.data$sme) %>%
    dplyr::summarise(pct_correct = sum(.data$correct) / n()) %>%
    dplyr::mutate(weight = dplyr::case_when(
      .data$pct_correct >= .9 ~ 4L, # perfectly calibrated, weight 4
      .data$pct_correct >= .6 ~ 3L, # imperfectly calibrated, weight 3
      .data$pct_correct >= .3 ~ 2L, # imperfectly calibrated, weight 2
      TRUE ~ 1L                     # not well calibrated, weight 1
    ),
    pct_correct = NULL) %>%
    dplyr::arrange(.data$sme) -> weights

  weights
}

# ==== R/make_handouts.R ====

#' Create a set of interview handouts for a SME
#'
#' Creates two MS Word documents. One is an `answers` document that contains
#' the answers to the calibration questions, the other (with the name of the SME)
#' does not contain answers and is intended to be a visual reference (and possible
#' take away) for the SME.
#'
#' @param sme Name of the SME.
#' @param questions \code{\link{tidyrisk_question_set}} object
#' @param output_dir Directory to place output.
#' @param calibration_questions Number of calibration questions to ask.
#'
#' @return NULL
#' @export
#' @importFrom dplyr sample_n select mutate
#' @importFrom tibble tibble add_column
#' @importFrom officer read_docx body_remove body_add_par body_add_toc body_add_break fp_text
#' @importFrom flextable regulartable align autofit width style merge_h add_header body_add_flextable set_header_labels
#' @importFrom purrr walk
#'
#' @examples
#' \dontrun{
#' questions <- read_questions()
#' make_handouts("Sally Expert", questions, output_dir = tempdir())
#' }
make_handouts <- function(sme, questions, output_dir, calibration_questions = 10) {

  enforce_tidyrisk_question_set(questions)

  # get a sample set of calibration questions for this SME; the same sample
  # is reused below so the SME handout and the answer key stay in sync
  cal_ques <- questions$calibration %>% dplyr::sample_n(calibration_questions)

  # order domains per this SME's expertise profile
  domain_list <- get_smes_domains(sme, questions)

  # create sme doc from the bundled Word template
  doc <- officer::read_docx(system.file(package = "collector", "templates",
                                        "template.docx"))
  doc <- officer::body_remove(doc)

  ## Create title page
  doc <- officer::body_add_par(doc, paste0("Risk Assessment - ", sme), style = "Title")
  doc <- officer::body_add_par(x = doc, value = "Table of Contents", style = "heading 1") %>%
    officer::body_add_toc(level = 1)
  doc <- doc %>% officer::body_add_break()

  # create calibration page (questions only; Low/High columns left blank
  # for the SME to fill in)
  doc <- officer::body_add_par(x = doc, value = "Calibration Questions",
                               style = "heading 1")
  tbl <- cal_ques %>% dplyr::select("Question" = .data$question) %>%
    dplyr::mutate("Low" = NA_character_, "High" = NA_character_) %>%
    flextable::regulartable()
  tbl <- flextable::align(tbl, align = "left", part = "body")
  tbl <- flextable::align(tbl, align = "center", part = "header")
  #tbl <- theme_vanilla(tbl)
  tbl <- flextable::autofit(tbl)
  tbl <- flextable::width(tbl, j = "Question", width = 4)
  tbl <- flextable::width(tbl, j = c("Low", "High"), width = 1)
  # NOTE(review): "Calibiri" looks like a typo for "Calibri" (repeated
  # throughout this function) -- Word falls back silently; confirm against
  # the fonts defined in template.docx before changing
  tbl <- tbl %>% flextable::style(pr_t = officer::fp_text(font.family = "Calibiri"), part = "all")

  doc <- flextable::body_add_flextable(x = doc, align = "left", tbl)

  # walk the domains, adding a heading, scenario table, and capability table
  # per domain. NOTE(review): `doc` is assigned inside the closure; this
  # presumably relies on officer's rdocx object being modified by reference --
  # confirm before restructuring this loop.
  domain_list %>% purrr::walk(function(d) {
    doc <- doc %>% officer::body_add_break()

    # add the domain heading
    doc <- officer::body_add_par(x = doc,
                                 value = paste("Domain", d, sep = " - "),
                                 style = "heading 1")
    if (nrow(questions$domains[questions$domains$domain == d, "description"]) > 0) {
      doc <- officer::body_add_par(x = doc, value = questions$domains[
        questions$domains$domain == d, "description"])
    }

    # get the domain id
    dom_id <- questions$domains[questions$domains$domain == d, ]$domain_id

    # add the scenarios: blank frequency/impact low-high columns with a
    # three-row merged header (label / units / Low-High)
    doc <- officer::body_add_par(x = doc,
                                 value = paste("Scenarios", d, sep = " - "),
                                 style = "heading 2")
    questions$scenarios[questions$scenarios$domain_id == dom_id, ] %>%
      dplyr::select("ID" = .data$scenario_id, .data$scenario) %>%
      dplyr::mutate("Frequency Low" = NA_character_,
                    "Frequency High" = NA_character_,
                    "Impact Low" = NA_character_,
                    "Impact High" = NA_character_) %>%
      flextable::regulartable() -> tbl
    tbl <- flextable::set_header_labels(tbl, ID = "ID",
                                        scenario = "Scenario",
                                        `Frequency Low` = "Frequency",
                                        `Frequency High` = "Frequency",
                                        `Impact Low` = "Impact",
                                        `Impact High` = "Impact")
    tbl <- flextable::add_header(tbl,
                                 `Frequency Low` = "Events per Year",
                                 `Frequency High` = "Events per Year",
                                 `Impact Low` = "Dollar Cost per Event",
                                 `Impact High` = "Dollar Cost per Event",
                                 top = FALSE)
    tbl <- flextable::add_header(tbl,
                                 `Frequency Low` = "Low",
                                 `Frequency High` = "High",
                                 `Impact Low` = "Low",
                                 `Impact High` = "High",
                                 top = FALSE ) %>%
      flextable::merge_h(part = "header")
    tbl <- flextable::align(tbl, align = "left", part = "body")
    tbl <- flextable::align(tbl, align = "center", part = "header")
    #tbl <- theme_vanilla(tbl)
    tbl <- flextable::autofit(tbl)
    tbl <- flextable::width(tbl, width = 2/3) %>%
      flextable::width(j = "scenario", width = 3) %>%
      flextable::style(pr_t = officer::fp_text(font.family = "Calibiri"), part = "all")
    doc <- flextable::body_add_flextable(x = doc, align = "left", tbl)
    doc <- doc %>% officer::body_add_break()

    # add capabilities: blank low/high "% Better than World" columns
    doc <- officer::body_add_par(x = doc, value = paste("Capabilities", d,
                                                        sep = " - "),
                                 style = "heading 2")
    questions$capabilities[questions$capabilities$domain_id == dom_id, ] %>%
      dplyr::select("ID" = .data$capability_id, .data$capability) %>%
      tibble::add_column(cap_low = NA_character_, cap_high = NA_character_) %>%
      flextable::regulartable() -> tbl
    tbl <- flextable::set_header_labels(tbl, ID = "ID",
                                        capability = "Capability",
                                        cap_low = "Capability Range",
                                        cap_high = "Capability Range")
    tbl <- flextable::add_header(tbl, `cap_low` = "% Better than World",
                                 `cap_high` = "% Better than World",
                                 top = FALSE)
    tbl <- flextable::add_header(tbl, cap_low = "Low", cap_high = "High", top = FALSE) %>%
      flextable::merge_h(part = "header")
    tbl <- flextable::align(tbl, align = "left", part = "body")
    tbl <- flextable::align(tbl, align = "center", part = "header")
    #tbl <- theme_vanilla(tbl)
    tbl <- flextable::autofit(tbl)
    tbl <- flextable::width(tbl, j = c("cap_low", "cap_high"), width = 2/3) %>%
      flextable::width(j = "capability", width = 3) %>%
      flextable::style(pr_t = officer::fp_text(font.family = "Calibiri"),
                       part = "all")
    doc <- flextable::body_add_flextable(x = doc, align = "left", tbl)
  })

  # save sme document as <sme_name>.docx (spaces -> underscores)
  filename <- paste0(tolower(sme) %>% stringr::str_replace_all(" ", "_"), ".docx")
  print(doc, target = file.path(output_dir, filename))

  # create answer doc: same structure as above, but the calibration table
  # includes the Answer column for the interviewer
  doc <- officer::read_docx(system.file(package = "collector", "templates", "template.docx"))
  doc <- officer::body_remove(doc)

  ## Create title page
  doc <- officer::body_add_par(doc, paste0("Risk Assessment - ", sme, " (Answers)"), style = "Title")
  doc <- officer::body_add_par(x = doc, value = "Table of Contents", style = "heading 1") %>%
    officer::body_add_toc(level = 1)
  doc <- doc %>% officer::body_add_break()

  # create calibration page (with answers)
  doc <- officer::body_add_par(x = doc, value = "Calibration Questions",
                               style = "heading 1")
  tbl <- cal_ques %>%
    dplyr::select("Question" = .data$question, "Answer" = .data$answer) %>%
    flextable::regulartable()
  tbl <- flextable::align(tbl, align = "left", part = "body")
  tbl <- flextable::align(tbl, align = "center", part = "header")
  #tbl <- theme_vanilla(tbl)
  tbl <- flextable::autofit(tbl)
  tbl <- flextable::width(tbl, j = "Question", width = 4)
  tbl <- flextable::width(tbl, j = "Answer", width = 2)
  tbl <- tbl %>% flextable::style(pr_t = officer::fp_text(font.family = "Calibiri"), part = "all")

  doc <- flextable::body_add_flextable(x = doc, align = "left", tbl)

  # walk the domains (duplicates the SME-document loop above)
  domain_list %>% purrr::walk(function(d) {
    doc <- doc %>% officer::body_add_break()

    # get the domain id
    dom_id <- questions$domains[questions$domains$domain == d, ]$domain_id

    # add the domain heading
    doc <- officer::body_add_par(x = doc,
                                 value = paste("Domain", d, sep = " - "),
                                 style = "heading 1")
    if (nrow(questions$domains[questions$domains$domain == d, "description"]) > 0) {
      doc <- officer::body_add_par(x = doc, value = questions$domains[questions$domains$domain == d,
                                                                      "description"])
    }

    # add the scenarios
    doc <- officer::body_add_par(x = doc,
                                 value = paste("Scenarios", d, sep = " - "),
                                 style = "heading 2")
    questions$scenarios[questions$scenarios$domain_id == dom_id, ] %>%
      dplyr::select("ID" = .data$scenario_id, .data$scenario) %>%
      dplyr::mutate("Frequency Low" = NA_character_,
                    "Frequency High" = NA_character_,
                    "Impact Low" = NA_character_,
                    "Impact High" = NA_character_) %>%
      flextable::regulartable() -> tbl
    tbl <- flextable::set_header_labels(tbl, ID = "ID",
                                        scenario = "Scenario",
                                        `Frequency Low` = "Frequency",
                                        `Frequency High` = "Frequency",
                                        `Impact Low` = "Impact",
                                        `Impact High` = "Impact")
    tbl <- flextable::add_header(tbl,
                                 `Frequency Low` = "Events per Year",
                                 `Frequency High` = "Events per Year",
                                 `Impact Low` = "Dollar Cost per Event",
                                 `Impact High` = "Dollar Cost per Event",
                                 top = FALSE)
    tbl <- flextable::add_header(tbl,
                                 `Frequency Low` = "Low",
                                 `Frequency High` = "High",
                                 `Impact Low` = "Low",
                                 `Impact High` = "High",
                                 top = FALSE ) %>%
      flextable::merge_h(part = "header")
    tbl <- flextable::align(tbl, align = "left", part = "body")
    tbl <- flextable::align(tbl, align = "center", part = "header")
    #tbl <- theme_vanilla(tbl)
    tbl <- flextable::autofit(tbl)
    tbl <- flextable::width(tbl, width = 2/3) %>%
      flextable::width(j = "scenario", width = 3) %>%
      flextable::style(pr_t = officer::fp_text(font.family = "Calibiri"), part = "all")
    doc <- flextable::body_add_flextable(x = doc, align = "left", tbl)
    doc <- doc %>% officer::body_add_break()

    # add capabilities
    doc <- officer::body_add_par(x = doc, value = paste("Capabilities", d, sep = " - "),
                                 style = "heading 2")
    questions$capabilities[questions$capabilities$domain_id == dom_id, ] %>%
      dplyr::select("ID" = .data$capability_id, .data$capability) %>%
      tibble::add_column(cap_low = NA_character_, cap_high = NA_character_) %>%
      flextable::regulartable() -> tbl
    tbl <- flextable::set_header_labels(tbl, ID = "ID",
                                        capability = "Capability",
                                        cap_low = "Capability Range",
                                        cap_high = "Capability Range")
    tbl <- flextable::add_header(tbl, `cap_low` = "% Better than World",
                                 `cap_high` = "% Better than World", top = FALSE)
    tbl <- flextable::add_header(tbl, cap_low = "Low", cap_high = "High", top = FALSE) %>%
      flextable::merge_h(part = "header")
    tbl <- flextable::align(tbl, align = "left", part = "body")
    tbl <- flextable::align(tbl, align = "center", part = "header")
    #tbl <- theme_vanilla(tbl)
    tbl <- flextable::autofit(tbl)
    tbl <- flextable::width(tbl, j = c("cap_low", "cap_high"), width = 2/3) %>%
      flextable::width(j = "capability", width = 3) %>%
      flextable::style(pr_t = officer::fp_text(font.family = "Calibiri"), part = "all")
    doc <- flextable::body_add_flextable(x = doc, align = "left", tbl)
  })

  # save answer document as <sme_name>_answers.docx
  filename <- paste0(tolower(sme) %>% stringr::str_replace_all(" ", "_"), "_answers", ".docx")
  print(doc, target = file.path(output_dir, filename))
}

# ==== R/make_scorecard.R ====

#' Create a scorecard for marking progress through domains in an interview
#'
#' Creates a two page PDF with one grid for scenarios and one for capabilities.
#' Each grid contains a square for each domain. An analyst can mark/stamp
#' each domain as it is covered in an interview, gamifying progress.
#'
#' The domains are ordered according to the SME's expertise profile, ensuring
#' they match the interview order flow.
#'
#' @param sme Name of SME.
#' @param questions \code{\link{tidyrisk_question_set}} object.
#' @param output_dir Directory to place scorecards.
#'
#' @return Invisibly returns the full path to the saved scorecard.
#' @export
#' @importFrom dplyr mutate row_number if_else
#' @importFrom rlang .data
#' @importFrom tibble tibble
#' @importFrom purrr quietly
#' @importFrom stringr str_wrap str_replace_all
#' @import ggplot2
#' @importFrom patchwork wrap_plots
#'
#' @examples
#' \dontrun{
#' questions <- read_questions()
#' make_scorecard("Sally Expert", questions, output_dir = tempdir())
#' }
make_scorecard <- function(sme, questions, output_dir) {

  enforce_tidyrisk_question_set(questions)

  # get ordered domains for this SME
  values <- get_smes_domains(sme, questions)

  # calculate grid dimensions: fill complete rows of n_col, then one
  # partial row for the remainder
  n_col <- 4
  rows <- rep(1:(floor(length(values) / n_col)), each = n_col)
  rows <- c(rows, rep(max(rows) + 1, length(values) - length(rows)))

  # make dataframe; every 5th tile is highlighted as a visual pacing cue
  dat <- tibble::tibble(id = stringr::str_wrap(values, width = 15),
                        row = rows,
                        column = rep_len(1:n_col, length.out = length(values))) %>%
    dplyr::mutate(highlight = dplyr::if_else((row_number() - 1) %% 5 == 0, "Y", "N"))

  # helper: build one scorecard grid; the two pages differ only by title.
  # Bug fix: the scenarios plot previously used aes(fill = "highlight"),
  # mapping the literal string rather than the highlight column (the
  # capabilities plot correctly used aes_string); both now map the column.
  build_grid <- function(plot_title) {
    ggplot(dat, aes_string(x = "column", y = "row", label = "id")) +
      geom_tile(aes_string(fill = "highlight"), alpha = 0.5, color = "black") +
      scale_fill_manual(values = c("N" = "white", "Y" = "lightslategray"),
                        guide = "none") +
      coord_equal() + geom_text() + scale_y_reverse() +
      theme_void() +
      theme(axis.text = element_blank(), panel.grid = element_blank()) +
      labs(x = NULL, y = NULL, title = plot_title,
           subtitle = "Target takt time: 1 minute per response",
           caption = paste0("SME: ", sme))
  }

  gg <- build_grid("Scenarios")
  gg_cap <- build_grid("Capabilities")

  # combine into a single two-page PDF named <sme_name>_scorecard.pdf
  combo <- patchwork::wrap_plots(gg, gg_cap, ncol = 1)
  filename <- tolower(sme) %>% stringr::str_replace_all(" ", "_") %>%
    paste0(., "_scorecard.pdf")
  result <- ggplot2::ggsave(combo, filename = file.path(output_dir, filename))
  invisible(result)
}

#' @export
#' @rdname make_scorecard
make_bingo <- function(sme, questions, output_dir = getwd()) {
  .Deprecated("make_scorecard")
  # bug fix: previously hard-coded output_dir = getwd(), silently ignoring
  # the caller-supplied output_dir argument
  make_scorecard(sme, questions, output_dir = output_dir)
}

# ==== R/make_slides.R ====

#' Create interview slides
#'
#' Creates an in-browser slideshow as a visual aide when conducting an
#' interview with a subject matter expert (SME). The slideshow is customized
#' for the SME by placing the domains in the order of preference for that
#' SME.
#'
#' @param sme Name of the SME being interviewed.
#' @param questions A \code{\link{tidyrisk_question_set}} object.
#' @param output_dir Directory location for knitted slides.
#' @param assessment_title Title of the assessment being performed.
#'
#' @return Invisibly returns the full path to the slide file.
#' @export
#' @import xaringan
#' @importFrom rmarkdown render
#' @importFrom stringr str_replace_all str_glue
#'
#' @examples
#' \dontrun{
#' make_slides("Sally Expert", questions, output_dir = tempdir())
#' }
make_slides <- function(sme, questions, output_dir,
                        assessment_title = "Strategic Risk Assessment") {

  enforce_tidyrisk_question_set(questions)

  # make sure there is somewhere to knit into
  if (!dir.exists(output_dir)) dir.create(output_dir)

  # persist the question set so the Rmd can load it at knit time
  questions_rds <- file.path(output_dir, "questions.rds")
  saveRDS(questions, questions_rds)

  # stage the css/libs/img support directories alongside the slides
  for (subdir in c("css", "libs", "img")) {
    staging <- file.path(output_dir, subdir)
    if (!dir.exists(staging)) dir.create(staging)
  }
  file.copy(system.file(package = "collector", "css", "styles.css"),
            file.path(output_dir, "css", "styles.css"))
  file.copy(system.file(package = "collector", "img"),
            file.path(output_dir),
            recursive = TRUE)

  # copy the RMD to our output directory - Yuck!
  rmd_path <- file.path(output_dir, "interview.Rmd")
  file.copy(system.file(package = "collector", "interview.Rmd"),
            rmd_path, overwrite = TRUE)

  # NOTE(review): double backslash makes this the 9-character literal
  # "\U0002696", not the scales emoji itself -- confirm the Rmd/remark.js
  # side expects the escaped form before changing
  logo_emoji <- "\\U0002696" # scales emoji

  slide_file <- paste0(tolower(sme) %>% stringr::str_replace_all(" ", "_"), ".html")
  rendered_path <- rmarkdown::render(
    rmd_path,
    output_file = slide_file,
    knit_root_dir = output_dir,
    quiet = TRUE,
    params = list("sme" = sme,
                  "assessment_title" = stringr::str_glue("{assessment_title} {logo_emoji}"),
                  "domain_list" = get_smes_domains(sme, questions),
                  "questions_file" = questions_rds))

  # remove the temporary rds and Rmd files
  file.remove(questions_rds)
  file.remove(rmd_path)

  invisible(rendered_path)
}

# ==== R/prepare_data.R ====

#' Create one or more quantitative scenarios objects suitable for simulation by 'evaluator'
#'
#' Given parameters for the scenarios, threat communities, capabilities, and
#' the question set, generate a list of \code{\link{tidyrisk_scenario}} objects that may be
#' fed into \code{evaluator::\link[evaluator]{run_simulation}} for Monte Carlo simulation.
#'
#' @param scenario_parameters Scenarios with final parameters defined.
#' @param capability_parameters Capabilities with final parameters defined.
#' @param threat_parameters Threat communities with final parameters defined.
#' @param questions A \code{\link{tidyrisk_question_set}} object.
#'
#' @importFrom dplyr rename left_join mutate select starts_with pull
#' @importFrom tidyr drop_na
#' @importFrom purrr map pmap
#' @importFrom rlang .data
#' @importFrom evaluator tidyrisk_scenario
#' @return A list of one or more \code{\link{tidyrisk_scenario}} objects.
#' @export
#'
#' @examples
#' suppressPackageStartupMessages(library(dplyr))
#' data(mc_domains, mc_capabilities, mc_scenarios, mc_sme_top_domains,
#'      calibration_questions, mc_threat_communities)
#' question_set <- tidyrisk_question_set(mc_domains, mc_scenarios, mc_capabilities,
#'                                       calibration_questions, mc_sme_top_domains,
#'                                       mc_threat_communities)
#' response_set <- tidyrisk_response_set(mc_calibration_answers,
#'                                       mc_scenario_answers, mc_capability_answers)
#' sme_weightings <- generate_weights(question_set, response_set)
#' data(mc_scenario_parameters_fitted, mc_capability_parameters_fitted,
#'      mc_threat_parameters_fitted)
#' scenario_parameters <- left_join(mc_scenario_parameters_fitted, sme_weightings, by = "sme") %>%
#'   combine_scenario_parameters()
#' capability_parameters <- left_join(mc_capability_parameters_fitted, sme_weightings, by = "sme") %>%
#'   combine_capability_parameters()
#' quantitative_scenarios <- prepare_data(scenario_parameters,
#'                                        capability_parameters,
#'                                        mc_threat_parameters_fitted,
#'                                        question_set)
prepare_data <- function(scenario_parameters, capability_parameters,
                         threat_parameters, questions) {

  enforce_tidyrisk_question_set(questions)

  # combine capabilities + scenarios into a single dataframe
  scenario_parameters %>%
    # bring in the scenario descriptions
    dplyr::left_join(questions$scenarios, by = "scenario_id") %>%
    # bring in the domains
    dplyr::left_join(questions$domains, by = "domain_id") %>%
    # add TC info
    dplyr::left_join(threat_parameters, by = "threat_id") %>%
    # massage the dataframe to look like the standard evaluator inputs
    dplyr::select(.data$scenario_id, scenario = .data$scenario,
                  dplyr::starts_with("threat_"), .data$domain_id,
                  controls = .data$controls,
                  # TEF parameters
                  tef_func = .data$frequency_func,
                  tef_meanlog = .data$frequency_meanlog,
                  tef_sdlog = .data$frequency_sdlog,
                  # LM parameters
                  lm_func = .data$impact_func, lm_meanlog = .data$impact_meanlog,
                  lm_sdlog = .data$impact_sdlog, lm_min = .data$impact_min,
                  lm_max = .data$impact_max) %>%
    # the only NAs should be for retired scenarios
    # NOTE(review): drop_na() removes a row if ANY selected column is NA --
    # a scenario with one missing fitted parameter is silently dropped here
    tidyr::drop_na() ->
    scenarios_final

  # derive the difficulty (control) parameters for each scenario's
  # comma-delimited control list
  scenarios_final$diff_params <- purrr::map(
    scenarios_final$controls,
    ~derive_controls(capability_ids = .x,
                     capability_parameters = capability_parameters))

  # create our list columns for tef/tc/lm, then wrap each row into an
  # evaluator::tidyrisk_scenario using the openfair_tef_tc_diff_lm model
  scenarios_final %>%
    dplyr::mutate(
      tef_params = purrr::pmap(with(scenarios_final, list(tef_func, tef_meanlog, tef_sdlog)),
                               ~ list(func = ..1, meanlog = ..2, sdlog = ..3)),
      tc_params = purrr::pmap(with(scenarios_final, list(threat_func, threat_mean, threat_sd, threat_min, threat_max)),
                              ~ list(func = ..1, mean = ..2, sd = ..3, min = ..4, max = ..5)),
      lm_params = purrr::pmap(with(scenarios_final, list(lm_func, lm_meanlog, lm_sdlog, lm_min, lm_max)),
                              ~ list(func = ..1, meanlog = ..2, sdlog = ..3, min = ..4, max = ..5))) %>%
    # pmap is unqualified here; it is brought in via @importFrom purrr map pmap
    dplyr::mutate(scenarios = pmap(list(tef_params = .data$tef_params,
                                        tc_params = .data$tc_params,
                                        lm_params = .data$lm_params,
                                        diff_params = .data$diff_params,
                                        model = "openfair_tef_tc_diff_lm"),
                                   evaluator::tidyrisk_scenario)) %>%
    dplyr::pull(.data$scenarios) -> scenarios_final

  scenarios_final
}

#' Generate the quantified capability parameters for a scenario
#'
#' Based on the \code{evaluator::\link[evaluator]{derive_controls}} function
#'
#' Creates the difficulty parameters (embedded list) for quantitative
#' parameters.
#' @param capability_ids Comma-delimited list of capability ids
#' @param capability_parameters Dataframe of fitted and combined capability parameters
#' @seealso \code{evaluator::\link[evaluator]{derive_controls}}
#' @importFrom stringr str_split_fixed
#' @importFrom dplyr select pull
#' @importFrom purrr pmap
#' @importFrom rlang .data set_names
#'
#' @return A list.
#'
#' @examples
#' NULL
derive_controls <- function(capability_ids, capability_parameters) {
  # split "CAP-01, CAP-02, ..." into a character vector of ids
  control_list <- stringr::str_split_fixed(capability_ids, ", ", Inf) %>% unlist()

  # build one parameter list (func/mean/sd/min/max) per matching capability.
  # NOTE(review): set_names assumes the filtered rows come back in the same
  # order and count as control_list; an unknown id or reordered rows would
  # mislabel the parameters -- confirm inputs are always complete and ordered
  capability_parameters[capability_parameters$capability_id %in% control_list, ] %>%
    dplyr::mutate(diff_params = purrr::pmap(
      list(.data$capability_func, .data$capability_mean, .data$capability_sd,
           .data$capability_min, .data$capability_max),
      ~ list(func = ..1, mean = ..2, sd = ..3, min = ..4, max = ..5))) %>%
    dplyr::pull(.data$diff_params) %>%
    rlang::set_names(nm = control_list)
}

# ==== R/tidyrisk_question_set.R ====

#' Construct a tidyrisk_question_set object
#'
#' \code{new_tidyrisk_question_set} is a low-level constructor that takes a list of dataframes.
#' \code{tidyrisk_question_set} constructs a tidyrisk_question_set object from dataframes.
#' \code{as.tidyrisk_question_set} is a S3 generic that converts existing objects.
#' \code{validate_tidyrisk_question_set} verifies that the data elements are internally consistent.
#'
#' @param ... Individual dataframes
#' @param domains Domains
#' @param calibration Calibration questions
#' @param scenarios Scenario questions
#' @param capabilities Capability questions
#' @param expertise SME expertise
#' @param threat_communities Threat communities
#' @param x object to coerce
#'
#' @export
#' @examples
#' NULL
tidyrisk_question_set <- function(domains, scenarios, capabilities, calibration,
                                  expertise, threat_communities) {
  # construct, then check internal consistency before returning
  x <- new_tidyrisk_question_set(list(domains = domains, scenarios = scenarios,
                                      capabilities = capabilities,
                                      calibration = calibration,
                                      expertise = expertise,
                                      threat_communities = threat_communities))
  validate_tidyrisk_question_set(x)
}

#' @export
#' @rdname tidyrisk_question_set
new_tidyrisk_question_set <- function(x) {
  # low-level constructor: only checks the shape of the list, not the data
  if (!is.list(x)) stop("`x` must be a list", call. = FALSE)
  mandatory_elements <- c("domains", "scenarios", "capabilities", "calibration",
                          "expertise", "threat_communities")
  if (length(setdiff(mandatory_elements, names(x)))) {
    stop(paste0("Missing elements: ",
                paste0(setdiff(mandatory_elements, names(x)),
                       collapse = ", ")), call. = FALSE)
  }
  structure(x, class = "tidyrisk_question_set")
}

#' @export
#' @rdname tidyrisk_question_set
as.tidyrisk_question_set <- function(x, ...) {
  UseMethod("as.tidyrisk_question_set")
}

#' @export
as.data.frame.tidyrisk_question_set <- function(x, ...) {
  # data frame view of a question set is its scenarios table
  x$scenarios
}

#' Test if the object is a tidyrisk_question_set
#'
#' This function returns TRUE for tidyrisk_question_set or sub-classes
#' thereof, and FALSE for all other objects.
59 | #' 60 | #' @param x An object 61 | #' @export 62 | #' @examples 63 | #' \dontrun{ 64 | #' is_tidyrisk_question_set(x) 65 | #' } 66 | is_tidyrisk_question_set <- function(x) { 67 | inherits(x, "tidyrisk_question_set") 68 | } 69 | 70 | #' @export 71 | #' @rdname tidyrisk_question_set 72 | validate_tidyrisk_question_set <- function(x) { 73 | 74 | enforce_tidyrisk_question_set(x) 75 | 76 | # check that there is agreement between domains/scenarios/capabilities 77 | domain_list <- unique(x$domains$domain_id) 78 | scenario_list <- unique(x$scenarios$domain_id) 79 | capability_list <- unique(x$capabilities$domain_id) 80 | 81 | if (!setequal(domain_list, scenario_list)) { 82 | stop("Scenarios and domains disagree.", call. = FALSE) 83 | } 84 | if (!setequal(domain_list, capability_list)) { 85 | stop("capability and domains disagree.", call. = FALSE) 86 | } 87 | 88 | # look for agreement in threat communities 89 | threat_list <- unique(x$threat_communities$threat_id) 90 | scenario_list <- unique(x$scenarios$threat_id) 91 | if (!setequal(threat_list, scenario_list)) { 92 | stop("threats and scenarios disagree.", call. = FALSE) 93 | } 94 | 95 | x 96 | 97 | } 98 | 99 | -------------------------------------------------------------------------------- /R/tidyrisk_response_set.R: -------------------------------------------------------------------------------- 1 | #' Construct a tidyrisk_response_set object 2 | #' 3 | #' \code{new.tidyrisk_response_set} is a low-level constructor that takes a list of dataframes. 4 | #' \code{tidyrisk_response_set} constructs a tidyrisk_response_set from dataframes. 5 | #' \code{as.tidyrisk_response_set} is a S3 generic that converts existing objects. 6 | #' 7 | #' @param ... 
#' @param calibration_answers Calibration tidyrisk_response_set
#' @param scenario_answers Scenarios tidyrisk_response_set
#' @param capability_answers Capability tidyrisk_response_set
#' @param x object to coerce
#'
#' @export
#' @examples
#' NULL
tidyrisk_response_set <- function(calibration_answers, scenario_answers,
                                  capability_answers) {
  new_tidyrisk_response_set(calibration_answers, scenario_answers,
                            capability_answers)
}

#' @export
#' @rdname tidyrisk_response_set
new_tidyrisk_response_set <- function(calibration_answers, scenario_answers,
                                      capability_answers) {
  # Validate inputs up front; use call. = FALSE to match the error style of
  # the tidyrisk_question_set constructors.
  if (!is.data.frame(calibration_answers)) {
    stop("calibration_answers must be a dataframe", call. = FALSE)
  }
  if (!is.data.frame(scenario_answers)) {
    stop("scenario_answers must be a dataframe", call. = FALSE)
  }
  if (!is.data.frame(capability_answers)) {
    stop("capability_answers must be a dataframe", call. = FALSE)
  }
  structure(list(calibration = calibration_answers,
                 scenarios = scenario_answers,
                 capabilities = capability_answers),
            class = "tidyrisk_response_set")
}

#' @export
#' @rdname tidyrisk_response_set
as.tidyrisk_response_set <- function(x, ...) {
  UseMethod("as.tidyrisk_response_set")
}

#' Test if the object is a tidyrisk_response_set
#'
#' This function returns TRUE for tidyrisk_response_set or sub-classes
#' thereof, and FALSE for all other objects.
#'
#' @param x An object
#' @export
#' @examples
#' \dontrun{
#' is_tidyrisk_response_set(x)
#' }
is_tidyrisk_response_set <- function(x) {
  inherits(x, "tidyrisk_response_set")
}

#' Pipe operator
#'
#' See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details.
4 | #' 5 | #' @name %>% 6 | #' @rdname pipe 7 | #' @keywords internal 8 | #' @export 9 | #' @importFrom magrittr %>% 10 | #' @usage lhs \%>\% rhs 11 | #' @param lhs A value or the magrittr placeholder. 12 | #' @param rhs A function call using the magrittr semantics. 13 | #' @return The result of calling `rhs(lhs)`. 14 | NULL 15 | -------------------------------------------------------------------------------- /R/utils.R: -------------------------------------------------------------------------------- 1 | #' Read scenario questions 2 | #' 3 | #' Reads in all the questions for which subject matter expert input is 4 | #' needed. Includes the domains, capabilities, scenarios, calibration 5 | #' questions, and threat communities. 6 | #' 7 | #' Expects the following files to be present: 8 | #' 9 | #' * `domains.csv` - Domains 10 | #' - domain_id, domain 11 | #' * `capabilities.csv` - Capabilities 12 | #' - domain_id, capability_id, capability 13 | #' * `scenarios.csv` - Scenarios 14 | #' - scenario_id, scenario, threat_id, domain_id, controls 15 | #' * `sme_top_domains.csv` - SME expertise 16 | #' - sme, domain1, domain2, domain3, domain4, domain5, domain6, domain7 17 | #' * `calibration_questions.csv` - Calibration questions 18 | #' * `threat_communities.csv` - Threat communities 19 | #' - threat_community, threat_id, definition, low, high 20 | #' 21 | #' @export 22 | #' @param source_dir Directory location to find input files. 23 | #' @param active_only Read in only the active elements, defaults to TRUE. 
#' @importFrom readr read_csv col_character col_logical cols col_number
#' @importFrom tidyr gather drop_na
#' @importFrom dplyr filter arrange
#' @importFrom rlang .data
#' @return A \code{\link{tidyrisk_question_set}} object
#'
#' @examples
#' \dontrun{
#' read_questions()
#' }
read_questions <- function(source_dir, active_only = TRUE) {

  # Drop rows explicitly marked inactive when `active_only` is requested and
  # the file supplies an `active` column. Rows with NA in `active` are
  # treated as active. Shared by the domains/capabilities/scenarios reads.
  filter_active <- function(dat) {
    if (active_only && "active" %in% names(dat)) {
      dplyr::filter(dat, .data$active != FALSE | is.na(.data$active))
    } else {
      dat
    }
  }

  # domains
  domains <- readr::read_csv(file.path(source_dir, "domains.csv"),
                             col_types = readr::cols(
                               domain = readr::col_character(),
                               domain_id = readr::col_character())) %>%
    dplyr::arrange(.data$domain) %>%
    filter_active()

  # capabilities
  caps <- readr::read_csv(file.path(source_dir, "capabilities.csv"),
                          col_types = readr::cols(
                            capability = readr::col_character(),
                            capability_id = readr::col_character(),
                            domain_id = readr::col_character())) %>%
    dplyr::arrange(.data$domain_id, .data$capability_id) %>%
    filter_active()

  # scenarios
  # NB: the original spec listed `scenario_id` twice; per the documented
  # schema (scenario_id, scenario, threat_id, domain_id, controls) the
  # second entry should type the `scenario` text column.
  scenarios <- readr::read_csv(file.path(source_dir, "scenarios.csv"),
                               col_types = readr::cols(
                                 scenario_id = readr::col_character(),
                                 scenario = readr::col_character(),
                                 threat_id = readr::col_character(),
                                 domain_id = readr::col_character(),
                                 controls = readr::col_character())) %>%
    dplyr::arrange(.data$domain_id, .data$scenario_id) %>%
    filter_active()

  # expertise - reshape to one row per SME/domain preference (long format)
  expertise <- readr::read_csv(file.path(source_dir, "sme_top_domains.csv"),
                               col_types = readr::cols(
                                 sme = readr::col_character(),
                                 .default = readr::col_character()),
                               comment = "#") %>%
    tidyr::gather(key = "key", value = "value", -.data$sme) %>%
    tidyr::drop_na()

  # calibration
  calibration <- readr::read_csv(file.path(source_dir, "calibration_questions.csv"),
                                 col_types = readr::cols(.default = readr::col_character()))

  # threat_communities
  threat_communities <- readr::read_csv(file.path(source_dir, "threat_communities.csv"),
                                        col_types = readr::cols(
                                          low = readr::col_number(),
                                          high = readr::col_number(),
                                          .default = readr::col_character()))

  tidyrisk_question_set(domains = domains, capabilities = caps, scenarios = scenarios,
                        expertise = expertise, calibration = calibration,
                        threat_communities = threat_communities)
}

#' Read all SMEs responses
#'
#' Reads in all the responses recorded to the calibration, scenarios, and
#' capability questions.
#'
#' Expects the following files to be present:
#'
#' * `calibration_answers.csv` - Calibration
#' * `scenario_answers.csv` - Scenarios
#' * `capability_answers.csv` - Capabilities
#'
#'
#' @param source_dir Directory location where input files are found.
#' @importFrom readr read_csv col_character col_date col_number col_integer cols
#' @importFrom dplyr mutate_at
#' @importFrom tidyr drop_na
#' @importFrom purrr map
#' @importFrom stringr str_extract_all
#' @return A tidyrisk_response_set object
#' @export
#'
#' @examples
#' \dontrun{
#' read_responses()
#' }
read_responses <- function(source_dir = getwd()) {
  # Range answers may carry human formatting ("$8,000", "12%"), so the
  # low/high bounds are read as character, then all digit runs are extracted,
  # concatenated, and converted to numeric.
  cal_ans <- readr::read_csv(file.path(source_dir, "calibration_answers.csv"),
                             col_types = readr::cols(.default = readr::col_character(),
                                                     sme = readr::col_character(),
                                                     calibration_id = readr::col_character(),
                                                     low = readr::col_character(),
                                                     high = readr::col_character(),
                                                     date = readr::col_date())) %>%
    dplyr::mutate_at(c("low", "high"), ~stringr::str_extract_all(., "\\d+") %>%
                       purrr::map(~ paste(.x, collapse = "")) %>%
                       as.numeric())

  sce_ans <- readr::read_csv(file.path(source_dir, "scenario_answers.csv"),
                             col_types = readr::cols(
                               sme = readr::col_character(),
                               scenario_id = readr::col_character(),
                               freq_low = readr::col_number(),
                               freq_high = readr::col_number(),
                               imp_low = readr::col_character(),
                               imp_high = readr::col_character(),
                               date = readr::col_date())) %>%
    tidyr::drop_na() %>%
    dplyr::mutate_at(c("imp_low", "imp_high"), ~stringr::str_extract_all(., "\\d+") %>%
                       purrr::map(~ paste(.x, collapse = "")) %>%
                       as.numeric())

  # Capability bounds are proportions (e.g. ".10"), so the extraction pattern
  # keeps the decimal point.
  cap_ans <- readr::read_csv(file.path(source_dir, "capability_answers.csv"),
                             col_types = readr::cols(
                               sme = readr::col_character(),
                               capability_id = readr::col_character(),
                               low = readr::col_character(),
                               high = readr::col_character(),
                               date = readr::col_date())) %>%
    tidyr::drop_na() %>%
    dplyr::mutate_at(c("low", "high"), ~stringr::str_extract_all(., "[\\d.]+") %>%
                       purrr::map(~ paste(.x, collapse = "")) %>%
                       as.numeric())

  tidyrisk_response_set(capability_answers = cap_ans,
                        scenario_answers = sce_ans,
                        calibration_answers = cal_ans)
}

#' Calculate the prioritized list of domains for a given subject matter expert (SME)
#'
#' Given a \code{\link{tidyrisk_question_set}} object and the name of a
#' specific SME of interest, create a vector of the domains in order of
#' priority.
#'
#' @param sme Name of the subject matter expert.
#' @param questions A \code{\link{tidyrisk_question_set}} object.
#'
#' @importFrom dplyr filter arrange distinct pull
#' @importFrom tidyr drop_na
#' @importFrom rlang .data !!
#' @return An ordered vector of the domains for the requested SME.
#' @export
#'
#' @examples
#' \dontrun{
#' questions <- read_questions()
#' get_smes_domains("Sally Expert", questions)
#' }
get_smes_domains <- function(sme, questions) {

  enforce_tidyrisk_question_set(questions)

  # `!!sme` forces the function argument, disambiguating it from the `sme`
  # column inside the data mask.
  doms <- dplyr::filter(questions$expertise, sme == !!sme) %>%
    tidyr::drop_na() %>%
    dplyr::arrange(.data$key) %>%
    dplyr::distinct(.data$value) %>%
    dplyr::pull()

  # Append the remaining domains (those the SME did not rank) after the
  # SME's prioritized list.
  c(doms, questions$domains[!questions$domains$domain %in% doms,] %>%
      dplyr::pull(.data$domain))
}

#' Check the readability of scenario text
#'
#' Calculate the Flesch-Kincaid score for each scenario and return that score
#' along with the scenario ID and domain as a tidy dataframe.
#'
#' @param x A `tidyrisk_question_set` object
#'
#' @importFrom quanteda.textstats textstat_readability
#' @importFrom tibble as_tibble
#' @importFrom dplyr arrange desc select bind_cols
#' @importFrom rlang .data
#' @return A dataframe of the scenario id, domain id, and the Flesch-Kincaid readability score for the scenario text.
#' @export
#'
#' @examples
#' \dontrun{
#' questions <- read_questions()
#' check_readability(questions)
#' }
check_readability <- function(x) {
  enforce_tidyrisk_question_set(x)
  # Score every scenario's text, then return the identifiers plus the score,
  # hardest-to-read scenarios first.
  scenario_dat <- x$scenarios
  scores <- quanteda.textstats::textstat_readability(scenario_dat$scenario,
                                                    "Flesch.Kincaid")
  dplyr::bind_cols(scenario_dat, scores) %>%
    dplyr::arrange(dplyr::desc(.data$Flesch.Kincaid)) %>%
    dplyr::select(.data$scenario_id, .data$domain_id, .data$Flesch.Kincaid)
}

#' Validate that the parameter passed is a \code{\link{tidyrisk_question_set}} object
#'
#' @param x An object
#'
#' @return NULL
#'
#' @examples
#' NULL
enforce_tidyrisk_question_set <- function(x) {
  # Guard helper: abort with a clean (call-free) error unless `x` carries the
  # tidyrisk_question_set class.
  if (!is_tidyrisk_question_set(x)) {
    stop("Must pass a tidyrisk_question_set object.", call. = FALSE)
  }
  NULL
}

#' Validate that the parameter passed is a \code{\link{tidyrisk_response_set}} object
#'
#' @param x An object
#'
#' @return NULL
#'
#' @examples
#' NULL
enforce_tidyrisk_response_set <- function(x) {
  # Guard helper: abort with a clean (call-free) error unless `x` carries the
  # tidyrisk_response_set class.
  if (!is_tidyrisk_response_set(x)) {
    stop("Must pass a tidyrisk_response_set object.", call. = FALSE)
  }
  NULL
}
= FALSE) 251 | } 252 | NULL 253 | } 254 | -------------------------------------------------------------------------------- /README.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | output: github_document 3 | --- 4 | 5 | 6 | 7 | ```{r setup, include = FALSE} 8 | knitr::opts_chunk$set( 9 | collapse = TRUE, 10 | comment = "#>", 11 | fig.path = "man/figures/README-", 12 | out.width = "100%" 13 | ) 14 | ``` 15 | # collector collector Logo 16 | 17 | 18 | [![R build status](https://github.com/davidski/collector/workflows/R-CMD-check/badge.svg)](https://github.com/davidski/collector/actions) 19 | [![Coverage Status](https://codecov.io/gh/davidski/collector/branch/master/graph/badge.svg)](https://codecov.io/github/davidski/collector?branch=master) 20 | [![CRAN_Status_Badge](https://www.r-pkg.org/badges/version/collector)](https://cran.r-project.org/package=collector) 21 | ![downloads](https://cranlogs.r-pkg.org/badges/grand-total/collector) 22 | 23 | 24 | **collector** is an R package for conducting interviews with subject matter 25 | experts (SMEs) on the risk scenarios facing an organization. It offers 26 | functions for the following stages of input collection: 27 | 28 | - generate scenario and capability questions 29 | - building interview artifacts, including progress card, slide decks, and handouts 30 | - calibration testing, similar to that promoted by Doug Hubbard and the FAIR Institute 31 | - distribution fitting 32 | - opinion pooling of multiple SMEs into a single representative distribution 33 | - generating quantitative risk scenarios for simulation and reporting by [Evaluator](https://evaluator.tidyrisk.org) 34 | 35 | ## Installation 36 | 37 | Collector is now available on CRAN. 
38 | 39 | ```{r install_cran, eval=FALSE} 40 | install.packages("collector") 41 | ``` 42 | 43 | If you wish to run the development (and potentially bleeding edge) version, 44 | you can install directly from GitHub via the following `remotes` command. 45 | 46 | ```{r github_install, eval=FALSE} 47 | # install.packages("remotes") 48 | remotes::install_github("davidski/collector") 49 | ``` 50 | 51 | ## Basic Flow 52 | 53 | See the [package website](https://collector.tidyrisk.org) for 54 | reference. The basic flow for preparing for interviews with your SMEs, 55 | processing the results, and generating parameters for simulation via 56 | [evaluator](https://evaluator.tidyrisk.org) is: 57 | 58 | 1. Build questions and define SME expertise 59 | 60 | 2. Read in the question set. See `read_questions()` for more information. 61 | 62 | ```{r, eval=FALSE} 63 | library(collector) 64 | 65 | questions <- read_questions() 66 | ``` 67 | 68 | 3. Generate materials for interviewing a SME. 69 | 70 | ```{r, eval=FALSE} 71 | output_dir <- tempdir() 72 | make_handouts("Leader Name", questions, output_dir) 73 | make_scorecard("Leader Name", questions, output_dir) 74 | make_slides("Leader Name", questions, output_dir) 75 | ``` 76 | 77 | 4. Read in the responses from your SMEs. See `read_responses()` documentation 78 | for more information. 79 | 80 | ```{r, eval=FALSE} 81 | responses <- read_responses() 82 | ``` 83 | 84 | 5. Fit the SME answers to distributions. 85 | 86 | ```{r, eval=FALSE} 87 | scenario_answers_fitted <- fit_scenarios(responses) 88 | capability_answers_fitted <- fit_capabilities(responses) 89 | ``` 90 | 91 | 6. Combine distributions into final parameters, applying weighting based on 92 | each SMEs level of calibration. 
93 | 94 | ```{r eval=FALSE} 95 | sme_weightings <- generate_weights(questions, responses) 96 | scenario_parameters <- left_join(scenario_answers_fitted, sme_weightings, by = "sme") %>% 97 | combine_scenario_parameters() 98 | capability_parameters <- left_join(capability_answers_fitted, sme_weightings, by = "sme") %>% 99 | combine_capability_parameters() 100 | ``` 101 | 102 | 7. Build quantitative scenarios for [evaluator](https://evaluator.tidyrisk.org). 103 | 104 | ```{r eval=FALSE} 105 | scenarios <- prepare_data(scenario_parameters, capability_parameters, 106 | threat_parameters, questions) 107 | ``` 108 | 109 | ## Contributing 110 | 111 | This project is governed by a [Code of Conduct](https://collector.tidyrisk.org/CODE_OF_CONDUCT.html). By 112 | participating in this project you agree to abide by these terms. 113 | 114 | ## License 115 | 116 | The [MIT License](LICENSE) applies. 117 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | # collector collector Logo 5 | 6 | 7 | 8 | [![R build 9 | status](https://github.com/davidski/collector/workflows/R-CMD-check/badge.svg)](https://github.com/davidski/collector/actions) 10 | [![Coverage 11 | Status](https://codecov.io/gh/davidski/collector/branch/master/graph/badge.svg)](https://codecov.io/github/davidski/collector?branch=master) 12 | [![CRAN\_Status\_Badge](https://www.r-pkg.org/badges/version/collector)](https://cran.r-project.org/package=collector) 13 | ![downloads](https://cranlogs.r-pkg.org/badges/grand-total/collector) 14 | 15 | 16 | **collector** is an R package for conducting interviews with subject 17 | matter experts (SMEs) on the risk scenarios facing an organization. 
It 18 | offers functions for the following stages of input collection: 19 | 20 | - generate scenario and capability questions 21 | - building interview artifacts, including progress card, slide decks, 22 | and handouts 23 | - calibration testing, similar to that promoted by Doug Hubbard and 24 | the FAIR Institute 25 | - distribution fitting 26 | - opinion pooling of multiple SMEs into a single representative 27 | distribution 28 | - generating quantitative risk scenarios for simulation and reporting 29 | by [Evaluator](https://evaluator.tidyrisk.org) 30 | 31 | ## Installation 32 | 33 | Collector is now available on CRAN. 34 | 35 | ``` r 36 | install.packages("collector") 37 | ``` 38 | 39 | If you wish to run the development (and potentially bleeding edge) 40 | version, you can install directly from GitHub via the following 41 | `remotes` command. 42 | 43 | ``` r 44 | # install.packages("remotes") 45 | remotes::install_github("davidski/collector") 46 | ``` 47 | 48 | ## Basic Flow 49 | 50 | See the [package website](https://collector.tidyrisk.org) for reference. 51 | The basic flow for preparing for interviews with your SMEs, processing 52 | the results, and generating parameters for simulation via 53 | [evaluator](https://evaluator.tidyrisk.org) is: 54 | 55 | 1. Build questions and define SME expertise 56 | 57 | 2. Read in the question set. See `read_questions()` for more 58 | information. 59 | 60 | ``` r 61 | library(collector) 62 | 63 | questions <- read_questions() 64 | ``` 65 | 66 | 3. Generate materials for interviewing a SME. 67 | 68 | ``` r 69 | output_dir <- tempdir() 70 | make_handouts("Leader Name", questions, output_dir) 71 | make_scorecard("Leader Name", questions, output_dir) 72 | make_slides("Leader Name", questions, output_dir) 73 | ``` 74 | 75 | 4. Read in the responses from your SMEs. See `read_responses()` 76 | documentation for more information. 77 | 78 | ``` r 79 | responses <- read_responses() 80 | ``` 81 | 82 | 5. 
Fit the SME answers to distributions. 83 | 84 | ``` r 85 | scenario_answers_fitted <- fit_scenarios(responses) 86 | capability_answers_fitted <- fit_capabilities(responses) 87 | ``` 88 | 89 | 6. Combine distributions into final parameters, applying weighting 90 | based on each SMEs level of calibration. 91 | 92 | ``` r 93 | sme_weightings <- generate_weights(questions, responses) 94 | scenario_parameters <- left_join(scenario_answers_fitted, sme_weightings, by = "sme") %>% 95 | combine_scenario_parameters() 96 | capability_parameters <- left_join(capability_answers_fitted, sme_weightings, by = "sme") %>% 97 | combine_capability_parameters() 98 | ``` 99 | 100 | 7. Build quantitative scenarios for 101 | [evaluator](https://evaluator.tidyrisk.org). 102 | 103 | ``` r 104 | scenarios <- prepare_data(scenario_parameters, capability_parameters, 105 | threat_parameters, questions) 106 | ``` 107 | 108 | ## Contributing 109 | 110 | This project is governed by a [Code of 111 | Conduct](https://collector.tidyrisk.org/CODE_OF_CONDUCT.html). By 112 | participating in this project you agree to abide by these terms. 113 | 114 | ## License 115 | 116 | The [MIT License](LICENSE) applies. 
117 | -------------------------------------------------------------------------------- /_pkgdown.yml: -------------------------------------------------------------------------------- 1 | url: https://collector.tidyrisk.org 2 | 3 | template: 4 | params: 5 | bootswatch: flatly 6 | ganalytics: UA-87348339-3 7 | 8 | navbar: 9 | left: 10 | - icon: fa-home fa-lg 11 | href: index.html 12 | - text: "Reference" 13 | href: reference/index.html 14 | - text: "News" 15 | href: news/index.html 16 | right: 17 | - icon: fa-github fa-lg fab 18 | text: "github" 19 | href: https://github.com/davidski/collector/ 20 | 21 | reference: 22 | - title: "Combination Functions" 23 | contents: 24 | - starts_with("combine") 25 | - title: "Create Scenario Objects" 26 | contents: 27 | - prepare_data 28 | - title: "Distribution Fitting" 29 | contents: 30 | - starts_with("fit") 31 | - generate_cost_function 32 | - title: "Interview Creation" 33 | contents: 34 | - check_readability 35 | - starts_with("make_") 36 | - title: "Miscellaneous Functions" 37 | contents: 38 | - clean_answers 39 | - collector 40 | - generate_weights 41 | - get_smes_domains 42 | - lognormal_to_normal 43 | - normal_to_lognormal 44 | - title: "Object Classes" 45 | contents: 46 | - contains("tidyrisk") 47 | - title: "Provided Data Sets" 48 | desc: "Reference and sample data sets" 49 | contents: 50 | - starts_with("mc_") 51 | - calibration_questions 52 | - title: "Read Questions and Responses" 53 | desc: "Read datasets from disk" 54 | contents: 55 | - read_questions 56 | - read_responses 57 | -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | comment: false 2 | -------------------------------------------------------------------------------- /collector.Rproj: -------------------------------------------------------------------------------- 1 | Version: 1.0 2 | 3 | RestoreWorkspace: No 4 | SaveWorkspace: 
No 5 | AlwaysSaveHistory: Default 6 | 7 | EnableCodeIndexing: Yes 8 | UseSpacesForTab: Yes 9 | NumSpacesForTab: 2 10 | Encoding: UTF-8 11 | 12 | RnwWeave: Sweave 13 | LaTeX: pdfLaTeX 14 | 15 | AutoAppendNewline: Yes 16 | StripTrailingWhitespace: Yes 17 | 18 | BuildType: Package 19 | PackageUseDevtools: Yes 20 | PackageInstallArgs: --no-multiarch --with-keep.source 21 | PackageRoxygenize: rd,collate,namespace 22 | -------------------------------------------------------------------------------- /cran-comments.md: -------------------------------------------------------------------------------- 1 | This is a resubmission of a previously archived package, correcting previously 2 | failing tests. 3 | 4 | ## Test environments 5 | 6 | * local MacOS install, R 4.0.3 7 | * MacOS (on github actions) 8 | * r-release 9 | * r-hub 10 | * win-builder (devel and release) 11 | 12 | ## R CMD check results 13 | 14 | 0 errors | 0 warnings | 1 notes 15 | 16 | A false warning about a possible misspelling of `OpenFAIR` is triggered 17 | on the description. The capitalization and spelling is correct for this 18 | analysis. 
19 | -------------------------------------------------------------------------------- /data-raw/calibration_answers.csv: -------------------------------------------------------------------------------- 1 | sme,calibration_id,low,high,date 2 | Natalie Wade,CAL-01,27,53,2018-09-04 3 | Natalie Wade,CAL-02,42,73,2018-09-05 4 | Natalie Wade,CAL-03,1820,1888,2018-09-05 5 | Natalie Wade,CAL-04,38,68,2018-09-05 6 | Natalie Wade,CAL-05,12%,34%,2018-09-05 7 | Natalie Wade,CAL-06,32,64,2018-09-05 8 | Natalie Wade,CAL-07,1954,1969,2018-09-05 9 | Natalie Wade,CAL-08,44,87,2018-09-05 10 | Natalie Wade,CAL-09,89,102,2018-09-05 11 | Natalie Wade,CAL-10,"$8,000","$16,000",2018-09-05 12 | Theresa Fowler,CAL-01,100,400,2018-09-10 13 | Theresa Fowler,CAL-05,15%,40%,2018-09-10 14 | Theresa Fowler,CAL-02,40,55,2018-09-10 15 | Theresa Fowler,CAL-03,1885,1908,2018-09-10 16 | Theresa Fowler,CAL-10,"$4,800","$6,000",2018-09-10 17 | Theresa Fowler,CAL-06,160,200,2018-09-10 18 | Theresa Fowler,CAL-08,100,125,2018-09-10 19 | Theresa Fowler,CAL-09,60,80,2018-09-10 20 | Theresa Fowler,CAL-04,20,30,2018-09-10 21 | Theresa Fowler,CAL-07,1955,1965,2018-09-10 22 | Jimmy Jennings,CAL-06,20,50,2018-09-12 23 | Jimmy Jennings,CAL-13,"9,000","25,000",2018-09-12 24 | Jimmy Jennings,CAL-03,1820,1845,2018-09-12 25 | Jimmy Jennings,CAL-09,80,120,2018-09-12 26 | Jimmy Jennings,CAL-05,25%,37%,2018-09-12 27 | Jimmy Jennings,CAL-18,120,130,2018-09-12 28 | Jimmy Jennings,CAL-10,"$10,000","$30,000",2018-09-12 29 | Jimmy Jennings,CAL-16,15,30,2018-09-12 30 | Jimmy Jennings,CAL-11,1900,1920,2018-09-12 31 | Jimmy Jennings,CAL-19,1820,1900,2018-09-12 32 | Marsha Watson,CAL-25,6,7,2018-09-30 33 | Marsha Watson,CAL-16,100,"1,000",2018-09-30 34 | Marsha Watson,CAL-12,12,40,2018-09-30 35 | Marsha Watson,CAL-24,2,100,2018-09-30 36 | Marsha Watson,CAL-05,20%,80%,2018-09-30 37 | Marsha Watson,CAL-13,500,"4,000",2018-09-30 38 | Marsha Watson,CAL-18,100,500,2018-09-30 39 | Marsha Watson,CAL-11,1776,1850,2018-09-30 40 | Marsha 
Watson,CAL-06,1,"1,000",2018-09-30 41 | Marsha Watson,CAL-09,80,200,2018-09-30 42 | Ray Evans,CAL-01,10,30,2018-09-03 43 | Ray Evans,CAL-06,50,70,2018-09-03 44 | Ray Evans,CAL-23,20,40,2018-09-03 45 | Ray Evans,CAL-14,3,5,2018-09-03 46 | Ray Evans,CAL-25,5,7,2018-09-03 47 | Ray Evans,CAL-12,22,22,2018-09-03 48 | Ray Evans,CAL-17,1700,1900,2018-09-03 49 | Ray Evans,CAL-08,20,30,2018-09-03 50 | Ray Evans,CAL-24,50,150,2018-09-03 51 | Ray Evans,CAL-10,"$5,000","$6,000",2018-09-03 52 | -------------------------------------------------------------------------------- /data-raw/calibration_questions.csv: -------------------------------------------------------------------------------- 1 | question,answer,calibration_id 2 | How many countries are in NATO?,29,CAL-01 3 | "What is the average April temperature (in Fahrenheit) of Boston, MA?",48,CAL-02 4 | "On behalf of the US, President Grover Cleveland accepted the Statue of Liberty as a gift from France in what year?",1886,CAL-03 5 | How many gold medals did the US win in the 2012 Rio de Janeiro Summer Olympics?,46,CAL-04 6 | What percent of Americans did not have health insurance in 2016?,8.80%,CAL-05 7 | "If the Earth is 93 million miles from the sun, how far from the sun is Venus?",67.2,CAL-06 8 | What year did Gunsmoke premier on television?,1955,CAL-07 9 | How many billions of dollars did Microsoft earn in revenue in FY2017?,90,CAL-08 10 | How many stories does the Empire State Building have?,102,CAL-09 11 | How much did the average US household spend on food at home in 2016?,"$4,049 ",CAL-10 12 | The US Constitution was written in what year?,1787,CAL-11 13 | How many soccer players should be on the field at the same time?,22,CAL-12 14 | "What is the driving distance, in miles, from Seattle to NYC?","2,859",CAL-13 15 | How many time zones are in Russia?,9,CAL-14 16 | In what year did the UK abolish the shilling (adopt decimalization)?,1971,CAL-15 17 | How many bones are in the human body?,206,CAL-16 18 | What year did 
the American Civil War end?,1865,CAL-17 19 | How many countries are there in the world as of January 2018?,195,CAL-18 20 | What year did the Titanic sink?,1912,CAL-19 21 | How many keys are on a standard piano?,88,CAL-20 22 | What is the circumference of the earth in miles?,"24,901",CAL-21 23 | What was the maximum speed (in mph) of the Apollo 10 spacecraft?,"24,791",CAL-22 24 | How many elements make up the Periodic Table? ,118,CAL-23 25 | "What is the current population, in billions, of planet Earth?",7.442,CAL-24 26 | How many books are there in the Harry Potter series?,8,CAL-25 27 | How many oceans are there in the world? ,5,CAL-26 28 | How many planets are in our solar system? ,8,CAL-27 -------------------------------------------------------------------------------- /data-raw/capability_answers.csv: -------------------------------------------------------------------------------- 1 | sme,capability_id,low,high,date 2 | 3 | # 4 | Jimmy Jennings,CAP-01,.10,.40,2018-09-10 5 | Jimmy Jennings,CAP-02,.10,.40,2018-09-10 6 | Jimmy Jennings,CAP-03,.10,.40,2018-09-10 7 | Jimmy Jennings,CAP-04,.10,.40,2018-09-10 8 | Jimmy Jennings,CAP-05,.10,.40,2018-09-10 9 | Jimmy Jennings,CAP-06,.10,.40,2018-09-10 10 | Jimmy Jennings,CAP-07,.10,.40,2018-09-10 11 | Jimmy Jennings,CAP-08,.10,.40,2018-09-10 12 | Jimmy Jennings,CAP-09,.10,.40,2018-09-10 13 | Jimmy Jennings,CAP-10,.10,.40,2018-09-10 14 | Jimmy Jennings,CAP-11,.10,.40,2018-09-10 15 | Jimmy Jennings,CAP-12,.10,.40,2018-09-10 16 | Jimmy Jennings,CAP-13,.10,.40,2018-09-10 17 | Jimmy Jennings,CAP-14,.10,.40,2018-09-10 18 | Jimmy Jennings,CAP-15,.10,.40,2018-09-10 19 | Jimmy Jennings,CAP-16,.10,.40,2018-09-10 20 | Jimmy Jennings,CAP-17,.10,.40,2018-09-10 21 | Jimmy Jennings,CAP-18,.10,.40,2018-09-10 22 | Jimmy Jennings,CAP-19,.10,.40,2018-09-10 23 | Jimmy Jennings,CAP-20,.10,.40,2018-09-10 24 | Jimmy Jennings,CAP-21,.10,.40,2018-09-10 25 | Jimmy Jennings,CAP-22,.10,.40,2018-09-10 26 | Jimmy Jennings,CAP-23,.10,.40,2018-09-10 27 | 
Jimmy Jennings,CAP-24,.10,.40,2018-09-10 28 | Jimmy Jennings,CAP-25,.10,.40,2018-09-10 29 | Jimmy Jennings,CAP-26,.10,.40,2018-09-10 30 | Jimmy Jennings,CAP-27,.10,.40,2018-09-10 31 | Jimmy Jennings,CAP-28,.10,.40,2018-09-10 32 | Jimmy Jennings,CAP-29,.10,.40,2018-09-10 33 | Jimmy Jennings,CAP-30,.10,.40,2018-09-10 34 | Jimmy Jennings,CAP-31,.10,.40,2018-09-10 35 | Jimmy Jennings,CAP-32,.10,.40,2018-09-10 36 | Jimmy Jennings,CAP-33,.10,.40,2018-09-10 37 | Jimmy Jennings,CAP-34,.10,.40,2018-09-10 38 | Jimmy Jennings,CAP-35,.10,.40,2018-09-10 39 | Jimmy Jennings,CAP-36,.10,.40,2018-09-10 40 | Jimmy Jennings,CAP-37,.10,.40,2018-09-10 41 | Jimmy Jennings,CAP-38,.10,.40,2018-09-10 42 | Jimmy Jennings,CAP-39,.10,.40,2018-09-10 43 | Jimmy Jennings,CAP-40,.10,.40,2018-09-10 44 | Jimmy Jennings,CAP-41,.10,.40,2018-09-10 45 | Jimmy Jennings,CAP-42,.10,.40,2018-09-10 46 | Jimmy Jennings,CAP-43,.10,.40,2018-09-10 47 | Jimmy Jennings,CAP-44,.10,.40,2018-09-10 48 | Jimmy Jennings,CAP-45,.10,.40,2018-09-10 49 | Jimmy Jennings,CAP-46,.10,.40,2018-09-10 50 | Jimmy Jennings,CAP-47,.10,.40,2018-09-10 51 | Jimmy Jennings,CAP-48,.10,.40,2018-09-10 52 | Jimmy Jennings,CAP-49,.10,.40,2018-09-10 53 | Jimmy Jennings,CAP-50,.10,.40,2018-09-10 54 | Jimmy Jennings,CAP-51,.10,.40,2018-09-10 55 | Jimmy Jennings,CAP-52,.10,.40,2018-09-10 56 | Jimmy Jennings,CAP-53,.10,.40,2018-09-10 57 | Jimmy Jennings,CAP-54,.10,.40,2018-09-10 58 | Jimmy Jennings,CAP-55,.10,.40,2018-09-10 59 | Jimmy Jennings,CAP-56,.10,.40,2018-09-10 60 | Jimmy Jennings,CAP-57,.10,.40,2018-09-10 61 | Jimmy Jennings,CAP-58,.10,.40,2018-09-10 62 | Jimmy Jennings,CAP-59,.10,.40,2018-09-10 63 | Jimmy Jennings,CAP-60,.10,.40,2018-09-10 64 | 65 | # 66 | Marsha Watson,CAP-01,.10,.40,2018-09-10 67 | Marsha Watson,CAP-02,.10,.40,2018-09-10 68 | Marsha Watson,CAP-03,.10,.40,2018-09-10 69 | Marsha Watson,CAP-04,.10,.40,2018-09-10 70 | Marsha Watson,CAP-05,.10,.40,2018-09-10 71 | Marsha Watson,CAP-06,.10,.40,2018-09-10 72 | Marsha 
Watson,CAP-07,.10,.40,2018-09-10 73 | Marsha Watson,CAP-08,.10,.40,2018-09-10 74 | Marsha Watson,CAP-09,.10,.40,2018-09-10 75 | Marsha Watson,CAP-10,.10,.40,2018-09-10 76 | Marsha Watson,CAP-11,.10,.40,2018-09-10 77 | Marsha Watson,CAP-12,.10,.40,2018-09-10 78 | Marsha Watson,CAP-13,.10,.40,2018-09-10 79 | Marsha Watson,CAP-14,.10,.40,2018-09-10 80 | Marsha Watson,CAP-15,.10,.40,2018-09-10 81 | Marsha Watson,CAP-16,.10,.40,2018-09-10 82 | Marsha Watson,CAP-17,.10,.40,2018-09-10 83 | Marsha Watson,CAP-18,.10,.40,2018-09-10 84 | Marsha Watson,CAP-19,.10,.40,2018-09-10 85 | Marsha Watson,CAP-20,.10,.40,2018-09-10 86 | Marsha Watson,CAP-21,.10,.40,2018-09-10 87 | Marsha Watson,CAP-22,.10,.40,2018-09-10 88 | Marsha Watson,CAP-23,.10,.40,2018-09-10 89 | Marsha Watson,CAP-24,.10,.40,2018-09-10 90 | Marsha Watson,CAP-25,.10,.40,2018-09-10 91 | Marsha Watson,CAP-26,.10,.40,2018-09-10 92 | Marsha Watson,CAP-27,.10,.40,2018-09-10 93 | Marsha Watson,CAP-28,.10,.40,2018-09-10 94 | Marsha Watson,CAP-29,.10,.40,2018-09-10 95 | Marsha Watson,CAP-30,.10,.40,2018-09-10 96 | Marsha Watson,CAP-31,.10,.40,2018-09-10 97 | Marsha Watson,CAP-32,.10,.40,2018-09-10 98 | Marsha Watson,CAP-33,.10,.40,2018-09-10 99 | Marsha Watson,CAP-34,.10,.40,2018-09-10 100 | Marsha Watson,CAP-35,.10,.40,2018-09-10 101 | Marsha Watson,CAP-36,.10,.40,2018-09-10 102 | Marsha Watson,CAP-37,.10,.40,2018-09-10 103 | Marsha Watson,CAP-38,.10,.40,2018-09-10 104 | Marsha Watson,CAP-39,.10,.40,2018-09-10 105 | Marsha Watson,CAP-40,.10,.40,2018-09-10 106 | Marsha Watson,CAP-41,.10,.40,2018-09-10 107 | Marsha Watson,CAP-42,.10,.40,2018-09-10 108 | Marsha Watson,CAP-43,.10,.40,2018-09-10 109 | Marsha Watson,CAP-44,.10,.40,2018-09-10 110 | Marsha Watson,CAP-45,.10,.40,2018-09-10 111 | Marsha Watson,CAP-46,.10,.40,2018-09-10 112 | Marsha Watson,CAP-47,.10,.40,2018-09-10 113 | Marsha Watson,CAP-48,.10,.40,2018-09-10 114 | Marsha Watson,CAP-49,.10,.40,2018-09-10 115 | Marsha Watson,CAP-50,.10,.40,2018-09-10 116 | Marsha 
Watson,CAP-51,.10,.40,2018-09-10 117 | Marsha Watson,CAP-52,.10,.40,2018-09-10 118 | Marsha Watson,CAP-53,.10,.40,2018-09-10 119 | Marsha Watson,CAP-54,.10,.40,2018-09-10 120 | Marsha Watson,CAP-55,.10,.40,2018-09-10 121 | Marsha Watson,CAP-56,.10,.40,2018-09-10 122 | Marsha Watson,CAP-57,.10,.40,2018-09-10 123 | Marsha Watson,CAP-58,.10,.40,2018-09-10 124 | Marsha Watson,CAP-59,.10,.40,2018-09-10 125 | Marsha Watson,CAP-60,.10,.40,2018-09-10 126 | 127 | # 128 | Natalie Wade,CAP-01,.10,.40,2018-09-10 129 | Natalie Wade,CAP-02,.10,.40,2018-09-10 130 | Natalie Wade,CAP-03,.10,.40,2018-09-10 131 | Natalie Wade,CAP-04,.10,.40,2018-09-10 132 | Natalie Wade,CAP-05,.10,.40,2018-09-10 133 | Natalie Wade,CAP-06,.10,.40,2018-09-10 134 | Natalie Wade,CAP-07,.10,.40,2018-09-10 135 | Natalie Wade,CAP-08,.10,.40,2018-09-10 136 | Natalie Wade,CAP-09,.10,.40,2018-09-10 137 | Natalie Wade,CAP-10,.10,.40,2018-09-10 138 | Natalie Wade,CAP-11,.10,.40,2018-09-10 139 | Natalie Wade,CAP-12,.10,.40,2018-09-10 140 | Natalie Wade,CAP-13,.10,.40,2018-09-10 141 | Natalie Wade,CAP-14,.10,.40,2018-09-10 142 | Natalie Wade,CAP-15,.10,.40,2018-09-10 143 | Natalie Wade,CAP-16,.10,.40,2018-09-10 144 | Natalie Wade,CAP-17,.10,.40,2018-09-10 145 | Natalie Wade,CAP-18,.10,.40,2018-09-10 146 | Natalie Wade,CAP-19,.10,.40,2018-09-10 147 | Natalie Wade,CAP-20,.10,.40,2018-09-10 148 | Natalie Wade,CAP-21,.10,.40,2018-09-10 149 | Natalie Wade,CAP-22,.10,.40,2018-09-10 150 | Natalie Wade,CAP-23,.10,.40,2018-09-10 151 | Natalie Wade,CAP-24,.10,.40,2018-09-10 152 | Natalie Wade,CAP-25,.10,.40,2018-09-10 153 | Natalie Wade,CAP-26,.10,.40,2018-09-10 154 | Natalie Wade,CAP-27,.10,.40,2018-09-10 155 | Natalie Wade,CAP-28,.10,.40,2018-09-10 156 | Natalie Wade,CAP-29,.10,.40,2018-09-10 157 | Natalie Wade,CAP-30,.10,.40,2018-09-10 158 | Natalie Wade,CAP-31,.10,.40,2018-09-10 159 | Natalie Wade,CAP-32,.10,.40,2018-09-10 160 | Natalie Wade,CAP-33,.10,.40,2018-09-10 161 | Natalie Wade,CAP-34,.10,.40,2018-09-10 162 | 
Natalie Wade,CAP-35,.10,.40,2018-09-10 163 | Natalie Wade,CAP-36,.10,.40,2018-09-10 164 | Natalie Wade,CAP-37,.10,.40,2018-09-10 165 | Natalie Wade,CAP-38,.10,.40,2018-09-10 166 | Natalie Wade,CAP-39,.10,.40,2018-09-10 167 | Natalie Wade,CAP-40,.10,.40,2018-09-10 168 | Natalie Wade,CAP-41,.10,.40,2018-09-10 169 | Natalie Wade,CAP-42,.10,.40,2018-09-10 170 | Natalie Wade,CAP-43,.10,.40,2018-09-10 171 | Natalie Wade,CAP-44,.10,.40,2018-09-10 172 | Natalie Wade,CAP-45,.10,.40,2018-09-10 173 | Natalie Wade,CAP-46,.10,.40,2018-09-10 174 | Natalie Wade,CAP-47,.10,.40,2018-09-10 175 | Natalie Wade,CAP-48,.10,.40,2018-09-10 176 | Natalie Wade,CAP-49,.10,.40,2018-09-10 177 | Natalie Wade,CAP-50,.10,.40,2018-09-10 178 | Natalie Wade,CAP-51,.10,.40,2018-09-10 179 | Natalie Wade,CAP-52,.10,.40,2018-09-10 180 | Natalie Wade,CAP-53,.10,.40,2018-09-10 181 | Natalie Wade,CAP-54,.10,.40,2018-09-10 182 | Natalie Wade,CAP-55,.10,.40,2018-09-10 183 | Natalie Wade,CAP-56,.10,.40,2018-09-10 184 | Natalie Wade,CAP-57,.10,.40,2018-09-10 185 | Natalie Wade,CAP-58,.10,.40,2018-09-10 186 | Natalie Wade,CAP-59,.10,.40,2018-09-10 187 | Natalie Wade,CAP-60,.10,.40,2018-09-10 188 | 189 | # 190 | Ray Evans,CAP-01,.10,.40,2018-09-10 191 | Ray Evans,CAP-02,.10,.40,2018-09-10 192 | Ray Evans,CAP-03,.10,.40,2018-09-10 193 | Ray Evans,CAP-04,.10,.40,2018-09-10 194 | Ray Evans,CAP-05,.10,.40,2018-09-10 195 | Ray Evans,CAP-06,.10,.40,2018-09-10 196 | Ray Evans,CAP-07,.10,.40,2018-09-10 197 | Ray Evans,CAP-08,.10,.40,2018-09-10 198 | Ray Evans,CAP-09,.10,.40,2018-09-10 199 | Ray Evans,CAP-10,.10,.40,2018-09-10 200 | Ray Evans,CAP-11,.10,.40,2018-09-10 201 | Ray Evans,CAP-12,.10,.40,2018-09-10 202 | Ray Evans,CAP-13,.10,.40,2018-09-10 203 | Ray Evans,CAP-14,.10,.40,2018-09-10 204 | Ray Evans,CAP-15,.10,.40,2018-09-10 205 | Ray Evans,CAP-16,.10,.40,2018-09-10 206 | Ray Evans,CAP-17,.10,.40,2018-09-10 207 | Ray Evans,CAP-18,.10,.40,2018-09-10 208 | Ray Evans,CAP-19,.10,.40,2018-09-10 209 | Ray 
Evans,CAP-20,.10,.40,2018-09-10 210 | Ray Evans,CAP-21,.10,.40,2018-09-10 211 | Ray Evans,CAP-22,.10,.40,2018-09-10 212 | Ray Evans,CAP-23,.10,.40,2018-09-10 213 | Ray Evans,CAP-24,.10,.40,2018-09-10 214 | Ray Evans,CAP-25,.10,.40,2018-09-10 215 | Ray Evans,CAP-26,.10,.40,2018-09-10 216 | Ray Evans,CAP-27,.10,.40,2018-09-10 217 | Ray Evans,CAP-28,.10,.40,2018-09-10 218 | Ray Evans,CAP-29,.10,.40,2018-09-10 219 | Ray Evans,CAP-30,.10,.40,2018-09-10 220 | Ray Evans,CAP-31,.10,.40,2018-09-10 221 | Ray Evans,CAP-32,.10,.40,2018-09-10 222 | Ray Evans,CAP-33,.10,.40,2018-09-10 223 | Ray Evans,CAP-34,.10,.40,2018-09-10 224 | Ray Evans,CAP-35,.10,.40,2018-09-10 225 | Ray Evans,CAP-36,.10,.40,2018-09-10 226 | Ray Evans,CAP-37,.10,.40,2018-09-10 227 | Ray Evans,CAP-38,.10,.40,2018-09-10 228 | Ray Evans,CAP-39,.10,.40,2018-09-10 229 | Ray Evans,CAP-40,.10,.40,2018-09-10 230 | Ray Evans,CAP-41,.10,.40,2018-09-10 231 | Ray Evans,CAP-42,.10,.40,2018-09-10 232 | Ray Evans,CAP-43,.10,.40,2018-09-10 233 | Ray Evans,CAP-44,.10,.40,2018-09-10 234 | Ray Evans,CAP-45,.10,.40,2018-09-10 235 | Ray Evans,CAP-46,.10,.40,2018-09-10 236 | Ray Evans,CAP-47,.10,.40,2018-09-10 237 | Ray Evans,CAP-48,.10,.40,2018-09-10 238 | Ray Evans,CAP-49,.10,.40,2018-09-10 239 | Ray Evans,CAP-50,.10,.40,2018-09-10 240 | Ray Evans,CAP-51,.10,.40,2018-09-10 241 | Ray Evans,CAP-52,.10,.40,2018-09-10 242 | Ray Evans,CAP-53,.10,.40,2018-09-10 243 | Ray Evans,CAP-54,.10,.40,2018-09-10 244 | Ray Evans,CAP-55,.10,.40,2018-09-10 245 | Ray Evans,CAP-56,.10,.40,2018-09-10 246 | Ray Evans,CAP-57,.10,.40,2018-09-10 247 | Ray Evans,CAP-58,.10,.40,2018-09-10 248 | Ray Evans,CAP-59,.10,.40,2018-09-10 249 | Ray Evans,CAP-60,.10,.40,2018-09-10 250 | 251 | # 252 | Theresa Fowler,CAP-01,.10,.40,2018-09-10 253 | Theresa Fowler,CAP-02,.10,.40,2018-09-10 254 | Theresa Fowler,CAP-03,.10,.40,2018-09-10 255 | Theresa Fowler,CAP-04,.10,.40,2018-09-10 256 | Theresa Fowler,CAP-05,.10,.40,2018-09-10 257 | Theresa 
Fowler,CAP-06,.10,.40,2018-09-10 258 | Theresa Fowler,CAP-07,.10,.40,2018-09-10 259 | Theresa Fowler,CAP-08,.10,.40,2018-09-10 260 | Theresa Fowler,CAP-09,.10,.40,2018-09-10 261 | Theresa Fowler,CAP-10,.10,.40,2018-09-10 262 | Theresa Fowler,CAP-11,.10,.40,2018-09-10 263 | Theresa Fowler,CAP-12,.10,.40,2018-09-10 264 | Theresa Fowler,CAP-13,.10,.40,2018-09-10 265 | Theresa Fowler,CAP-14,.10,.40,2018-09-10 266 | Theresa Fowler,CAP-15,.10,.40,2018-09-10 267 | Theresa Fowler,CAP-16,.10,.40,2018-09-10 268 | Theresa Fowler,CAP-17,.10,.40,2018-09-10 269 | Theresa Fowler,CAP-18,.10,.40,2018-09-10 270 | Theresa Fowler,CAP-19,.10,.40,2018-09-10 271 | Theresa Fowler,CAP-20,.10,.40,2018-09-10 272 | Theresa Fowler,CAP-21,.10,.40,2018-09-10 273 | Theresa Fowler,CAP-22,.10,.40,2018-09-10 274 | Theresa Fowler,CAP-23,.10,.40,2018-09-10 275 | Theresa Fowler,CAP-24,.10,.40,2018-09-10 276 | Theresa Fowler,CAP-25,.10,.40,2018-09-10 277 | Theresa Fowler,CAP-26,.10,.40,2018-09-10 278 | Theresa Fowler,CAP-27,.10,.40,2018-09-10 279 | Theresa Fowler,CAP-28,.10,.40,2018-09-10 280 | Theresa Fowler,CAP-29,.10,.40,2018-09-10 281 | Theresa Fowler,CAP-30,.10,.40,2018-09-10 282 | Theresa Fowler,CAP-31,.10,.40,2018-09-10 283 | Theresa Fowler,CAP-32,.10,.40,2018-09-10 284 | Theresa Fowler,CAP-33,.10,.40,2018-09-10 285 | Theresa Fowler,CAP-34,.10,.40,2018-09-10 286 | Theresa Fowler,CAP-35,.10,.40,2018-09-10 287 | Theresa Fowler,CAP-36,.10,.40,2018-09-10 288 | Theresa Fowler,CAP-37,.10,.40,2018-09-10 289 | Theresa Fowler,CAP-38,.10,.40,2018-09-10 290 | Theresa Fowler,CAP-39,.10,.40,2018-09-10 291 | Theresa Fowler,CAP-40,.10,.40,2018-09-10 292 | Theresa Fowler,CAP-41,.10,.40,2018-09-10 293 | Theresa Fowler,CAP-42,.10,.40,2018-09-10 294 | Theresa Fowler,CAP-43,.10,.40,2018-09-10 295 | Theresa Fowler,CAP-44,.10,.40,2018-09-10 296 | Theresa Fowler,CAP-45,.10,.40,2018-09-10 297 | Theresa Fowler,CAP-46,.10,.40,2018-09-10 298 | Theresa Fowler,CAP-47,.10,.40,2018-09-10 299 | Theresa 
Fowler,CAP-48,.10,.40,2018-09-10 300 | Theresa Fowler,CAP-49,.10,.40,2018-09-10 301 | Theresa Fowler,CAP-50,.10,.40,2018-09-10 302 | Theresa Fowler,CAP-51,.10,.40,2018-09-10 303 | Theresa Fowler,CAP-52,.10,.40,2018-09-10 304 | Theresa Fowler,CAP-53,.10,.40,2018-09-10 305 | Theresa Fowler,CAP-54,.10,.40,2018-09-10 306 | Theresa Fowler,CAP-55,.10,.40,2018-09-10 307 | Theresa Fowler,CAP-56,.10,.40,2018-09-10 308 | Theresa Fowler,CAP-57,.10,.40,2018-09-10 309 | Theresa Fowler,CAP-58,.10,.40,2018-09-10 310 | Theresa Fowler,CAP-59,.10,.40,2018-09-10 311 | Theresa Fowler,CAP-60,.10,.40,2018-09-10 312 | -------------------------------------------------------------------------------- /data-raw/domains.csv: -------------------------------------------------------------------------------- 1 | domain,description,active,domain_id 2 | Information Security Management Program,,TRUE,ISMP 3 | Access Control,,TRUE,AC 4 | Human Resources Security,,TRUE,HR 5 | Risk Management,,TRUE,RISK 6 | Security Policy,,TRUE,POL 7 | Organization of Information Security,,TRUE,ORG 8 | Compliance,,TRUE,COMP 9 | Asset Management,,TRUE,ASSET 10 | Physical and Environmental Security,,TRUE,PHY 11 | Communications and Operations Management,,TRUE,OPS 12 | "Information Systems Acquisition, Development, and Maintenance",,TRUE,ADM 13 | Information Security Incident Management,,TRUE,IM 14 | Business Continuity Management,,TRUE,BC 15 | Privacy Practices,,TRUE,PRI 16 | -------------------------------------------------------------------------------- /data-raw/regenerate_data.R: -------------------------------------------------------------------------------- 1 | ## Regenerate sample data sets 2 | library(evaluator) 3 | library(readr) 4 | library(dplyr) 5 | 6 | # read in and save domain mappings 7 | mc_domains <- readr::read_csv(here::here("data-raw/domains.csv"), 8 | col_types = cols(domain = col_character(), 9 | description = col_character(), 10 | active = col_logical(), 11 | domain_id = col_character())) 12 | 
usethis::use_data(mc_domains, overwrite = TRUE) 13 | 14 | # read in capabilities 15 | mc_capabilities <- evaluator::import_capabilities(domains = mc_domains) 16 | mc_capabilities <- mc_capabilities[, c("capability_id", "domain_id", "capability")] 17 | usethis::use_data(mc_capabilities, overwrite = TRUE) 18 | 19 | # read in capability_answers 20 | mc_capability_answers <- readr::read_csv(here::here("data-raw/capability_answers.csv"), 21 | col_types = readr::cols(sme = readr::col_character(), 22 | capability_id = readr::col_character(), 23 | low = readr::col_number(), 24 | high = readr::col_number(), 25 | date = readr::col_date()), 26 | comment = "#") 27 | usethis::use_data(mc_capability_answers, overwrite = TRUE) 28 | 29 | # generate and save threat_communities 30 | mc_threat_communities <- readr::read_csv(here::here("data-raw/threat_communities.csv")) 31 | usethis::use_data(mc_threat_communities, overwrite = TRUE) 32 | 33 | # read in and save scenarios 34 | mc_scenarios <- evaluator::import_scenarios(domains = evaluator::mc_domains) %>% 35 | left_join(mc_threat_communities, by = c("tcomm" = "threat_community")) %>% 36 | select(scenario_id, scenario, threat_id, domain_id, controls) 37 | usethis::use_data(mc_scenarios, overwrite = TRUE) 38 | 39 | # scenario answers 40 | mc_scenario_answers <- readr::read_csv(here::here("data-raw/scenario_answers.csv"), 41 | comment = "#", 42 | col_types = readr::cols(sme = col_character(), 43 | scenario_id = col_character(), 44 | freq_low = col_number(), 45 | freq_high = col_number(), 46 | imp_low = col_number(), 47 | imp_high = col_number(), 48 | date = col_date())) 49 | usethis::use_data(mc_scenario_answers, overwrite = TRUE) 50 | 51 | # generate and save calibration_questions 52 | calibration_questions <- readr::read_csv(here::here("data-raw/calibration_questions.csv")) 53 | usethis::use_data(calibration_questions, overwrite = TRUE) 54 | 55 | # generate and save calibration_answers 56 | mc_calibration_answers <- 
readr::read_csv(here::here("data-raw/calibration_answers.csv")) 57 | usethis::use_data(mc_calibration_answers, overwrite = TRUE) 58 | 59 | # generate and save sme top domains 60 | mc_sme_top_domains <- readr::read_csv(here::here("data-raw/sme_top_domains.csv")) %>% 61 | tidyr::gather(key = "key", value = "value", -.data$sme) %>% 62 | tidyr::drop_na() 63 | usethis::use_data(mc_sme_top_domains, overwrite = TRUE) 64 | 65 | # generate and save fitted parameters 66 | response_set <- tidyrisk_response_set(mc_calibration_answers, 67 | mc_scenario_answers, mc_capability_answers) 68 | mc_scenario_parameters_fitted <- fit_scenarios(response_set) 69 | usethis::use_data(mc_scenario_parameters_fitted, overwrite = TRUE) 70 | mc_capability_parameters_fitted <- fit_capabilities(response_set) 71 | usethis::use_data(mc_capability_parameters_fitted, overwrite = TRUE) 72 | mc_threat_parameters_fitted <- fit_threat_communities(mc_threat_communities) 73 | usethis::use_data(mc_threat_parameters_fitted, overwrite = TRUE) 74 | -------------------------------------------------------------------------------- /data-raw/scenario_answers.csv: -------------------------------------------------------------------------------- 1 | sme,scenario_id,freq_low,freq_high,imp_low,imp_high,date 2 | Jimmy Jennings,RS-01,10,40,0,"3,000,000",2018-09-10 3 | Marsha Watson,RS-01,10,40,0,"3,000,000",2018-09-10 4 | Natalie Wade,RS-01,10,40,0,"3,000,000",2018-09-10 5 | Ray Evans,RS-01,00,40,0,"3,000,000",2018-09-10 6 | Theresa Fowler,RS-01,10,40,0,"3,000,000",2018-09-10 7 | # 8 | Jimmy Jennings,RS-02,10,40,0,"3,000,000",2018-09-10 9 | Marsha Watson,RS-02,10,40,0,"3,000,000",2018-09-10 10 | Natalie Wade,RS-02,10,40,0,"3,000,000",2018-09-10 11 | Ray Evans,RS-02,10,40,0,"3,000,000",2018-09-10 12 | Theresa Fowler,RS-02,10,40,0,"3,000,000",2018-09-10 13 | 14 | # 15 | Jimmy Jennings,RS-03,10,40,0,"3,000,000",2018-09-10 16 | Marsha Watson,RS-03,10,40,0,"3,000,000",2018-09-10 17 | Natalie 
Wade,RS-03,10,40,0,"3,000,000",2018-09-10 18 | Ray Evans,RS-03,10,40,0,"3,000,000",2018-09-10 19 | Theresa Fowler,RS-03,10,40,0,"3,000,000",2018-09-10 20 | 21 | # 22 | Jimmy Jennings,RS-04,10,40,0,"3,000,000",2018-09-10 23 | Marsha Watson,RS-04,10,40,0,"3,000,000",2018-09-10 24 | Natalie Wade,RS-04,10,40,0,"3,000,000",2018-09-10 25 | Ray Evans,RS-04,10,40,0,"3,000,000",2018-09-10 26 | Theresa Fowler,RS-04,10,40,0,"3,000,000",2018-09-10 27 | 28 | # 29 | Jimmy Jennings,RS-05,10,40,0,"3,000,000",2018-09-10 30 | Marsha Watson,RS-05,10,40,0,"3,000,000",2018-09-10 31 | Natalie Wade,RS-05,10,40,0,"3,000,000",2018-09-10 32 | Ray Evans,RS-05,10,40,0,"3,000,000",2018-09-10 33 | Theresa Fowler,RS-05,10,40,0,"3,000,000",2018-09-10 34 | 35 | # 36 | Jimmy Jennings,RS-06,10,40,0,"3,000,000",2018-09-10 37 | Marsha Watson,RS-06,10,40,0,"3,000,000",2018-09-10 38 | Natalie Wade,RS-06,10,40,0,"3,000,000",2018-09-10 39 | Ray Evans,RS-06,10,40,0,"3,000,000",2018-09-10 40 | Theresa Fowler,RS-06,10,40,0,"3,000,000",2018-09-10 41 | 42 | # 43 | Jimmy Jennings,RS-07,10,40,0,"3,000,000",2018-09-10 44 | Marsha Watson,RS-07,10,40,0,"3,000,000",2018-09-10 45 | Natalie Wade,RS-07,10,40,0,"3,000,000",2018-09-10 46 | Ray Evans,RS-07,10,40,0,"3,000,000",2018-09-10 47 | Theresa Fowler,RS-07,10,40,0,"3,000,000",2018-09-10 48 | 49 | # 50 | Jimmy Jennings,RS-08,10,40,0,"3,000,000",2018-09-10 51 | Marsha Watson,RS-08,10,40,0,"3,000,000",2018-09-10 52 | Natalie Wade,RS-08,10,40,0,"3,000,000",2018-09-10 53 | Ray Evans,RS-08,10,40,0,"3,000,000",2018-09-10 54 | Theresa Fowler,RS-08,10,40,0,"3,000,000",2018-09-10 55 | 56 | # 57 | Jimmy Jennings,RS-09,10,40,0,"3,000,000",2018-09-10 58 | Marsha Watson,RS-09,10,40,0,"3,000,000",2018-09-10 59 | Natalie Wade,RS-09,10,40,0,"3,000,000",2018-09-10 60 | Ray Evans,RS-09,10,40,0,"3,000,000",2018-09-10 61 | Theresa Fowler,RS-09,10,40,0,"3,000,000",2018-09-10 62 | 63 | # 64 | Jimmy Jennings,RS-10,10,40,0,"3,000,000",2018-09-10 65 | Marsha 
Watson,RS-10,10,40,0,"3,000,000",2018-09-10 66 | Natalie Wade,RS-10,10,40,0,"3,000,000",2018-09-10 67 | Ray Evans,RS-10,10,40,0,"3,000,000",2018-09-10 68 | Theresa Fowler,RS-10,10,40,0,"3,000,000",2018-09-10 69 | 70 | # 71 | Jimmy Jennings,RS-11,10,40,0,"3,000,000",2018-09-10 72 | Marsha Watson,RS-11,10,40,0,"3,000,000",2018-09-10 73 | Natalie Wade,RS-11,10,40,0,"3,000,000",2018-09-10 74 | Ray Evans,RS-11,10,40,0,"3,000,000",2018-09-10 75 | Theresa Fowler,RS-11,10,40,0,"3,000,000",2018-09-10 76 | 77 | # 78 | Jimmy Jennings,RS-12,10,40,0,"3,000,000",2018-09-10 79 | Marsha Watson,RS-12,10,40,0,"3,000,000",2018-09-10 80 | Natalie Wade,RS-12,10,40,0,"3,000,000",2018-09-10 81 | Ray Evans,RS-12,10,40,0,"3,000,000",2018-09-10 82 | Theresa Fowler,RS-12,10,40,0,"3,000,000",2018-09-10 83 | 84 | # 85 | Jimmy Jennings,RS-13,10,40,0,"3,000,000",2018-09-10 86 | Marsha Watson,RS-13,10,40,0,"3,000,000",2018-09-10 87 | Natalie Wade,RS-13,10,40,0,"3,000,000",2018-09-10 88 | Ray Evans,RS-13,10,40,0,"3,000,000",2018-09-10 89 | Theresa Fowler,RS-13,10,40,0,"3,000,000",2018-09-10 90 | 91 | # 92 | Jimmy Jennings,RS-14,10,40,0,"3,000,000",2018-09-10 93 | Marsha Watson,RS-14,10,40,0,"3,000,000",2018-09-10 94 | Natalie Wade,RS-14,10,40,0,"3,000,000",2018-09-10 95 | Ray Evans,RS-14,10,40,0,"3,000,000",2018-09-10 96 | Theresa Fowler,RS-14,10,40,0,"3,000,000",2018-09-10 97 | 98 | # 99 | Jimmy Jennings,RS-15,10,40,0,"3,000,000",2018-09-10 100 | Marsha Watson,RS-15,10,40,0,"3,000,000",2018-09-10 101 | Natalie Wade,RS-15,10,40,0,"3,000,000",2018-09-10 102 | Ray Evans,RS-15,10,40,0,"3,000,000",2018-09-10 103 | Theresa Fowler,RS-15,10,40,0,"3,000,000",2018-09-10 104 | 105 | # 106 | Jimmy Jennings,RS-16,10,40,0,"3,000,000",2018-09-10 107 | Marsha Watson,RS-16,10,40,0,"3,000,000",2018-09-10 108 | Natalie Wade,RS-16,10,40,0,"3,000,000",2018-09-10 109 | Ray Evans,RS-16,10,40,0,"3,000,000",2018-09-10 110 | Theresa Fowler,RS-16,10,40,0,"3,000,000",2018-09-10 111 | 112 | # 113 | Jimmy 
Jennings,RS-17,10,40,0,"3,000,000",2018-09-10 114 | Marsha Watson,RS-17,10,40,0,"3,000,000",2018-09-10 115 | Natalie Wade,RS-17,10,40,0,"3,000,000",2018-09-10 116 | Ray Evans,RS-17,10,40,0,"3,000,000",2018-09-10 117 | Theresa Fowler,RS-17,10,40,0,"3,000,000",2018-09-10 118 | 119 | # 120 | Jimmy Jennings,RS-18,10,40,0,"3,000,000",2018-09-10 121 | Marsha Watson,RS-18,10,40,0,"3,000,000",2018-09-10 122 | Natalie Wade,RS-18,10,40,0,"3,000,000",2018-09-10 123 | Ray Evans,RS-18,10,40,0,"3,000,000",2018-09-10 124 | Theresa Fowler,RS-18,10,40,0,"3,000,000",2018-09-10 125 | 126 | # 127 | Jimmy Jennings,RS-19,10,40,0,"3,000,000",2018-09-10 128 | Marsha Watson,RS-19,10,40,0,"3,000,000",2018-09-10 129 | Natalie Wade,RS-19,10,40,0,"3,000,000",2018-09-10 130 | Ray Evans,RS-19,10,40,0,"3,000,000",2018-09-10 131 | Theresa Fowler,RS-19,10,40,0,"3,000,000",2018-09-10 132 | 133 | # 134 | Jimmy Jennings,RS-20,10,40,0,"3,000,000",2018-09-10 135 | Marsha Watson,RS-20,10,40,0,"3,000,000",2018-09-10 136 | Natalie Wade,RS-20,10,40,0,"3,000,000",2018-09-10 137 | Ray Evans,RS-20,10,40,0,"3,000,000",2018-09-10 138 | Theresa Fowler,RS-20,10,40,0,"3,000,000",2018-09-10 139 | 140 | # 141 | Jimmy Jennings,RS-21,10,40,0,"3,000,000",2018-09-10 142 | Marsha Watson,RS-21,10,40,0,"3,000,000",2018-09-10 143 | Natalie Wade,RS-21,10,40,0,"3,000,000",2018-09-10 144 | Ray Evans,RS-21,10,40,0,"3,000,000",2018-09-10 145 | Theresa Fowler,RS-21,10,40,0,"3,000,000",2018-09-10 146 | 147 | # 148 | Jimmy Jennings,RS-22,10,40,0,"3,000,000",2018-09-10 149 | Marsha Watson,RS-22,10,40,0,"3,000,000",2018-09-10 150 | Natalie Wade,RS-22,10,40,0,"3,000,000",2018-09-10 151 | Ray Evans,RS-22,10,40,0,"3,000,000",2018-09-10 152 | Theresa Fowler,RS-22,10,40,0,"3,000,000",2018-09-10 153 | 154 | # 155 | Jimmy Jennings,RS-23,10,40,0,"3,000,000",2018-09-10 156 | Marsha Watson,RS-23,10,40,0,"3,000,000",2018-09-10 157 | Natalie Wade,RS-23,10,40,0,"3,000,000",2018-09-10 158 | Ray Evans,RS-23,10,40,0,"3,000,000",2018-09-10 159 | 
Theresa Fowler,RS-23,10,40,0,"3,000,000",2018-09-10 160 | 161 | # 162 | Jimmy Jennings,RS-24,10,40,0,"3,000,000",2018-09-10 163 | Marsha Watson,RS-24,10,40,0,"3,000,000",2018-09-10 164 | Natalie Wade,RS-24,10,40,0,"3,000,000",2018-09-10 165 | Ray Evans,RS-24,10,40,0,"3,000,000",2018-09-10 166 | Theresa Fowler,RS-24,10,40,0,"3,000,000",2018-09-10 167 | 168 | # 169 | Jimmy Jennings,RS-25,10,40,0,"3,000,000",2018-09-10 170 | Marsha Watson,RS-25,10,40,0,"3,000,000",2018-09-10 171 | Natalie Wade,RS-25,10,40,0,"3,000,000",2018-09-10 172 | Ray Evans,RS-25,10,40,0,"3,000,000",2018-09-10 173 | Theresa Fowler,RS-25,10,40,0,"3,000,000",2018-09-10 174 | 175 | # 176 | Jimmy Jennings,RS-26,10,40,0,"3,000,000",2018-09-10 177 | Marsha Watson,RS-26,10,40,0,"3,000,000",2018-09-10 178 | Natalie Wade,RS-26,10,40,0,"3,000,000",2018-09-10 179 | Ray Evans,RS-26,10,40,0,"3,000,000",2018-09-10 180 | Theresa Fowler,RS-26,10,40,0,"3,000,000",2018-09-10 181 | 182 | # 183 | Jimmy Jennings,RS-27,10,40,0,"3,000,000",2018-09-10 184 | Marsha Watson,RS-27,10,40,0,"3,000,000",2018-09-10 185 | Natalie Wade,RS-27,10,40,0,"3,000,000",2018-09-10 186 | Ray Evans,RS-27,10,40,0,"3,000,000",2018-09-10 187 | Theresa Fowler,RS-27,10,40,0,"3,000,000",2018-09-10 188 | 189 | # 190 | Jimmy Jennings,RS-28,10,40,0,"3,000,000",2018-09-10 191 | Marsha Watson,RS-28,10,40,0,"3,000,000",2018-09-10 192 | Natalie Wade,RS-28,10,40,0,"3,000,000",2018-09-10 193 | Ray Evans,RS-28,10,40,0,"3,000,000",2018-09-10 194 | Theresa Fowler,RS-28,10,40,0,"3,000,000",2018-09-10 195 | 196 | # 197 | Jimmy Jennings,RS-29,10,40,0,"3,000,000",2018-09-10 198 | Marsha Watson,RS-29,10,40,0,"3,000,000",2018-09-10 199 | Natalie Wade,RS-29,10,40,0,"3,000,000",2018-09-10 200 | Ray Evans,RS-29,10,40,0,"3,000,000",2018-09-10 201 | Theresa Fowler,RS-29,10,40,0,"3,000,000",2018-09-10 202 | 203 | # 204 | Jimmy Jennings,RS-30,10,40,0,"3,000,000",2018-09-10 205 | Marsha Watson,RS-30,10,40,0,"3,000,000",2018-09-10 206 | Natalie 
Wade,RS-30,10,40,0,"3,000,000",2018-09-10 207 | Ray Evans,RS-30,10,40,0,"3,000,000",2018-09-10 208 | Theresa Fowler,RS-30,10,40,0,"3,000,000",2018-09-10 209 | 210 | # 211 | Jimmy Jennings,RS-31,10,40,0,"3,000,000",2018-09-10 212 | Marsha Watson,RS-31,10,40,0,"3,000,000",2018-09-10 213 | Natalie Wade,RS-31,10,40,0,"3,000,000",2018-09-10 214 | Ray Evans,RS-31,10,40,0,"3,000,000",2018-09-10 215 | Theresa Fowler,RS-31,10,40,0,"3,000,000",2018-09-10 216 | 217 | # 218 | Jimmy Jennings,RS-32,10,40,0,"3,000,000",2018-09-10 219 | Marsha Watson,RS-32,10,40,0,"3,000,000",2018-09-10 220 | Natalie Wade,RS-32,10,40,0,"3,000,000",2018-09-10 221 | Ray Evans,RS-32,10,40,0,"3,000,000",2018-09-10 222 | Theresa Fowler,RS-32,10,40,0,"3,000,000",2018-09-10 223 | 224 | # 225 | Jimmy Jennings,RS-33,10,40,0,"3,000,000",2018-09-10 226 | Marsha Watson,RS-33,10,40,0,"3,000,000",2018-09-10 227 | Natalie Wade,RS-33,10,40,0,"3,000,000",2018-09-10 228 | Ray Evans,RS-33,10,40,0,"3,000,000",2018-09-10 229 | Theresa Fowler,RS-33,10,40,0,"3,000,000",2018-09-10 230 | 231 | # 232 | Jimmy Jennings,RS-34,10,40,0,"3,000,000",2018-09-10 233 | Marsha Watson,RS-34,10,40,0,"3,000,000",2018-09-10 234 | Natalie Wade,RS-34,10,40,0,"3,000,000",2018-09-10 235 | Ray Evans,RS-34,10,40,0,"3,000,000",2018-09-10 236 | Theresa Fowler,RS-34,10,40,0,"3,000,000",2018-09-10 237 | 238 | # 239 | Jimmy Jennings,RS-35,10,40,0,"3,000,000",2018-09-10 240 | Marsha Watson,RS-35,10,40,0,"3,000,000",2018-09-10 241 | Natalie Wade,RS-35,10,40,0,"3,000,000",2018-09-10 242 | Ray Evans,RS-35,10,40,0,"3,000,000",2018-09-10 243 | Theresa Fowler,RS-35,10,40,0,"3,000,000",2018-09-10 244 | 245 | # 246 | Jimmy Jennings,RS-36,10,40,0,"3,000,000",2018-09-10 247 | Marsha Watson,RS-36,10,40,0,"3,000,000",2018-09-10 248 | Natalie Wade,RS-36,10,40,0,"3,000,000",2018-09-10 249 | Ray Evans,RS-36,10,40,0,"3,000,000",2018-09-10 250 | Theresa Fowler,RS-36,10,40,0,"3,000,000",2018-09-10 251 | 252 | # 253 | Jimmy 
Jennings,RS-37,10,40,0,"3,000,000",2018-09-10 254 | Marsha Watson,RS-37,10,40,0,"3,000,000",2018-09-10 255 | Natalie Wade,RS-37,10,40,0,"3,000,000",2018-09-10 256 | Ray Evans,RS-37,10,40,0,"3,000,000",2018-09-10 257 | Theresa Fowler,RS-37,10,40,0,"3,000,000",2018-09-10 258 | 259 | # 260 | Jimmy Jennings,RS-38,10,40,0,"3,000,000",2018-09-10 261 | Marsha Watson,RS-38,10,40,0,"3,000,000",2018-09-10 262 | Natalie Wade,RS-38,10,40,0,"3,000,000",2018-09-10 263 | Ray Evans,RS-38,10,40,0,"3,000,000",2018-09-10 264 | Theresa Fowler,RS-38,10,40,0,"3,000,000",2018-09-10 265 | 266 | # 267 | Jimmy Jennings,RS-39,10,40,0,"3,000,000",2018-09-10 268 | Marsha Watson,RS-39,10,40,0,"3,000,000",2018-09-10 269 | Natalie Wade,RS-39,10,40,0,"3,000,000",2018-09-10 270 | Ray Evans,RS-39,10,40,0,"3,000,000",2018-09-10 271 | Theresa Fowler,RS-39,10,40,0,"3,000,000",2018-09-10 272 | 273 | # 274 | Jimmy Jennings,RS-40,10,40,0,"3,000,000",2018-09-10 275 | Marsha Watson,RS-40,10,40,0,"3,000,000",2018-09-10 276 | Natalie Wade,RS-40,10,40,0,"3,000,000",2018-09-10 277 | Ray Evans,RS-40,10,40,0,"3,000,000",2018-09-10 278 | Theresa Fowler,RS-40,10,40,0,"3,000,000",2018-09-10 279 | 280 | # 281 | Jimmy Jennings,RS-41,10,40,0,"3,000,000",2018-09-10 282 | Marsha Watson,RS-41,10,40,0,"3,000,000",2018-09-10 283 | Natalie Wade,RS-41,10,40,0,"3,000,000",2018-09-10 284 | Ray Evans,RS-41,10,40,0,"3,000,000",2018-09-10 285 | Theresa Fowler,RS-41,10,40,0,"3,000,000",2018-09-10 286 | 287 | # 288 | Jimmy Jennings,RS-42,10,40,0,"3,000,000",2018-09-10 289 | Marsha Watson,RS-42,10,40,0,"3,000,000",2018-09-10 290 | Natalie Wade,RS-42,10,40,0,"3,000,000",2018-09-10 291 | Ray Evans,RS-42,10,40,0,"3,000,000",2018-09-10 292 | Theresa Fowler,RS-42,10,40,0,"3,000,000",2018-09-10 293 | 294 | # 295 | Jimmy Jennings,RS-43,10,40,0,"3,000,000",2018-09-10 296 | Marsha Watson,RS-43,10,40,0,"3,000,000",2018-09-10 297 | Natalie Wade,RS-43,10,40,0,"3,000,000",2018-09-10 298 | Ray Evans,RS-43,10,40,0,"3,000,000",2018-09-10 299 | 
Theresa Fowler,RS-43,10,40,0,"3,000,000",2018-09-10 300 | 301 | # 302 | Jimmy Jennings,RS-44,10,40,0,"3,000,000",2018-09-10 303 | Marsha Watson,RS-44,10,40,0,"3,000,000",2018-09-10 304 | Natalie Wade,RS-44,10,40,0,"3,000,000",2018-09-10 305 | Ray Evans,RS-44,10,40,0,"3,000,000",2018-09-10 306 | Theresa Fowler,RS-44,10,40,0,"3,000,000",2018-09-10 307 | 308 | # 309 | Jimmy Jennings,RS-45,10,40,0,"3,000,000",2018-09-10 310 | Marsha Watson,RS-45,10,40,0,"3,000,000",2018-09-10 311 | Natalie Wade,RS-45,10,40,0,"3,000,000",2018-09-10 312 | Ray Evans,RS-45,10,40,0,"3,000,000",2018-09-10 313 | Theresa Fowler,RS-45,10,40,0,"3,000,000",2018-09-10 314 | 315 | # 316 | Jimmy Jennings,RS-46,10,40,0,"3,000,000",2018-09-10 317 | Marsha Watson,RS-46,10,40,0,"3,000,000",2018-09-10 318 | Natalie Wade,RS-46,10,40,0,"3,000,000",2018-09-10 319 | Ray Evans,RS-46,10,40,0,"3,000,000",2018-09-10 320 | Theresa Fowler,RS-46,10,40,0,"3,000,000",2018-09-10 321 | 322 | # 323 | Jimmy Jennings,RS-47,10,40,0,"3,000,000",2018-09-10 324 | Marsha Watson,RS-47,10,40,0,"3,000,000",2018-09-10 325 | Natalie Wade,RS-47,10,40,0,"3,000,000",2018-09-10 326 | Ray Evans,RS-47,10,40,0,"3,000,000",2018-09-10 327 | Theresa Fowler,RS-47,10,40,0,"3,000,000",2018-09-10 328 | 329 | # 330 | Jimmy Jennings,RS-48,10,40,0,"3,000,000",2018-09-10 331 | Marsha Watson,RS-48,10,40,0,"3,000,000",2018-09-10 332 | Natalie Wade,RS-48,10,40,0,"3,000,000",2018-09-10 333 | Ray Evans,RS-48,10,40,0,"3,000,000",2018-09-10 334 | Theresa Fowler,RS-48,10,40,0,"3,000,000",2018-09-10 335 | 336 | # 337 | Jimmy Jennings,RS-49,10,40,0,"3,000,000",2018-09-10 338 | Marsha Watson,RS-49,10,40,0,"3,000,000",2018-09-10 339 | Natalie Wade,RS-49,10,40,0,"3,000,000",2018-09-10 340 | Ray Evans,RS-49,10,40,0,"3,000,000",2018-09-10 341 | Theresa Fowler,RS-49,10,40,0,"3,000,000",2018-09-10 342 | 343 | # 344 | Jimmy Jennings,RS-50,10,40,0,"3,000,000",2018-09-10 345 | Marsha Watson,RS-50,10,40,0,"3,000,000",2018-09-10 346 | Natalie 
Wade,RS-50,10,40,0,"3,000,000",2018-09-10 347 | Ray Evans,RS-50,10,40,0,"3,000,000",2018-09-10 348 | Theresa Fowler,RS-50,10,40,0,"3,000,000",2018-09-10 349 | 350 | # 351 | Jimmy Jennings,RS-51,10,40,0,"3,000,000",2018-09-10 352 | Marsha Watson,RS-51,10,40,0,"3,000,000",2018-09-10 353 | Natalie Wade,RS-51,10,40,0,"3,000,000",2018-09-10 354 | Ray Evans,RS-51,10,40,0,"3,000,000",2018-09-10 355 | Theresa Fowler,RS-51,10,40,0,"3,000,000",2018-09-10 356 | 357 | # 358 | Jimmy Jennings,RS-52,10,40,0,"3,000,000",2018-09-10 359 | Marsha Watson,RS-52,10,40,0,"3,000,000",2018-09-10 360 | Natalie Wade,RS-52,10,40,0,"3,000,000",2018-09-10 361 | Ray Evans,RS-52,10,40,0,"3,000,000",2018-09-10 362 | Theresa Fowler,RS-52,10,40,0,"3,000,000",2018-09-10 363 | 364 | # 365 | Jimmy Jennings,RS-53,10,40,0,"3,000,000",2018-09-10 366 | Marsha Watson,RS-53,10,40,0,"3,000,000",2018-09-10 367 | Natalie Wade,RS-53,10,40,0,"3,000,000",2018-09-10 368 | Ray Evans,RS-53,10,40,0,"3,000,000",2018-09-10 369 | Theresa Fowler,RS-53,10,40,0,"3,000,000",2018-09-10 370 | 371 | # 372 | Jimmy Jennings,RS-54,10,40,0,"3,000,000",2018-09-10 373 | Marsha Watson,RS-54,10,40,0,"3,000,000",2018-09-10 374 | Natalie Wade,RS-54,10,40,0,"3,000,000",2018-09-10 375 | Ray Evans,RS-54,10,40,0,"3,000,000",2018-09-10 376 | Theresa Fowler,RS-54,10,40,0,"3,000,000",2018-09-10 377 | 378 | # 379 | Jimmy Jennings,RS-55,10,40,0,"3,000,000",2018-09-10 380 | Marsha Watson,RS-55,10,40,0,"3,000,000",2018-09-10 381 | Natalie Wade,RS-55,10,40,0,"3,000,000",2018-09-10 382 | Ray Evans,RS-55,10,40,0,"3,000,000",2018-09-10 383 | Theresa Fowler,RS-55,10,40,0,"3,000,000",2018-09-10 384 | 385 | # 386 | Jimmy Jennings,RS-56,10,40,0,"3,000,000",2018-09-10 387 | Marsha Watson,RS-56,10,40,0,"3,000,000",2018-09-10 388 | Natalie Wade,RS-56,10,40,0,"3,000,000",2018-09-10 389 | Ray Evans,RS-56,10,40,0,"3,000,000",2018-09-10 390 | Theresa Fowler,RS-56,10,40,0,"3,000,000",2018-09-10 391 | 
-------------------------------------------------------------------------------- /data-raw/sme_top_domains.csv: -------------------------------------------------------------------------------- 1 | sme,domain1,domain2,domain3,domain4,domain5,domain6 2 | Natalie Wade,Risk Management,Human Resources Security,Asset Management,Information Security Incident Management,"Information Systems Acquisition, Development, and Maintenance", 3 | Theresa Fowler,Business Continuity Management,Physical and Environmental Security,Human Resources Security,COmpliance,Access Management, 4 | Jimmy Jennings,Asset Management,Communications and Operations Management,Privacy Practices,Compliance,Information Security Incident Management,Security Policy 5 | Marsha Watson,Compliance,Information Security Management Program, Security Policy, Risk Management, Communications and Operations Management, Privacy Practices 6 | Ray Evans,,,,,, 7 | -------------------------------------------------------------------------------- /data-raw/threat_communities.csv: -------------------------------------------------------------------------------- 1 | threat_community,threat_id,definition,low,high,category,action_type 2 | Technology Leadership,TC-01,"Technology leaders at the director level and above",.40,.80,employee,error 3 | External Cyber Adversaries,TC-02,Hackers or external entities that attempt to penetrate our systems,.10,.50,external,malicious 4 | Environmental,TC-03,Natural disasters,.20,.80,natural,geological 5 | Third Parties,TC-04,"Vendors and other organizations that work directly with our company",.30,.60,external,error 6 | Regulators or Auditors,TC-05,Internal and external auditors and regulators,.40,.75,external, 7 | External Litigants,TC-06,"Civil and criminal corporate litigation. 
Mostly employment matters.",.30,.50,external,competitive 8 | Organizational Internal Users,TC-07,"Internal users at the individual contributor level.",.30,.50,employee,error 9 | Political Activists,TC-08,"Terrorists and political activitists.",.30,.50,external,malicious 10 | -------------------------------------------------------------------------------- /data/calibration_questions.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/davidski/collector/061fc18d92c94509b5e72d0877763448d8580994/data/calibration_questions.rda -------------------------------------------------------------------------------- /data/mc_calibration_answers.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/davidski/collector/061fc18d92c94509b5e72d0877763448d8580994/data/mc_calibration_answers.rda -------------------------------------------------------------------------------- /data/mc_capabilities.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/davidski/collector/061fc18d92c94509b5e72d0877763448d8580994/data/mc_capabilities.rda -------------------------------------------------------------------------------- /data/mc_capability_answers.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/davidski/collector/061fc18d92c94509b5e72d0877763448d8580994/data/mc_capability_answers.rda -------------------------------------------------------------------------------- /data/mc_capability_parameters_fitted.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/davidski/collector/061fc18d92c94509b5e72d0877763448d8580994/data/mc_capability_parameters_fitted.rda -------------------------------------------------------------------------------- /data/mc_domains.rda: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/davidski/collector/061fc18d92c94509b5e72d0877763448d8580994/data/mc_domains.rda -------------------------------------------------------------------------------- /data/mc_scenario_answers.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/davidski/collector/061fc18d92c94509b5e72d0877763448d8580994/data/mc_scenario_answers.rda -------------------------------------------------------------------------------- /data/mc_scenario_parameters_fitted.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/davidski/collector/061fc18d92c94509b5e72d0877763448d8580994/data/mc_scenario_parameters_fitted.rda -------------------------------------------------------------------------------- /data/mc_scenarios.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/davidski/collector/061fc18d92c94509b5e72d0877763448d8580994/data/mc_scenarios.rda -------------------------------------------------------------------------------- /data/mc_sme_top_domains.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/davidski/collector/061fc18d92c94509b5e72d0877763448d8580994/data/mc_sme_top_domains.rda -------------------------------------------------------------------------------- /data/mc_threat_communities.rda: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/davidski/collector/061fc18d92c94509b5e72d0877763448d8580994/data/mc_threat_communities.rda -------------------------------------------------------------------------------- /data/mc_threat_parameters_fitted.rda: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/davidski/collector/061fc18d92c94509b5e72d0877763448d8580994/data/mc_threat_parameters_fitted.rda -------------------------------------------------------------------------------- /inst/WORDLIST: -------------------------------------------------------------------------------- 1 | AppVeyor 2 | Flesch 3 | gamifying 4 | ggpubr 5 | https 6 | Kincaid 7 | LM 8 | magrittr 9 | meanlog 10 | MetroCare 11 | ogsys 12 | OpenFAIR 13 | opengroup 14 | optim 15 | pandoc 16 | poisson 17 | quanteda 18 | README 19 | rlognorm 20 | rnorm 21 | Schemas 22 | schemas 23 | sd 24 | sdlog 25 | SME 26 | sme 27 | SME's 28 | SMEs 29 | TEF 30 | th 31 | textstats 32 | tidyr 33 | tidyrisk 34 | www 35 | xaringan 36 | -------------------------------------------------------------------------------- /inst/css/styles.css: -------------------------------------------------------------------------------- 1 | @import url('https://fonts.googleapis.com/css?family=Source+Sans+Pro'); 2 | 3 | body { font-family: 'Source Sans Pro', sans-serif;} 4 | 5 | h1 { 6 | font-family: 'Source Sans Pro'; 7 | font-weight: normal; 8 | font-size: 1em; 9 | margin-bottom: 0; 10 | } 11 | 12 | h2, h3, i{ 13 | font-family: 'Source Sans Pro'; 14 | font-weight: normal; 15 | } 16 | 17 | .footer-left { 18 | position: absolute; 19 | bottom: 12px; 20 | left: 20px; 21 | padding: 0; 22 | margin: 0; 23 | opacity: 0.5; 24 | } 25 | 26 | .progress-image { 27 | width: 50%; 28 | text-align: center; 29 | margin: auto; 30 | bottom: 0; 31 | position: absolute; 32 | 33 | } 34 | 35 | .header-right { 36 | position: absolute; 37 | top: 12px; 38 | right: 20px; 39 | padding: 0; 40 | margin: 0; 41 | opacity: 0.5; 42 | } 43 | 44 | .middle-text { 45 | top: 50%; 46 | } 47 | 48 | .black-background { 49 | background-color: black; 50 | } 51 | 52 | .top-white-text { 53 | background-color: rgba(0,0,0,.8); 54 | margin-top: 0; 55 | color: white; 56 | width: 300px; 57 | padding-right: 100px; 58 | } 59 | 60 | .banner { 61 | /* 62 | 
background:url('../img/word_risk.jpg'); 63 | */ 64 | background-position:center; 65 | width:100%; 66 | background-size: cover; 67 | min-height:180px; 68 | } 69 | 70 | .capability-slide .banner { 71 | /* 72 | background:url('../img/word_strength.jpg'); 73 | */ 74 | background-position:center; 75 | background-size: 100% 100%; 76 | } 77 | 78 | .capability-slide .capability { 79 | font-size:xx-large; 80 | /* background: white; 81 | color: grey; 82 | text-shadow: 0 0 0; */ 83 | } 84 | 85 | .scenario-slide { 86 | background-color: #212A39; 87 | } 88 | 89 | .scenario-slide .scenario { 90 | font-size:xx-large; 91 | background: white; 92 | color: grey; 93 | text-shadow: 0 0 0; 94 | } 95 | 96 | .scenario-slide strong { 97 | color: black; 98 | } 99 | 100 | .domain { 101 | font-size:large; 102 | font-weight:bold; 103 | margin-bottom:0; 104 | } 105 | 106 | .domain-description { 107 | font-size: medium; 108 | margin-top:0; 109 | } 110 | -------------------------------------------------------------------------------- /inst/img/espresso_machine.jpg: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/davidski/collector/061fc18d92c94509b5e72d0877763448d8580994/inst/img/espresso_machine.jpg -------------------------------------------------------------------------------- /inst/interview.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Scenarios and Capabilities" 3 | params: 4 | sme: NULL 5 | domain_list: NULL 6 | questions_file: NULL 7 | assessment_title: NULL 8 | output: 9 | xaringan::moon_reader: 10 | css: ["default", "css/styles.css"] 11 | #lib_dir: libs 12 | lib_dir: libs 13 | nature: 14 | highlightStyle: github 15 | highlightLines: true 16 | countIncrementalSlides: false 17 | seal: false 18 | --- 19 | 20 | class: title-slide, inverse, center 21 | 22 | # Scenarios and Capabilities 23 | 24 | ```{r smetitle, results='asis', echo=FALSE} 25 | assessment_title <- if
(!is.null(params$assessment_title)) { 26 | params$assessment_title } else { 27 | "Strategic Risk Assessment"} 28 | cat("## ", assessment_title, "\n", sep = "") 29 | 30 | sme <- if (!is.null(params$sme)) params$sme else "SME Interview" 31 | cat("### ", sme, "\n", sep = "") 32 | ``` 33 | 34 | --- 35 | 36 | ```{r setup, include=FALSE} 37 | options(htmltools.dir.version = FALSE) 38 | library(collector) 39 | library(dplyr) 40 | library(tidyr) 41 | library(purrr) 42 | ``` 43 | 44 | ```{r read_questions, include = FALSE} 45 | dat <- readRDS(params$questions_file) 46 | domains <- dat$domains 47 | scenarios <- dat$scenarios 48 | capabilities <- dat$capabilities 49 | rm(dat) 50 | 51 | questions <- scenarios %>% dplyr::left_join(domains, by = "domain_id") 52 | capabilities <- capabilities %>% dplyr::left_join(domains, by = "domain_id") 53 | ``` 54 | 55 | ```{r define_helpers, include=FALSE} 56 | make_slide <- function(x) { 57 | cat("class: inverse scenario-slide\n") 58 | #cat("backgound-size: cover\n") 59 | #cat("background-image: url('img/word_risk.jpg')\n") 60 | cat("\n") 61 | cat("

", x$domain, "

\n") 62 | cat("

", x$description, "

\n") 63 | cat(".footer-left[", x$scenario_id, "]\n") 64 | cat(".scenario.middle-text.center[", 65 | gsub(" (due to|resulting in) ", " __\\1__ ", x$scenario, fixed = FALSE), 66 | "]", "\n\n") 67 | 68 | # cat("
\n") 69 | # cat("--\n", ".pull-left[", 70 | # "### Frequency Statement\n\n", x$Frequency, 71 | # "]", "\n\n", sep = "") 72 | # cat("--\n", ".pull-right[", 73 | # "### Impact Statement\n", x$Impact, 74 | # "]", "\n\n", sep= "") 75 | 76 | cat("???\n", x$example, "\n\n", sep = "") 77 | cat("---\n") 78 | } 79 | make_capabilty_slide <- function(x) { 80 | cat("class: capability-slide\n") 81 | #cat("background-image: url('img/happy_transform.jpg')\n") 82 | #cat("backgound-size: cover\n") 83 | cat("\n") 84 | cat("

", x$domain, "

\n") 85 | cat("

", x$description, "

\n") 86 | cat(".footer-left[", x$capability_id, "]\n") 87 | cat(".middle[.center.capability[", x$capability, "]]", "\n\n") 88 | 89 | #cat("
\n\n") 90 | #make_progress_graphic(x$ix, x$n) %>% print() 91 | #cat("\n
\n") 92 | 93 | cat("\n\n---\n") 94 | } 95 | ``` 96 | 97 | ```{r make_slides, results='asis', echo=FALSE, fig.height=1, fig.width=5} 98 | domains_to_display <- if (!is.null(params$domain_list)) { 99 | params$domain_list } else { 100 | sort(unique(questions$domain))} 101 | list(domain = domains_to_display, 102 | ix = 1:length(domains_to_display), 103 | n = length(domains_to_display)) %>% 104 | purrr::pwalk(function(domain, ix, n) { 105 | questions[questions$domain == domain, ] %>% 106 | dplyr::mutate(rown = row_number(), ix = ix*2 - 1, n = n*2) %>% 107 | dplyr::group_by(rown) %>% tidyr::nest() %>% dplyr::pull(data) %>% 108 | purrr::walk(function(x) make_slide(x)) 109 | capabilities[capabilities$domain == domain, ] %>% 110 | dplyr::mutate(rown = row_number(), ix = ix*2, n = n*2) %>% 111 | dplyr::group_by(rown) %>% tidyr::nest() %>% dplyr::pull(data) %>% 112 | purrr::walk(function(x) make_capabilty_slide(x)) 113 | }) 114 | ``` 115 | 116 | background-image: url('img/espresso_machine.jpg') 117 | background-size: cover 118 | 119 | # .top-white-text[The End] 120 | 121 |

Thank you for your time!

122 | -------------------------------------------------------------------------------- /inst/templates/template.docx: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/davidski/collector/061fc18d92c94509b5e72d0877763448d8580994/inst/templates/template.docx -------------------------------------------------------------------------------- /man/calibration_questions.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/data.R 3 | \docType{data} 4 | \name{calibration_questions} 5 | \alias{calibration_questions} 6 | \title{Calibration questions} 7 | \format{ 8 | A data frame with 27 rows and 3 variables: 9 | \describe{ 10 | \item{question}{text of the calibration question} 11 | \item{answer}{answer text to the calibration question} 12 | \item{calibration_id}{unique identifier for the calibration question} 13 | } 14 | } 15 | \source{ 16 | Common trivia questions drawn from a variety of open source web resources. 17 | } 18 | \usage{ 19 | calibration_questions 20 | } 21 | \description{ 22 | A dataset of reference trivia questions for calibrating SMEs. 23 | } 24 | \keyword{datasets} 25 | -------------------------------------------------------------------------------- /man/check_readability.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{check_readability} 4 | \alias{check_readability} 5 | \title{Check the readability of scenario text} 6 | \usage{ 7 | check_readability(x) 8 | } 9 | \arguments{ 10 | \item{x}{A \code{tidyrisk_question_set} object} 11 | } 12 | \value{ 13 | A dataframe of the scenario id, domain id, and the Flesch-Kincaid readability score for the scenario text. 
14 | } 15 | \description{ 16 | Calculate the Flesch-Kincaid score for each scenario and return that score 17 | along with the scenario ID and domain as a tidy dataframe. 18 | } 19 | \examples{ 20 | \dontrun{ 21 | questions <- read_questions() 22 | check_readability(questions) 23 | } 24 | } 25 | -------------------------------------------------------------------------------- /man/clean_answers.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/clean_answers.R 3 | \name{clean_answers} 4 | \alias{clean_answers} 5 | \title{Clean extreme answers} 6 | \usage{ 7 | clean_answers(scenario_answers, capability_answers) 8 | } 9 | \arguments{ 10 | \item{scenario_answers}{Scenario answers dataframe.} 11 | 12 | \item{capability_answers}{Capability answers dataframe.} 13 | } 14 | \value{ 15 | A list of modified scenarios and capabilities. 16 | } 17 | \description{ 18 | You may wish to apply some sanity checking bounds on the responses from 19 | subject matter experts. This function applies a set of predefined 20 | transformations to the scenario and capability responses. Review these 21 | assumptions carefully before using them in your own analysis. 
22 | } 23 | \details{ 24 | Make the following assumptions/modifications 25 | \itemize{ 26 | \item minimum capacity is 5\% (we've thought about it - 90\% CI) 27 | \item maximum capacity is 95\% (we're just about the best - 90\% CI) 28 | \item minimum loss is 1000 dollars (both low and high) 29 | \item scale all impact into thousands of dollars (make normal 30 | decomposition easier, and is in line of the scale of 31 | a strategic analysis) 32 | \item set a minimum frequency of once per 10 years (0.1) 33 | } 34 | } 35 | \examples{ 36 | data(mc_capability_answers) 37 | data(mc_scenario_answers) 38 | clean_answers(mc_scenario_answers, mc_capability_answers) 39 | } 40 | -------------------------------------------------------------------------------- /man/collector.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/collector.R 3 | \docType{package} 4 | \name{collector} 5 | \alias{collector} 6 | \title{\code{collector} package} 7 | \description{ 8 | Quantified Information Risk Assessment Data Collection 9 | } 10 | \details{ 11 | See the online documentation located at 12 | \href{https://evaluator.tidyrisk.org/}{https://evaluator.tidyrisk.org/} 13 | } 14 | -------------------------------------------------------------------------------- /man/combine_capability_parameters.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/fit_distributions.R 3 | \name{combine_capability_parameters} 4 | \alias{combine_capability_parameters} 5 | \title{Combine multiple SME distributions into a single unified view} 6 | \usage{ 7 | combine_capability_parameters(capability_parameters) 8 | } 9 | \arguments{ 10 | \item{capability_parameters}{Fitted individual parameters for capabilities.} 11 | } 12 | \value{ 13 | A dataframe. 
14 | } 15 | \description{ 16 | Given a dataframe with multiple SME fitted distributions for a single 17 | capability, apply weighting for opinion pooling, and construct a final 18 | combined distribution for each OpenFAIR scenario parameter. 19 | } 20 | \examples{ 21 | NULL 22 | } 23 | -------------------------------------------------------------------------------- /man/combine_lognorm.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/fit_distributions.R 3 | \name{combine_lognorm} 4 | \alias{combine_lognorm} 5 | \title{Weight a set of lognormal parameters into a single distribution} 6 | \usage{ 7 | combine_lognorm(dat) 8 | } 9 | \arguments{ 10 | \item{dat}{A dataframe.} 11 | } 12 | \value{ 13 | A dataframe. 14 | } 15 | \description{ 16 | Weight a set of lognormal parameters into a single distribution 17 | } 18 | \examples{ 19 | dat <- data.frame(meanlog = c(1, 1.5), 20 | sdlog = c(1, 2), 21 | weight = c(2, 1)) 22 | combine_lognorm(dat) 23 | } 24 | \seealso{ 25 | Other distribution fitting functions: 26 | \code{\link{combine_lognorm_trunc}()}, 27 | \code{\link{combine_norm}()}, 28 | \code{\link{fit_capabilities_geomean}()}, 29 | \code{\link{fit_capabilities}()}, 30 | \code{\link{fit_lognorm_trunc}()}, 31 | \code{\link{fit_lognorm}()}, 32 | \code{\link{fit_norm_trunc}()}, 33 | \code{\link{fit_pois}()}, 34 | \code{\link{fit_scenarios_geomean}()}, 35 | \code{\link{fit_scenarios}()}, 36 | \code{\link{fit_threat_communities}()}, 37 | \code{\link{generate_cost_function}()}, 38 | \code{\link{lognormal_to_normal}()}, 39 | \code{\link{normal_to_lognormal}()} 40 | } 41 | \concept{distribution fitting functions} 42 | -------------------------------------------------------------------------------- /man/combine_lognorm_trunc.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | 
% Please edit documentation in R/fit_distributions.R 3 | \name{combine_lognorm_trunc} 4 | \alias{combine_lognorm_trunc} 5 | \title{Weight a set of lognormal parameters into a single distribution} 6 | \usage{ 7 | combine_lognorm_trunc(dat) 8 | } 9 | \arguments{ 10 | \item{dat}{Dataframe of meanlog, sdlog, min, max, and sdlog.} 11 | } 12 | \value{ 13 | A dataframe. 14 | } 15 | \description{ 16 | Weight a set of lognormal parameters into a single distribution 17 | } 18 | \examples{ 19 | dat <- data.frame(meanlog = c(1, 1.5), 20 | sdlog = c(1, 2), 21 | min = 0, 22 | max = Inf, 23 | weight = c(2, 1)) 24 | combine_lognorm_trunc(dat) 25 | } 26 | \seealso{ 27 | Other distribution fitting functions: 28 | \code{\link{combine_lognorm}()}, 29 | \code{\link{combine_norm}()}, 30 | \code{\link{fit_capabilities_geomean}()}, 31 | \code{\link{fit_capabilities}()}, 32 | \code{\link{fit_lognorm_trunc}()}, 33 | \code{\link{fit_lognorm}()}, 34 | \code{\link{fit_norm_trunc}()}, 35 | \code{\link{fit_pois}()}, 36 | \code{\link{fit_scenarios_geomean}()}, 37 | \code{\link{fit_scenarios}()}, 38 | \code{\link{fit_threat_communities}()}, 39 | \code{\link{generate_cost_function}()}, 40 | \code{\link{lognormal_to_normal}()}, 41 | \code{\link{normal_to_lognormal}()} 42 | } 43 | \concept{distribution fitting functions} 44 | -------------------------------------------------------------------------------- /man/combine_norm.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/fit_distributions.R 3 | \name{combine_norm} 4 | \alias{combine_norm} 5 | \title{Weight a set of normal parameters into a single distribution} 6 | \usage{ 7 | combine_norm(dat) 8 | } 9 | \arguments{ 10 | \item{dat}{Dataframe of mean, sd and weights.} 11 | } 12 | \value{ 13 | A dataframe. 
14 | } 15 | \description{ 16 | Given a set of arbitrary parameters that includes at least a weight column, 17 | take a weighted average of all the other parameters. 18 | } 19 | \examples{ 20 | dat <- data.frame(mean = c(10, 20, 30), 21 | sd = c(4, 5, 10), 22 | weight = c(2, 1, 2)) 23 | combine_norm(dat) 24 | } 25 | \seealso{ 26 | Other distribution fitting functions: 27 | \code{\link{combine_lognorm_trunc}()}, 28 | \code{\link{combine_lognorm}()}, 29 | \code{\link{fit_capabilities_geomean}()}, 30 | \code{\link{fit_capabilities}()}, 31 | \code{\link{fit_lognorm_trunc}()}, 32 | \code{\link{fit_lognorm}()}, 33 | \code{\link{fit_norm_trunc}()}, 34 | \code{\link{fit_pois}()}, 35 | \code{\link{fit_scenarios_geomean}()}, 36 | \code{\link{fit_scenarios}()}, 37 | \code{\link{fit_threat_communities}()}, 38 | \code{\link{generate_cost_function}()}, 39 | \code{\link{lognormal_to_normal}()}, 40 | \code{\link{normal_to_lognormal}()} 41 | } 42 | \concept{distribution fitting functions} 43 | -------------------------------------------------------------------------------- /man/combine_scenario_parameters.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/fit_distributions.R 3 | \name{combine_scenario_parameters} 4 | \alias{combine_scenario_parameters} 5 | \title{Combine multiple SME distributions into a single unified view} 6 | \usage{ 7 | combine_scenario_parameters(scenario_parameters) 8 | } 9 | \arguments{ 10 | \item{scenario_parameters}{Fitted scenario factors for individual SMEs.} 11 | } 12 | \value{ 13 | A dataframe. 14 | } 15 | \description{ 16 | Given a dataframe with multiple SME fitted distributions for a single 17 | scenario, decompose the lognormal distribution into normal parameters, 18 | apply weighting for opinion pooling, and construct a final combined 19 | distribution for each OpenFAIR scenario factor. 
20 | } 21 | \examples{ 22 | NULL 23 | } 24 | -------------------------------------------------------------------------------- /man/derive_controls.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/prepare_data.R 3 | \name{derive_controls} 4 | \alias{derive_controls} 5 | \title{Generate the quantified capability parameters for a scenario} 6 | \usage{ 7 | derive_controls(capability_ids, capability_parameters) 8 | } 9 | \arguments{ 10 | \item{capability_ids}{Comma-delimited list of capability ids} 11 | 12 | \item{capability_parameters}{Dataframe of fitted and combined capability parameters} 13 | } 14 | \value{ 15 | A list. 16 | } 17 | \description{ 18 | Based on the \code{evaluator::\link[evaluator]{derive_controls}} function 19 | } 20 | \details{ 21 | Creates the difficulty parameters (embedded list) for quantitative 22 | parameters. 23 | } 24 | \examples{ 25 | NULL 26 | } 27 | \seealso{ 28 | \code{evaluator::\link[evaluator]{derive_controls}} 29 | } 30 | -------------------------------------------------------------------------------- /man/enforce_tidyrisk_question_set.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{enforce_tidyrisk_question_set} 4 | \alias{enforce_tidyrisk_question_set} 5 | \title{Validate that the parameter passed is a \code{\link{tidyrisk_question_set}} object} 6 | \usage{ 7 | enforce_tidyrisk_question_set(x) 8 | } 9 | \arguments{ 10 | \item{x}{An object} 11 | } 12 | \description{ 13 | Validate that the parameter passed is a \code{\link{tidyrisk_question_set}} object 14 | } 15 | \examples{ 16 | NULL 17 | } 18 | -------------------------------------------------------------------------------- /man/enforce_tidyrisk_response_set.Rd: 
-------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{enforce_tidyrisk_response_set} 4 | \alias{enforce_tidyrisk_response_set} 5 | \title{Validate that the parameter passed is a \code{\link{tidyrisk_response_set}} object} 6 | \usage{ 7 | enforce_tidyrisk_response_set(x) 8 | } 9 | \arguments{ 10 | \item{x}{An object} 11 | } 12 | \description{ 13 | Validate that the parameter passed is a \code{\link{tidyrisk_response_set}} object 14 | } 15 | \examples{ 16 | NULL 17 | } 18 | -------------------------------------------------------------------------------- /man/figures/logo.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/davidski/collector/061fc18d92c94509b5e72d0877763448d8580994/man/figures/logo.png -------------------------------------------------------------------------------- /man/fit_capabilities.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/fit_distributions.R 3 | \name{fit_capabilities} 4 | \alias{fit_capabilities} 5 | \title{Fit SME capability estimates to distribution parameters} 6 | \usage{ 7 | fit_capabilities(responses) 8 | } 9 | \arguments{ 10 | \item{responses}{A \code{\link{tidyrisk_response_set}} object} 11 | } 12 | \value{ 13 | A dataframe. 
14 | } 15 | \description{ 16 | Fit SME capability estimates to distribution parameters 17 | } 18 | \examples{ 19 | NULL 20 | } 21 | \seealso{ 22 | Other distribution fitting functions: 23 | \code{\link{combine_lognorm_trunc}()}, 24 | \code{\link{combine_lognorm}()}, 25 | \code{\link{combine_norm}()}, 26 | \code{\link{fit_capabilities_geomean}()}, 27 | \code{\link{fit_lognorm_trunc}()}, 28 | \code{\link{fit_lognorm}()}, 29 | \code{\link{fit_norm_trunc}()}, 30 | \code{\link{fit_pois}()}, 31 | \code{\link{fit_scenarios_geomean}()}, 32 | \code{\link{fit_scenarios}()}, 33 | \code{\link{fit_threat_communities}()}, 34 | \code{\link{generate_cost_function}()}, 35 | \code{\link{lognormal_to_normal}()}, 36 | \code{\link{normal_to_lognormal}()} 37 | } 38 | \concept{distribution fitting functions} 39 | -------------------------------------------------------------------------------- /man/fit_capabilities_geomean.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/fit_distributions.R 3 | \name{fit_capabilities_geomean} 4 | \alias{fit_capabilities_geomean} 5 | \title{Fit capability parameters via a geometric mean} 6 | \usage{ 7 | fit_capabilities_geomean(capabilities_answers) 8 | } 9 | \arguments{ 10 | \item{capabilities_answers}{Answers dataframe.} 11 | } 12 | \value{ 13 | A dataframe. 
14 | } 15 | \description{ 16 | Fit capability parameters via a geometric mean 17 | } 18 | \examples{ 19 | data(mc_capability_answers) 20 | fit_capabilities_geomean(mc_capability_answers) 21 | } 22 | \seealso{ 23 | Other distribution fitting functions: 24 | \code{\link{combine_lognorm_trunc}()}, 25 | \code{\link{combine_lognorm}()}, 26 | \code{\link{combine_norm}()}, 27 | \code{\link{fit_capabilities}()}, 28 | \code{\link{fit_lognorm_trunc}()}, 29 | \code{\link{fit_lognorm}()}, 30 | \code{\link{fit_norm_trunc}()}, 31 | \code{\link{fit_pois}()}, 32 | \code{\link{fit_scenarios_geomean}()}, 33 | \code{\link{fit_scenarios}()}, 34 | \code{\link{fit_threat_communities}()}, 35 | \code{\link{generate_cost_function}()}, 36 | \code{\link{lognormal_to_normal}()}, 37 | \code{\link{normal_to_lognormal}()} 38 | } 39 | \concept{distribution fitting functions} 40 | -------------------------------------------------------------------------------- /man/fit_lognorm.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/fit_distributions.R 3 | \name{fit_lognorm} 4 | \alias{fit_lognorm} 5 | \title{Find parameters that fit quantile values of an unknown lognormal distribution} 6 | \usage{ 7 | fit_lognorm(low, high) 8 | } 9 | \arguments{ 10 | \item{low}{5th quantile.} 11 | 12 | \item{high}{95th quantile.} 13 | } 14 | \value{ 15 | A dataframe. 16 | } 17 | \description{ 18 | With a 5th and 95th quantile point estimates, fit a lognormal distribution, 19 | returning the parameters of the distribution. 
20 | } 21 | \examples{ 22 | fit_lognorm(low = .20, high = .50) 23 | } 24 | \seealso{ 25 | Other distribution fitting functions: 26 | \code{\link{combine_lognorm_trunc}()}, 27 | \code{\link{combine_lognorm}()}, 28 | \code{\link{combine_norm}()}, 29 | \code{\link{fit_capabilities_geomean}()}, 30 | \code{\link{fit_capabilities}()}, 31 | \code{\link{fit_lognorm_trunc}()}, 32 | \code{\link{fit_norm_trunc}()}, 33 | \code{\link{fit_pois}()}, 34 | \code{\link{fit_scenarios_geomean}()}, 35 | \code{\link{fit_scenarios}()}, 36 | \code{\link{fit_threat_communities}()}, 37 | \code{\link{generate_cost_function}()}, 38 | \code{\link{lognormal_to_normal}()}, 39 | \code{\link{normal_to_lognormal}()} 40 | } 41 | \concept{distribution fitting functions} 42 | -------------------------------------------------------------------------------- /man/fit_lognorm_trunc.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/fit_distributions.R 3 | \name{fit_lognorm_trunc} 4 | \alias{fit_lognorm_trunc} 5 | \title{Find parameters that fit quantile values of an unknown truncated lognormal distribution} 6 | \usage{ 7 | fit_lognorm_trunc(low, high, min = 0, max = Inf) 8 | } 9 | \arguments{ 10 | \item{low}{5th quantile.} 11 | 12 | \item{high}{95th quantile.} 13 | 14 | \item{min}{lower bound of support.} 15 | 16 | \item{max}{upper bound of support.} 17 | } 18 | \value{ 19 | A dataframe. 20 | } 21 | \description{ 22 | With a 5th and 95th quantile point estimates and optional lower and 23 | upper bounds, fit a lognormal distribution, returning the parameters of 24 | the distribution. 
25 | } 26 | \examples{ 27 | fit_lognorm_trunc(low = 10, high = 50, min = 0, max = 100) 28 | } 29 | \seealso{ 30 | Other distribution fitting functions: 31 | \code{\link{combine_lognorm_trunc}()}, 32 | \code{\link{combine_lognorm}()}, 33 | \code{\link{combine_norm}()}, 34 | \code{\link{fit_capabilities_geomean}()}, 35 | \code{\link{fit_capabilities}()}, 36 | \code{\link{fit_lognorm}()}, 37 | \code{\link{fit_norm_trunc}()}, 38 | \code{\link{fit_pois}()}, 39 | \code{\link{fit_scenarios_geomean}()}, 40 | \code{\link{fit_scenarios}()}, 41 | \code{\link{fit_threat_communities}()}, 42 | \code{\link{generate_cost_function}()}, 43 | \code{\link{lognormal_to_normal}()}, 44 | \code{\link{normal_to_lognormal}()} 45 | } 46 | \concept{distribution fitting functions} 47 | -------------------------------------------------------------------------------- /man/fit_norm_trunc.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/fit_distributions.R 3 | \name{fit_norm_trunc} 4 | \alias{fit_norm_trunc} 5 | \title{Find parameters that fit quantile values of an unknown truncated normal 6 | distribution} 7 | \usage{ 8 | fit_norm_trunc(low, high, min = 0, max = Inf) 9 | } 10 | \arguments{ 11 | \item{low}{5th quantile.} 12 | 13 | \item{high}{95th quantile.} 14 | 15 | \item{min}{Lower bound of support.} 16 | 17 | \item{max}{Upper bound of support.} 18 | } 19 | \value{ 20 | Dataframe. 21 | } 22 | \description{ 23 | With a 5th and 95th quantile point estimates and optional lower and 24 | upper bounds, fit a truncated normal distribution, returning the parameters of 25 | the distribution. 
26 | } 27 | \examples{ 28 | fit_norm_trunc(low = 10, high = 50, min = 0, max = 100) 29 | } 30 | \seealso{ 31 | Other distribution fitting functions: 32 | \code{\link{combine_lognorm_trunc}()}, 33 | \code{\link{combine_lognorm}()}, 34 | \code{\link{combine_norm}()}, 35 | \code{\link{fit_capabilities_geomean}()}, 36 | \code{\link{fit_capabilities}()}, 37 | \code{\link{fit_lognorm_trunc}()}, 38 | \code{\link{fit_lognorm}()}, 39 | \code{\link{fit_pois}()}, 40 | \code{\link{fit_scenarios_geomean}()}, 41 | \code{\link{fit_scenarios}()}, 42 | \code{\link{fit_threat_communities}()}, 43 | \code{\link{generate_cost_function}()}, 44 | \code{\link{lognormal_to_normal}()}, 45 | \code{\link{normal_to_lognormal}()} 46 | } 47 | \concept{distribution fitting functions} 48 | -------------------------------------------------------------------------------- /man/fit_pois.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/fit_distributions.R 3 | \name{fit_pois} 4 | \alias{fit_pois} 5 | \title{Find parameters that fit a poisson distribution.} 6 | \usage{ 7 | fit_pois(low, high) 8 | } 9 | \arguments{ 10 | \item{low}{5th quantile.} 11 | 12 | \item{high}{95th quantile.} 13 | } 14 | \value{ 15 | A dataframe. 16 | } 17 | \description{ 18 | With a 5th and 95th quantile point estimates and optional lower and 19 | upper bounds, fit a poisson distribution, returning the parameters of 20 | the distribution. 
21 | } 22 | \examples{ 23 | fit_pois(low = 10, high = 50) 24 | } 25 | \seealso{ 26 | Other distribution fitting functions: 27 | \code{\link{combine_lognorm_trunc}()}, 28 | \code{\link{combine_lognorm}()}, 29 | \code{\link{combine_norm}()}, 30 | \code{\link{fit_capabilities_geomean}()}, 31 | \code{\link{fit_capabilities}()}, 32 | \code{\link{fit_lognorm_trunc}()}, 33 | \code{\link{fit_lognorm}()}, 34 | \code{\link{fit_norm_trunc}()}, 35 | \code{\link{fit_scenarios_geomean}()}, 36 | \code{\link{fit_scenarios}()}, 37 | \code{\link{fit_threat_communities}()}, 38 | \code{\link{generate_cost_function}()}, 39 | \code{\link{lognormal_to_normal}()}, 40 | \code{\link{normal_to_lognormal}()} 41 | } 42 | \concept{distribution fitting functions} 43 | -------------------------------------------------------------------------------- /man/fit_scenarios.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/fit_distributions.R 3 | \name{fit_scenarios} 4 | \alias{fit_scenarios} 5 | \title{Fit SME scenario estimates to distribution parameters} 6 | \usage{ 7 | fit_scenarios( 8 | responses, 9 | maximum_impact = Inf, 10 | maximum_impact_factor = 10, 11 | maximum_frequency_factor = 10 12 | ) 13 | } 14 | \arguments{ 15 | \item{responses}{A \code{\link{tidyrisk_response_set}} object.} 16 | 17 | \item{maximum_impact}{The absolute maximum potential impact of any 18 | single loss event.} 19 | 20 | \item{maximum_impact_factor}{Maximum impact factor - scaling factor 21 | of a SME's 95 percent maximum loss to limit the impact of any single event.} 22 | 23 | \item{maximum_frequency_factor}{Maximum frequency factor - scaling 24 | factor at which to limit frequency of events.} 25 | } 26 | \value{ 27 | A dataframe. 
28 | } 29 | \description{ 30 | Given a set of subject matter expert estimates for the 5th and 95th 31 | quantiles of impact and frequency of contact for events, calculate the 32 | distribution parameters for TEF and LM. Use a truncated lognormal 33 | distribution for LM (losses cannot be infinite in size) and 34 | for the TEF. 35 | } 36 | \examples{ 37 | NULL 38 | } 39 | \seealso{ 40 | Other distribution fitting functions: 41 | \code{\link{combine_lognorm_trunc}()}, 42 | \code{\link{combine_lognorm}()}, 43 | \code{\link{combine_norm}()}, 44 | \code{\link{fit_capabilities_geomean}()}, 45 | \code{\link{fit_capabilities}()}, 46 | \code{\link{fit_lognorm_trunc}()}, 47 | \code{\link{fit_lognorm}()}, 48 | \code{\link{fit_norm_trunc}()}, 49 | \code{\link{fit_pois}()}, 50 | \code{\link{fit_scenarios_geomean}()}, 51 | \code{\link{fit_threat_communities}()}, 52 | \code{\link{generate_cost_function}()}, 53 | \code{\link{lognormal_to_normal}()}, 54 | \code{\link{normal_to_lognormal}()} 55 | } 56 | \concept{distribution fitting functions} 57 | -------------------------------------------------------------------------------- /man/fit_scenarios_geomean.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/fit_distributions.R 3 | \name{fit_scenarios_geomean} 4 | \alias{fit_scenarios_geomean} 5 | \title{Fit scenario parameters by applying a geometric mean} 6 | \usage{ 7 | fit_scenarios_geomean(scenario_answers) 8 | } 9 | \arguments{ 10 | \item{scenario_answers}{Scenario answers dataframe.} 11 | } 12 | \value{ 13 | A dataframe. 
14 | } 15 | \description{ 16 | Fit scenario parameters by applying a geometric mean 17 | } 18 | \examples{ 19 | data(mc_scenario_answers) 20 | fit_scenarios_geomean(mc_scenario_answers) 21 | } 22 | \seealso{ 23 | Other distribution fitting functions: 24 | \code{\link{combine_lognorm_trunc}()}, 25 | \code{\link{combine_lognorm}()}, 26 | \code{\link{combine_norm}()}, 27 | \code{\link{fit_capabilities_geomean}()}, 28 | \code{\link{fit_capabilities}()}, 29 | \code{\link{fit_lognorm_trunc}()}, 30 | \code{\link{fit_lognorm}()}, 31 | \code{\link{fit_norm_trunc}()}, 32 | \code{\link{fit_pois}()}, 33 | \code{\link{fit_scenarios}()}, 34 | \code{\link{fit_threat_communities}()}, 35 | \code{\link{generate_cost_function}()}, 36 | \code{\link{lognormal_to_normal}()}, 37 | \code{\link{normal_to_lognormal}()} 38 | } 39 | \concept{distribution fitting functions} 40 | -------------------------------------------------------------------------------- /man/fit_threat_communities.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/fit_distributions.R 3 | \name{fit_threat_communities} 4 | \alias{fit_threat_communities} 5 | \title{Fit each of the threat communities to a distribution} 6 | \usage{ 7 | fit_threat_communities(threat_communities) 8 | } 9 | \arguments{ 10 | \item{threat_communities}{Dataframe of threat communities.} 11 | } 12 | \value{ 13 | A dataframe. 
14 | } 15 | \description{ 16 | Fit each of the threat communities to a distribution 17 | } 18 | \examples{ 19 | data(mc_threat_communities) 20 | fit_threat_communities(mc_threat_communities) 21 | } 22 | \seealso{ 23 | Other distribution fitting functions: 24 | \code{\link{combine_lognorm_trunc}()}, 25 | \code{\link{combine_lognorm}()}, 26 | \code{\link{combine_norm}()}, 27 | \code{\link{fit_capabilities_geomean}()}, 28 | \code{\link{fit_capabilities}()}, 29 | \code{\link{fit_lognorm_trunc}()}, 30 | \code{\link{fit_lognorm}()}, 31 | \code{\link{fit_norm_trunc}()}, 32 | \code{\link{fit_pois}()}, 33 | \code{\link{fit_scenarios_geomean}()}, 34 | \code{\link{fit_scenarios}()}, 35 | \code{\link{generate_cost_function}()}, 36 | \code{\link{lognormal_to_normal}()}, 37 | \code{\link{normal_to_lognormal}()} 38 | } 39 | \concept{distribution fitting functions} 40 | -------------------------------------------------------------------------------- /man/generate_cost_function.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/fit_distributions.R 3 | \name{generate_cost_function} 4 | \alias{generate_cost_function} 5 | \title{Generate a sum of squares cost function for optimization} 6 | \usage{ 7 | generate_cost_function(func) 8 | } 9 | \arguments{ 10 | \item{func}{A distribution function.} 11 | } 12 | \value{ 13 | A function. 14 | } 15 | \description{ 16 | This is an internal helper function that generates a sum of squares 17 | cost function for any given \verb{r*} function (e.g. rnorm, rlognorm). The 18 | resulting function is intended to be used by an \code{optim} call for fitting 19 | quantiles to distribution parameters. 
20 | } 21 | \examples{ 22 | generate_cost_function(stats::qlnorm) 23 | } 24 | \seealso{ 25 | Other distribution fitting functions: 26 | \code{\link{combine_lognorm_trunc}()}, 27 | \code{\link{combine_lognorm}()}, 28 | \code{\link{combine_norm}()}, 29 | \code{\link{fit_capabilities_geomean}()}, 30 | \code{\link{fit_capabilities}()}, 31 | \code{\link{fit_lognorm_trunc}()}, 32 | \code{\link{fit_lognorm}()}, 33 | \code{\link{fit_norm_trunc}()}, 34 | \code{\link{fit_pois}()}, 35 | \code{\link{fit_scenarios_geomean}()}, 36 | \code{\link{fit_scenarios}()}, 37 | \code{\link{fit_threat_communities}()}, 38 | \code{\link{lognormal_to_normal}()}, 39 | \code{\link{normal_to_lognormal}()} 40 | } 41 | \concept{distribution fitting functions} 42 | -------------------------------------------------------------------------------- /man/generate_weights.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/generate_weights.R 3 | \name{generate_weights} 4 | \alias{generate_weights} 5 | \title{Generate a weighting table for SMEs based upon their calibration answers} 6 | \usage{ 7 | generate_weights(questions, responses) 8 | } 9 | \arguments{ 10 | \item{questions}{\code{\link{tidyrisk_question_set}} object.} 11 | 12 | \item{responses}{\code{\link{tidyrisk_response_set}} object} 13 | } 14 | \value{ 15 | A dataframe of SMEs and their numerical weighting. 
16 | } 17 | \description{ 18 | Generate a weighting table for SMEs based upon their calibration answers 19 | } 20 | \examples{ 21 | NULL 22 | } 23 | -------------------------------------------------------------------------------- /man/get_smes_domains.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{get_smes_domains} 4 | \alias{get_smes_domains} 5 | \title{Calculate the prioritized list of domains for a given subject matter expert (SME)} 6 | \usage{ 7 | get_smes_domains(sme, questions) 8 | } 9 | \arguments{ 10 | \item{sme}{Name of the subject matter expert.} 11 | 12 | \item{questions}{A \code{\link{tidyrisk_question_set}} object.} 13 | } 14 | \value{ 15 | An ordered vector of the domains for the requested SME. 16 | } 17 | \description{ 18 | Given a \code{\link{tidyrisk_question_set}} object and the name of a 19 | specific SME of interest, create a vector of the domains in 20 | order of priority. 21 | } 22 | \examples{ 23 | \dontrun{ 24 | questions <- read_questions() 25 | get_smes_domains("Sally Expert", questions) 26 | } 27 | } 28 | -------------------------------------------------------------------------------- /man/is_tidyrisk_question_set.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/tidyrisk_question_set.R 3 | \name{is_tidyrisk_question_set} 4 | \alias{is_tidyrisk_question_set} 5 | \title{Test if the object is a tidyrisk_question_set} 6 | \usage{ 7 | is_tidyrisk_question_set(x) 8 | } 9 | \arguments{ 10 | \item{x}{An object} 11 | } 12 | \description{ 13 | This function returns TRUE for tidyrisk_question_set or sub-classes 14 | thereof, and FALSE for all other objects. 
15 | } 16 | \examples{ 17 | \dontrun{ 18 | is_tidyrisk_question_set(x) 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /man/is_tidyrisk_response_set.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/tidyrisk_response_set.R 3 | \name{is_tidyrisk_response_set} 4 | \alias{is_tidyrisk_response_set} 5 | \title{Test if the object is a tidyrisk_response_set} 6 | \usage{ 7 | is_tidyrisk_response_set(x) 8 | } 9 | \arguments{ 10 | \item{x}{An object} 11 | } 12 | \description{ 13 | This function returns TRUE for tidyrisk_response_set or sub-classes 14 | thereof, and FALSE for all other objects. 15 | } 16 | \examples{ 17 | \dontrun{ 18 | is_tidyrisk_response_set(x) 19 | } 20 | } 21 | -------------------------------------------------------------------------------- /man/lognormal_to_normal.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/fit_distributions.R 3 | \name{lognormal_to_normal} 4 | \alias{lognormal_to_normal} 5 | \title{Convert lognormal parameters to normal parameters} 6 | \usage{ 7 | lognormal_to_normal(meanlog, sdlog) 8 | } 9 | \arguments{ 10 | \item{meanlog}{Mean log.} 11 | 12 | \item{sdlog}{Standard deviation log.} 13 | } 14 | \value{ 15 | A list. 16 | } 17 | \description{ 18 | Given a set of parameters describing a lognormal distribution, return 19 | the parameters of the underlying normal distribution. 
20 | } 21 | \examples{ 22 | lognormal_to_normal(meanlog=1, sdlog=3) 23 | } 24 | \seealso{ 25 | Other distribution fitting functions: 26 | \code{\link{combine_lognorm_trunc}()}, 27 | \code{\link{combine_lognorm}()}, 28 | \code{\link{combine_norm}()}, 29 | \code{\link{fit_capabilities_geomean}()}, 30 | \code{\link{fit_capabilities}()}, 31 | \code{\link{fit_lognorm_trunc}()}, 32 | \code{\link{fit_lognorm}()}, 33 | \code{\link{fit_norm_trunc}()}, 34 | \code{\link{fit_pois}()}, 35 | \code{\link{fit_scenarios_geomean}()}, 36 | \code{\link{fit_scenarios}()}, 37 | \code{\link{fit_threat_communities}()}, 38 | \code{\link{generate_cost_function}()}, 39 | \code{\link{normal_to_lognormal}()} 40 | } 41 | \concept{distribution fitting functions} 42 | -------------------------------------------------------------------------------- /man/make_handouts.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/make_handouts.R 3 | \name{make_handouts} 4 | \alias{make_handouts} 5 | \title{Create a set of interview handouts for a SME} 6 | \usage{ 7 | make_handouts(sme, questions, output_dir, calibration_questions = 10) 8 | } 9 | \arguments{ 10 | \item{sme}{Name of the SME.} 11 | 12 | \item{questions}{\code{\link{tidyrisk_question_set}} object} 13 | 14 | \item{output_dir}{Directory to place output.} 15 | 16 | \item{calibration_questions}{Number of calibration questions to ask.} 17 | } 18 | \description{ 19 | Creates two MS Word documents. One is an \code{answers} document that contains 20 | the answers to the calibration questions, the other (with the name of the SME) 21 | does not contain answers and is intended to be a visual reference (and possible 22 | take away) for the SME. 
23 | } 24 | \examples{ 25 | \dontrun{ 26 | questions <- read_questions() 27 | make_handouts("Sally Expert", questions, output_dir = tempdir()) 28 | } 29 | } 30 | -------------------------------------------------------------------------------- /man/make_scorecard.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/make_scorecard.R 3 | \name{make_scorecard} 4 | \alias{make_scorecard} 5 | \alias{make_bingo} 6 | \title{Create a scorecard for marking progress through domains in an interview} 7 | \usage{ 8 | make_scorecard(sme, questions, output_dir) 9 | 10 | make_bingo(sme, questions, output_dir = getwd()) 11 | } 12 | \arguments{ 13 | \item{sme}{Name of SME.} 14 | 15 | \item{questions}{\code{\link{tidyrisk_question_set}} object.} 16 | 17 | \item{output_dir}{Directory to place scorecards.} 18 | } 19 | \value{ 20 | Invisibly returns the full path to the saved scorecard. 21 | } 22 | \description{ 23 | Creates a two page PDF with one grid for scenarios and one for capabilities. 24 | Each grid contains a square for each domain. An analyst can mark/stamp 25 | each domain as it is covered in an interview, gamifying progress. 26 | } 27 | \details{ 28 | The domains are ordered according to the SME's expertise profile, ensuring 29 | they match the interview order flow. 
30 | } 31 | \examples{ 32 | \dontrun{ 33 | questions <- read_questions() 34 | make_scorecard("Sally Expert", questions, output_dir = tempdir()) 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /man/make_slides.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/make_slides.R 3 | \name{make_slides} 4 | \alias{make_slides} 5 | \title{Create interview slides} 6 | \usage{ 7 | make_slides( 8 | sme, 9 | questions, 10 | output_dir, 11 | assessment_title = "Strategic Risk Assessment" 12 | ) 13 | } 14 | \arguments{ 15 | \item{sme}{Name of the SME being interviewed.} 16 | 17 | \item{questions}{A \code{\link{tidyrisk_question_set}} object.} 18 | 19 | \item{output_dir}{Directory location for knitted slides.} 20 | 21 | \item{assessment_title}{Title of the assessment being performed.} 22 | } 23 | \value{ 24 | Invisibly returns the full path to the slide file. 25 | } 26 | \description{ 27 | Creates an in-browser slideshow as a visual aide when conducting an 28 | interview with a subject matter expert (SME). The slideshow is customized 29 | for the SME by placing the domains in the order of preference for that 30 | SME. 
31 | } 32 | \examples{ 33 | \dontrun{ 34 | make_slides("Sally Expert", questions, output_dir = tempdir()) 35 | } 36 | } 37 | -------------------------------------------------------------------------------- /man/mc_calibration_answers.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/data.R 3 | \docType{data} 4 | \name{mc_calibration_answers} 5 | \alias{mc_calibration_answers} 6 | \title{MetroCare Hospital Calibration Answers} 7 | \format{ 8 | A data frame with 50 rows and 5 variables: 9 | \describe{ 10 | \item{sme}{name of the subject matter expert} 11 | \item{calibration_id}{unique identifier of the calibration question} 12 | \item{low}{SME's low end estimate} 13 | \item{high}{SME's high end estimate} 14 | \item{date}{date of answer} 15 | } 16 | } 17 | \source{ 18 | This is hypothetical information. Any similarity to any other 19 | entity is completely coincidental. 20 | } 21 | \usage{ 22 | mc_calibration_answers 23 | } 24 | \description{ 25 | A dataset of SME answers to calibration questions. 26 | } 27 | \keyword{datasets} 28 | -------------------------------------------------------------------------------- /man/mc_capabilities.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/data.R 3 | \docType{data} 4 | \name{mc_capabilities} 5 | \alias{mc_capabilities} 6 | \title{MetroCare Hospital Capabilities} 7 | \format{ 8 | A data frame with 60 rows and 3 variables: 9 | \describe{ 10 | \item{capability_id}{unique identifier of the capability} 11 | \item{domain_id}{domain associated with the capability} 12 | \item{capability}{text description of the capability} 13 | } 14 | } 15 | \source{ 16 | This is hypothetical information. Any similarity to any other 17 | entity is completely coincidental. 
18 | } 19 | \usage{ 20 | mc_capabilities 21 | } 22 | \description{ 23 | A dataset of program capabilities. 24 | } 25 | \keyword{datasets} 26 | -------------------------------------------------------------------------------- /man/mc_capability_answers.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/data.R 3 | \docType{data} 4 | \name{mc_capability_answers} 5 | \alias{mc_capability_answers} 6 | \title{MetroCare Hospital Capability Answers} 7 | \format{ 8 | A data frame with 1 row and 5 variables: 9 | \describe{ 10 | \item{sme}{name of the SME} 11 | \item{capability_id}{identifier of the capability} 12 | \item{low}{capability estimate, low} 13 | \item{high}{capability estimate, high} 14 | \item{date}{date of the answer} 15 | } 16 | } 17 | \source{ 18 | This is hypothetical information. Any similarity to any other 19 | entity is completely coincidental. 20 | } 21 | \usage{ 22 | mc_capability_answers 23 | } 24 | \description{ 25 | A dataset of SME answers to capabilities. 
26 | } 27 | \keyword{datasets} 28 | -------------------------------------------------------------------------------- /man/mc_capability_parameters_fitted.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/data.R 3 | \docType{data} 4 | \name{mc_capability_parameters_fitted} 5 | \alias{mc_capability_parameters_fitted} 6 | \title{MetroCare Hospital Capability Parameters (fitted)} 7 | \format{ 8 | A data frame with 300 rows and 10 variables: 9 | \describe{ 10 | \item{sme}{name of the sme providing the response} 11 | \item{capability_id}{unique identifier} 12 | \item{date}{date of the response} 13 | \item{capability_func}{capability sampling function} 14 | \item{capability_mean}{capability mean} 15 | \item{capability_sd}{capability standard deviation} 16 | \item{capability_min}{capability minimum} 17 | \item{capability_max}{capability maximum} 18 | \item{low}{capability estimate, low} 19 | \item{high}{capability estimate, high} 20 | } 21 | } 22 | \source{ 23 | This is hypothetical information. Any similarity to any other 24 | entity is completely coincidental. 25 | } 26 | \usage{ 27 | mc_capability_parameters_fitted 28 | } 29 | \description{ 30 | A dataset of sample fitted capability parameters. 
31 | } 32 | \keyword{datasets} 33 | -------------------------------------------------------------------------------- /man/mc_domains.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/data.R 3 | \docType{data} 4 | \name{mc_domains} 5 | \alias{mc_domains} 6 | \title{MetroCare Hospital Domains} 7 | \format{ 8 | A data frame with 15 rows and 4 variables: 9 | \describe{ 10 | \item{domain}{domain title} 11 | \item{description}{descriptive text describing the content of the domain} 12 | \item{active}{logical flag indicating whether or not the domain is in use} 13 | \item{domain_id}{unique domain id} 14 | } 15 | } 16 | \source{ 17 | This is hypothetical information. Any similarity to any other 18 | entity is completely coincidental. 19 | } 20 | \usage{ 21 | mc_domains 22 | } 23 | \description{ 24 | A dataset of program domains. 25 | } 26 | \keyword{datasets} 27 | -------------------------------------------------------------------------------- /man/mc_scenario_answers.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/data.R 3 | \docType{data} 4 | \name{mc_scenario_answers} 5 | \alias{mc_scenario_answers} 6 | \title{MetroCare Hospital Scenario Answers} 7 | \format{ 8 | A data frame with 1 rows and 7 variables: 9 | \describe{ 10 | \item{sme}{name of the SME} 11 | \item{scenario_id}{identifier of the scenario} 12 | \item{freq_low}{frequency estimate, low} 13 | \item{freq_high}{frequency estimate, high} 14 | \item{imp_low}{impact estimate, low} 15 | \item{imp_high}{impact estimate, high} 16 | \item{date}{date of the answer} 17 | } 18 | } 19 | \source{ 20 | This is hypothetical information. Any similarity to any other 21 | entity is completely coincidental. 
22 | } 23 | \usage{ 24 | mc_scenario_answers 25 | } 26 | \description{ 27 | A dataset of SME answers to scenarios. 28 | } 29 | \keyword{datasets} 30 | -------------------------------------------------------------------------------- /man/mc_scenario_parameters_fitted.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/data.R 3 | \docType{data} 4 | \name{mc_scenario_parameters_fitted} 5 | \alias{mc_scenario_parameters_fitted} 6 | \title{MetroCare Hospital Scenario Parameters (fitted)} 7 | \format{ 8 | A data frame with 280 rows and 17 variables: 9 | \describe{ 10 | \item{sme}{name of the sme providing the response} 11 | \item{scenario_id}{unique identifier} 12 | \item{date}{date of the response} 13 | \item{impact_func}{function to use for impact sampling} 14 | \item{impact_meanlog}{impact meanlog} 15 | \item{impact_sdlog}{impact standard deviation log} 16 | \item{impact_min}{impact minimum} 17 | \item{impact_max}{impact maximum} 18 | \item{imp_low}{impact estimate, low} 19 | \item{imp_high}{impact estimate, high} 20 | \item{frequency_func}{function to use for frequency sampling} 21 | \item{frequency_meanlog}{frequency meanlog} 22 | \item{frequency_sdlog}{frequency standard deviation log} 23 | \item{frequency_min}{frequency minimum} 24 | \item{frequency_max}{frequency maximum} 25 | \item{freq_low}{frequency estimate, low} 26 | \item{freq_high}{frequency estimate, high} 27 | } 28 | } 29 | \source{ 30 | This is hypothetical information. Any similarity to any other 31 | entity is completely coincidental. 32 | } 33 | \usage{ 34 | mc_scenario_parameters_fitted 35 | } 36 | \description{ 37 | A dataset of sample fitted scenario parameters. 
38 | } 39 | \keyword{datasets} 40 | -------------------------------------------------------------------------------- /man/mc_scenarios.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/data.R 3 | \docType{data} 4 | \name{mc_scenarios} 5 | \alias{mc_scenarios} 6 | \title{MetroCare Risk Scenarios} 7 | \format{ 8 | A data frame with 56 rows and 5 variables: 9 | \describe{ 10 | \item{scenario_id}{unique identifier} 11 | \item{scenario}{scenario description} 12 | \item{threat_id}{threat community id} 13 | \item{domain_id}{domain id} 14 | \item{controls}{comma separated list of control ids} 15 | } 16 | } 17 | \source{ 18 | This is hypothetical information. Any similarity to any other 19 | entity is completely coincidental. 20 | } 21 | \usage{ 22 | mc_scenarios 23 | } 24 | \description{ 25 | A dataset of sample risk scenarios. 26 | } 27 | \keyword{datasets} 28 | -------------------------------------------------------------------------------- /man/mc_sme_top_domains.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/data.R 3 | \docType{data} 4 | \name{mc_sme_top_domains} 5 | \alias{mc_sme_top_domains} 6 | \title{MetroCare Hospital SME Top Domains} 7 | \format{ 8 | A data frame with 35 rows and 3 variables: 9 | \describe{ 10 | \item{sme}{SME name} 11 | \item{key}{index of domain} 12 | \item{value}{name of domain} 13 | } 14 | } 15 | \source{ 16 | This is hypothetical information. Any similarity to any other 17 | entity is completely coincidental. 18 | } 19 | \usage{ 20 | mc_sme_top_domains 21 | } 22 | \description{ 23 | A dataset of focus domains per SME. 
24 | } 25 | \keyword{datasets} 26 | -------------------------------------------------------------------------------- /man/mc_threat_communities.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/data.R 3 | \docType{data} 4 | \name{mc_threat_communities} 5 | \alias{mc_threat_communities} 6 | \title{MetroCare Hospital Threat Communities} 7 | \format{ 8 | A data frame with 6 rows and 7 variables: 9 | \describe{ 10 | \item{threat_community}{text title of the threat community} 11 | \item{threat_id}{unique identifier} 12 | \item{definition}{text description of the threat community} 13 | \item{low}{threat communities capability, low end} 14 | \item{high}{threat communities capability, high end} 15 | \item{category}{type of the threat community} 16 | \item{action_type}{action type of the threat community} 17 | } 18 | } 19 | \source{ 20 | This is hypothetical information. Any similarity to any other 21 | entity is completely coincidental. 22 | } 23 | \usage{ 24 | mc_threat_communities 25 | } 26 | \description{ 27 | A dataset of sample threat communities. 
28 | } 29 | \keyword{datasets} 30 | -------------------------------------------------------------------------------- /man/mc_threat_parameters_fitted.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/data.R 3 | \docType{data} 4 | \name{mc_threat_parameters_fitted} 5 | \alias{mc_threat_parameters_fitted} 6 | \title{MetroCare Hospital Threat Parameters (fitted)} 7 | \format{ 8 | A data frame with 8 rows and 12 variables: 9 | \describe{ 10 | \item{action_type}{action type of the threat community} 11 | \item{category}{category of the threat community} 12 | \item{definition}{text description of the threat community} 13 | \item{high}{threat communities capability, high end} 14 | \item{low}{threat communities capability, low end} 15 | \item{threat_community}{text title of the threat community} 16 | \item{threat_func}{sampling function} 17 | \item{threat_id}{unique identifier} 18 | \item{threat_max}{threat maximum capability} 19 | \item{threat_mean}{threat mean capability} 20 | \item{threat_sd}{threat capability standard deviation} 21 | \item{threat_min}{threat capability minimum} 22 | } 23 | } 24 | \source{ 25 | This is hypothetical information. Any similarity to any other 26 | entity is completely coincidental. 27 | } 28 | \usage{ 29 | mc_threat_parameters_fitted 30 | } 31 | \description{ 32 | A dataset of sample fitted threat parameters. 
33 | } 34 | \keyword{datasets} 35 | -------------------------------------------------------------------------------- /man/normal_to_lognormal.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/fit_distributions.R 3 | \name{normal_to_lognormal} 4 | \alias{normal_to_lognormal} 5 | \title{Convert normal parameters to lognormal parameters} 6 | \usage{ 7 | normal_to_lognormal(normmean, normsd) 8 | } 9 | \arguments{ 10 | \item{normmean}{Mean.} 11 | 12 | \item{normsd}{Standard deviation.} 13 | } 14 | \value{ 15 | A list. 16 | } 17 | \description{ 18 | Given parameters that describe a normal distribution, convert them back 19 | to parameters for a lognormal distribution. 20 | } 21 | \examples{ 22 | normal_to_lognormal(normmean = 20, normsd = 3) 23 | } 24 | \seealso{ 25 | Other distribution fitting functions: 26 | \code{\link{combine_lognorm_trunc}()}, 27 | \code{\link{combine_lognorm}()}, 28 | \code{\link{combine_norm}()}, 29 | \code{\link{fit_capabilities_geomean}()}, 30 | \code{\link{fit_capabilities}()}, 31 | \code{\link{fit_lognorm_trunc}()}, 32 | \code{\link{fit_lognorm}()}, 33 | \code{\link{fit_norm_trunc}()}, 34 | \code{\link{fit_pois}()}, 35 | \code{\link{fit_scenarios_geomean}()}, 36 | \code{\link{fit_scenarios}()}, 37 | \code{\link{fit_threat_communities}()}, 38 | \code{\link{generate_cost_function}()}, 39 | \code{\link{lognormal_to_normal}()} 40 | } 41 | \concept{distribution fitting functions} 42 | -------------------------------------------------------------------------------- /man/pipe.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils-pipe.R 3 | \name{\%>\%} 4 | \alias{\%>\%} 5 | \title{Pipe operator} 6 | \usage{ 7 | lhs \%>\% rhs 8 | } 9 | \arguments{ 10 | \item{lhs}{A value or the magrittr placeholder.} 11 | 12 | 
\item{rhs}{A function call using the magrittr semantics.} 13 | } 14 | \value{ 15 | The result of calling \code{rhs(lhs)}. 16 | } 17 | \description{ 18 | See \code{magrittr::\link[magrittr:pipe]{\%>\%}} for details. 19 | } 20 | \keyword{internal} 21 | -------------------------------------------------------------------------------- /man/prepare_data.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/prepare_data.R 3 | \name{prepare_data} 4 | \alias{prepare_data} 5 | \title{Create one or more quantitative scenarios objects suitable for simulation by 'evaluator'} 6 | \usage{ 7 | prepare_data( 8 | scenario_parameters, 9 | capability_parameters, 10 | threat_parameters, 11 | questions 12 | ) 13 | } 14 | \arguments{ 15 | \item{scenario_parameters}{Scenarios with final parameters defined.} 16 | 17 | \item{capability_parameters}{Capabilities with final parameters defined.} 18 | 19 | \item{threat_parameters}{Threat communities with final parameters defined.} 20 | 21 | \item{questions}{A \code{\link{tidyrisk_question_set}} object.} 22 | } 23 | \value{ 24 | A list of one or more \code{\link{tidyrisk_scenario}} objects. 25 | } 26 | \description{ 27 | Given parameters for the scenarios, threat communities, capabilities, and 28 | the question set, generate a list of \code{\link{tidyrisk_scenario}} objects that may be 29 | fed into \code{evaluator::\link[evaluator]{run_simulation}} for Monte Carlo simulation. 
30 | } 31 | \examples{ 32 | suppressPackageStartupMessages(library(dplyr)) 33 | data(mc_domains, mc_capabilities, mc_scenarios, mc_sme_top_domains, 34 | calibration_questions, mc_threat_communities) 35 | question_set <- tidyrisk_question_set(mc_domains, mc_scenarios, mc_capabilities, 36 | calibration_questions, mc_sme_top_domains, 37 | mc_threat_communities) 38 | response_set <- tidyrisk_response_set(mc_calibration_answers, 39 | mc_scenario_answers, mc_capability_answers) 40 | sme_weightings <- generate_weights(question_set, response_set) 41 | data(mc_scenario_parameters_fitted, mc_capability_parameters_fitted, 42 | mc_threat_parameters_fitted) 43 | scenario_parameters <- left_join(mc_scenario_parameters_fitted, sme_weightings, by = "sme") \%>\% 44 | combine_scenario_parameters() 45 | capability_parameters <- left_join(mc_capability_parameters_fitted, sme_weightings, by = "sme") \%>\% 46 | combine_capability_parameters() 47 | quantitative_scenarios <- prepare_data(scenario_parameters, 48 | capability_parameters, 49 | mc_threat_parameters_fitted, 50 | question_set) 51 | } 52 | -------------------------------------------------------------------------------- /man/read_questions.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{read_questions} 4 | \alias{read_questions} 5 | \title{Read scenario questions} 6 | \usage{ 7 | read_questions(source_dir, active_only = TRUE) 8 | } 9 | \arguments{ 10 | \item{source_dir}{Directory location to find input files.} 11 | 12 | \item{active_only}{Read in only the active elements, defaults to TRUE.} 13 | } 14 | \value{ 15 | A \code{\link{tidyrisk_question_set}} object 16 | } 17 | \description{ 18 | Reads in all the questions for which subject matter expert input is 19 | needed. Includes the domains, capabilities, scenarios, calibration 20 | questions, and threat communities. 
21 | } 22 | \details{ 23 | Expects the following files to be present: 24 | \itemize{ 25 | \item \code{domains.csv} - Domains 26 | \itemize{ 27 | \item domain_id, domain 28 | } 29 | \item \code{capabilities.csv} - Capabilities 30 | \itemize{ 31 | \item domain_id, capability_id, capability 32 | } 33 | \item \code{scenarios.csv} - Scenarios 34 | \itemize{ 35 | \item scenario_id, scenario, threat_id, domain_id, controls 36 | } 37 | \item \code{sme_top_domains.csv} - SME expertise 38 | \itemize{ 39 | \item sme, domain1, domain2, domain3, domain4, domain5, domain6, domain7 40 | } 41 | \item \code{calibration_questions.csv} - Calibration questions 42 | \item \code{threat_communities.csv} - Threat communities 43 | \itemize{ 44 | \item threat_community, threat_id, definition, low, high 45 | } 46 | } 47 | } 48 | \examples{ 49 | \dontrun{ 50 | read_questions() 51 | } 52 | } 53 | -------------------------------------------------------------------------------- /man/read_responses.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/utils.R 3 | \name{read_responses} 4 | \alias{read_responses} 5 | \title{Read all SMEs responses} 6 | \usage{ 7 | read_responses(source_dir = getwd()) 8 | } 9 | \arguments{ 10 | \item{source_dir}{Directory location where input files are found.} 11 | } 12 | \value{ 13 | A tidyrisk_response_set object 14 | } 15 | \description{ 16 | Reads in all the responses recorded to the calibration, scenarios, and 17 | capability questions. 
18 | } 19 | \details{ 20 | Expects the following files to be present: 21 | \itemize{ 22 | \item \code{calibration_answers.csv} - Calibration 23 | \item \code{scenario_answers.csv} - Scenarios 24 | \item \code{capability_answers.csv} - Capabilities 25 | } 26 | } 27 | \examples{ 28 | \dontrun{ 29 | read_responses() 30 | } 31 | } 32 | -------------------------------------------------------------------------------- /man/tidyrisk_question_set.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/tidyrisk_question_set.R 3 | \name{tidyrisk_question_set} 4 | \alias{tidyrisk_question_set} 5 | \alias{new_tidyrisk_question_set} 6 | \alias{as.tidyrisk_question_set} 7 | \alias{validate_tidyrisk_question_set} 8 | \title{Construct a tidyrisk_question_set object} 9 | \usage{ 10 | tidyrisk_question_set( 11 | domains, 12 | scenarios, 13 | capabilities, 14 | calibration, 15 | expertise, 16 | threat_communities 17 | ) 18 | 19 | new_tidyrisk_question_set(x) 20 | 21 | as.tidyrisk_question_set(x, ...) 22 | 23 | validate_tidyrisk_question_set(x) 24 | } 25 | \arguments{ 26 | \item{domains}{Domains} 27 | 28 | \item{scenarios}{Scenario questions} 29 | 30 | \item{capabilities}{Capability questions} 31 | 32 | \item{calibration}{Calibration questions} 33 | 34 | \item{expertise}{SME expertise} 35 | 36 | \item{threat_communities}{Threat communities} 37 | 38 | \item{x}{object to coerce} 39 | 40 | \item{...}{Individual dataframes} 41 | } 42 | \description{ 43 | \code{new.tidyrisk_question_set} is a low-level constructor that takes a list of dataframes. 44 | \code{tidyrisk_question_set} constructs a tidyrisk_question_set object from dataframes. 45 | \code{as.tidyrisk_question_set} is a S3 generic that converts existing objects. 46 | \code{validate_tidyrisk_question_set} verifies that the data elements are internally consistent. 
47 | } 48 | \examples{ 49 | NULL 50 | } 51 | -------------------------------------------------------------------------------- /man/tidyrisk_response_set.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/tidyrisk_response_set.R 3 | \name{tidyrisk_response_set} 4 | \alias{tidyrisk_response_set} 5 | \alias{new_tidyrisk_response_set} 6 | \alias{as.tidyrisk_response_set} 7 | \title{Construct a tidyrisk_response_set object} 8 | \usage{ 9 | tidyrisk_response_set( 10 | calibration_answers, 11 | scenario_answers, 12 | capability_answers 13 | ) 14 | 15 | new_tidyrisk_response_set( 16 | calibration_answers, 17 | scenario_answers, 18 | capability_answers 19 | ) 20 | 21 | as.tidyrisk_response_set(x, ...) 22 | } 23 | \arguments{ 24 | \item{calibration_answers}{Calibration tidyrisk_response_set} 25 | 26 | \item{scenario_answers}{Scenarios tidyrisk_response_set} 27 | 28 | \item{capability_answers}{Capability tidyrisk_response_set} 29 | 30 | \item{x}{object to coerce} 31 | 32 | \item{...}{Individual dataframes} 33 | } 34 | \description{ 35 | \code{new.tidyrisk_response_set} is a low-level constructor that takes a list of dataframes. 36 | \code{tidyrisk_response_set} constructs a tidyrisk_response_set from dataframes. 37 | \code{as.tidyrisk_response_set} is a S3 generic that converts existing objects. 
38 | } 39 | \examples{ 40 | NULL 41 | } 42 | -------------------------------------------------------------------------------- /scripts/Makevars: -------------------------------------------------------------------------------- 1 | CXXFLAGS=-Wno-ignored-attributes 2 | -------------------------------------------------------------------------------- /tests/spelling.R: -------------------------------------------------------------------------------- 1 | if(requireNamespace('spelling', quietly = TRUE)) 2 | spelling::spell_check_test(vignettes = TRUE, error = FALSE, 3 | skip_on_cran = TRUE) 4 | -------------------------------------------------------------------------------- /tests/testthat.R: -------------------------------------------------------------------------------- 1 | library(testthat) 2 | library(collector) 3 | 4 | test_check("collector") 5 | -------------------------------------------------------------------------------- /tests/testthat/test-clean_answers.R: -------------------------------------------------------------------------------- 1 | context("Clean answers") 2 | 3 | test_that("clean_answers works", { 4 | data("mc_capability_answers") 5 | data("mc_scenario_answers") 6 | cleaned <- clean_answers(capability_answers = mc_capability_answers, 7 | scenario_answers = mc_scenario_answers) 8 | expect_is(cleaned, "list") 9 | expect_length(cleaned, 2) 10 | expect_equivalent(names(cleaned), c("capabilities", "scenarios")) 11 | }) 12 | -------------------------------------------------------------------------------- /tests/testthat/test-fit_distributions.R: -------------------------------------------------------------------------------- 1 | context("Fit distributions") 2 | 3 | test_that("Fit lognorm functions", { 4 | result <- fit_lognorm(low = .20, high = .50) 5 | expect_equal(result$sdlog, 0.05, tolerance = 0.01) 6 | }) 7 | 8 | test_that("Fit poison functions", { 9 | result <- fit_pois(low = 10, high = 50) 10 | expect_equal(result$lambda, 0.11, tolerance = 0.01) 11 | 
}) 12 | 13 | 14 | test_that("Lognormal to normal conversion is idempotent", { 15 | meanlog <- 1.54 16 | sdlog <- 10.3 17 | 18 | norms <- lognormal_to_normal(meanlog, sdlog) 19 | lognorms <- normal_to_lognormal(norms$mean, norms$sd) 20 | expect_equal(meanlog, lognorms$meanlog) 21 | expect_equal(sdlog, lognorms$sdlog) 22 | }) 23 | 24 | test_that("Combine lognorm functions", { 25 | meanlog <- 1.19 26 | sdlog <- 1.59 27 | dat <- data.frame(meanlog = c(1, 1.5), 28 | sdlog = c(1, 2), 29 | weight = c(2, 1)) 30 | result <- combine_lognorm(dat) 31 | expect_equal(result$meanlog, meanlog, tolerance = 0.01) 32 | expect_equal(result$sdlog, sdlog, tolerance = 0.01) 33 | }) 34 | 35 | test_that("combine norm functions", { 36 | 37 | dat <- data.frame(mean = c(10, 20, 30), 38 | sd = c(4, 5, 10), 39 | weight = c(2, 1, 2)) 40 | results <- combine_norm(dat) 41 | expect_equivalent(nrow(results), 1) 42 | expect_equivalent(rowSums(results), 26.6) 43 | }) 44 | -------------------------------------------------------------------------------- /tests/testthat/test-generate_weights.R: -------------------------------------------------------------------------------- 1 | context("Generate Weights") 2 | 3 | test_that("weights are generated", { 4 | data(calibration_questions) 5 | data(mc_domains) 6 | data(mc_scenarios) 7 | data(mc_capabilities) 8 | data(mc_sme_top_domains) 9 | data(mc_threat_communities) 10 | data(mc_calibration_answers) 11 | data(mc_scenario_answers) 12 | data(mc_capability_answers) 13 | ques <- tidyrisk_question_set(domains = mc_domains, 14 | calibration = calibration_questions, 15 | scenarios = mc_scenarios, 16 | capabilities = mc_capabilities, 17 | expertise = mc_sme_top_domains, 18 | threat_communities = mc_threat_communities) 19 | ans <- tidyrisk_response_set(mc_calibration_answers, mc_scenario_answers, mc_capability_answers) 20 | weights <- generate_weights(ques, ans) 21 | expect_s3_class(weights, "tbl") 22 | }) 23 | 
-------------------------------------------------------------------------------- /tests/testthat/test-make_handouts.R: -------------------------------------------------------------------------------- 1 | context("Make Handouts") 2 | 3 | test_that("handouts works", { 4 | data(calibration_questions) 5 | data(mc_domains) 6 | data(mc_scenarios) 7 | data(mc_capabilities) 8 | data(mc_sme_top_domains) 9 | data(mc_threat_communities) 10 | 11 | tmpdir <- tempdir() 12 | 13 | ques <- tidyrisk_question_set(domains = mc_domains, 14 | calibration = calibration_questions, 15 | scenarios = mc_scenarios, 16 | capabilities = mc_capabilities, 17 | expertise = mc_sme_top_domains, 18 | threat_communities = mc_threat_communities) 19 | 20 | sme <- "Natalie Wade" 21 | sme_title <- tolower(gsub(" ", "_", sme)) 22 | 23 | make_handouts(sme, ques, tmpdir) 24 | 25 | file_location <- file.path(tmpdir, paste0(sme_title, ".docx")) 26 | expect_true(file.exists(file_location)) 27 | 28 | ans_file_location <- file.path(tmpdir, paste0(sme_title, "_answers", ".docx")) 29 | expect_true(file.exists(ans_file_location)) 30 | 31 | unlink(c(file_location, ans_file_location)) 32 | }) 33 | -------------------------------------------------------------------------------- /tests/testthat/test-make_scorecard.R: -------------------------------------------------------------------------------- 1 | context("Make Scorecard") 2 | 3 | test_that("scorecard generation works", { 4 | data(calibration_questions) 5 | data(mc_domains) 6 | data(mc_scenarios) 7 | data(mc_capabilities) 8 | data(mc_sme_top_domains) 9 | data(mc_threat_communities) 10 | 11 | tmpdir <- file.path(tempdir(), "collector") 12 | dir.create(tmpdir, showWarnings = TRUE) 13 | 14 | ques <- tidyrisk_question_set(domains = mc_domains, 15 | calibration = calibration_questions, 16 | scenarios = mc_scenarios, 17 | capabilities = mc_capabilities, 18 | expertise = mc_sme_top_domains, 19 | threat_communities = mc_threat_communities) 20 | 21 | make_scorecard("Natalie 
Wade", ques, tmpdir) 22 | file_location <- file.path(tmpdir, "natalie_wade_scorecard.pdf") 23 | expect_true(file.exists(file_location)) 24 | unlink(tmpdir, recursive = TRUE) 25 | }) 26 | 27 | test_that("Bingo function is deprecated", { 28 | data(calibration_questions) 29 | data(mc_domains) 30 | data(mc_scenarios) 31 | data(mc_capabilities) 32 | data(mc_sme_top_domains) 33 | data(mc_threat_communities) 34 | 35 | tmpdir <- file.path(tempdir(), "collector") 36 | dir.create(tmpdir, showWarnings = TRUE) 37 | 38 | ques <- tidyrisk_question_set(domains = mc_domains, 39 | calibration = calibration_questions, 40 | scenarios = mc_scenarios, 41 | capabilities = mc_capabilities, 42 | expertise = mc_sme_top_domains, 43 | threat_communities = mc_threat_communities) 44 | 45 | expect_warning(make_bingo("Natalie Wade", ques, tmpdir), "deprecate") 46 | 47 | unlink(tmpdir, recursive = TRUE) 48 | }) 49 | -------------------------------------------------------------------------------- /tests/testthat/test-make_slides.R: -------------------------------------------------------------------------------- 1 | context("Make Slides") 2 | 3 | test_that("make slides", { 4 | skip_if(!rmarkdown::pandoc_available(), 5 | "Cannot test slide generation without pandoc.") 6 | data(calibration_questions) 7 | data(mc_domains) 8 | data(mc_scenarios) 9 | data(mc_capabilities) 10 | data(mc_sme_top_domains) 11 | data(mc_threat_communities) 12 | 13 | tmpdir <- file.path(tempdir(), "collector") 14 | dir.create(tmpdir, showWarnings = FALSE) 15 | 16 | ques <- tidyrisk_question_set(domains = mc_domains, 17 | calibration = calibration_questions, 18 | scenarios = mc_scenarios, 19 | capabilities = mc_capabilities, 20 | expertise = mc_sme_top_domains, 21 | threat_communities = mc_threat_communities) 22 | 23 | make_slides("Natalie Wade", ques, tmpdir) 24 | file_location <- file.path(tmpdir, "natalie_wade.html") 25 | expect_true(file.exists(file_location)) 26 | unlink(tmpdir, recursive = TRUE) 27 | }) 28 | 
-------------------------------------------------------------------------------- /tests/testthat/test-prepare_data.R: -------------------------------------------------------------------------------- 1 | context("Prepare Data") 2 | 3 | test_that("Scenario objects are created", { 4 | data(calibration_questions) 5 | data(mc_domains) 6 | data(mc_scenarios) 7 | data(mc_capabilities) 8 | data(mc_sme_top_domains) 9 | data(mc_threat_communities) 10 | 11 | ques <- tidyrisk_question_set(domains = mc_domains, 12 | calibration = calibration_questions, 13 | scenarios = mc_scenarios, 14 | capabilities = mc_capabilities, 15 | expertise = mc_sme_top_domains, 16 | threat_communities = mc_threat_communities) 17 | 18 | data(mc_calibration_answers) 19 | data(mc_scenario_answers) 20 | data(mc_capability_answers) 21 | 22 | ans <- tidyrisk_response_set(mc_calibration_answers, mc_scenario_answers, mc_capability_answers) 23 | 24 | fitted_scenarios <- fit_scenarios(ans) 25 | fitted_capabilities <- fit_capabilities(ans) 26 | fitted_threat_communities <- fit_threat_communities(mc_threat_communities) 27 | 28 | sme_weightings <- generate_weights(ques, ans) 29 | scenario_parameters <- left_join(fitted_scenarios, sme_weightings, by = "sme") %>% 30 | combine_scenario_parameters() 31 | capability_parameters <- left_join(fitted_capabilities, sme_weightings, by = "sme") %>% 32 | combine_capability_parameters() 33 | 34 | scen_objs <- prepare_data(scenario_parameters, capability_parameters, 35 | fitted_threat_communities, ques) 36 | expect_s3_class(scen_objs[[1]], "tidyrisk_scenario") 37 | expect_equal(length(scen_objs), nrow(mc_scenarios)) 38 | }) 39 | -------------------------------------------------------------------------------- /tests/testthat/test-utils.R: -------------------------------------------------------------------------------- 1 | context("Utilities") 2 | 3 | test_that("Read questions", { 4 | data(mc_domains) 5 | data("mc_capabilities") 6 | data("mc_scenarios") 7 | 
data("mc_sme_top_domains") 8 | data("calibration_questions") 9 | data("mc_threat_communities") 10 | 11 | workdir <- file.path(tempdir(), "collector") 12 | dir.create(workdir, showWarnings = FALSE) 13 | readr::write_csv(mc_domains, file.path(workdir, "domains.csv")) 14 | readr::write_csv(mc_capabilities, file.path(workdir, "capabilities.csv")) 15 | readr::write_csv(mc_scenarios, file.path(workdir, "scenarios.csv")) 16 | readr::write_csv(mc_sme_top_domains, file.path(workdir, "sme_top_domains.csv")) 17 | readr::write_csv(calibration_questions, file.path(workdir, "calibration_questions.csv")) 18 | readr::write_csv(mc_threat_communities, file.path(workdir, "threat_communities.csv")) 19 | 20 | ques <- read_questions(source_dir = workdir) 21 | expect_s3_class(ques, "tidyrisk_question_set") 22 | unlink(workdir, recursive = TRUE) 23 | }) 24 | 25 | test_that("Read answers", { 26 | data("mc_capability_answers") 27 | data("mc_scenario_answers") 28 | data("mc_calibration_answers") 29 | 30 | workdir <- file.path(tempdir(), "collector") 31 | dir.create(workdir, showWarnings = FALSE) 32 | readr::write_csv(mc_capability_answers, file.path(workdir, "capability_answers.csv")) 33 | readr::write_csv(mc_scenario_answers, file.path(workdir, "scenario_answers.csv")) 34 | readr::write_csv(mc_calibration_answers, file.path(workdir, "calibration_answers.csv")) 35 | 36 | resp <- read_responses(source_dir = workdir) 37 | expect_s3_class(resp, "tidyrisk_response_set") 38 | unlink(workdir, recursive = TRUE) 39 | }) 40 | 41 | test_that("Readability functions", { 42 | data(mc_domains) 43 | data("mc_capabilities") 44 | data("mc_scenarios") 45 | data("mc_sme_top_domains") 46 | data("calibration_questions") 47 | data("mc_threat_communities") 48 | 49 | workdir <- file.path(tempdir(), "collector") 50 | dir.create(workdir, showWarnings = FALSE) 51 | readr::write_csv(mc_domains, file.path(workdir, "domains.csv")) 52 | readr::write_csv(mc_capabilities, file.path(workdir, "capabilities.csv")) 53 | 
readr::write_csv(mc_scenarios, file.path(workdir, "scenarios.csv")) 54 | readr::write_csv(mc_sme_top_domains, file.path(workdir, "sme_top_domains.csv")) 55 | readr::write_csv(calibration_questions, file.path(workdir, "calibration_questions.csv")) 56 | readr::write_csv(mc_threat_communities, file.path(workdir, "threat_communities.csv")) 57 | 58 | ques <- read_questions(source_dir = workdir) 59 | readability_scores <- check_readability(ques) 60 | expect_s3_class(readability_scores, "tbl_df") 61 | unlink(workdir, recursive = TRUE) 62 | }) 63 | -------------------------------------------------------------------------------- /vignettes/.gitignore: -------------------------------------------------------------------------------- 1 | *.html 2 | *.R 3 | -------------------------------------------------------------------------------- /vignettes/file_structures.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "Flat File Schemas" 3 | output: rmarkdown::html_vignette 4 | vignette: > 5 | %\VignetteIndexEntry{Flat File Schemas} 6 | %\VignetteEngine{knitr::rmarkdown} 7 | %\VignetteEncoding{UTF-8} 8 | --- 9 | 10 | ```{r, include = FALSE} 11 | knitr::opts_chunk$set( 12 | collapse = TRUE, 13 | comment = "#>" 14 | ) 15 | ``` 16 | 17 | Key flat file schemas: 18 | 19 | # Responses 20 | 21 | ## `calibration_answers.csv` 22 | char(SME), char(ID), int(LOW), int(HIGH), date(DATE) 23 | 24 | ## `scenario_answers.csv` 25 | char(SME), char(ID), dbl(FREQ_LOW), dbl(FREQ_HIGH), int(IMP_LOW), int(IMP_HIGH), date(DATE) 26 | 27 | ## `capability_answers.csv` 28 | char(SME), char(ID), int(LOW), int(HIGH), date(DATE) 29 | 30 | # Questions 31 | 32 | ## `domains.csv` 33 | char(domain), char(domain_id) 34 | 35 | ## `capabilities.csv` 36 | char(capability), char(capability_id), char(domain_id) 37 | 38 | ## `scenarios.csv` 39 | char(scenario_id), char(scenario), char(threat_id), char(domain_id), char(controls) 40 | 41 | ## `sme_top_domains.csv` 42 | char(sme), char(domain1), char(domain2), char(domain3), char(domain4), char(domain5), char(domain6), char(domain7)
43 | 44 | ## `calibration_questions.csv` 45 | char(*) 46 | --------------------------------------------------------------------------------