├── .Rbuildignore ├── .github ├── .gitignore └── workflows │ ├── R-CMD-check.yaml │ ├── pkgdown.yaml │ ├── pr-commands.yaml │ └── test-coverage.yaml ├── .gitignore ├── DESCRIPTION ├── NAMESPACE ├── R ├── cloud_setup.R ├── helpers.R └── xai2shiny.R ├── README.md ├── _pkgdown.yml ├── codecov.yml ├── inst ├── docker │ └── Dockerfile ├── extdata │ ├── app_static_text.csv │ └── learn_more_about_xai.html └── templates │ └── default_template.txt ├── man ├── cloud_setup.Rd ├── deploy_shiny.Rd └── xai2shiny.Rd ├── tests ├── testthat.R └── testthat │ ├── test-cloud_setup.R │ ├── test-helpers.R │ └── test-xai2shiny.R ├── vignettes ├── .gitignore └── vignette_titanic_example.Rmd └── xai2shiny.Rproj /.Rbuildignore: -------------------------------------------------------------------------------- 1 | ^.*\.Rproj$ 2 | ^\.Rproj\.user$ 3 | ^codecov\.yml$ 4 | ^\.github$ 5 | ^\.httr-oauth$ 6 | -------------------------------------------------------------------------------- /.github/.gitignore: -------------------------------------------------------------------------------- 1 | *.html 2 | -------------------------------------------------------------------------------- /.github/workflows/R-CMD-check.yaml: -------------------------------------------------------------------------------- 1 | # NOTE: This workflow is overkill for most R packages 2 | # check-standard.yaml is likely a better choice 3 | # usethis::use_github_action("check-standard") will install it. 4 | # 5 | # For help debugging build failures open an issue on the RStudio community with the 'github-actions' tag. 6 | # https://community.rstudio.com/new-topic?category=Package%20development&tags=github-actions 7 | on: 8 | push: 9 | branches: 10 | - master 11 | pull_request: 12 | branches: 13 | - master 14 | paths: 15 | - '**.R' 16 | - '**.r' 17 | - '*DESCRIPTION' 18 | - '**.Rmd' 19 | - '**.rmd' 20 | - '**.rda' 21 | - '**.Rda' 22 | - '**.rds' 23 | - '**.Rds' 24 | 25 | name: R-CMD-check 26 | 27 | jobs: 28 | R-CMD-check: 29 | runs-on: ${{ matrix.config.os }} 30 | 31 | name: ${{ matrix.config.os }} (${{ matrix.config.r }}) 32 | 33 | strategy: 34 | fail-fast: false 35 | matrix: 36 | config: 37 | - {os: windows-latest, r: 'release'} 38 | - {os: windows-latest, r: '3.6'} 39 | - {os: ubuntu-16.04, r: 'devel', rspm: "https://packagemanager.rstudio.com/cran/__linux__/xenial/latest", http-user-agent: "R/4.0.0 (ubuntu-16.04) R (4.0.0 x86_64-pc-linux-gnu x86_64 linux-gnu) on GitHub Actions" } 40 | - {os: ubuntu-16.04, r: 'release', rspm: "https://packagemanager.rstudio.com/cran/__linux__/xenial/latest"} 41 | - {os: ubuntu-16.04, r: 'oldrel', rspm: "https://packagemanager.rstudio.com/cran/__linux__/xenial/latest"} 42 | 43 | env: 44 | R_REMOTES_NO_ERRORS_FROM_WARNINGS: true 45 | RSPM: ${{ matrix.config.rspm }} 46 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 47 | 48 | steps: 49 | - uses: actions/checkout@v2 50 | 51 | - uses: r-lib/actions/setup-r@master 52 | with: 53 | r-version: ${{ matrix.config.r }} 54 | http-user-agent: ${{ matrix.config.http-user-agent }} 55 | 56 | - uses: r-lib/actions/setup-pandoc@master 57 | 58 | - name: Query dependencies 59 | run: | 60 | install.packages('remotes') 61 | saveRDS(remotes::dev_package_deps(dependencies = TRUE), ".github/depends.Rds", version = 2) 62 | writeLines(sprintf("R-%i.%i", getRversion()$major, getRversion()$minor), ".github/R-version") 63 | shell: Rscript {0} 64 | 65 | - name: Cache R packages 66 | if: runner.os != 'Windows' 67 | uses: actions/cache@v2 68 | with: 69 | path: ${{ env.R_LIBS_USER }} 70 | key: ${{ runner.os 
}}-${{ hashFiles('.github/R-version') }}-1-${{ hashFiles('.github/depends.Rds') }} 71 | restore-keys: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1- 72 | 73 | - name: Install system dependencies 74 | if: runner.os == 'Linux' 75 | run: | 76 | while read -r cmd 77 | do 78 | eval sudo $cmd 79 | done < <(Rscript -e 'writeLines(remotes::system_requirements("ubuntu", "16.04"))') 80 | 81 | - name: Install dependencies 82 | run: | 83 | remotes::install_deps(dependencies = TRUE) 84 | remotes::install_cran("rcmdcheck") 85 | shell: Rscript {0} 86 | 87 | - name: Session info 88 | run: | 89 | options(width = 100) 90 | pkgs <- installed.packages()[, "Package"] 91 | sessioninfo::session_info(pkgs, include_base = TRUE) 92 | shell: Rscript {0} 93 | 94 | - name: Check 95 | env: 96 | _R_CHECK_CRAN_INCOMING_: false 97 | run: rcmdcheck::rcmdcheck(args = c("--no-manual", "--as-cran"), error_on = "warning", check_dir = "check") 98 | shell: Rscript {0} 99 | 100 | - name: Show testthat output 101 | if: always() 102 | run: find check -name 'testthat.Rout*' -exec cat '{}' \; || true 103 | shell: bash 104 | 105 | - name: Upload check results 106 | if: failure() 107 | uses: actions/upload-artifact@main 108 | with: 109 | name: ${{ runner.os }}-r${{ matrix.config.r }}-results 110 | path: check 111 | -------------------------------------------------------------------------------- /.github/workflows/pkgdown.yaml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: master 4 | 5 | name: pkgdown 6 | 7 | jobs: 8 | pkgdown: 9 | runs-on: macOS-latest 10 | env: 11 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 12 | steps: 13 | - uses: actions/checkout@v2 14 | 15 | - uses: r-lib/actions/setup-r@master 16 | 17 | - uses: r-lib/actions/setup-pandoc@master 18 | 19 | - name: Query dependencies 20 | run: | 21 | install.packages('remotes') 22 | saveRDS(remotes::dev_package_deps(dependencies = TRUE), ".github/depends.Rds", version = 2) 23 | writeLines(sprintf("R-%i.%i", getRversion()$major, getRversion()$minor), ".github/R-version") 24 | shell: Rscript {0} 25 | 26 | - name: Cache R packages 27 | uses: actions/cache@v2 28 | with: 29 | path: ${{ env.R_LIBS_USER }} 30 | key: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1-${{ hashFiles('.github/depends.Rds') }} 31 | restore-keys: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1- 32 | 33 | - name: Install dependencies 34 | run: | 35 | remotes::install_deps(dependencies = TRUE) 36 | install.packages("pkgdown") 37 | remotes::install_github("ModelOriented/DrWhyTemplate") 38 | shell: Rscript {0} 39 | 40 | - name: Install package 41 | run: R CMD INSTALL . 
42 | 43 | - name: Deploy package 44 | run: | 45 | git config --local user.email "actions@github.com" 46 | git config --local user.name "GitHub Actions" 47 | Rscript -e 'pkgdown::deploy_to_branch(new_process = FALSE)' 48 | -------------------------------------------------------------------------------- /.github/workflows/pr-commands.yaml: -------------------------------------------------------------------------------- 1 | on: 2 | issue_comment: 3 | types: [created] 4 | name: Commands 5 | jobs: 6 | document: 7 | if: startsWith(github.event.comment.body, '/document') 8 | name: document 9 | runs-on: macOS-latest 10 | env: 11 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 12 | steps: 13 | - uses: actions/checkout@v2 14 | - uses: r-lib/actions/pr-fetch@master 15 | with: 16 | repo-token: ${{ secrets.GITHUB_TOKEN }} 17 | - uses: r-lib/actions/setup-r@master 18 | - name: Install dependencies 19 | run: Rscript -e 'install.packages(c("remotes", "roxygen2"))' -e 'remotes::install_deps(dependencies = TRUE)' 20 | - name: Document 21 | run: Rscript -e 'roxygen2::roxygenise()' 22 | - name: commit 23 | run: | 24 | git config --local user.email "actions@github.com" 25 | git config --local user.name "GitHub Actions" 26 | git add man/\* NAMESPACE 27 | git commit -m 'Document' 28 | - uses: r-lib/actions/pr-push@master 29 | with: 30 | repo-token: ${{ secrets.GITHUB_TOKEN }} 31 | style: 32 | if: startsWith(github.event.comment.body, '/style') 33 | name: style 34 | runs-on: macOS-latest 35 | env: 36 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 37 | steps: 38 | - uses: actions/checkout@v2 39 | - uses: r-lib/actions/pr-fetch@master 40 | with: 41 | repo-token: ${{ secrets.GITHUB_TOKEN }} 42 | - uses: r-lib/actions/setup-r@master 43 | - name: Install dependencies 44 | run: Rscript -e 'install.packages("styler")' 45 | - name: Style 46 | run: Rscript -e 'styler::style_pkg()' 47 | - name: commit 48 | run: | 49 | git config --local user.email "actions@github.com" 50 | git config --local user.name "GitHub Actions" 51 | git add \*.R 52 | git commit -m 'Style' 53 | - uses: r-lib/actions/pr-push@master 54 | with: 55 | repo-token: ${{ secrets.GITHUB_TOKEN }} 56 | -------------------------------------------------------------------------------- /.github/workflows/test-coverage.yaml: -------------------------------------------------------------------------------- 1 | on: 2 | push: 3 | branches: 4 | - master 5 | pull_request: 6 | branches: 7 | - master 8 | 9 | name: test-coverage 10 | 11 | jobs: 12 | test-coverage: 13 | runs-on: macOS-latest 14 | env: 15 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }} 16 | steps: 17 | - uses: actions/checkout@v2 18 | 19 | - uses: r-lib/actions/setup-r@master 20 | 21 | - uses: r-lib/actions/setup-pandoc@master 22 | 23 | - name: Query dependencies 24 | run: | 25 | install.packages('remotes') 26 | saveRDS(remotes::dev_package_deps(dependencies = TRUE), ".github/depends.Rds", version = 2) 27 | writeLines(sprintf("R-%i.%i", getRversion()$major, getRversion()$minor), ".github/R-version") 28 | shell: Rscript {0} 29 | 30 | - name: Cache R packages 31 | uses: actions/cache@v2 32 | with: 33 | path: ${{ env.R_LIBS_USER }} 34 | key: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1-${{ hashFiles('.github/depends.Rds') }} 35 | restore-keys: ${{ runner.os }}-${{ hashFiles('.github/R-version') }}-1- 36 | 37 | - name: Install dependencies 38 | run: | 39 | install.packages(c("remotes")) 40 | remotes::install_deps(dependencies = TRUE) 41 | remotes::install_cran("covr") 42 | shell: Rscript {0} 43 | 44 | - name: Test 
coverage 45 | run: covr::codecov() 46 | shell: Rscript {0} 47 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | .Rproj.user 2 | .Rhistory 3 | .RData 4 | .Ruserdata 5 | .httr-oauth 6 | inst/doc 7 | -------------------------------------------------------------------------------- /DESCRIPTION: -------------------------------------------------------------------------------- 1 | Package: xai2shiny 2 | Type: Package 3 | Title: Creates Shiny Application From A DALEX Explainer 4 | Version: 1.1.0 5 | Authors@R: c(person("Rydelek", "Adam", email = "adam.rydelek@gmail.com", role = c("aut", "cre")), 6 | person("Mateusz", "Polakowski", email = "mateusz.polakowski.ds@gmail.com", role = "aut"), 7 | person("Przemyslaw", "Biecek", role = c("aut", "ths"), 8 | comment = c(ORCID = "0000-0001-8423-1823"))) 9 | Description: The package is used to create Shiny applications that contain all crucial information about models such as their performance and model explanations. The application is created based on one or many explainers which are adapters for models created by the DALEX package. 10 | License: GPL 11 | Encoding: UTF-8 12 | LazyData: true 13 | RoxygenNote: 7.1.1 14 | Depends: R (>= 3.6) 15 | Imports: 16 | DALEX, 17 | analogsea, 18 | shiny, 19 | shinyjs, 20 | shinyBS, 21 | shinydashboard, 22 | shinyWidgets, 23 | shinycssloaders (>= 1.0.0), 24 | whisker, 25 | readr, 26 | ggplot2 27 | Suggests: 28 | testthat, 29 | shinytest, 30 | ranger, 31 | covr, 32 | knitr, 33 | rmarkdown 34 | VignetteBuilder: knitr 35 | -------------------------------------------------------------------------------- /NAMESPACE: -------------------------------------------------------------------------------- 1 | # Generated by roxygen2: do not edit by hand 2 | 3 | export(cloud_setup) 4 | export(deploy_shiny) 5 | export(xai2shiny) 6 | import(DALEX) 7 | import(shiny) 8 | import(shinydashboard) 9 | importFrom(analogsea,docklet_create) 10 | importFrom(analogsea,docklet_run) 11 | importFrom(analogsea,docklet_shinyapp) 12 | importFrom(analogsea,docklet_shinyserver) 13 | importFrom(analogsea,droplet) 14 | importFrom(analogsea,droplet_ssh) 15 | importFrom(analogsea,droplet_upload) 16 | importFrom(ggplot2,ggtitle) 17 | importFrom(ggplot2,labs) 18 | importFrom(readr,read_file) 19 | importFrom(shinyBS,bsPopover) 20 | importFrom(shinyWidgets,checkboxGroupButtons) 21 | importFrom(shinycssloaders,withSpinner) 22 | importFrom(shinyjs,hide) 23 | importFrom(shinyjs,show) 24 | importFrom(shinyjs,useShinyjs) 25 | importFrom(utils,browseURL) 26 | importFrom(utils,read.csv) 27 | importFrom(whisker,whisker.render) 28 | -------------------------------------------------------------------------------- /R/cloud_setup.R: -------------------------------------------------------------------------------- 1 | # It is impossible to test DigitalOcean droplet's operation, hence no cov. 2 | # nocov start 3 | 4 | #' Create DigitalOcean droplet setup to deploy \code{Shiny} apps 5 | #' 6 | #' This function creates a new DigitalOcean droplet with Docker 19.03.12 on Ubuntu 18.04 with every required package and service to run xai2shiny applications in the cloud. 7 | #' 8 | #' @param size initial RAM (in GBs) for the droplet. Available values: 1, 2, 4, 8. You can always resize the droplet later on. 9 | #' @param ... 
additional params to function analogsea::docklet_create 10 | #' @export 11 | #' @importFrom analogsea droplet 12 | #' @importFrom analogsea docklet_create 13 | #' @importFrom analogsea docklet_shinyserver 14 | cloud_setup <- function(size = 1, ...){ 15 | if(!size %in% c(1,2,4,8)) { 16 | stop("The droplet's size (RAM) can be 1, 2, 4, or 8 GB. Please select one of these values.") 17 | } 18 | 19 | size_do <- paste0("s-1vcpu-",size,"gb") 20 | 21 | # Create new droplet with Docklet 19.03.12 on Ubuntu 18.04 and selected size 22 | docklet <- docklet_create(size = getOption("do_size", size_do), ...) 23 | Sys.sleep(15) # Wait for the droplet to initialize 24 | docklet <- droplet(docklet$id) 25 | 26 | # Install Shiny Server and all prerequisities from xai2shiny Docker image 27 | docklet_shinyserver(droplet = docklet, img = 'adamoso/xai2shiny') 28 | } 29 | 30 | #' Deploy Shiny applications to the cloud 31 | #' 32 | #' This function deploys a selected \code{Shiny} application (\code{xai2shiny} application) to the created droplet. 33 | #' 34 | #' @param droplet the droplet's id or droplet's object. The IP can be checked by running \code{analogsea::droplets()}. 35 | #' @param directory path to the directory containing \code{Shiny} application (\code{xai2shiny} application). 36 | #' @param packages vector of packages (package names) that are needed for the application to run. 37 | #' @param port port at which the application will run. 38 | #' @param browse a boolean, which indicates whether open the app on the web after deploying 39 | #' @param ssh_user the name of ssh console user, should NOT be modified when using the default xai2shiny cloud_setup 40 | #' @export 41 | #' @importFrom analogsea docklet_shinyapp 42 | #' @importFrom analogsea docklet_run 43 | #' @importFrom analogsea droplet_ssh 44 | #' @importFrom analogsea droplet_upload 45 | #' @importFrom readr read_file 46 | #' @importFrom whisker whisker.render 47 | deploy_shiny <- function(droplet, directory, packages = "stats", port = 80, browse = TRUE, ssh_user = "root"){ 48 | 49 | # Check if droplet exists 50 | if (missing(droplet) || is.null(droplet) || class(droplet) != "droplet" && class(droplet) != "numeric"){ 51 | stop("You must create a droplet using xai2shiny::cloud_setup() before deploying your application.\n After doing so, provide your droplet's id/droplet's object as the droplet parameter.") 52 | } 53 | if (class(droplet) == "droplet"){ 54 | docklet <- droplet 55 | } 56 | if (class(droplet) == "numeric"){ 57 | docklet <- droplet(id = droplet) 58 | } 59 | 60 | # Create folder for Dockerfile in droplet 61 | droplet_ssh(docklet, "mkdir -p /home/docker_setup") 62 | 63 | # Prepare the Dockerfile and data to fill it 64 | path_to_dockerfile <- system.file("docker", "Dockerfile", package="xai2shiny") 65 | dockerfile <- readr::read_file(path_to_dockerfile) 66 | pkgs <- paste("'", packages, "'", sep = "", collapse = ",") 67 | pkgs <- paste0("c(", pkgs, ")") 68 | packages_needed <- list(packages = pkgs) 69 | 70 | # Filling Dockefile with whisker 71 | text_to_file <- whisker::whisker.render(dockerfile, packages_needed) 72 | temp_directory <- tempdir() 73 | file_path <- paste0(temp_directory, "/Dockerfile") 74 | file.create(file_path) 75 | file_conn <- file(file_path) 76 | writeLines(text_to_file, file_conn) 77 | close(file_conn) 78 | 79 | # Upload Dockerfile to droplet 80 | droplet_upload(docklet, file_path, "/home/docker_setup/") 81 | 82 | # Build the image 83 | image_name <- tolower(packages[1]) 84 | cmd <- paste0("docker build -t ", image_name, 
" /home/docker_setup/") 85 | droplet_ssh(docklet, cmd) 86 | 87 | # Upload Shiny app files to droplet 88 | droplet_ssh(docklet, "mkdir -p /srv/shinyapps") 89 | droplet_upload(docklet, directory, "/srv/shinyapps/") 90 | 91 | # Run the application 92 | docklet_run(docklet, " -d", " -p ", paste0(port, ":3838"), 93 | cn(" -v ", '/srv/shinyapps/:/srv/shiny-server/'), 94 | cn(" -w", ''), " ", image_name, ssh_user = ssh_user) 95 | 96 | # Open the application in web browser 97 | url <- sprintf("http://%s:%s/", droplet_ip(docklet), port) 98 | if (browse) { 99 | Sys.sleep(5) # give Shiny Server a few seconds to start up 100 | browseURL(url) 101 | } 102 | } 103 | 104 | # nocov end 105 | -------------------------------------------------------------------------------- /R/helpers.R: -------------------------------------------------------------------------------- 1 | # nocov start 2 | #' @noRd 3 | #' @title droplet_ip 4 | #' @description This funciton returns the IP adress of a droplet 5 | #' @param x droplet - selected droplet 6 | droplet_ip <- function(x) { 7 | v4 <- x$network$v4 8 | if (length(v4) == 0) { 9 | stop("No network interface registered for this droplet\n Try refreshing like: droplet(d$id)", 10 | call. = FALSE 11 | ) 12 | } 13 | ips <- do.call("rbind", lapply(v4, as.data.frame)) 14 | public_ip <- ips$type == "public" 15 | if (!any(public_ip)) { 16 | ip <- v4[[1]]$ip_address 17 | } else { 18 | ip <- ips$ip_address[public_ip][[1]] 19 | } 20 | ip 21 | } 22 | # nocov end 23 | 24 | #' @noRd 25 | #' @title cn 26 | #' @description Simple helper function 27 | #' @param x string 28 | #' @param y string 29 | cn <- function(x, y) if (nchar(y) == 0) y else paste0(x, y) 30 | -------------------------------------------------------------------------------- /R/xai2shiny.R: -------------------------------------------------------------------------------- 1 | #' Create Shiny app from an explainer 2 | #' 3 | #' This function creates a \code{Shiny} application for explainers which are adapters for models created using the \code{DALEX} package. 4 | #' The application contains model performance and explanations to fully explore the model. 5 | #' 6 | #' @param ... one or more explainers created with \code{DALEX::explain()} function. They can be switched in top right corner of the application. 7 | #' @param directory path to the directory the application files will be created in. If \code{NULL} the application will be created in a temporary directory. 8 | #' @param selected_variables choosen variables for application start-up. There can be more added in the application interface through an input. 
9 | #' @param run whether to run the Shiny application instantly 10 | #' @param override how to respond to a directory overriding case 11 | #' @param verbose whether to log in console internal function's steps 12 | #' @export 13 | #' @import shiny 14 | #' @import shinydashboard 15 | #' @import DALEX 16 | #' @importFrom shinyjs useShinyjs show hide 17 | #' @importFrom shinyBS bsPopover 18 | #' @importFrom shinycssloaders withSpinner 19 | #' @importFrom shinyWidgets checkboxGroupButtons 20 | #' @importFrom whisker whisker.render 21 | #' @importFrom readr read_file 22 | #' @importFrom utils browseURL read.csv 23 | #' @importFrom ggplot2 ggtitle labs 24 | #' @examples 25 | #' # Create models 26 | #' library("ranger") 27 | #' library("DALEX") 28 | #' model_rf <- ranger(survived ~ ., 29 | #' data = titanic_imputed, 30 | #' classification = TRUE, 31 | #' probability = TRUE) 32 | #' model_glm <- glm(survived ~ ., 33 | #' data = titanic_imputed, 34 | #' family = "binomial") 35 | #' 36 | #' # Create DALEX explainers 37 | #' explainer_rf <- explain(model_rf, 38 | #' data = titanic_imputed[,-8], 39 | #' y = titanic_imputed$survived) 40 | #' 41 | #' explainer_glm <- explain(model_glm, 42 | #' data = titanic_imputed[,-8], 43 | #' y = titanic_imputed$survived) 44 | #' 45 | #' # Create and run the application 46 | #' 47 | #'\dontrun{ 48 | #' xai2shiny(explainer_rf, explainer_glm) 49 | #' } 50 | xai2shiny <- function(..., directory = NULL, selected_variables = NULL, run = TRUE, override = FALSE, verbose = TRUE) { 51 | 52 | if(verbose == TRUE) { 53 | cat("Setting up new Shiny XAI application\n") 54 | } 55 | 56 | # Obtaining explainers 57 | args <- list(..., version = 1.0) 58 | explainers <- args[names(args) == ""] 59 | 60 | # Creating necessary directory in order to drop generated app there 61 | directory <- create_directory(directory, override, verbose) 62 | 63 | # Fetching explainers data 64 | data <- get_explainers_data(explainers) 65 | 66 | # Creating `cols` and `selected_variables` objects (if not specified) 67 | objects_to_template <- generate_cols_and_variables(data, selected_variables, verbose) 68 | 69 | # Observation string used further in template 70 | objects_to_template <- create_observation(data, objects_to_template, verbose) 71 | 72 | # Saving all explainers 73 | objects_to_template <- save_explainers(explainers, directory, objects_to_template, verbose) 74 | 75 | # Filling template 76 | template_text_filled <- fill_template(objects_to_template, explainers, verbose) 77 | 78 | # Saving filled template as Shiny application and .html file with XAI tab content 79 | save_files(directory, template_text_filled, verbose) 80 | 81 | if(verbose == TRUE) { 82 | cat("Application setup ended\n") 83 | } 84 | 85 | # Additional app running 86 | if(run) shiny::runApp(directory) 87 | } 88 | 89 | 90 | #' @noRd 91 | #' @title create_directory 92 | #' @description This funciton creates a directory at a given location. 93 | #' If not provided, the directory will be created in temporary directory. 94 | #' @param directory string - path to the desired directory location 95 | #' @param override bool - whether to override the directory if it already exists 96 | #' @param verbose bool - if text information should be displayed in console 97 | create_directory <- function(directory, override, verbose) { 98 | 99 | if(is.null(directory) | length(nchar(directory)) == 0) { 100 | 101 | cat("You passed no explicit directory location. 
If you want to specify it, please pass it now.\n") 102 | cat("In case of an empty string, temporary directory is going to be set up\n") 103 | directory <- readline("Please provide the final location: ") 104 | 105 | if(nchar(directory) == 0) { 106 | directory <- file.path(tempdir(), 'xai2shiny') 107 | } 108 | } 109 | 110 | if(verbose == TRUE) { 111 | cat(paste0("\tApplication is setting up at: ", directory, "\n")) 112 | } 113 | 114 | if(dir.exists(directory)) { 115 | if(override) { 116 | warning("Overiding existing directory with the newest application") 117 | cat(paste0("Caution! You are about to delete the directory content (", directory, ").\n")) 118 | if(readline("Are you sure? [y / n]: ") == "y") { 119 | suppressWarnings({ 120 | unlink(directory, recursive = TRUE) 121 | dir.create(directory) 122 | }) 123 | } else { 124 | stop("Specified directory exists. Please delete it by yourself or point to a different location.") 125 | } 126 | } else { 127 | stop('Specified directory exists and override is set to FALSE. Set it to TRUE or change xai2shiny files destination') 128 | } 129 | } else { 130 | dir.create(directory) 131 | } 132 | 133 | return(directory) 134 | } 135 | 136 | 137 | #' @noRd 138 | #' @title get_explainers_data 139 | #' @description This funciton provides explainers data for further calculations. 140 | #' Additionally, it checks whether all explainers are based on the same data frame (same columns, data can be split into train/test/validate). 141 | #' @param explainers explainer list - list of explainers input for the function 142 | get_explainers_data <- function(explainers) { 143 | 144 | if(length(unique(lapply(explainers, function(x) { colnames(x$data) }))) != 1) { 145 | stop("Explainers unique datasets amount does not equal 1.\nYou have to base explainers on data with the same columns!") 146 | } 147 | 148 | return(explainers[[1]]$data) 149 | } 150 | 151 | 152 | #' @noRd 153 | #' @title generate_cols_and_variables 154 | #' @description This funciton creates `cols` - data columns and `selected_variables` - variables chosen by the user objects for further calculations. 155 | #' Additionally, it checks for variables with just one unique value and ignores them. 156 | #' @param data data selected while creating the explainer 157 | #' @param selected_variables variables selected by the user. If not specified, will choose all variables or if there are more than 8 variables, will choose the first 8. 
158 | #' @param verbose bool - if text information should be displayed in console 159 | generate_cols_and_variables <- function(data, selected_variables, verbose) { 160 | 161 | if(verbose == TRUE) { 162 | cat("\tGenerating internal Shiny app objects, part 1.\n") 163 | } 164 | 165 | if(length(which(apply(data, 2, function(x) length(unique(x))) == 1)) > 0) { 166 | cols <- colnames(data)[- which(apply(data, 2, function(x) length(unique(x))) == 1)] 167 | } 168 | else { 169 | cols <- colnames(data) 170 | } 171 | 172 | temp_cols <- cols 173 | cols <- paste0("'", cols, "'") 174 | cols <- paste(cols, sep = "", collapse = ",") 175 | cols <- paste0("c(", cols, ")") 176 | 177 | if(is.null(selected_variables)) { 178 | # if(length(cols) < 7) selected_variables <- cols 179 | # else selected_variables <- cols[1:7] 180 | selected_variables <- paste0("c('", temp_cols[1], "')") 181 | } else { 182 | temp_variables <- paste0("'", selected_variables, "'") 183 | temp_variables <- paste(temp_variables, sep = "", collapse = ",") 184 | selected_variables <- paste0("c(", temp_variables, ")") 185 | } 186 | 187 | objects_to_template <- list(cols = cols, selected_variables = selected_variables) 188 | 189 | return(objects_to_template) 190 | } 191 | 192 | 193 | #' @noRd 194 | #' @title create_observation 195 | #' @description Auxiliary function to create expression string used in Shiny application data input. 196 | #' @param data data selected while creating the explainer 197 | #' @param objects_to_template list - information to fill the template, generated by previous functions 198 | #' @param verbose bool - if text information should be displayed in console 199 | create_observation <- function(data, objects_to_template, verbose) { 200 | 201 | if(verbose == TRUE) { 202 | cat("\tGenerating internal Shiny app objects, part 2.\n") 203 | } 204 | 205 | vars <- lapply(data, class) 206 | t_vars <- as.data.frame(cbind(names = names(vars), type = vars)) 207 | t_vars$levels <- apply(t_vars, 1, function(x) paste0(', levels = levels(data$', x$`names`, ')')) 208 | t_vars$levels[t_vars$type != 'factor'] <- '' 209 | t_vars$as <- '' 210 | t_vars$as[t_vars$type != 'factor'] <- 'as.' 211 | 212 | t <- apply(t_vars, 1, function(x) paste0(x$`names`, ' = ', x$`as` , x$`type` , '(input$', x$`names`, x$`levels`, ')')) 213 | 214 | obstr <- paste(t, collapse = ", ", '\n\t\t\t') 215 | 216 | objects_to_template['obs'] <- paste0("list(", obstr,")") 217 | 218 | return(objects_to_template) 219 | } 220 | 221 | 222 | #' @noRd 223 | #' @title save_explainers 224 | #' @description This function loops through all explainers to save them into files. 
225 | #' @param explainers explainer list - list of explainers input for the function 226 | #' @param directory string - path to the desired directory location 227 | #' @param objects_to_template list - information to fill the template, generated by previous functions 228 | #' @param verbose bool - if text information should be displayed in console 229 | save_explainers <- function(explainers, directory, objects_to_template, verbose) { 230 | 231 | if(verbose == TRUE) { 232 | cat("\tSaving explainers files to the final directory\n") 233 | cat("\tGenerating internal Shiny app objects, part 3.\n") 234 | } 235 | 236 | buttons <- '' 237 | explainers_reactive <- '' 238 | explainers_static <- '' 239 | libs <- '' 240 | 241 | for(i in 1:length(explainers)){ 242 | saveRDS(explainers[[i]], file = paste0(directory,"/exp", i, ".rds")) 243 | packages <- explainers[[i]]$model_info$package 244 | 245 | # Checking if model package has been read correctly 246 | if(length(packages) > 1) { 247 | for(package in packages) { 248 | if(!grepl("\\s", package)) { 249 | lib <- paste0("library(",package,")\n") 250 | } 251 | } 252 | } 253 | else { 254 | # Additional logic for h2o-based models 255 | if(grepl("\\s", packages)){ 256 | if(grepl("H2O", packages, fixed = TRUE)){ 257 | lib <- paste0("library('h2o')\n") 258 | } 259 | else{ 260 | stop(paste0("The package used to create the model was not read correctly.\n", 261 | "Set it manually to the correct value by using:\nexplainer$model_info$package <- 'name of package'")) 262 | } 263 | } 264 | else{ 265 | lib <- paste0("library(",packages,")\n") 266 | } 267 | } 268 | 269 | # Setting up proper values 270 | libs <- paste0(libs, lib) 271 | button <- paste0('tags$li(class = "dropdown", actionBttn("exp', i, '", explainer', i, '$label, style = "fill", block = TRUE))') 272 | buttons <- paste0(buttons, ", ", button) 273 | explainer_reactive <- paste0('observeEvent(input$exp', i, ', { exp$data <- explainer', i, ' })') 274 | explainers_reactive <- paste0(explainers_reactive, "\n\t", explainer_reactive) 275 | explainer_static <- paste0('explainer',i,' <- readRDS("exp',i,'.rds")') 276 | explainers_static <- paste0(explainers_static, "\n", explainer_static) 277 | } 278 | 279 | if(verbose == TRUE) { 280 | cat("\tExplainers saved properly\n") 281 | } 282 | 283 | objects_to_template['libs'] <- libs 284 | objects_to_template['buttons'] <- buttons 285 | objects_to_template['explainers_reactive'] <- explainers_reactive 286 | objects_to_template['explainers_static'] <- explainers_static 287 | 288 | return(objects_to_template) 289 | } 290 | 291 | 292 | #' @noRd 293 | #' @title fill_template 294 | #' @description This function generates template text by filling all the necessary placeholders with created data. 
295 | #' @param objects_to_template list - information to fill the template, generated by previous functions 296 | #' @param explainers explainer list - list of explainers input for the function 297 | #' @param verbose bool - if text information should be displayed in console 298 | fill_template <- function(objects_to_template, explainers, verbose) { 299 | 300 | if(verbose == TRUE) { 301 | cat("\tGenerating Shiny application by filling the template file\n") 302 | } 303 | 304 | # Reading necessary files: template and static text for prediction description 305 | static_text <- read.csv(system.file("extdata", "app_static_text.csv", package="xai2shiny"), sep = ';') 306 | prediction_text <- ifelse(explainers[[1]]$model_info$type == "classification", 307 | paste0("'",as.character(static_text$text[static_text$text_destination == 'prediction_classification']),"'"), 308 | paste0("'",as.character(static_text$text[static_text$text_destination == 'prediction_regression']),"'")) 309 | path_to_template <- system.file("templates", "default_template.txt", package = "xai2shiny") 310 | template_text <- readr::read_file(path_to_template) 311 | 312 | # Adding further template objects 313 | objects_to_template['text_prediction'] <- prediction_text 314 | 315 | # Filling template values with whisker 316 | template_text_filled <- whisker::whisker.render(template_text, objects_to_template) 317 | 318 | return(template_text_filled) 319 | } 320 | 321 | 322 | #' @noRd 323 | #' @title save_files 324 | #' @description This function saves filled template text to the file as a Shiny app and XAI tab content 325 | #' @param directory string - path to the desired directory location 326 | #' @param template_text_filled template text - shiny application text that will be writen to the file 327 | #' @param verbose bool - if text information should be displayed in console 328 | save_files <- function(directory, template_text_filled, verbose) { 329 | 330 | if(verbose == TRUE) { 331 | cat("\tSaving application file\n") 332 | } 333 | 334 | # Saving Shiny app 335 | template_file_path <- paste0(directory, "/app.R") 336 | file.create(template_file_path) 337 | template_file_conn <- file(template_file_path) 338 | writeLines(template_text_filled, template_file_conn) 339 | close(template_file_conn) 340 | file.copy(from = system.file("extdata", "learn_more_about_xai.html", package = "xai2shiny"), to = paste0(directory, "/learn_more_about_xai.html")) 341 | } 342 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # xai2shiny 2 | 3 | 4 | [![R build status](https://github.com/ModelOriented/xai2shiny/workflows/R-CMD-check/badge.svg)](https://github.com/ModelOriented/xai2shiny/actions) 5 | [![Coverage Status](https://codecov.io/gh/ModelOriented/xai2shiny/branch/master/graph/badge.svg)](https://codecov.io/github/ModelOriented/xai2shiny?branch=master) 6 | 7 | 8 | ## Overview 9 | 10 | The `xai2shiny` R package creates a **Shiny application** for Explainers (adapters for machine learning models created using the `DALEX` package). Turn your model into an interactive application containing model's prediction, performance and many **XAI** methods with just **one function**. Furthermore, with `xai2shiny` you can simply export your application to the cloud and share it with others. 
11 | 12 | ## Installation 13 | 14 | ``` 15 | # Install the development version from GitHub: 16 | devtools::install_github("ModelOriented/xai2shiny") 17 | ``` 18 | 19 | ## Example 20 | 21 | Package usage example will be based on the *titanic* dataset, including GLM and Random Forest models. 22 | [The final application created using the scipt below.](http://142.93.31.232:218/showcase/) 23 | First, it is necessary to have any explainers created whatsoever: 24 | 25 | ``` 26 | library("xai2shiny") 27 | library("ranger") 28 | library("DALEX") 29 | 30 | # Creating ML models 31 | model_rf <- ranger(survived ~ ., 32 | data = titanic_imputed, 33 | classification = TRUE, 34 | probability = TRUE) 35 | model_glm <- glm(survived ~ ., 36 | data = titanic_imputed, 37 | family = "binomial") 38 | 39 | # Creating DALEX explainers 40 | explainer_rf <- explain(model_rf, 41 | data = titanic_imputed[,-8], 42 | y = titanic_imputed$survived) 43 | 44 | explainer_glm <- explain(model_glm, 45 | data = titanic_imputed[,-8], 46 | y = titanic_imputed$survived) 47 | ``` 48 | 49 | Then all is left to do is to run: 50 | 51 | ``` 52 | xai2shiny::xai2shiny(explainer_glm, explainer_rf, 53 | directory = './', 54 | selected_variables = c('gender', 'age'), 55 | run = FALSE) 56 | ``` 57 | 58 | Above, in `xai2shiny` function, apart from explainers, following attributes were provided: 59 | 60 | * `directory` - a location indicator where to create whole `xai2shiny` directory and place there required files (an app and explainers), 61 | * `selected_variables` - a vector containing variables list chosen at an app start-up (used for modification and local explanations research), 62 | * `run` - whether to run an app immediately after creating. 63 | 64 | ## Cloud deployment 65 | 66 | Further cloud deployment can be performed. In order to do so, there are just three steps necessary to enjoy your new *xai2shiny* application in the cloud. 67 | 68 | 1. If you don't have an account on DigitalOcean, create one [here](https://m.do.co/c/c07558eaca11) and get $100 free credit. 69 | 2. [Create an SSH key](https://docs.github.com/en/enterprise/2.17/user/github/authenticating-to-github/generating-a-new-ssh-key-and-adding-it-to-the-ssh-agent?fbclid=IwAR3E66nCkq5cS6BSSHvgv-tzFa9MjWL37bUgRz3DKwglTO8Zn_t6tmKwvRo) if you don't have one yet. 70 | 3. [Deploy the SSH key to DigitalOcean](https://www.digitalocean.com/docs/droplets/how-to/add-ssh-keys/to-account/) 71 | 72 | And that's it, you are ready to get back to R and deploy your application. In order to create a new cloud instance, called a *droplet* by DigitalOcean, running Docker on Ubuntu with all prerequisities installed, just run: 73 | 74 | ``` 75 | xai2shiny::cloud_setup(size) 76 | ``` 77 | 78 | * `size` - ram size desired for the droplet, defaults to 1GB. It can be modified later through [DigitalOceans website](https://www.digitalocean.com/). 79 | 80 | Now that your droplet is setup, just deploy the created *xai2shiny* application with *one function*. 81 | 82 | ``` 83 | deploy_shiny(droplet = , path = './xai2shiny', packages = "ranger") 84 | ``` 85 | 86 | 87 | * `droplet` - the droplet object/droplet's ID that can be read from running `analogsea::droplets()`. 88 | * `path` - path to the *xai2shiny* application 89 | * `packages` - packages used to create or run the model, they will be installed on the droplet. 90 | 91 | And that's it, the *xai2shiny* application is running and will automatically open in your default web browser, now all that's left is to share it! 
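For reference, a minimal end-to-end deployment sketch (assuming the droplet was created with `cloud_setup()`, the application directory was generated by the script above, and picking the first droplet on the account with `[[1]]` is just an example; the argument name `directory` follows the `deploy_shiny()` signature):

```
# Find the droplet created by cloud_setup()
droplet <- analogsea::droplets()[[1]]

# Deploy the generated xai2shiny application to it
xai2shiny::deploy_shiny(droplet = droplet,
                        directory = "./xai2shiny",
                        packages = "ranger",
                        port = 80)
```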
92 | 93 | ## Functionality 94 | 95 | The main function is called **xai2shiny** which creates the Shiny **app.R** file and runs it converting your models into an interactive application. 96 | 97 | At the time it supports such functionalities for **multiple models in one application**: 98 | 99 | 1. **Model prediction** 100 | 2. **Model performance** (with text descriptions of measures) 101 | 3. **Local explanations:** (with text descriptions) 102 | * Break Down plot 103 | * SHAP values plot 104 | * Ceteris Paribus plot 105 | 4. **Global explanations:** 106 | * Feature importance plots 107 | * Partial Dependence plots 108 | 109 | ## Acknowledgments 110 | 111 | Work on this package was financially supported by the Polish National Science Centre under Opus Grant number 2017/27/B/ST6/0130. 112 | -------------------------------------------------------------------------------- /_pkgdown.yml: -------------------------------------------------------------------------------- 1 | template: 2 | package: DrWhyTemplate 3 | default_assets: false -------------------------------------------------------------------------------- /codecov.yml: -------------------------------------------------------------------------------- 1 | comment: false 2 | 3 | coverage: 4 | status: 5 | project: 6 | default: 7 | target: auto 8 | threshold: 1% 9 | informational: true 10 | patch: 11 | default: 12 | target: auto 13 | threshold: 1% 14 | informational: true 15 | -------------------------------------------------------------------------------- /inst/docker/Dockerfile: -------------------------------------------------------------------------------- 1 | FROM adamoso/xai2shiny 2 | 3 | RUN R -e "install.packages({{{packages}}})" 4 | EXPOSE 3838 5 | 6 | 7 | -------------------------------------------------------------------------------- /inst/extdata/app_static_text.csv: -------------------------------------------------------------------------------- 1 | text_destination;text 2 | prediction_regression;The predicted value is: 3 | prediction_classification;The predicted probability is: 4 | 5 | -------------------------------------------------------------------------------- /inst/extdata/learn_more_about_xai.html: -------------------------------------------------------------------------------- 1 |
2 | 3 |

Learn more about XAI

4 | 5 |

Explainable Artificial Intelligence has never been an easily intelligible part of the Data Science ecosystem. Therefore, this tab is provided for any user who may not feel confident about every component of the main dashboard. Plots may look pretty, but it is more important to be able to read and analyze them and to reach the insightful details they contain. The following chapters aim to hand over the essence of every method used, both for local and global explanations.

6 | 7 |

Model performance and prediction

8 | 9 | As that's not a direct subcategory of XAI methods, it's worth adding some information about the model performance box, used methods, and plots' interpretations. 10 | 11 |
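As a quick illustration, the contents of that box can be reproduced directly with DALEX (a minimal sketch assuming a DALEX explainer object named `explainer`, e.g. one created as in the package README):

```
# Overall quality measures and the ROC curve shown in the Model Performance box
perf <- DALEX::model_performance(explainer)
perf$measures               # accuracy, AUC, recall, precision, F1 (or MSE, RMSE, R2, MAD for regression)
plot(perf, geom = "roc")    # ROC curve; plain plot(perf) is used for regression models
```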

Break Down plot

12 | 13 |

Break-Down is probably the instance-level method that is easiest for a standard user to interpret. Similarly to SHAP values, it shows the influence of particular feature values on the final prediction. Thanks to this method we can examine how the model reacts to specific variable values.

14 | 15 |

At the top of the plot there is the average prediction for all observations. Below that, the algorithm calculates how this mean changes when we fix the values of consecutive features. The bottom of the plot corresponds to the provided record, so it is possible to analyze how the overall mean prediction changes as subsequent variables are fixed, until the prepared observation is reached.
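For example, such a profile can be computed directly with DALEX (a minimal sketch assuming a DALEX explainer named `explainer` and a single observation, here simply the first row of the explainer's data):

```
observation <- explainer$data[1, ]
bd <- DALEX::predict_parts(explainer, new_observation = observation, type = "break_down")
plot(bd)    # the Break Down plot shown in the Predict Parts box
```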

16 | 17 |

SHAP Values plot

18 | 19 |

The SHAP plot is another local explanation method which depicts the influence of specific features on the prediction. It is based on the classic Shapley values from game theory and takes longer to compute than Break Down profiles. If a model is not too complicated, it is usually worth comparing the results provided by Break Down plots with SHAP plots and basing the conclusions on both of them.

20 | 21 |

Unlike the Break Down profile, the plot does not contain the intercept value and only shows the contribution of each variable to the prediction. The boxplots found on each bar for a specific observation depict the uncertainty of the attribution.
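The corresponding SHAP attributions are obtained with the same function, only with a different type (again a sketch reusing the explainer and observation from the Break Down example):

```
sh <- DALEX::predict_parts(explainer, new_observation = observation, type = "shap")
plot(sh)    # SHAP plot with boxplots showing the uncertainty of attributions
```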

22 | 23 |

Ceteris Paribus plot

24 | 25 |

CP Profiles differ somewhat from the previous two instance-level methods. Here we can investigate how modifications of a specific feature value alter the final prediction. With the rest of the variables held at fixed values, we can calculate the model's expected value (i.e., prediction) for an updated observation. As the Latin phrase 'ceteris paribus' goes, other things are held constant: these updates happen only for the examined feature.
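A Ceteris Paribus profile for a chosen variable can be computed like this (a sketch assuming the same explainer and observation; 'age' stands for any variable present in the data):

```
cp <- DALEX::predict_profile(explainer, new_observation = observation, variables = "age")
plot(cp, variables = "age")    # prediction as a function of the selected variable
```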

26 | 27 |

Feature Importance plot

28 | 29 |

Apart from sophisticated graph interpretations, users may want to look at the plot and, at a glance, understand the relations between the model's predictions and the dataset variables. Feature importance may be the solution in that case. The underlying math may cause a headache, but the results are easy to grasp. Every variable is given a value based on the loss function (how much the model's loss changes when that variable is perturbed), which indicates how essential that specific variable is for the model (and how much the model relies on it).
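In DALEX terms this corresponds to the Model Parts box (a minimal sketch assuming an explainer named `explainer`):

```
fi <- DALEX::model_parts(explainer)
plot(fi)    # permutation-based feature importance
```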

30 | 31 |

Partial Dependence plot

32 | 33 |

The second dataset-level method is an extension of Ceteris Paribus Profiles. It comes down to calculating the expected value of the CP function, but in the vast majority of cases this is not that simple, so a convenient estimator is used instead. It calculates the mean of CP Profiles and can be based on a group of the dataset observations or on all of them.

34 | 35 |

Unfortunately, this is the main reason why the calculation time is longer compared to the instance-level Ceteris Paribus. Nevertheless, when the model is built on data with interactions (i.e., when one feature depends on another, e.g., age and hair graying level), it is crucial to check overall patterns rather than rely on single-observation results.
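The corresponding Model Profile computation looks like this (a sketch assuming the same explainer; 'age' is an example variable):

```
pdp <- DALEX::model_profile(explainer, variables = "age")
plot(pdp)    # Partial Dependence profile averaged over Ceteris Paribus profiles
```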

36 | 37 |

Hungry for more?

38 | 39 |

We realize that the above descriptions won't satisfy everyone. For those who made it through all of the chapters and want to learn even more about XAI, please find below a list of very reliable sources of knowledge. There you will find great depictions of the methods, citations to further articles, code examples, and package recommendations to fully explore the wonderful world of Explainable Artificial Intelligence!

40 | 41 | 45 | 46 |
47 | -------------------------------------------------------------------------------- /inst/templates/default_template.txt: -------------------------------------------------------------------------------- 1 | library("shiny") 2 | library("shinyjs") 3 | library("shinyBS") 4 | library("shinydashboard") 5 | library("shinycssloaders") 6 | library("shinyWidgets") 7 | library("DALEX") 8 | library("ggplot2") 9 | 10 | # Additional models libraries used 11 | {{{libs}}} 12 | 13 | # Loading explainers from created files 14 | {{{explainers_static}}} 15 | 16 | # Assuming data is the same among explainers, so loading it from the first one is acceptable in this case 17 | data <- explainer1$data 18 | 19 | # Observation created based on average (numeric features) and 20 | # most frequent (categorical features) data for each column 21 | temp_data <- data[{{{cols}}}] 22 | chosen_observation <- suppressWarnings({as.data.frame(t(sapply(temp_data, function(x) {round(mean(x), 2)} )))}) 23 | chosen_observation_cat_help <- as.data.frame(t(sapply(temp_data, function(x) {names(sort(table(x), decreasing = T, na.last = T)[1])}))) 24 | 25 | # Filling NA values for categorical features 26 | for(name in names(chosen_observation)) { 27 | if(is.na(chosen_observation[name])) { 28 | chosen_observation[name] <- chosen_observation_cat_help[name] 29 | } 30 | } 31 | 32 | # Creating proper dropdown menu to choose explainers from 33 | dropdownActionMenu <- function (..., title=NULL, icon = NULL, .list = NULL, header=NULL) { 34 | items <- c(list(...), .list) 35 | 36 | # Asserting whether all tags passed are HTML list elements 37 | lapply(items, shinydashboard:::tagAssert, type = "li") 38 | 39 | # Specyfing list elements 40 | type <- "notifications" 41 | tags$li(class = paste0("dropdown ", type, "-menu"), 42 | a(href = "#", class = "dropdown-toggle", `data-toggle` = "dropdown", icon, title), 43 | tags$ul(class = "dropdown-menu", if(!is.null(header)) tags$li(class="header",header), tags$li(tags$ul(class = "menu", items)))) 44 | } 45 | 46 | 47 | 48 | ui <- dashboardPage( 49 | skin = "purple", 50 | 51 | # Header which includes a dashboard title and an unordered list of deployed explainers 52 | dashboardHeader(title = "xai2shiny", 53 | dropdownActionMenu(title="Chosen explainer" 54 | , icon = icon("arrow-circle-down") 55 | {{{buttons}}} 56 | ) 57 | ), 58 | 59 | # Sidebar containing all modifiable elements of the dashboard 60 | dashboardSidebar( 61 | sidebarMenu( 62 | 63 | # Sidebar menu element for model exploration 64 | menuItem(p(id = "menu1", "Model exploration"), 65 | tabName = "dashboard", 66 | icon = icon("arrow-circle-right")), 67 | 68 | # Checkboxes 69 | checkboxGroupButtons( 70 | inputId = "selected_features", 71 | label = "Select dashboard features:", 72 | choices = c("Local explanations", "Global explanations", "Model performance", "Text description"), 73 | selected = c("Local explanations", "Model performance"), 74 | direction = "vertical", 75 | justified = TRUE, 76 | status = "primary", 77 | checkIcon = list(yes = icon("ok", lib = "glyphicon"), no = icon("remove", lib = "glyphicon")) 78 | ), 79 | 80 | # Additional styling 81 | tags$script("$(\"input:checkbox[name='selected_features'][value='Local explanations']\").parent().css('background-color', '#483D8B');"), 82 | tags$script("$(\"input:checkbox[name='selected_features'][value='Global explanations']\").parent().css('background-color', '#1958a6');"), 83 | tags$script("$(\"input:checkbox[name='selected_features'][value='Model 
performance']\").parent().css('background-color', '#634A87');"), 84 | tags$script("$(\"input:checkbox[name='selected_features'][value='Text description']\").parent().css('background-color', '#46425E');"), 85 | 86 | pickerInput( 87 | inputId = "selected_columns", 88 | label = "Select variables to modify:", 89 | choices = {{{cols}}}, 90 | selected = {{{selected_variables}}}, 91 | options = list( 92 | `actions-box` = TRUE, 93 | size = 12, 94 | `selected-text-format` = "static", 95 | `none-selected-text` = 'Expand the list and choose' 96 | ), 97 | multiple = TRUE 98 | ), 99 | 100 | hr(), 101 | h4("Modify variables below:"), 102 | 103 | # Model variables input 104 | uiOutput('vars_input'), 105 | width = 3, 106 | 107 | # Sidebar menu element for XAI resources 108 | menuItem(p(id = "menu2", "Learn more about XAI"), tabName = "xai_resources", icon = icon("th")), 109 | 110 | # Additional styling 111 | tags$style(type="text/css", 112 | ".shiny-output-error { visibility: hidden; }", 113 | ".shiny-output-error:before { visibility: hidden; }", 114 | "h4 { display:block; margin-left: 0.8em; }" 115 | ) 116 | ) 117 | ), 118 | 119 | # Main dashboard body 120 | # Includes both explanations and XAI resources tabs 121 | dashboardBody( 122 | # Make dashboard background color overflow automaticaly to content 123 | tags$head(tags$style(HTML('.content-wrapper { overflow: auto; }'))), 124 | # Use shinyjs to show/hide objects 125 | shinyjs::useShinyjs(), 126 | tabItems( 127 | 128 | # Explanations tab 129 | tabItem(tabName = "dashboard", 130 | 131 | fluidRow( 132 | hidden( 133 | div(id = "global", 134 | 135 | # Global: Feature importance 136 | box(title = p(id = "tab5", "Model Parts", 137 | tags$span(icon("info-circle"), id = "icon_feature_importance", style = "color: #C0C0C0;")), 138 | bsPopover("icon_feature_importance", 139 | title = "What is Model Parts?", 140 | content = paste("By using different methods it is able to measure how much models rely on given variables.", 141 | "Check \"Learn more about XAI\" tab for more!", sep = "
"), 142 | placement = "right", 143 | options = list(container = 'body')), 144 | background = "blue", 145 | solidHeader = TRUE, 146 | collapsible = TRUE, 147 | width = 6, 148 | height = 350, 149 | withSpinner(plotOutput("plot_fi", height = 260), hide.ui = FALSE)), 150 | 151 | # Global: Partial-Depencende plot 152 | box(title = p(id = "tab6", "Model Profile", 153 | tags$span(icon("info-circle"), id = "icon_model_profile", style = "color: #C0C0C0;")), 154 | bsPopover("icon_model_profile", 155 | title = "What is Model Profile?", 156 | content = paste('Partial Dependence is an expected value of Ceteris Paribus (Predict Profile) function across whole dataset.', 157 | "Check \"Learn more about XAI\" tab for more!", sep = "
"), 158 | placement = "right", 159 | options = list(container = 'body')), 160 | background = "blue", solidHeader = TRUE, 161 | collapsible = TRUE, 162 | width = 6, 163 | height = 350, 164 | column(width = 3, uiOutput("variable_pdp")), 165 | column(width = 9, withSpinner(plotOutput("plot_pdp", height = 260), hide.ui = FALSE))) 166 | ))), 167 | 168 | fluidRow( 169 | 170 | # Prediction & Model performance 171 | div(id = "model_perf", 172 | column(width = 4, 173 | box(title = p(id = "tab1", "Prediction"), 174 | background = "purple", 175 | solidHeader = TRUE, 176 | collapsible = TRUE, 177 | width = 800, 178 | height = 130, 179 | p({{{text_prediction}}}), 180 | withSpinner(uiOutput("text_pred"), hide.ui = FALSE)), 181 | box(title = p(id = "tab2", "Model Performance", 182 | tags$span(icon("info-circle"), id = "icon_performance", style = "color: #C0C0C0;")), 183 | bsPopover("icon_performance", 184 | title = "What is Model Performance?", 185 | content = paste('Below you can find one of sound methods used to measure model`s quality (respecting regression / classification division).', 186 | "Check \"Learn more about XAI\" tab for more!", sep = "
"), 187 | placement = "right", 188 | options = list(container = 'body')), 189 | background = "blue", 190 | solidHeader = TRUE, 191 | collapsible = TRUE, 192 | width = 800, 193 | height = NULL, 194 | withSpinner(plotOutput("plot_modelperf", height = 360), hide.ui = FALSE), 195 | htmlOutput("text_performance")) 196 | ) 197 | ), 198 | 199 | div(id = "local", 200 | 201 | # Local: breakdown/ shap values 202 | box(title = p(id = "tab3", "Predict Parts", 203 | tags$span(icon("info-circle"), id = "icon_predict_parts", style = "color: #C0C0C0;")), 204 | bsPopover("icon_predict_parts", 205 | title = "What is Predict Parts?", 206 | content = paste("Here you can find particular features values impact on the final prediction", 207 | "Check \"Learn more about XAI\" tab for more!", sep = "
"), 208 | placement = "right", 209 | options = list(container = 'body')), 210 | background = "purple", 211 | solidHeader = TRUE, 212 | collapsible = TRUE, 213 | width = 4, 214 | height = NULL, 215 | selectInput(inputId = "pptype", 216 | label = 'Type of variable attributions:', 217 | choices = c("SHAP" = "shap", "Break Down" = "break_down"), 218 | selected = "break_down"), 219 | withSpinner(plotOutput("plot_bd", height = 400), hide.ui = FALSE), 220 | textOutput("text_predictprofile")), 221 | 222 | # Local: Ceteris paribus 223 | box(title = p(id = "tab4", "Predict Profile", 224 | tags$span(icon("info-circle"), id = "icon_predict_profile", style = "color: #C0C0C0;")), 225 | bsPopover("icon_predict_profile", 226 | title = "What is Predict Profile?", 227 | content = paste("In this visualization you can find prediction response to specific feature alterations.", 228 | "Check \"Learn more about XAI\" tab for more!", sep = "
"), 229 | placement = "right", 230 | options = list(container = 'body')), 231 | background = "purple", 232 | solidHeader = TRUE, 233 | collapsible = TRUE, 234 | width = 4, 235 | height = NULL, 236 | uiOutput("variable_cp"), 237 | withSpinner(plotOutput("plot_cp", height = 400), hide.ui = FALSE), 238 | textOutput("text_ceterisparibus")) 239 | )), 240 | 241 | tags$footer(style = "width: 100%; background-color: #605ca8; text-align: center; color: white; font-size: 115%; 242 | padding: 5px 10px 1px 10px; border-radius: 10px; box-shadow: 5px 5px 15px -5px #444;", 243 | HTML('

App built with: xai2shiny

'), 244 | HTML(paste0("

Last update date: ", Sys.time(), "

"))) 245 | 246 | ), 247 | 248 | # XAI resources tab 249 | tabItem(tabName = "xai_resources", 250 | column(7, includeHTML("learn_more_about_xai.html")) 251 | ) 252 | ) 253 | ) 254 | ) 255 | 256 | 257 | 258 | server <- function(input, output, session) { 259 | 260 | # Setting up observation to predict on 261 | new_observation <- reactive({ 262 | obs <- {{{obs}}} 263 | nulls <- sapply(obs, function(x) length(x) == 0) 264 | obs[nulls] <- as.list(chosen_observation)[nulls] 265 | as.data.frame(obs) 266 | }) 267 | 268 | # Loading explainer, default is the first one passed to a generating function 269 | exp <- reactiveValues(data = explainer1) 270 | {{{explainers_reactive}}} 271 | 272 | # Calculating a prediction 273 | pred <- reactive({ 274 | round(predict(exp$data, new_observation()), 4) 275 | }) 276 | 277 | # Rendering DALEX text describing explanations performance 278 | output$text_pred <- renderUI({ strong(paste0(pred())) }) 279 | output$text_predictprofile <- renderText({ if("Text description" %in% input$selected_features) iBreakDown::describe(pp()) else "" }) 280 | output$text_ceterisparibus <- renderText({ if("Text description" %in% input$selected_features) ingredients::describe(cp()) else "" }) 281 | output$text_performance <- renderText({ 282 | if("Text description" %in% input$selected_features){ 283 | perf <- model_performance(exp$data) 284 | if(perf$type == "regression") { 285 | paste0("Performance measures for ", perf$type, 286 | ":
MSE: ", round(perf$measures$mse, 3), 287 | "
RMSE: ", round(perf$measures$rmse, 3), 288 | "
R2: ", round(perf$measures$r2, 3), 289 | "
MAD: ", round(perf$measures$mad, 3)) 290 | } else { 291 | paste0("Performance measures for ", perf$type, 292 | ":
Accuracy: ", round(perf$measures$accuracy, 3), 293 | "
AUC: ", round(perf$measures$auc, 3), 294 | "
Recall: ", round(perf$measures$recall, 3), 295 | "
Precision: ", round(perf$measures$precision, 3), 296 | "
F1: ", round(perf$measures$f1, 3)) 297 | } 298 | } else "" 299 | }) 300 | 301 | # Calculating XAI functionalities based on set observation 302 | pp <- reactive({ predict_parts(exp$data, new_observation(), type = input$pptype) }) 303 | cp <- reactive({ predict_profile(exp$data, new_observation(), input$cp1) }) 304 | 305 | # Plotting 306 | output$plot_bd <- renderPlot({ 307 | if(input$pptype == "shap"){ 308 | plot(pp()) + ggplot2::ggtitle("SHAP\n") 309 | } 310 | else{ 311 | plot(pp()) 312 | } 313 | }) 314 | 315 | output$plot_cp <- renderPlot({ 316 | plot(cp(), variables = input$cp1) + ggplot2::labs(subtitle="") 317 | }) 318 | 319 | output$plot_modelperf <- renderPlot({ 320 | perf <- model_performance(exp$data) 321 | if(perf$type == "regression"){ 322 | plot(perf) 323 | } 324 | else{ 325 | plot(perf, geom = "roc") 326 | } 327 | }) 328 | 329 | output$plot_fi <- renderPlot({ 330 | mp <- model_parts(exp$data) 331 | plot(mp) + ggplot2::labs(subtitle = "") 332 | }) 333 | 334 | output$plot_pdp <- renderPlot({ 335 | pdp <- model_profile(exp$data, variables = input$pdp1) 336 | plot(pdp) + ggplot2::labs(subtitle = "") 337 | }) 338 | 339 | output$variable_cp <- renderUI({ 340 | selectInput(inputId = "cp1", 341 | label = "The profiles will be calculated for:", 342 | choices = input$selected_columns, 343 | selected = input$selected_columns[1]) 344 | }) 345 | 346 | output$variable_pdp <- renderUI({ 347 | selectInput(inputId = "pdp1", 348 | label = "The profiles will be calculated for:", 349 | choices = input$selected_columns, 350 | selected = input$selected_columns[1]) 351 | }) 352 | 353 | # Logic based on selected features (local/global explanations, model performance, text description) 354 | observe({ 355 | if (is.null(input$selected_features) ){ 356 | shinyjs::hide("local") 357 | shinyjs::hide("global") 358 | shinyjs::hide("model_perf") 359 | } 360 | }) 361 | 362 | observeEvent(input$selected_features, { 363 | 364 | if("Local explanations" %in% input$selected_features) { 365 | shinyjs::show("local") 366 | } else { 367 | shinyjs::hide("local") 368 | } 369 | 370 | if("Global explanations" %in% input$selected_features) { 371 | shinyjs::show("global") 372 | } else { 373 | shinyjs::hide("global") 374 | } 375 | 376 | if("Model performance" %in% input$selected_features) { 377 | shinyjs::show("model_perf") 378 | } else { 379 | shinyjs::hide("model_perf") 380 | } 381 | }) 382 | 383 | # Rendering proper variables input on the sidebar 384 | output$vars_input <- renderUI({ 385 | 386 | selected_columns <- input$selected_columns 387 | 388 | lapply(1:length(selected_columns), function(i) { 389 | var_values <- data[, colnames(data) == selected_columns[i]] 390 | if(class(var_values) == 'factor') { 391 | selectInput(inputId = selected_columns[i], 392 | label = selected_columns[i], 393 | choices = levels(var_values), 394 | selected = chosen_observation[[selected_columns[i]]])} 395 | else { 396 | if(all(floor(var_values) == var_values)){ 397 | step <- 1 398 | } 399 | else{ 400 | step <- signif(min(abs(diff(unique(var_values))))) 401 | } 402 | sliderInput(inputId = selected_columns[i], 403 | label = selected_columns[i], 404 | min = round(min(var_values, na.rm = TRUE)), 405 | max = round(max(var_values, na.rm = TRUE)), 406 | step = step, 407 | value = chosen_observation[[selected_columns[i]]]) 408 | } 409 | }) 410 | }) 411 | } 412 | 413 | shinyApp(ui = ui, server = server) 414 | -------------------------------------------------------------------------------- /man/cloud_setup.Rd: 
-------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/cloud_setup.R 3 | \name{cloud_setup} 4 | \alias{cloud_setup} 5 | \title{Create DigitalOcean droplet setup to deploy \code{Shiny} apps} 6 | \usage{ 7 | cloud_setup(size = 1, ...) 8 | } 9 | \arguments{ 10 | \item{size}{initial RAM (in GB) for the droplet. Available values: 1, 2, 4, 8. You can always resize the droplet later on.} 11 | 12 | \item{...}{additional parameters passed to \code{analogsea::docklet_create}} 13 | } 14 | \description{ 15 | This function creates a new DigitalOcean droplet with Docker 19.03.12 on Ubuntu 18.04 with every required package and service to run xai2shiny applications in the cloud. 16 | } 17 | -------------------------------------------------------------------------------- /man/deploy_shiny.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/cloud_setup.R 3 | \name{deploy_shiny} 4 | \alias{deploy_shiny} 5 | \title{Deploy Shiny applications to the cloud} 6 | \usage{ 7 | deploy_shiny( 8 | droplet, 9 | directory, 10 | packages = "stats", 11 | port = 80, 12 | browse = TRUE, 13 | ssh_user = "root" 14 | ) 15 | } 16 | \arguments{ 17 | \item{droplet}{the droplet's id or the droplet object. The droplet's id and IP can be checked by running \code{analogsea::droplets()}.} 18 | 19 | \item{directory}{path to the directory containing the \code{Shiny} application (\code{xai2shiny} application).} 20 | 21 | \item{packages}{vector of packages (package names) that are needed for the application to run.} 22 | 23 | \item{port}{port at which the application will run.} 24 | 25 | \item{browse}{a boolean indicating whether to open the app in the browser after deploying} 26 | 27 | \item{ssh_user}{the name of the ssh user; should NOT be modified when using the default xai2shiny cloud_setup} 28 | } 29 | \description{ 30 | This function deploys a selected \code{Shiny} application (\code{xai2shiny} application) to the created droplet. 31 | } 32 | -------------------------------------------------------------------------------- /man/xai2shiny.Rd: -------------------------------------------------------------------------------- 1 | % Generated by roxygen2: do not edit by hand 2 | % Please edit documentation in R/xai2shiny.R 3 | \name{xai2shiny} 4 | \alias{xai2shiny} 5 | \title{Create Shiny app from an explainer} 6 | \usage{ 7 | xai2shiny( 8 | ..., 9 | directory = NULL, 10 | selected_variables = NULL, 11 | run = TRUE, 12 | override = FALSE, 13 | verbose = TRUE 14 | ) 15 | } 16 | \arguments{ 17 | \item{...}{one or more explainers created with the \code{DALEX::explain()} function. They can be switched in the top right corner of the application.} 18 | 19 | \item{directory}{path to the directory the application files will be created in. If \code{NULL}, the application will be created in a temporary directory.} 20 | 21 | \item{selected_variables}{variables chosen for application start-up. More can be added later through an input in the application interface.} 22 | 23 | \item{run}{whether to run the Shiny application immediately} 24 | 25 | \item{override}{whether to override the application directory if it already exists} 26 | 27 | \item{verbose}{whether to log the function's internal steps to the console} 28 | } 29 | \description{ 30 | This function creates a \code{Shiny} application for explainers, which are adapters for models created using the \code{DALEX} package.
31 | The application contains model performance and explanations that allow the model to be fully explored. 32 | } 33 | \examples{ 34 | # Create models 35 | library("ranger") 36 | library("DALEX") 37 | model_rf <- ranger(survived ~ ., 38 | data = titanic_imputed, 39 | classification = TRUE, 40 | probability = TRUE) 41 | model_glm <- glm(survived ~ ., 42 | data = titanic_imputed, 43 | family = "binomial") 44 | 45 | # Create DALEX explainers 46 | explainer_rf <- explain(model_rf, 47 | data = titanic_imputed[,-8], 48 | y = titanic_imputed$survived) 49 | 50 | explainer_glm <- explain(model_glm, 51 | data = titanic_imputed[,-8], 52 | y = titanic_imputed$survived) 53 | 54 | # Create and run the application 55 | 56 | \dontrun{ 57 | xai2shiny(explainer_rf, explainer_glm) 58 | } 59 | } 60 | -------------------------------------------------------------------------------- /tests/testthat.R: -------------------------------------------------------------------------------- 1 | library(testthat) 2 | library(shinytest) 3 | library(xai2shiny) 4 | 5 | test_check("xai2shiny") 6 | -------------------------------------------------------------------------------- /tests/testthat/test-cloud_setup.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("The size parameter error works",{ 3 | expect_error(cloud_setup(size = 3)) 4 | expect_error(cloud_setup(size = NA)) 5 | expect_error(cloud_setup(size = "big")) 6 | }) 7 | 8 | test_that("The path parameter errors work",{ 9 | expect_error(deploy_shiny()) 10 | expect_error(deploy_shiny(droplet = 1)) 11 | }) 12 | 13 | 14 | test_that("The droplet parameter errors work",{ 15 | expect_error(deploy_shiny(path = "./xai2shiny")) 16 | expect_error(deploy_shiny(droplet = "name", path = "./xai2shiny")) 17 | }) 18 | -------------------------------------------------------------------------------- /tests/testthat/test-helpers.R: -------------------------------------------------------------------------------- 1 | 2 | test_that("cn function works properly",{ 3 | x <- "test" 4 | y <- "" 5 | expect_equal(cn(x,y),"") 6 | x <- "test" 7 | y <- "it" 8 | expect_equal(cn(x,y),"testit") 9 | }) 10 | -------------------------------------------------------------------------------- /tests/testthat/test-xai2shiny.R: -------------------------------------------------------------------------------- 1 | library(shinytest) 2 | 3 | shinytest::installDependencies() 4 | 5 | data <- DALEX::titanic_imputed 6 | 7 | mod_glm <- glm(survived ~ ., data, family = "binomial") 8 | explainer_glm <- DALEX::explain(mod_glm, data = data[,-8], y=data$survived) 9 | 10 | xai2shiny(explainer_glm, directory = paste0(getwd(), '/xai2shiny'), run = FALSE) 11 | 12 | test_that("All files are created",{ 13 | expect_true("xai2shiny" %in% list.files()) 14 | expect_true("app.R" %in% list.files("xai2shiny")) 15 | expect_true("exp1.rds" %in% list.files("xai2shiny")) 16 | }) 17 | 18 | app <- ShinyDriver$new("xai2shiny/") 19 | 20 | test_that("The application runs",{ 21 | output_pred <- app$getValue(name = "text_pred") 22 | pred_base <- substr(output_pred, 9, 12) 23 | expect_equal(pred_base, "0.08") 24 | 25 | output_bd_description <- app$getValue(name = "text_predictprofile") 26 | expect_equal(output_bd_description, "") 27 | 28 | output_cp_description <- app$getValue(name = "text_ceterisparibus") 29 | expect_equal(output_cp_description, "") 30 | 31 | output_performance <- app$getValue(name = "text_performance") 32 | expect_equal(output_performance, "") 33 | 34 | # Modifying the text description checkbox 35 | 
app$setInputs(selected_features = c("Local explanations", "Model performance", "Text description")) 36 | 37 | output_bd_description <- app$getValue(name = "text_predictprofile") 38 | bd_description_base <- substr(output_bd_description, 1, 2) 39 | expect_equal(bd_description_base, "Lm") 40 | 41 | output_cp_description <- app$getValue(name = "text_ceterisparibus") 42 | cp_description_base <- substr(output_cp_description, 1, 3) 43 | expect_equal(cp_description_base, "For") 44 | 45 | output_performance <- app$getValue(name = "text_performance") 46 | performance_base <- substr(output_performance, 1, 11) 47 | expect_equal(performance_base, "Performance") 48 | }) 49 | 50 | app$stop() 51 | 52 | test_that("The selected_variables parameter functions properly",{ 53 | expect_true({ 54 | xai2shiny(explainer_glm, selected_variables = "age", directory = paste0(getwd(), '/xai2shiny_test2'), run = FALSE) 55 | TRUE 56 | }) 57 | }) 58 | 59 | test_that("Two explainers with different datasets will produce an error",{ 60 | explainer_glm2 <- DALEX::explain(mod_glm, data = data[,c(-7,-8)], y=data$survived) 61 | expect_error({ 62 | xai2shiny(explainer_glm, explainer_glm2, directory = paste0(getwd(), '/xai2shiny_test3'), run = FALSE) 63 | }) 64 | }) 65 | 66 | test_that("Two model packages load properly",{ 67 | explainer_glm$model_info$package <- c("stats", "base") 68 | expect_true({ 69 | xai2shiny(explainer_glm, directory = paste0(getwd(), '/xai2shiny_test4'), run = FALSE) 70 | TRUE 71 | }) 72 | }) 73 | 74 | test_that("Models whose package name contains a space but which were created using H2O load properly",{ 75 | explainer_glm$model_info$package <- "H2O with space" 76 | expect_true({ 77 | xai2shiny(explainer_glm, directory = paste0(getwd(), '/xai2shiny_test5'), run = FALSE) 78 | TRUE 79 | }) 80 | }) 81 | 82 | test_that("Other models whose package name contains a space produce an error",{ 83 | explainer_glm$model_info$package <- "stats with space" 84 | expect_error({ 85 | xai2shiny(explainer_glm, directory = paste0(getwd(), '/xai2shiny_test6'), run = FALSE) 86 | }) 87 | }) 88 | 89 | 90 | 91 | unlink("xai2shiny", recursive = TRUE) 92 | unlink("xai2shiny_test2", recursive = TRUE) 93 | unlink("xai2shiny_test3", recursive = TRUE) 94 | unlink("xai2shiny_test4", recursive = TRUE) 95 | unlink("xai2shiny_test5", recursive = TRUE) 96 | unlink("xai2shiny_test6", recursive = TRUE) 97 | -------------------------------------------------------------------------------- /vignettes/.gitignore: -------------------------------------------------------------------------------- 1 | *.html 2 | *.R 3 | -------------------------------------------------------------------------------- /vignettes/vignette_titanic_example.Rmd: -------------------------------------------------------------------------------- 1 | --- 2 | title: "xai2shiny introduction: Titanic dataset with multiple models" 3 | author: Adam Rydelek, Mateusz Polakowski 4 | date: "`r Sys.Date()`" 5 | output: rmarkdown::html_vignette 6 | vignette: > 7 | %\VignetteIndexEntry{vignette_titanic_rf_glm_h2o} 8 | %\VignetteEngine{knitr::rmarkdown} 9 | %\VignetteEncoding{UTF-8} 10 | --- 11 | 12 | ```{r setup, include = FALSE} 13 | knitr::opts_chunk$set( 14 | collapse = FALSE, 15 | comment = "#>", 16 | warning = FALSE, 17 | message = FALSE 18 | ) 19 | ``` 20 | 21 | ```{r} 22 | library("xai2shiny") 23 | ``` 24 | 25 | # Titanic dataset 26 | 27 | The `DALEX` package provides an imputed version of a common classification-oriented dataset - `titanic` (the data was copied from the `stablelearner` package). 
28 | Let's see a sample of observations: 29 | 30 | ```{r} 31 | library("DALEX") 32 | head(titanic_imputed, 3) 33 | ``` 34 | 35 | # Models to compare 36 | 37 | The package allows passing multiple models from different packages to the main function, so why not create some: 38 | 39 | ```{r} 40 | library("ranger") 41 | 42 | model_rf <- ranger(survived ~ ., 43 | data = titanic_imputed, 44 | classification = TRUE, 45 | probability = TRUE) 46 | model_glm <- glm(survived ~ ., 47 | data = titanic_imputed, 48 | family = "binomial") 49 | ``` 50 | 51 | # Explainers based on models 52 | 53 | In fact, the `xai2shiny` function accepts only explainers, i.e. special `DALEX` objects based on the provided models. Let's create all the necessary explainers: 54 | 55 | ```{r results='hide'} 56 | explainer_rf <- explain(model_rf, 57 | data = titanic_imputed[,-8], 58 | y = titanic_imputed$survived) 59 | explainer_glm <- explain(model_glm, 60 | data = titanic_imputed[,-8], 61 | y = titanic_imputed$survived) 62 | ``` 63 | 64 | # Shiny application 65 | 66 | After that, the only thing left to do is to generate the app and run it: 67 | 68 | ```{r eval=FALSE} 69 | xai2shiny(explainer_rf, explainer_glm, directory = './', run = TRUE) 70 | ``` 71 | 72 | # Cloud deployment 73 | 74 | Further cloud deployment can be done in two simple steps (see the [README example](https://github.com/ModelOriented/xai2shiny/blob/master/README.md) for details): 75 | 76 | ```{r eval=FALSE} 77 | xai2shiny::cloud_setup() 78 | my_droplet_id <- 1 # Check your DigitalOcean account and set the proper droplet ID 79 | deploy_shiny(droplet = my_droplet_id, directory = './xai2shiny', packages = "ranger") 80 | ``` 81 | 82 | # External package's model 83 | 84 | As `xai2shiny` covers as many external model sources as `DALEX` and `DALEXtra` do, let's consider the widely known `mlr3` package: 85 | 86 | ```{r eval=FALSE} 87 | library("DALEXtra") 88 | library("mlr3") 89 | library("mlr3learners") 90 | 91 | titanic <- titanic_imputed 92 | titanic[, 'survived'] <- as.factor(titanic[, 'survived']) 93 | 94 | task <- TaskClassif$new(id = 'titanic', 95 | backend = titanic, 96 | target = "survived", 97 | positive = '1') 98 | 99 | learner <- mlr_learners$get('classif.log_reg') 100 | learner$predict_type = "prob" 101 | 102 | train_set = sample(task$nrow, 0.8 * task$nrow) 103 | test_set = setdiff(seq_len(task$nrow), train_set) 104 | 105 | learner$train(task, row_ids = train_set) 106 | explainer_mlr <- explain_mlr3(learner, 107 | data = titanic[,-8], 108 | y = as.numeric(as.character(titanic$survived)), 109 | label = "mlr3 model") 110 | xai2shiny(explainer_mlr, directory = "./", run = FALSE) 111 | ``` 112 | -------------------------------------------------------------------------------- /xai2shiny.Rproj: -------------------------------------------------------------------------------- 1 | Version: 1.0 2 | 3 | RestoreWorkspace: Default 4 | SaveWorkspace: Default 5 | AlwaysSaveHistory: Default 6 | 7 | EnableCodeIndexing: Yes 8 | UseSpacesForTab: Yes 9 | NumSpacesForTab: 2 10 | Encoding: UTF-8 11 | 12 | RnwWeave: Sweave 13 | LaTeX: pdfLaTeX 14 | 15 | AutoAppendNewline: Yes 16 | StripTrailingWhitespace: Yes 17 | 18 | BuildType: Package 19 | PackageUseDevtools: Yes 20 | PackageInstallArgs: --no-multiarch --with-keep.source 21 | PackageRoxygenize: rd,collate,namespace,vignette 22 | --------------------------------------------------------------------------------