├── .github
│   └── workflows
│       ├── Book.yaml
│       └── link-checker.yml
├── .gitignore
├── CC-BY-NC-SA-4.0.txt
├── CHANGELOG.md
├── DESCRIPTION
├── LICENSE.txt
├── MIT-License.txt
├── README.md
├── cover
│   ├── cutout-borders.jpg
│   ├── paperback.pdf
│   └── paperback.xcf
├── data
│   ├── TubeSpam.csv
│   ├── bike-sharing-daily.csv
│   ├── bike.RData
│   ├── bike.csv
│   ├── cached-anchors-edge.RDS
│   ├── cached-anchors.RDS
│   ├── cached-sbrl-bike.RDS
│   ├── cached-sbrl-penguins.RDS
│   ├── influence-df.RData
│   ├── penguins.csv
│   ├── speed_dating_data.csv
│   └── ycomments.RData
├── initialize-gh-pages.sh
├── manuscript
│   ├── .gitignore
│   ├── Makefile
│   ├── _box-shap-book.qmd
│   ├── _output.yml
│   ├── _quarto-ebook.yml
│   ├── _quarto-editing.yml
│   ├── _quarto-kindle.yml
│   ├── _quarto-pdf.yml
│   ├── _quarto-print.yml
│   ├── _quarto.yml
│   ├── _setup.qmd
│   ├── acknowledgements.qmd
│   ├── adversarial.qmd
│   ├── ale.qmd
│   ├── anchors.qmd
│   ├── ceteris-paribus.qmd
│   ├── cite.qmd
│   ├── cnn-features.qmd
│   ├── counterfactual.qmd
│   ├── course.qmd
│   ├── css
│   │   ├── cookieconsent.min.css
│   │   └── style.css
│   ├── data.qmd
│   ├── decomposition.qmd
│   ├── detecting-concepts.qmd
│   ├── evaluation.qmd
│   ├── extend-lm.qmd
│   ├── feature-importance.qmd
│   ├── future.qmd
│   ├── global.qmd
│   ├── goals.qmd
│   ├── grateful-refs.bib
│   ├── html
│   │   ├── epub.css
│   │   ├── purchase.html
│   │   └── style.scss
│   ├── ice.qmd
│   ├── images
│   │   ├── a484.jpg
│   │   ├── access-denied.jpg
│   │   ├── access-denied.xcf
│   │   ├── activation-optim.jpg
│   │   ├── adversarial-1pixel.jpg
│   │   ├── adversarial-ostrich.jpg
│   │   ├── adversarial-panda.jpg
│   │   ├── adversarial-toaster.jpg
│   │   ├── adversarial-turtle.jpg
│   │   ├── agnostic-black-box.jpg
│   │   ├── amazon-freq-bought-together.jpg
│   │   ├── anchors-process.jpg
│   │   ├── anchors-visualization.jpg
│   │   ├── anchors.jpg
│   │   ├── anchors1-1.jpg
│   │   ├── anchors2-1.jpg
│   │   ├── anchors3-1.jpg
│   │   ├── anchors4-1.jpg
│   │   ├── arch-compare.jpg
│   │   ├── big-picture.jpg
│   │   ├── big-picture.xcf
│   │   ├── black-box.jpg
│   │   ├── burnt-earth.jpg
│   │   ├── burnt-earth.xcf
│   │   ├── by-nc-sa.jpg
│   │   ├── cfexp-nsgaII.jpg
│   │   ├── cheatsheet-logistic-regression.jpg
│   │   ├── cheatsheet-shap.jpg
│   │   ├── cnn features-1.xcf
│   │   ├── cnn-features.jpg
│   │   ├── cooks-analyzed-1.jpg
│   │   ├── cover-shap-book.jpg
│   │   ├── cover-sidepanel.jpg
│   │   ├── cover-sml-science.jpg
│   │   ├── cover.jpg
│   │   ├── cp-ice-pdp.jpg
│   │   ├── culmen_depth.jpg
│   │   ├── dissection-dog-exemplary.jpg
│   │   ├── dissection-dogs.jpeg
│   │   ├── dissection-network.jpg
│   │   ├── doctor-840127_1280.xcf
│   │   ├── doge-stuck.jpg
│   │   ├── doge-stuck.xcf
│   │   ├── favicon.jpg
│   │   ├── feature-visualization-units.jpg
│   │   ├── graph.jpg
│   │   ├── handwritten-prototypes.jpg
│   │   ├── hospital.jpg
│   │   ├── iml.jpg
│   │   ├── interpretable-box.jpg
│   │   ├── learn-one-rule.jpg
│   │   ├── learner.jpg
│   │   ├── lime-images-package-example-1.jpg
│   │   ├── lter_penguins.jpg
│   │   ├── original-images-classification.jpg
│   │   ├── pen.jpg
│   │   ├── potato-chips.jpg
│   │   ├── programing-ml.jpg
│   │   ├── ramen.jpg
│   │   ├── rulefit.jpg
│   │   ├── shap-clustering.jpg
│   │   ├── shap-dependence-interaction.jpg
│   │   ├── shap-dependence.jpg
│   │   ├── shap-explain-1.jpg
│   │   ├── shap-explain-2.jpg
│   │   ├── shap-importance-extended.jpg
│   │   ├── shap-importance.jpg
│   │   ├── shap-simplified-features.jpg
│   │   ├── shap-superpixel.jpg
│   │   ├── shapl-importance.jpg
│   │   ├── shapley-coalitions.jpg
│   │   ├── shapley-instance-intervention.jpg
│   │   ├── shapley-instance.jpg
│   │   ├── smoothgrad.jpg
│   │   ├── specific-black-box.jpg
│   │   ├── spheres.jpg
│   │   ├── taxonomy.jpg
│   │   ├── tcav.jpg
│   │   ├── trippy.jpg
│   │   ├── units.jpg
│   │   └── vanilla.jpg
│   ├── impressum.qmd
│   ├── index.qmd
│   ├── influential.qmd
│   ├── interaction.qmd
│   ├── interpretability.qmd
│   ├── intro.qmd
│   ├── javascript
│   │   └── cookieconsent.min.js
│   ├── latex
│   │   ├── before_body.tex
│   │   └── preamble.tex
│   ├── lime.qmd
│   ├── limo.qmd
│   ├── lofo.qmd
│   ├── logistic.qmd
│   ├── math-terms.qmd
│   ├── neural-networks.qmd
│   ├── overview.qmd
│   ├── pdp.qmd
│   ├── pixel-attribution.qmd
│   ├── proto.qmd
│   ├── r-packages.qmd
│   ├── references.bib
│   ├── references.qmd
│   ├── rulefit.qmd
│   ├── rules.qmd
│   ├── shap.qmd
│   ├── shapley.qmd
│   ├── storytime.qmd
│   ├── translations.qmd
│   ├── tree.qmd
│   └── what-is-machine-learning.qmd
├── pkg
│   └── sbrl_1.2.tar.gz
├── renv.lock
└── scripts
    ├── dl-feature-attribution
    │   ├── activation-maximization.py
    │   ├── edge-detection.py
    │   ├── feature-attribution-dl.py
    │   ├── requirements.txt
    │   ├── utils.py
    │   └── utils_imagenet.py
    ├── get-SpamTube-dataset.R
    ├── get-bike-sharing-dataset.R
    ├── grad-cam
    │   ├── .gitignore
    │   ├── keras-vis.py
    │   └── requirements.txt
    ├── imagenet_classifier.R
    ├── lime.ipynb
    ├── mmd
    │   └── MMD-critic
    │       ├── .gitignore
    │       ├── Helper.py
    │       ├── LICENSE
    │       ├── README
    │       ├── classify.py
    │       ├── data.py
    │       ├── mmd.py
    │       └── run_digits.py
    ├── redirect.py
    └── shap
        ├── .gitignore
        ├── requirements.txt
        └── shap-notebook.ipynb
/.github/workflows/Book.yaml:
--------------------------------------------------------------------------------
1 | on:
2 | push:
3 | branches:
4 | - main
5 | - master
6 | pull_request:
7 | branches:
8 | - main
9 | - master
10 |
11 | name: Build and deploy book
12 |
13 | jobs:
14 | build:
15 | runs-on: ubuntu-latest
16 | timeout-minutes: 180
17 |
18 | env:
19 | GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }}
20 | R_KNITR_OPTIONS: "knitr.chunk.tidy=TRUE"
21 |
22 | steps:
23 | - name: Checkout Repo
24 | uses: actions/checkout@v2
25 |
26 | - name: Install Quarto
27 | uses: quarto-dev/quarto-actions/setup@v2
28 |
29 | - name: Install R
30 | uses: r-lib/actions/setup-r@v2
31 | with:
32 | r-version: '4.4.2'
33 |
34 | - name: Configure Java for R
35 | run: |
36 | sudo R CMD javareconf
37 |
38 | - name: Install OS dependencies
39 | run: |
40 | sudo apt-get update
41 | sudo apt-get install -y \
42 | libcurl4-openssl-dev \
43 | libharfbuzz-dev \
44 | libfribidi-dev \
45 | libcairo2-dev \
46 | libjpeg-dev \
47 | libgsl-dev
48 |
49 | - name: Install R Dependencies
50 | uses: r-lib/actions/setup-renv@v2
51 | with:
52 | cache-version: 1
53 |
54 | - name: Install tinytex
55 | run: |
56 | quarto install tinytex
57 |
58 |
59 | - name: Publish to gh-pages
60 | uses: quarto-dev/quarto-actions/publish@v2
61 | with:
62 | target: gh-pages
63 | path: manuscript/
64 |
65 |
66 |
--------------------------------------------------------------------------------
/.github/workflows/link-checker.yml:
--------------------------------------------------------------------------------
1 | on:
2 | repository_dispatch:
3 | workflow_dispatch:
4 | schedule:
5 | - cron: "00 18 1 * *"
6 |
7 | jobs:
8 | linkChecker:
9 | runs-on: ubuntu-latest
10 | steps:
11 | - uses: actions/checkout@v3
12 | with:
13 | ref: "gh-pages"
14 |
15 | - name: Link Checker
16 | id: lychee
17 | uses: lycheeverse/lychee-action@v1.4.1
18 | with:
19 | args: --verbose --no-progress **/*.html
20 | env:
21 | GITHUB_TOKEN: ${{secrets.GITHUB_TOKEN}}
22 |
23 | - name: Create Issue From File
24 |       if: steps.lychee.outputs.exit_code != 0
25 | uses: peter-evans/create-issue-from-file@v3
26 | with:
27 | title: Link Checker Report
28 | content-filepath: ./lychee/out.md
29 | labels: report, automated issue
30 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # History files
2 | .Rhistory
3 | .Rapp.history
4 |
5 | # Session Data files
6 | .RData
7 | .Rbuildignore
8 |
9 | # Example code in package build process
10 | *-Ex.R
11 |
12 | # Output files from R CMD build
13 | /*.tar.gz
14 |
15 | # Output files from R CMD check
16 | /*.Rcheck/
17 |
18 | # RStudio files
19 | .Rproj.user/
20 |
21 | # produced vignettes
22 | vignettes/*.html
23 | vignettes/*.pdf
24 |
25 | # OAuth2 token, see https://github.com/hadley/httr/releases/tag/v0.3
26 | .httr-oauth
27 |
28 | # knitr and R markdown default cache directories
29 | /*_cache/
30 | /cache/
31 |
32 | # Temporary files created by R markdown
33 | *.utf8.md
34 | *.knit.md
35 | .Rproj.user
36 |
37 | # The book files
38 | manuscript/_book/
39 | manuscript/_bookdown_files/
40 | # temporary ones
41 | manuscript/interpretable-ml*
42 | manuscript/images/*.pdf
43 |
44 | # Vim swap files
45 | *.swp
46 |
47 | # LaTeX files
48 | *.aux
49 | *.tex
50 | *.log
51 |
52 | xgboost.model
53 | .DS_Store
54 |
55 | # leanpub files
56 | manuscript/*.md
57 | manuscript/cache
58 |
59 | kindlegen
60 | manuscript/images/*.png
61 |
62 | # From SBRL package usage
63 | *.out
64 | *.label
65 | packrat/lib*/
66 |
67 | # Gets built automatically, so ignore for repo
68 | manuscript/11-references.Rmd
69 |
70 | # From Deep Learning scripts
71 | scripts/dl-feature-attribution/dl/
72 | scripts/dl-feature-attribution/*.png
73 |
74 | .ipynb_checkpoints
75 |
76 |
77 | # vim stuff
78 | *.swo
79 |
80 |
--------------------------------------------------------------------------------
/CHANGELOG.md:
--------------------------------------------------------------------------------
1 | # Changelog
2 |
3 | ## v3.0 (2025-03-11) [html; print; ebook]
4 |
5 | - Renamed chapters to reflect the more established names:
6 | - Local Surrogate (LIME) -> LIME
7 | - Global Surrogate -> Surrogate Models
8 | - SHAP (SHapley Additive exPlanations) -> SHAP
9 | - Pixel Attribution (Saliency Maps) -> Saliency Maps
10 | - Switched the order of global and local: Now local model-agnostic methods come before global methods.
11 | - Unified and improved the examples:
12 | - Train models just once
13 | - Measure and report performance (in Data chapter)
14 | - Study correlations and mutual information
15 |   - Made the examples in each chapter much more consistent
16 | - Greatly shortened the text between the first word and the first method:
17 |   - Scope of interpretability: Now part of the Overview chapter.
18 |   - Removed the preface by the author and moved the relevant parts into About the Book and the introduction
19 |   - Moved the chapters "Terminology" and "What is Machine Learning" into the appendix
20 | - Moved short stories to the end of the book
21 | - Combined all the intro texts (e.g. global methods) into an overview chapter
22 | - New chapters:
23 | - Methods Overview
24 | - Goals of interpretability
25 | - Ceteris Paribus
26 | - LOFO
27 | - Updated lots of references (and moved them from footnotes to proper bibtex references).
28 | - Made math more consistent
29 | - Improved the captions of the figures and referenced them from within the text.
30 | - Use Palmer Penguins for classification examples. This replaces the examples with the cancer dataset. There was an error in how I coded the outcome, so all interpretations were reversed. Instead of reversing the labels, I decided to replace the data, since I no longer think it's a good fit for the book. The penguin data examples are more accessible and less sensitive.
31 | - Deleted chapter "Other interpretable models": it only contained naive Bayes and knn, but raised more questions than it answered.
32 | - Replaced contribute chapter with links to repo
33 | - Smaller errors fixed:
34 |   - In chapter Learned Features -> Network Dissection -> Step 2: Retrieve network activations, the quantile level was corrected to not depend on x, i.e., T_k instead of T_k(x).
35 |
36 |
37 | ## v2.0 (2022-03-04)
38 |
39 | - Added "Preface by the Author" chapter
40 | - Started section on neural network interpretation
41 | - Added chapter on feature visualization
42 | - Added SHAP chapter
43 | - Added Anchors chapter
44 | - Fixed error in logistic regression chapter: Logistic regression was predicting class "Healthy", but interpretation in the text was for class "Cancer". Now regression weights have the correct sign.
45 | - Renamed Feature Importance chapter to "Permutation Feature Importance"
46 | - Added chapter about functional decomposition
47 | - Rearranged interpretation methods by local, global and deep learning (before: model-agnostic, example-based, deep learning)
48 | - Math Errata:
49 | - Chapter 4.3 GLM, GAM and more: Logistic regression uses logit, not logistic function as link function.
50 | - Chapter Linear models: Formula for adjusted R-squared was corrected (twice)
51 | - Chapter Decision Rules: Newly introduced mix up between Healthy and Cancer in OneR chapter was fixed.
52 |   - Chapter RuleFit: The importance of the linear term in the total importance formula was indexed with an $l$ instead of $j$.
53 | - Chapter Influential Instances: removed $(1-\epsilon)$ from model parameter update.
54 | - Updated images
55 |
56 | ## v1.1 (2019-03-23)
57 | - Fixed wrong index in Cook's distance summation (i -> j)
58 | - Fixed boxplot formula (1.5 instead of 1.58)
59 | - Changed to colorblind-friendly color palettes (viridis)
60 | - Made sure plots work in black and white as well
61 | - Extended counterfactual chapter with MOC (by Susanne Dandl)
62 |
63 | ## v1.0 (2019-02-21)
64 | - Extensive proofreading and polishing
65 |
66 | ## v0.7 (2018-11-21)
67 | - Renamed Definitions chapter to Terminology
68 | - Added mathematical notation to Terminology (former Definitions) chapter
69 | - Added LASSO example
70 | - Restructured lm chapter and added pros/cons
71 | - Renamed "Criteria of Interpretability Methods" to "Taxonomy of Interpretability Methods"
72 | - Added advantages and disadvantages of logistic regression
73 | - Added list of references at the end of book
74 | - Added images to the short stories
75 | - Added drawback of the Shapley value: features have to be independent
76 | - Added tree decomposition and feature importance to tree chapter
77 | - Improved explanation of individual prediction in lm
78 | - Added "What's Wrong With my Dog" example to Adversarial Examples
79 | - Added links to data files and pre-processing R scripts
80 |
81 | ## v0.6 (2018-11-02)
82 | - Added chapter on accumulated local effects plots
83 | - Added some advantages and disadvantages to pdps
84 | - Added chapter on extending linear models
85 | - Fixed missing square in the Friedman H-statistic
86 | - Added discussion about training vs. test data in feature importance chapter
87 | - Improved the definitions, also added some graphics
88 | - Added an example with a categorical feature to PDP
89 |
90 | ## v0.5 (2018-08-14)
91 | - Added chapter on influential instances
92 | - Added chapter on Decision Rules
93 | - Added chapter on adversarial examples
94 | - Added chapter on prototypes and criticisms
95 | - Added chapter on counterfactual explanations
96 | - Added section on LIME images (by Verena Haunschmid)
97 | - Added section on when we don't need interpretability
98 | - Renamed chapter: Human-style Explanations -> Human-friendly Explanations
99 |
100 | ## v0.4 (2018-05-23)
101 | - Added chapter on global surrogate models
102 | - Added improved Shapley pictograms
103 | - Added acknowledgements chapter
104 | - Added feature interaction chapter
105 | - Improved example in partial dependence plot chapter
106 | - The weights in the LIME text chapter were shown with the wrong words. This has been fixed.
107 | - Improved introduction text
108 | - Added chapter about the future of interpretability
109 | - Added Criteria for Interpretability Methods
110 |
111 | ## v0.3 (2018-04-24)
112 | - Reworked the Feature Importance Chapter
113 | - Added third short story
114 | - Removed xkcd comic
115 | - Merged introduction and about the book chapters
116 | - Added pros & cons to pdp and ice chapters
117 | - Started using the iml package for plots in ice and pdp
118 | - Restructured the book files for Leanpub
119 | - Added a cover
120 | - Added some CSS for nicer formatting
121 |
122 | ## v0.2 (2018-02-13)
123 | - Added chapter about Shapley value explanations
124 | - Added short story chapters
125 | - Added donation links in Preface
126 | - Reworked RuleFit with examples and theory.
127 | - Interpretability chapter extended
128 | - Added chapter on human-style explanations
129 | - Made it easier to collaborate: Travis checks whether the book can be rendered for pull requests
130 |
131 | ## v0.1 (2017-12-03)
132 | - First release of the Interpretable Machine Learning book
133 |
--------------------------------------------------------------------------------
/DESCRIPTION:
--------------------------------------------------------------------------------
1 | Package: iml.book
2 | Title: Interpretable machine learning
3 | Version: 0.0.1
4 | Depends:
5 | knitr
6 | Imports:
7 | arules,
8 | bookdown,
9 | Cairo,
10 | caret,
11 | ceterisParibus,
12 | data.table,
13 | dplyr,
14 | DT,
15 | e1071,
16 | ggplot2,
17 | grid,
18 | gridExtra,
19 | iml,
20 | interactions,
21 | jpeg,
22 | jtools,
23 | kableExtra,
24 | knitr,
25 | latex2exp,
26 | lubridate,
27 | memoise,
28 | mgcv,
29 | mlbench,
30 | mlr,
31 | numDeriv,
32 | OneR,
33 | party,
34 | partykit,
35 | patchwork,
36 | png,
37 | pre,
38 | randomForest,
39 | readr,
40 | rjson,
41 | roxygen2,
42 | rpart,
43 | palmerpenguins,
44 | R.utils,
45 | RWeka,
46 | shiny,
47 | svglite,
48 | tidyr,
49 | tm,
50 | viridis,
51 | xgboost,
52 | yaImpute
53 | Remotes: christophM/interpretable-ml-book
54 |
55 |
--------------------------------------------------------------------------------
/LICENSE.txt:
--------------------------------------------------------------------------------
1 | The Interpretable Machine Learning book project (c) by Christoph Molnar uses the following two licenses:
2 |
3 | * Attribution-NonCommercial-ShareAlike 4.0 International (CC BY-NC-SA 4.0) for the rendered book. The full terms are in the file CC-BY-NC-SA-4.0.txt in the root of this repository.
4 | * MIT License for the code that produces the book. The full terms are in the file MIT-License.txt in the root of this repository.
5 |
--------------------------------------------------------------------------------
/MIT-License.txt:
--------------------------------------------------------------------------------
1 | Copyright 2019 Christoph Molnar
2 |
3 | Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
4 |
5 | The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
6 |
7 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
8 |
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Interpretable Machine Learning
2 |
3 | This is the repository for the book Interpretable Machine Learning -- A Guide for Making Black Box Models Explainable.
4 |
5 | 
6 |
7 | You can read the book in the following ways:
8 |
9 | - [Online, for free](https://christophm.github.io/interpretable-ml-book/)
10 | - [Buy the ebook](https://leanpub.com/interpretable-machine-learning/)
11 | - [Buy the paperback](https://bookgoodies.com/a/3911578032)
12 |
13 | ## Summary
14 |
15 | You can find the current version of the book here: https://christophm.github.io/interpretable-ml-book/
16 |
17 | This book is about interpretable machine learning. Machine learning is being built into many products and processes of our daily lives, yet decisions made by machines don't automatically come with an explanation. An explanation increases the trust in the decision and in the machine learning model. As the programmer of an algorithm, you want to know whether you can trust the learned model. Did it learn generalizable features? Or are there some odd artifacts in the training data that the algorithm picked up? This book gives an overview of techniques that can be used to make black boxes as transparent as possible and to explain decisions. In the first chapters, algorithms that produce simple, interpretable models are introduced, together with instructions on how to interpret the output. The later chapters focus on analyzing complex models and their decisions.
18 | In an ideal future, machines will be able to explain their decisions and make the transition into an algorithmic age more human. This book is recommended for machine learning practitioners, data scientists, statisticians, and also for stakeholders deciding on the use of machine learning and intelligent algorithms.
19 |
20 | ## Changelog
21 |
22 | See [CHANGELOG.md](CHANGELOG.md) for version history.
23 |
24 | ## Citing this Book
25 |
26 | If you found this book useful for your blog post, research article or product, I would be grateful if you would cite this book.
27 | You can cite the book like this:
28 |
29 |
30 | ```
31 | Molnar, Christoph. *Interpretable Machine Learning: A Guide for Making Black Box Models Explainable*. 3rd ed., 2025. ISBN: 978-3-911578-03-5. Available at: https://christophm.github.io/interpretable-ml-book.
32 | ```
33 |
34 | Or use the following bibtex entry:
35 |
36 | ```
37 | @book{molnar2025,
38 | title={Interpretable Machine Learning},
39 | subtitle={A Guide for Making Black Box Models Explainable},
40 | author={Christoph Molnar},
41 | year={2025},
42 | edition={3},
43 | isbn={978-3-911578-03-5},
44 | url={https://christophm.github.io/interpretable-ml-book}
45 | }
46 | ```
47 |
48 | I'm always curious about where and how interpretation methods are used in industry and research.
49 | If you use the book as a reference, it would be great if you wrote me a line and told me what for.
50 | This is, of course, optional and only serves to satisfy my own curiosity and to stimulate interesting exchanges.
51 | My email is chris@christophmolnar.com
52 |
53 |
54 | ## Contributing
55 |
56 | If you find any errors in the book, I welcome your help in fixing them! To contribute:
57 |
58 | 1. Fork the repository.
59 | 1. Create a new branch for your fix.
60 | 1. Address the error(s) you found.
61 | 1. Submit a pull request with a clear description of the fix.
62 |
63 | Additionally, if you have content suggestions or requests, feel free to open an issue. While I can't promise that all suggestions will be added, I appreciate the feedback.
64 |
65 | Thank you for helping improve the book!
66 |
67 |
68 | ## Further Resources
69 |
70 | - Most R examples use the [iml R package](https://github.com/giuseppec/iml) that I developed
71 | - For a deep dive into SHAP, check out my book [Interpreting Machine Learning Models with SHAP](https://christophmolnar.com/books/shap/)
72 |
73 |
74 |
75 |
--------------------------------------------------------------------------------
/cover/cutout-borders.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/cover/cutout-borders.jpg
--------------------------------------------------------------------------------
/cover/paperback.pdf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/cover/paperback.pdf
--------------------------------------------------------------------------------
/cover/paperback.xcf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/cover/paperback.xcf
--------------------------------------------------------------------------------
/data/bike.RData:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/data/bike.RData
--------------------------------------------------------------------------------
/data/cached-anchors-edge.RDS:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/data/cached-anchors-edge.RDS
--------------------------------------------------------------------------------
/data/cached-anchors.RDS:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/data/cached-anchors.RDS
--------------------------------------------------------------------------------
/data/cached-sbrl-bike.RDS:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/data/cached-sbrl-bike.RDS
--------------------------------------------------------------------------------
/data/cached-sbrl-penguins.RDS:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/data/cached-sbrl-penguins.RDS
--------------------------------------------------------------------------------
/data/influence-df.RData:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/data/influence-df.RData
--------------------------------------------------------------------------------
/data/speed_dating_data.csv:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/data/speed_dating_data.csv
--------------------------------------------------------------------------------
/data/ycomments.RData:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/data/ycomments.RData
--------------------------------------------------------------------------------
/initialize-gh-pages.sh:
--------------------------------------------------------------------------------
1 | #!/bin/sh
2 |
3 | git checkout --orphan gh-pages
4 | git rm -rf .
5 |
6 | # create a hidden file .nojekyll
7 | touch .nojekyll
8 | git add .nojekyll
9 |
10 | git commit -m"Initial commit"
11 | git push origin gh-pages
--------------------------------------------------------------------------------
/manuscript/.gitignore:
--------------------------------------------------------------------------------
1 | /.quarto/
2 | *_cache
3 | *_files
4 | *.html
5 | _site_libs
6 | output
7 | api.key
8 | *.toc
9 |
--------------------------------------------------------------------------------
/manuscript/Makefile:
--------------------------------------------------------------------------------
1 | .PHONY : all ebook html kindle pdf print docx beta-reader editing data check-epubs check-epub-leanpub check-epub-amazon install-sbrl-deps
2 |
3 | bookname = iml
4 |
5 | all : kindle print ebook pdf html
6 | kindle : output/kindle-$(bookname).epub
7 | print: output/print-$(bookname).pdf
8 | ebook: output/ebook-$(bookname).epub
9 | pdf: output/pdf-$(bookname).pdf
10 | beta-reader: output/beta-reader-$(bookname).docx
11 |
12 |
13 | COMMON_BOOK_FILES = $(wildcard *.qmd) references.bib grateful-refs.bib _quarto.yml
14 |
15 | grateful-refs.bib: _setup.qmd
16 | Rscript -e 'grateful::cite_packages(output = "table", out.dir = ".")'
17 |
18 | html: $(COMMON_BOOK_FILES) html/*.html
19 | quarto render --to html --no-cache
20 |
21 | docx : $(COMMON_BOOK_FILES)
22 | quarto render --to docx --profile beta-reader --no-cache
23 |
24 | output/ebook-$(bookname).epub : $(COMMON_BOOK_FILES) _quarto-ebook.yml html/epub.css
25 | quarto render --profile ebook --to epub --no-cache
26 | mv _book/$(bookname).epub output/ebook-$(bookname).epub
27 |
28 | output/kindle-$(bookname).epub : $(COMMON_BOOK_FILES) _quarto-kindle.yml html/epub.css
29 | quarto render --profile kindle --to epub --no-cache
30 | mv _book/$(bookname).epub output/kindle-$(bookname).epub
31 |
32 | output/print-$(bookname).pdf : $(COMMON_BOOK_FILES) _quarto-print.yml latex/
33 | git rev-parse --short HEAD > latex/hash.tex
34 | quarto render --profile print --to pdf --no-cache
35 | mv _book/$(bookname).pdf output/print-$(bookname).pdf
36 |
37 | output/pdf-$(bookname).pdf : $(COMMON_BOOK_FILES) _quarto-pdf.yml latex/
38 | git rev-parse --short HEAD > latex/hash.tex
39 | quarto render --profile pdf --to pdf --no-cache
40 | mv _book/$(bookname).pdf output/pdf-$(bookname).pdf
41 |
42 | output/sample-$(bookname).pdf: output/pdf-$(bookname).pdf
43 | pdftk output/pdf-$(bookname).pdf cat 1-40 output output/leanpub/sample-pdf-$(bookname).pdf
44 |
45 | output/beta-reader-$(bookname).docx: $(COMMON_BOOK_FILES)
46 | quarto render --to docx --no-cache
47 | mv _book/$(bookname).docx output/beta-reader-$(bookname).docx
48 |
49 | editing: *.qmd _quarto-editing.yml latex _quarto.yml
50 | git rev-parse --short HEAD > latex/hash.tex
51 | quarto render --profile editing --to pdf --no-cache
52 | mv _book/$(bookname).pdf output/editing.pdf
53 |
54 | check-epub-leanpub:
55 | java -jar ../scripts/epubcheck-5.0.0/epubcheck.jar output/ebook-$(bookname).epub
56 |
57 | check-epub-amazon:
58 | java -jar ../scripts/epubcheck-5.0.0/epubcheck.jar output/kindle-$(bookname).epub
59 |
60 | check-epubs: check-epub-leanpub check-epub-amazon
61 |
62 | html/epub.css :
63 | curl https://raw.githubusercontent.com/jgm/pandoc/main/data/epub.css --output html/epub.css
64 | sed -i '' 's/background-color: #fdfdfd;/background-color: transparent;/g' html/epub.css
65 |
66 | install-sbrl-deps:
67 | sudo apt install -y libgsl10-dev open-cobol
68 |
69 |
70 | data:
71 | Rscript ../scripts/get-bike-sharing-dataset.R
72 | Rscript ../scripts/get-SpamTube-dataset.R
73 |
--------------------------------------------------------------------------------
/manuscript/_box-shap-book.qmd:
--------------------------------------------------------------------------------
1 | ::: {.callout-tip}
2 |
3 | ::: {layout="[60,40]"}
4 |
5 | Looking for a comprehensive, hands-on guide to SHAP and Shapley values?
6 | [Interpreting Machine Learning Models with SHAP](https://leanpub.com/shap) has you covered.
7 | With practical Python examples using the shap package, you'll learn how to explain models ranging from simple to complex.
8 | It dives deep into the mechanics of SHAP, provides interpretation templates, and highlights key limitations, giving you the insights you need to apply SHAP confidently and effectively.
9 |
10 | {width=45% fig-align="center"}
11 |
12 | :::
13 |
14 | :::
15 |
16 |
--------------------------------------------------------------------------------
/manuscript/_output.yml:
--------------------------------------------------------------------------------
1 | bookdown::gitbook:
2 | dev: svglite
3 | split_by: section
4 | css: css/style.css
5 | includes:
6 | in_header: html/header.html
7 | before_body: html/cta-button.html
8 | config:
9 | edit: https://github.com/christophM/interpretable-ml-book/edit/master/manuscript/%s
10 | download: false
11 | sharing:
12 | github: true
13 | linkedin: true
14 | facebook: false
15 | toc:
16 | collapse: subsection
17 |       before: |
18 |         Interpretable machine learning
19 |       after: |
20 |         Published with bookdown
21 |         Impressum
23 | includes:
24 | in_header: latex/preamble.tex
25 | before_body: latex/before_body.tex
26 | after_body: latex/after_body.tex
27 | keep_tex: yes
28 | latex_engine: xelatex
29 | dev: "cairo_pdf"
30 | pandoc_args: --top-level-division=chapter
31 | toc_depth: 2
32 | toc_unnumbered: no
33 | quote_footer: ["\\VA{", "}{}"]
34 | lot: no
35 | lof: no
36 | bookdown::epub_book:
37 | dev: svglite
38 | cover_image: ../cover/Bookcover_IML_KDP_v2/cutout.png
39 | pandoc_args: [ "--mathml" ]
40 |
--------------------------------------------------------------------------------
/manuscript/_quarto-ebook.yml:
--------------------------------------------------------------------------------
1 | format:
2 | epub:
3 | cover-image: ./images/cover.jpg
4 | fig-align: center
5 |
6 |
--------------------------------------------------------------------------------
/manuscript/_quarto-editing.yml:
--------------------------------------------------------------------------------
1 | format:
2 | pdf:
3 | documentclass: scrreport
4 | highlight-style: arrow
5 | font-size: 13
6 | color-links: true
7 | links-as-notes: true
8 | citecolor: "blue"
9 | include-in-header: [latex/preamble-editing.tex]
10 | include-before-body: latex/before_body.tex
11 | # geometry optimized for reading on ipad pro
12 | geometry:
13 | - paperwidth=214mm
14 | - paperheight=280mm
15 | - left=30mm
16 | - right=60mm
17 | - top=25mm
18 | - bottom=30mm
19 |
--------------------------------------------------------------------------------
/manuscript/_quarto-kindle.yml:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/_quarto-kindle.yml
--------------------------------------------------------------------------------
/manuscript/_quarto-pdf.yml:
--------------------------------------------------------------------------------
1 | format:
2 | pdf:
3 | documentclass: scrreport
4 | fontsize: "13"
5 | toc-depth: 2
6 |
--------------------------------------------------------------------------------
/manuscript/_quarto-print.yml:
--------------------------------------------------------------------------------
1 | format:
2 | pdf:
3 | documentclass: scrbook
4 | highlight-style: arrow
5 | fontsize: "10pt"
6 | toc: true
7 | toc-depth: 2
8 | toc-title: "Content"
9 | lof: false
10 | lot: false
11 | colorlinks: false
12 | links-as-notes: true
13 | #link-bibliography: false
14 | cite-method: natbib
15 |     pdf-engine: xelatex
16 | fig-format: jpeg
17 | include-in-header:
18 | - latex/preamble.tex
19 | include-before-body: [latex/before_body.tex]
20 | geometry:
21 | - paperwidth=18.9cm
22 | - paperheight=24.61cm
23 | - inner=30mm
24 | - outer=17mm
25 | - top=30mm
26 | - bottom=24mm
27 |
--------------------------------------------------------------------------------
/manuscript/_quarto.yml:
--------------------------------------------------------------------------------
1 | project:
2 | type: book
3 | resources:
4 | - privacy-policy.html
5 | - images/cover-sidepanel.jpg
6 | post-render:
7 | - ../scripts/redirect.py
8 |
9 | execute:
10 | echo: false
11 | warning: false
12 | message: false
13 | cache: true
14 | fig-align: center
15 |
16 | engine: knitr
17 |
18 | book:
19 | title: "Interpretable Machine Learning"
20 | subtitle: "A Guide for Making Black Box Models Explainable"
21 | author: "Christoph Molnar"
22 | output-file: iml
23 | favicon: images/favicon.jpg
24 | cookie-consent:
25 | style: simple
26 | palette: light
27 | google-analytics:
28 | tracking-id: "G-V7RTNZBGE2"
29 | anonymize-ip: true
30 | margin-footer:
31 | page-footer: |
32 | Privacy Policy | Impressum
33 | repo-url: https://github.com/christophM/interpretable-ml-book
34 | repo-actions: [source, issue]
35 | chapters:
36 | - index.qmd
37 | - intro.qmd
38 | - interpretability.qmd
39 | - goals.qmd
40 | - overview.qmd
41 | - data.qmd
42 | - part: "Interpretable Models"
43 | chapters:
44 | - limo.qmd
45 | - logistic.qmd
46 | - extend-lm.qmd
47 | - tree.qmd
48 | - rules.qmd
49 | - rulefit.qmd
50 | - part: "Local Model-Agnostic Methods"
51 | chapters:
52 | - ceteris-paribus.qmd
53 | - ice.qmd
54 | - lime.qmd
55 | - counterfactual.qmd
56 | - anchors.qmd
57 | - shapley.qmd
58 | - shap.qmd
59 | - part: "Global Model-Agnostic Methods"
60 | chapters:
61 | - pdp.qmd
62 | - ale.qmd
63 | - interaction.qmd
64 | - decomposition.qmd
65 | - feature-importance.qmd
66 | - lofo.qmd
67 | - global.qmd
68 | - proto.qmd
69 | - part: "Neural Network Interpretation"
70 | chapters:
71 | - cnn-features.qmd
72 | - pixel-attribution.qmd
73 | - detecting-concepts.qmd
74 | - adversarial.qmd
75 | - influential.qmd
76 | - part: "Beyond the Methods"
77 | chapters:
78 | - evaluation.qmd
79 | - storytime.qmd
80 | - future.qmd
81 | - translations.qmd
82 | - cite.qmd
83 | - acknowledgements.qmd
84 | appendices:
85 | - what-is-machine-learning.qmd
86 | - math-terms.qmd
87 | - r-packages.qmd
88 | - references.qmd
89 |
90 | bibliography: [references.bib,grateful-refs.bib]
91 | cite-method: citeproc
92 |
93 | format:
94 | epub:
95 | lang: en-US
96 | html-math-method: webtex
97 | highlight-style: printing
98 | always_allow_html: true
99 | css: html/epub.css
100 | description: "Machine learning algorithms usually operate as black boxes and it is unclear how they derived a certain decision. This book is a guide for practitioners to make machine learning decisions interpretable."
101 | toc-depth: 2
102 | pdf:
103 | documentclass: scrreprt
104 |     toc-depth: 2
105 | html:
106 | lightbox: true
107 | cover-image: images/cover.jpg
108 | include-in-header: [html/purchase.html]
109 | page-layout: article
110 | smooth-scroll: true
111 | theme: [cosmo, html/style.scss]
112 | grid:
113 | sidebar-width: 370px
114 | fig-align: center
115 | number-depth: 0
116 | #reference-location: margin
117 | #citation-location: margin
118 |
119 |
120 |
121 |
--------------------------------------------------------------------------------
/manuscript/_setup.qmd:
--------------------------------------------------------------------------------
1 | ```{r}
2 | #| label: load R packages
3 | library("ceterisParibus")
4 | library("dplyr")
5 | library("DALEX")
6 | library("GGally")
7 | library("ggplot2")
8 | library("grateful")
9 | library("iml")
10 | library("knitr")
11 | library("kableExtra")
12 | library("Metrics")
13 | library("nnet")
14 | library("palmerpenguins")
15 | library("patchwork")
16 | library("randomForest")
17 | library("ranger")
18 | library("reshape2")
19 | library("rpart")
20 | library("tidyr")
21 | library("viridis")
22 | library("yaImpute")
23 | ```
24 |
25 | ```{r}
26 | #| label: penguin data and models
27 | set.seed(2)
28 | penguins = na.omit(penguins)
29 | penguins_orig = penguins
30 | penguins_col_selection = c('species', 'bill_depth_mm', 'bill_length_mm', 'flipper_length_mm', 'sex', 'body_mass_g')
31 | penguins = penguins[penguins_col_selection]
32 | # Make sure P(female) is predicted; relevel before the split so train and test inherit the levels
33 | penguins$sex <- relevel(penguins$sex, ref = "male")
34 | penguin_train_index <- sample(1:nrow(penguins), 2/3 * nrow(penguins))
35 | penguins_train <- penguins[penguin_train_index, ]
36 | penguins_test <- penguins[-penguin_train_index, ]
37 |
38 | pengu_rf = randomForest(sex ~ ., data = penguins_train)
39 | pengu_tree = rpart(sex ~ ., data = penguins_train)
40 |
41 | # Split the data by species
42 | species_models <- lapply(split(penguins_train, penguins_train$species), function(data) {
43 | data$species = NULL
44 | # ensuring the right levels
45 | data$sex <- relevel(data$sex, ref = "male")
46 | glm(sex ~ ., data = data, trace = FALSE, family = binomial(link = "logit"))
47 | })
48 |
49 | pengu_logreg <- list(models = species_models)
50 | class(pengu_logreg) <- "pengu_logreg"
51 |
52 | predict.pengu_logreg <- function(object, newdata, ...) {
53 | predictions <- numeric(nrow(newdata))
54 |
55 | for (species in names(object$models)) {
56 | # Filter test data for this species
57 | species_data <- newdata[newdata$species == species, ]
58 | if (nrow(species_data) > 0) {
59 | # Predict using the appropriate model
60 | model <- object$models[[species]]
61 | species_predictions <- predict(model, newdata = species_data, type = "response", ...)
62 |
63 | # Create temporary data frame with predictions
64 | pred_df <- data.frame(
65 | row_id = seq_len(nrow(newdata))[newdata$species == species],
66 | pred = species_predictions
67 | )
68 | # Update predictions using merge matching
69 | predictions[pred_df$row_id] <- pred_df$pred
70 | }
71 | }
72 |
73 | return(predictions)
74 | }
75 |
76 | ```
77 |
78 |
79 |
80 | ```{r}
81 | #| label: bike data and models
82 | set.seed(42)
83 | load("../data/bike.RData")
84 | bike = na.omit(bike)
85 | bike_features = c('season','holiday', 'workday', 'weather', 'temp', 'hum', 'windspeed', "cnt_2d_bfr")
86 | bike = bike[c(bike_features, "cnt")]
87 | bike_train_index <- sample(1:nrow(bike), 2/3 * nrow(bike))
88 | bike_train <- bike[bike_train_index, ]
89 | bike_test <- bike[-bike_train_index, ]
90 |
91 | bike_rf = randomForest(cnt ~ ., data = bike_train)
92 | bike_tree = rpart(cnt ~ ., data = bike_train)
93 | bike_svm = e1071::svm(cnt ~ ., data = bike_train)
94 | bike_lm = lm(cnt ~ ., data = bike_train, x = TRUE)
95 | ```
96 |
97 |
98 | ```{r}
99 | #| label: load data
100 | set.seed(42)
101 |
102 | ## Load datasets
103 | load("../data/ycomments.RData")
104 | ```
105 |
106 | ```{r}
107 | my_theme = function(legend.position='right'){
108 | theme_bw() %+replace%
109 | theme(legend.position=legend.position)
110 | }
111 |
112 | theme_set(my_theme())
113 | default_color = "azure4"
114 | ```
115 |
116 |
117 | ```{r}
118 | #| label: define util functions
119 |
120 | pretty_rownames = function(rnames){
121 | rnames = gsub('^`', '', rnames)
122 | rnames = gsub('`$', '', rnames)
123 | rnames = gsub('`', ':', rnames)
124 | rnames
125 | }
126 |
127 | ```
128 |
129 | ```{r}
130 | #| label: outfig function
131 | out2fig = function(out.width, out.width.default = 1, fig.width.default = NA) {
132 | if (is.na(fig.width.default)){
133 | fig.width.default = as.numeric(knitr::opts_current$get("fig.width"))
134 | }
135 | fig.width.default * out.width / out.width.default
136 | }
137 | ```
138 |
139 |
140 | ```{r}
141 | #| label: Define width for table
142 | single_col_width = "35em"
143 | ```
144 |
145 |
146 |
--------------------------------------------------------------------------------
/manuscript/acknowledgements.qmd:
--------------------------------------------------------------------------------
1 | # Acknowledgments
2 |
3 | Writing this book was (and still is) a lot of fun.
4 | But it's also a lot of work, and I'm very happy about the support I received.
5 |
6 | My biggest thank-you goes to Katrin, who had the hardest job in terms of hours and effort: she proofread the book from beginning to end and discovered many spelling mistakes and inconsistencies that I would never have found.
7 | I'm very grateful for her support.
8 |
9 | A big thank you goes out to all the guest authors.
10 | I was really surprised to learn that people were interested in contributing to the book.
11 | And thanks to their efforts, the contents of the book could be improved!
12 | Tobias Goerke and Magdalena Lang wrote the chapter about Scoped Rules (Anchors).
13 | Fangzhou Li contributed the chapter on Detecting Concepts.
14 | And Susanne Dandl greatly improved the chapter on Counterfactual Examples.
15 | Last but not least, [Verena Haunschmid](https://twitter.com/ExpectAPatronum) wrote the section about LIME explanations for images.
16 | I also want to thank all the readers who provided feedback and contributed corrections directly [on GitHub](https://github.com/christophM/interpretable-ml-book/graphs/contributors)!
17 |
18 | Furthermore, I want to thank everyone who created illustrations:
19 | The cover was designed by my friend [\@YvonneDoinel](https://twitter.com/YvonneDoinel).
20 | The graphics in the [Shapley Value chapter](#shapley) were created by [Heidi Seibold](https://twitter.com/HeidiBaya), as well as the turtle example in the [adversarial examples chapter](#adversarial).
21 | Verena Haunschmid created the graphic in the [RuleFit chapter](#rulefit).
22 |
23 | I would also like to thank my wife and family, who have always supported me.
24 | My wife, in particular, had to listen to me ramble on about the book a lot.
25 | She helped me make many decisions around the writing of the book.
26 |
27 | The way I published this book is a bit unconventional.
28 | First, it's not only available as a paperback and ebook, but also as a website, completely free of charge.
29 | I published the book as an in-progress book, which has helped me enormously to get feedback and to monetize it along the way.
30 | I would also like to thank you, dear reader, for reading this book without a big publisher name behind it.
31 |
32 | I'm grateful to the Bavarian State Ministry of Science and the Arts, within the framework of the Centre Digitisation.Bavaria (ZD.B), and to the Bavarian Research Institute for Digital Transformation (bidt) for funding my PhD, which I finished in 2022.
33 |
34 | ::: {.content-visible when-format="epub"}
35 |
36 | # Impressum {-}
37 |
38 | Copyright: 2025 *Christoph Molnar*, Germany, Munich
39 |
40 | This book is licensed under the Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License.
41 |
42 | 
43 |
44 |
45 | Third edition
46 |
47 | Christoph Molnar
48 | c/o Digital Research Academy
49 | Bayerstraße 77c
50 | 80335 München, Germany
51 |
52 | :::
53 |
54 |
55 |
--------------------------------------------------------------------------------
/manuscript/cite.qmd:
--------------------------------------------------------------------------------
1 | # Citing this Book {#cite}
2 |
3 | If you found this book useful for your blog post, research article or product, I would be grateful if you would cite this book.
4 | You can cite the book like this:
5 |
6 |
7 | ```
8 | Molnar, C. (2025). Interpretable Machine Learning:
9 | A Guide for Making Black Box Models Explainable (3rd ed.).
10 | christophm.github.io/interpretable-ml-book/
11 | ```
12 |
13 | Or use the following bibtex entry:
14 |
15 | ```
16 | @book{molnar2025,
17 | title={Interpretable Machine Learning},
18 | subtitle={A Guide for Making Black Box Models Explainable},
19 | author={Christoph Molnar},
20 | year={2025},
21 | edition={3},
22 | isbn={978-3-911578-03-5},
23 | url={https://christophm.github.io/interpretable-ml-book}
24 | }
25 | ```
26 |
27 | I'm always curious about where and how interpretation methods are used in industry and research.
28 | If you use the book as a reference, it would be great if you wrote me a line and told me what for.
29 | This is, of course, optional and only serves to satisfy my own curiosity and to stimulate interesting exchanges.
30 | My email is chris@christophmolnar.com
31 |
--------------------------------------------------------------------------------
/manuscript/course.qmd:
--------------------------------------------------------------------------------
1 | # Interpretable ML Online Course
2 |
3 | I'm excited to announce that I'm planning an online course on Interpretable Machine Learning.
4 | To help me tailor the course to your needs, please take a moment to complete this short survey.
5 |
6 | Take the Survey
--------------------------------------------------------------------------------
/manuscript/css/cookieconsent.min.css:
--------------------------------------------------------------------------------
1 | .cc-window{opacity:1;transition:opacity 1s ease}.cc-window.cc-invisible{opacity:0}.cc-animate.cc-revoke{transition:transform 1s ease}.cc-animate.cc-revoke.cc-top{transform:translateY(-2em)}.cc-animate.cc-revoke.cc-bottom{transform:translateY(2em)}.cc-animate.cc-revoke.cc-active.cc-bottom,.cc-animate.cc-revoke.cc-active.cc-top,.cc-revoke:hover{transform:translateY(0)}.cc-grower{max-height:0;overflow:hidden;transition:max-height 1s}
2 | .cc-link,.cc-revoke:hover{text-decoration:underline}.cc-revoke,.cc-window{position:fixed;overflow:hidden;box-sizing:border-box;font-family:Helvetica,Calibri,Arial,sans-serif;font-size:16px;line-height:1.5em;display:-ms-flexbox;display:flex;-ms-flex-wrap:nowrap;flex-wrap:nowrap;z-index:9999}.cc-window.cc-static{position:static}.cc-window.cc-floating{padding:2em;max-width:24em;-ms-flex-direction:column;flex-direction:column}.cc-window.cc-banner{padding:1em 1.8em;width:100%;-ms-flex-direction:row;flex-direction:row}.cc-revoke{padding:.5em}.cc-header{font-size:18px;font-weight:700}.cc-btn,.cc-close,.cc-link,.cc-revoke{cursor:pointer}.cc-link{opacity:.8;display:inline-block;padding:.2em}.cc-link:hover{opacity:1}.cc-link:active,.cc-link:visited{color:initial}.cc-btn{display:block;padding:.4em .8em;font-size:.9em;font-weight:700;border-width:2px;border-style:solid;text-align:center;white-space:nowrap}.cc-banner .cc-btn:last-child{min-width:140px}.cc-highlight .cc-btn:first-child{background-color:transparent;border-color:transparent}.cc-highlight .cc-btn:first-child:focus,.cc-highlight .cc-btn:first-child:hover{background-color:transparent;text-decoration:underline}.cc-close{display:block;position:absolute;top:.5em;right:.5em;font-size:1.6em;opacity:.9;line-height:.75}.cc-close:focus,.cc-close:hover{opacity:1}
3 | .cc-revoke.cc-top{top:0;left:3em;border-bottom-left-radius:.5em;border-bottom-right-radius:.5em}.cc-revoke.cc-bottom{bottom:0;left:3em;border-top-left-radius:.5em;border-top-right-radius:.5em}.cc-revoke.cc-left{left:3em;right:unset}.cc-revoke.cc-right{right:3em;left:unset}.cc-top{top:1em}.cc-left{left:1em}.cc-right{right:1em}.cc-bottom{bottom:1em}.cc-floating>.cc-link{margin-bottom:1em}.cc-floating .cc-message{display:block;margin-bottom:1em}.cc-window.cc-floating .cc-compliance{-ms-flex:1;flex:1}.cc-window.cc-banner{-ms-flex-align:center;align-items:center}.cc-banner.cc-top{left:0;right:0;top:0}.cc-banner.cc-bottom{left:0;right:0;bottom:0}.cc-banner .cc-message{-ms-flex:1;flex:1}.cc-compliance{display:-ms-flexbox;display:flex;-ms-flex-align:center;align-items:center;-ms-flex-line-pack:justify;align-content:space-between}.cc-compliance>.cc-btn{-ms-flex:1;flex:1}.cc-btn+.cc-btn{margin-left:.5em}
4 | @media print{.cc-revoke,.cc-window{display:none}}@media screen and (max-width:900px){.cc-btn{white-space:normal}}@media screen and (max-width:414px) and (orientation:portrait),screen and (max-width:736px) and (orientation:landscape){.cc-window.cc-top{top:0}.cc-window.cc-bottom{bottom:0}.cc-window.cc-banner,.cc-window.cc-left,.cc-window.cc-right{left:0;right:0}.cc-window.cc-banner{-ms-flex-direction:column;flex-direction:column}.cc-window.cc-banner .cc-compliance{-ms-flex:1;flex:1}.cc-window.cc-floating{max-width:none}.cc-window .cc-message{margin-bottom:1em}.cc-window.cc-banner{-ms-flex-align:unset;align-items:unset}}
5 | .cc-floating.cc-theme-classic{padding:1.2em;border-radius:5px}.cc-floating.cc-type-info.cc-theme-classic .cc-compliance{text-align:center;display:inline;-ms-flex:none;flex:none}.cc-theme-classic .cc-btn{border-radius:5px}.cc-theme-classic .cc-btn:last-child{min-width:140px}.cc-floating.cc-type-info.cc-theme-classic .cc-btn{display:inline-block}
6 | .cc-theme-edgeless.cc-window{padding:0}.cc-floating.cc-theme-edgeless .cc-message{margin:2em 2em 1.5em}.cc-banner.cc-theme-edgeless .cc-btn{margin:0;padding:.8em 1.8em;height:100%}.cc-banner.cc-theme-edgeless .cc-message{margin-left:1em}.cc-floating.cc-theme-edgeless .cc-btn+.cc-btn{margin-left:0}
--------------------------------------------------------------------------------
/manuscript/css/style.css:
--------------------------------------------------------------------------------
1 | /* Copied from https://github.com/rstudio/bookdown/blob/master/inst/examples/css/style.css */
2 | p.caption {
3 | color: #777;
4 | margin-top: 10px;
5 | }
6 | p code {
7 | white-space: inherit;
8 | }
9 | pre {
10 | word-break: normal;
11 | word-wrap: normal;
12 | }
13 | pre code {
14 | white-space: inherit;
15 | }
16 | p.flushright {
17 | text-align: right;
18 | }
19 | blockquote > p:last-child {
20 | text-align: right;
21 | }
22 | blockquote > p:first-child {
23 | text-align: inherit;
24 | }
25 | .header-section-number {
26 | padding-right: .2em;
27 | font-weight: 500;
28 | }
29 | .level1 .header-section-number {
30 | display: inline-block;
31 | border-bottom: 3px solid;
32 | }
33 | .level1 h1 {
34 | border-bottom: 1px solid;
35 | }
36 | h1, h2, h3, h4, h5, h6 {
37 | font-weight: normal;
38 | }
39 | h1.title {
40 | font-weight: 700;
41 | }
42 | .book .book-body .page-wrapper .page-inner section.normal {
43 | font-weight: 500;
44 | }
45 |
46 | .rmdnote {
47 | padding: 1em 1em 1em 4em;
48 | margin-bottom: 10px;
49 | background: #f5f5f5 5px center/3em no-repeat;
50 | background-image: url("../images/note.png");
51 | }
52 |
--------------------------------------------------------------------------------
/manuscript/evaluation.qmd:
--------------------------------------------------------------------------------
1 | # Evaluation of Interpretability Methods
2 |
3 | This chapter is about the more advanced topic of how to evaluate interpretability methods.
4 | Evaluation is targeted at interpretability researchers and practitioners who get a bit deeper into interpretability.
5 | Feel free to skip it otherwise.
6 |
7 | Evaluating approaches to interpretable machine learning is difficult due to a general lack of ground truth.
8 | Speaking as someone who did a PhD in model-agnostic interpretability, this can be especially annoying, since there is a mindset in supervised machine learning that everything needs to have benchmarks.
9 | And by benchmarks, I mean evaluation on real data against a ground truth.
10 | Benchmarks make sense when you develop prediction models with available ground truth.
11 | For interpretable machine learning, there is no ground truth in real-world data.
12 | You can only generate something resembling a ground truth with simulated data.
13 |
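To make this concrete, here's a minimal sketch of such a simulated-ground-truth check (my own illustration, not code from this repository): simulate data in which only `x1` truly matters, train a model, and check whether a permutation-based importance measure ranks `x1` above the pure noise feature `x2`.

```{r}
# Simulated ground truth: y depends on x1, while x2 is pure noise
set.seed(1)
n <- 500
dat <- data.frame(x1 = rnorm(n), x2 = rnorm(n))
dat$y <- 2 * dat$x1 + rnorm(n)

library("randomForest")
rf <- randomForest(y ~ x1 + x2, data = dat)

# Manual permutation importance: increase in MSE after shuffling one feature
perm_importance <- function(model, data, feature) {
  mse <- function(pred) mean((data$y - pred)^2)
  shuffled <- data
  shuffled[[feature]] <- sample(shuffled[[feature]])
  mse(predict(model, shuffled)) - mse(predict(model, data))
}

# The importance method passes this sanity check if x1 scores far above x2
sapply(c("x1", "x2"), function(f) perm_importance(rf, dat, f))
```

If a method doesn't clearly rank `x1` above `x2` here, it fails even this easy sanity check; real-world evaluation is much harder because the true data-generating process is unknown.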
14 | Let's have a more general look at evaluation.
15 |
16 | ## Levels of evaluation
17 |
18 | Doshi-Velez and Kim (2017) propose three main levels for the evaluation of interpretability:
19 |
20 | **Application level evaluation (real task)**:
21 | Put the explanation into the product and have it tested by the end user.
22 | Imagine fracture detection software with a machine learning component that locates and marks fractures in X-rays.
23 | At the application level, radiologists would test the fracture detection software directly to evaluate the model.
24 | This requires a good experimental setup and an understanding of how to assess quality.
25 | A good baseline for this is always how good a human would be at explaining the same decision.
26 |
27 | **Human level evaluation (simple task)** is a simplified application level evaluation.
28 | The difference is that these experiments are not carried out with the domain experts, but with laypersons.
29 | This makes experiments cheaper (especially if the domain experts are radiologists), and it is easier to find more testers.
30 | An example would be to show a user different explanations, and the user would choose the best one.
31 |
32 | **Function level evaluation (proxy task)** does not require humans.
33 | This works best when the class of model used has already been evaluated by someone else in a human level evaluation.
34 | For example, it might be known that the end users understand decision trees.
35 | In this case, a proxy for explanation quality may be the depth of the tree.
36 | Shorter trees would get a better explainability score.
37 | It would make sense to add the constraint that the predictive performance of the tree remains good and does not decrease too much compared to a larger tree.
38 |
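To illustrate what such a proxy could look like, here's a small sketch (my own, with made-up function names, not code from this repository): it scores an `rpart` tree by its depth, subject to the constraint that its accuracy stays close to that of a reference model.

```{r}
library("rpart")

# Depth of an rpart tree: node number i sits at depth floor(log2(i))
tree_depth <- function(tree) {
  max(floor(log2(as.numeric(rownames(tree$frame)))))
}

# Proxy score: shallower trees are "more explainable", but only if accuracy
# drops no more than max_drop below the reference model's accuracy
proxy_score <- function(tree, data, target, reference_acc, max_drop = 0.05) {
  acc <- mean(predict(tree, data, type = "class") == data[[target]])
  if (acc < reference_acc - max_drop) return(NA)  # performance constraint violated
  -tree_depth(tree)  # higher score = shorter tree
}

shallow <- rpart(Species ~ ., data = iris, maxdepth = 2)
deep <- rpart(Species ~ ., data = iris, control = rpart.control(cp = 0, minsplit = 2))
proxy_score(shallow, iris, "Species", reference_acc = 0.95)
proxy_score(deep, iris, "Species", reference_acc = 0.95)
```

In practice, you would measure accuracy on held-out data and pick, among all explanations that satisfy the performance constraint, the one with the best score.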
39 | The next section focuses on the evaluation of explanations for individual predictions at the function level.
40 | What are the relevant properties of explanations that we would consider for their evaluation?
41 |
42 | ## Properties of explanations {#properties}
43 |
44 | There are no ground truths for explanations.
45 | Instead, we can have a look at more general properties of explanations and qualitatively (sometimes quantitatively) evaluate how well an explanation fares.
46 | This is focused on explanations of individual predictions.
47 | **An explanation relates the feature values of an instance to its model prediction in a humanly understandable way.**
48 | Other types of explanations consist of a set of data instances (e.g., in the case of the k-nearest neighbor model).
49 | For example, we could predict cancer risk using a support vector machine and explain predictions using the [local surrogate method](#lime), which generates decision trees as explanations.
50 | Or we could use a linear regression model instead of a support vector machine.
51 | The linear regression model is already equipped with an explanation method (interpretation of the weights).
52 |
53 | We take a closer look at the properties of explanation methods and explanations [@robniksikonja2018perturbationbased].
54 | These properties can be used to judge how good an explanation method or explanation is.
55 | It's not clear for all these properties how to measure them correctly, so one of the challenges is to formalize how they could be calculated.
56 |
57 | **Properties of Explanation Methods**
58 |
59 | - **Expressive Power** is the "language" or structure of the explanations the method is able to generate.
60 | An explanation method could generate IF-THEN rules, decision trees, a weighted sum, natural language, or something else.
61 | - **Translucency** describes how much the explanation method relies on looking into the machine learning model, like its parameters.
62 | For example, explanation methods relying on intrinsically interpretable models like the linear regression model (model-specific) are highly translucent.
63 | Methods only relying on manipulating inputs and observing the predictions have zero translucency.
64 | Depending on the scenario, different levels of translucency might be desirable.
65 | The advantage of high translucency is that the method can rely on more information to generate explanations.
66 | The advantage of low translucency is that the explanation method is more portable.
67 | - **Portability** describes the range of machine learning models with which the explanation method can be used.
68 | Methods with a low translucency have a higher portability because they treat the machine learning model as a black box.
69 | Surrogate models might be the explanation method with the highest portability.
70 | Methods that only work for specific model classes, e.g., recurrent neural networks, have low portability.
71 | - **Algorithmic Complexity** describes the computational complexity of the method that generates the explanation.
72 | This property is important to consider when computation time is a bottleneck in generating explanations.
73 |
74 | **Properties of Individual Explanations**
75 |
76 | - **Accuracy**: How well does an explanation predict unseen data?
77 | High accuracy is especially important if the explanation is used for predictions in place of the machine learning model.
78 | Low accuracy can be fine if the accuracy of the machine learning model is also low, and if the goal is to explain what the black box model does.
79 | In this case, only fidelity is important.
80 | - **Fidelity**: How well does the explanation approximate the prediction of the black box model?
81 | High fidelity is one of the most important properties of an explanation because an explanation with low fidelity is useless to explain the machine learning model.
82 | Accuracy and fidelity are closely related.
83 | If the black box model has high accuracy and the explanation has high fidelity, the explanation also has high accuracy.
84 | Some explanations offer only local fidelity, meaning the explanation only approximates the model prediction well for a subset of the data (e.g., [local surrogate models](#lime)) or even for only an individual data instance (e.g., [Shapley Values](#shapley)); a minimal fidelity check is sketched after this list.
85 | - **Consistency**: How much does an explanation differ between models that have been trained on the same task and that produce similar predictions?
86 | For example, I train a support vector machine and a linear regression model on the same task, and both produce very similar predictions.
87 | I compute explanations using a method of my choice and analyze how different the explanations are.
88 | If the explanations are very similar, the explanations are highly consistent.
89 | I find this property somewhat tricky since the two models could use different features but get similar predictions (also called ["Rashomon Effect"](https://en.wikipedia.org/wiki/Rashomon_effect)).
90 | In this case, high consistency is not desirable, because the explanations should reflect the different relationships the models rely on.
91 | High consistency is desirable if the models really rely on similar relationships.
92 | - **Stability**: How similar are the explanations for similar instances?
93 | While consistency compares explanations between models, stability compares explanations between similar instances for a fixed model.
94 | High stability means that slight variations in the features of an instance do not substantially change the explanation (unless these slight variations also strongly change the prediction).
95 | A lack of stability can be the result of a high variance of the explanation method.
96 | In other words, the explanation method is strongly affected by slight changes in the feature values of the instance to be explained.
97 | A lack of stability can also be caused by non-deterministic components of the explanation method, such as a data sampling step, like the [local surrogate method](#lime) uses.
98 | High stability is always desirable.
99 | - **Comprehensibility**: How well do humans understand the explanations?
100 | This looks just like one more property among many, but it is the elephant in the room.
101 | Difficult to define and measure, but extremely important to get right.
102 | Many people agree that comprehensibility depends on the audience.
103 | Ideas for measuring comprehensibility include measuring the size of the explanation (number of features with a non-zero weight in a linear model, number of decision rules, ...) or testing how well people can predict the behavior of the machine learning model from the explanations.
104 | The comprehensibility of the features used in the explanation should also be considered.
105 | A complex transformation of features might be less comprehensible than the original features.
106 | - **Certainty**: Does the explanation reflect the certainty of the machine learning model?
107 | Many machine learning models only give predictions without a statement about the model's confidence that the prediction is correct.
108 | If the model predicts a 4% probability of cancer for one patient, is it as certain as the 4% probability that another patient, with different feature values, received?
109 | An explanation that includes the model's certainty is very useful.
110 | In addition, the explanation itself may be a model or an estimate based on data and therefore itself subject to uncertainty.
111 | This uncertainty of the explanation is also a relevant property of the explanation itself.
112 | - **Degree of Importance**: How well does the explanation reflect the importance of features or parts of the explanation?
113 | For example, if a decision rule is generated as an explanation for an individual prediction, is it clear which of the conditions of the rule was the most important?
114 | - **Novelty**: Does the explanation reflect whether a data instance to be explained comes from a "new" region far removed from the distribution of training data?
115 | In such cases, the model may be inaccurate and the explanation may be useless.
116 | The concept of novelty is related to the concept of certainty.
117 | The higher the novelty, the more likely it is that the model will have low certainty due to lack of data.
118 | - **Representativeness**: How many instances does an explanation cover?
119 | Explanations can cover the entire model (e.g., interpretation of weights in a linear regression model) or represent only an individual prediction (e.g., [Shapley Values](#shapley)).
120 |
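To make the fidelity property more concrete, here is a minimal sketch of a fidelity check, assuming a black box and an explanation that both produce numeric predictions; the prediction vectors below are simulated placeholders, not from the book's examples:

```r
# Fidelity as R-squared of the explanation with respect to the black box
# predictions (not the true outcome)
fidelity_r2 = function(bb_pred, expl_pred) {
  1 - sum((expl_pred - bb_pred)^2) / sum((bb_pred - mean(bb_pred))^2)
}

set.seed(1)
bb_pred   = rnorm(100)                      # simulated black box predictions
expl_pred = bb_pred + rnorm(100, sd = 0.1)  # explanation that tracks them closely
fidelity_r2(bb_pred, expl_pred)             # close to 1 => high fidelity
```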
--------------------------------------------------------------------------------
/manuscript/global.qmd:
--------------------------------------------------------------------------------
1 | # Surrogate Models {#global}
2 |
3 | {{< include _setup.qmd >}}
4 |
5 | A global surrogate model is an interpretable model that is trained to approximate the predictions of a black box model.
6 | We can draw conclusions about the black box model by interpreting the surrogate model.
7 | Solving machine learning interpretability by using more machine learning!
8 |
9 | ## Theory
10 |
11 | Surrogate models are also used in engineering:
12 | If an outcome of interest is expensive, time-consuming, or otherwise difficult to measure (e.g., because it comes from a complex computer simulation), a cheap and fast surrogate model of the outcome can be used instead.
13 | The difference between the surrogate models used in engineering and in interpretable machine learning is that the underlying model is a machine learning model (not a simulation) and that the surrogate model must be interpretable.
14 | The purpose of (interpretable) surrogate models is to approximate the predictions of the underlying model as accurately as possible and to be interpretable at the same time.
15 | The idea of surrogate models can be found under different names:
16 | Approximation model, metamodel, response surface model, emulator, etc.
17 |
18 | About the theory:
19 | There's actually not much theory needed to understand surrogate models.
20 | We want to approximate our black box prediction function $f$ as closely as possible with the surrogate model prediction function $g$, under the constraint that $g$ is interpretable.
21 | For the function $g$, any interpretable model can be used.
22 |
23 | For example, a linear model:
24 |
25 | $$g(\mathbf{x}) = \beta_0 + \beta_1 x_1 + \ldots + \beta_p x_p$$
26 |
27 | Or a decision tree:
28 |
29 | $$g(\mathbf{x}) = \sum_{m=1}^M c_m I\{\mathbf{x} \in R_m\}$$
30 |
31 | Training a surrogate model is a model-agnostic method, since it does not require any information about the inner workings of the black box model; only access to the data and the prediction function is necessary.
32 | If the underlying machine learning model was replaced with another, you could still use the surrogate method.
33 | The choice of the black box model type and of the surrogate model type is decoupled.
34 |
35 | Perform the following steps to obtain a surrogate model:
36 |
37 | 1. Select a dataset $\mathbf{X}$.
38 | This can be the same dataset that was used for training the black box model or a new dataset from the same distribution.
39 | You could even select a subset of the data or a grid of points, depending on your application.
40 | 1. For the selected dataset $\mathbf{X}$, get the predictions of the black box model.
41 | 1. Select an interpretable model type (linear model, decision tree, ...).
42 | 1. Train the interpretable model on the dataset $\mathbf{X}$, using the black box predictions as the outcome.
43 | 1. Congratulations! You now have a surrogate model.
44 | 1. Measure how well the surrogate model replicates the predictions of the black box model.
45 | 1. Interpret the surrogate model.
46 |
47 | You may find approaches for surrogate models that have some extra steps or differ a little, but the general idea is usually as described here.
48 |
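A minimal sketch of these steps, assuming the `randomForest` and `rpart` packages; a random forest on the built-in `mtcars` data stands in for the black box model:

```r
library(randomForest)
library(rpart)

bb = randomForest(mpg ~ ., data = mtcars)  # the "black box" model
surr_data = mtcars
surr_data$mpg = predict(bb, mtcars)        # step 2: get the black box predictions
surr = rpart(mpg ~ ., data = surr_data)    # step 4: train the interpretable model on X and the predictions
```
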
49 | One way to measure how well the surrogate replicates the black box model is the R-squared measure:
50 |
51 | $$R^2=1 - \frac{SSE}{SST} = 1 - \frac{\sum_{i=1}^n (\hat{y}_*^{(i)} - \hat{y}^{(i)})^2}{\sum_{i=1}^n (\hat{y}^{(i)} - \bar{\hat{y}})^2}$$
52 |
53 | where $\hat{y}_*^{(i)}$ is the surrogate model's prediction for the i-th instance, $\hat{y}^{(i)}$ the prediction of the black box model, and $\bar{\hat{y}}$ the mean of the black box model predictions.
54 | SSE stands for sum of squares error and SST for sum of squares total.
55 | The R-squared measure can be interpreted as the percentage of variance that is captured by the surrogate model.
56 | If R-squared is close to 1 (= low SSE), then the interpretable model approximates the behavior of the black box model very well.
57 | If the interpretable model is very close, you might want to replace the complex model with the interpretable model.
58 | If the R-squared is close to 0 (= high SSE), then the interpretable model fails to explain the black box model.
59 |
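Continuing the sketch above, the R-squared measure (step 6) can be computed directly from this formula:

```r
# R-squared of the surrogate with respect to the black box predictions
surr_pred = predict(surr, mtcars)
bb_pred   = surr_data$mpg
1 - sum((surr_pred - bb_pred)^2) / sum((bb_pred - mean(bb_pred))^2)
```
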
60 | Note that we have not talked about the model performance of the underlying black box model, i.e., how good or bad it performs in predicting the actual outcome.
61 | The performance of the black box model does not play a role in training the surrogate model.
62 | The interpretation of the surrogate model is still valid because it makes statements about the model and not about the real world.
63 | But of course, the interpretation of the surrogate model becomes irrelevant if the black box model is bad because then the black box model itself is irrelevant.
64 |
65 |
66 | We could also build a surrogate model based on a subset of the original data or re-weight the instances.
67 | In this way, we change the distribution of the surrogate model's input, which changes the focus of the interpretation (then it is no longer really global).
68 | If we weight the data locally around a specific instance (the closer an instance is to the selected instance, the higher its weight), we get a local surrogate model that can explain the individual prediction of that instance.
69 |
70 | ## Example
71 |
72 | ```{r}
73 | #| label: prep-surrogate-bike
74 | pred.bike = Predictor$new(bike_svm, data = bike_train)  # wrap the SVM in an iml Predictor
75 | tree = TreeSurrogate$new(pred.bike)  # fit a decision tree surrogate
76 | pred.tree = predict(tree, bike_test)  # surrogate predictions on the test data
77 | pred.svm = predict(bike_svm, bike_test)  # black box predictions on the test data
78 | rsq = round(cor(pred.tree, pred.svm)^2, 2)  # squared correlation as R-squared measure
79 | ```
80 |
81 |
82 | To demonstrate the surrogate models, we consider a regression and a classification example.
83 |
84 | First, we train a support vector machine to predict the [daily number of rented bikes](#bike-data) given weather and calendar information.
85 | The support vector machine is not very interpretable, so we train a surrogate with a CART decision tree as an interpretable model using the original training data to approximate the behavior of the support vector machine.
86 | The surrogate model shown in @fig-surrogate-bike has an R-squared (variance explained) of `r rsq` on the test data, which means it approximates the underlying black box behavior quite well, but not perfectly.
87 | If the fit were perfect, we could throw away the support vector machine and use the tree instead.
88 | The distributions in the nodes show that the surrogate tree predicts a higher number of rented bikes when the temperature is above 13 degrees Celsius and when the count two days before was higher.
89 |
90 | ```{r}
91 | #| label: fig-surrogate-bike
92 | #| fig-cap: The terminal nodes of a surrogate tree that approximates the predictions of a support vector machine trained on the bike rental dataset.
93 | plot(tree)
94 | ```
95 |
96 |
97 | ```{r}
98 | #| label: fig-surrogate-penguins-prepare
99 | pred.penguins = Predictor$new(pengu_rf, data = penguins_train, type = "prob")  # wrap the random forest in an iml Predictor
100 | tree.penguins = TreeSurrogate$new(pred.penguins, maxdepth = 2)  # shallow decision tree surrogate
101 | pred.tree.penguins = predict(tree.penguins, penguins_test, type = "prob")  # surrogate predictions
102 | pred.penguins = predict(pengu_rf, penguins_test, type = "prob")  # black box predictions
103 | rsq = round(cor(pred.tree.penguins[, "female"], pred.penguins[, "female"])^2, 2)  # squared correlation as R-squared measure
104 | ```
105 |
106 |
107 |
108 | In our second example, we classify whether a [penguin is female or male](#penguins) with a random forest, again training a decision tree on the original dataset, but with the prediction of the random forest as the outcome, instead of the real classes (male/female) from the data.
109 | The surrogate model shown in @fig-surrogate-penguins has an R-squared (variance explained) of `r rsq`, which means it does approximate the random forest somewhat, but not perfectly.
110 |
111 | ```{r}
112 | #| label: fig-surrogate-penguins
113 | #| fig-cap: "The terminal nodes of a surrogate tree that approximates the predictions of a random forest trained on the penguins dataset. The counts in the nodes show the frequency of the black box models classifications in the nodes."
114 | plot(tree.penguins) +
115 | theme(strip.text.x = element_text(size = 8))
116 | ```
117 |
118 |
119 | ## Strengths
120 |
121 | The surrogate model method is **flexible**:
122 | Any interpretable model can be used.
123 | This also means that you can exchange not only the interpretable model, but also the underlying black box model.
124 | Suppose you create some complex model and explain it to different teams in your company.
125 | One team is familiar with linear models, and the other team can understand decision trees.
126 | You can train two surrogate models (linear model and decision tree) for the original black box model and offer two kinds of explanations.
127 | If you find a better-performing black box model, you do not have to change your method of interpretation because you can use the same class of surrogate models.
128 |
129 | I would argue that the approach is very **intuitive** and straightforward.
130 | This means it is easy to implement, but also easy to explain to people not familiar with data science or machine learning.
131 |
132 | With the **R-squared measure**, we can easily measure how good our surrogate models are in approximating the black box predictions.
133 |
134 | ## Limitations
135 |
136 | You have to be aware that you draw **conclusions about the model and not about the data**, since the surrogate model never sees the real outcome.
137 |
138 | It's not clear what the best **cut-off for R-squared** is in order to be confident that the surrogate model is close enough to the black box model.
139 | 80% of variance explained?
140 | 50%?
141 | 99%?
142 |
143 | We can measure how close the surrogate model is to the black box model.
144 | Let's assume we are not very close, but close enough.
145 | It could happen that the interpretable model is very **close for one subset of the dataset, but widely divergent for another subset**.
146 | In this case, the interpretation for the simple model would not be equally good for all data points.
147 |
148 | The interpretable model you choose as a surrogate **comes with all its advantages and disadvantages**.
149 |
150 | Some people argue that there are, in general, **no intrinsically interpretable models** (including even linear models and decision trees), and that it would even be dangerous to have an illusion of interpretability.
151 | If you share this opinion, then of course this method is not for you.
152 |
153 | ## Software
154 |
155 | I used the `iml` R package for the examples.
156 | If you can train a machine learning model, then you should be able to implement surrogate models yourself.
157 | Simply train an interpretable model to predict the predictions of the black box model.
158 |
159 |
--------------------------------------------------------------------------------
/manuscript/goals.qmd:
--------------------------------------------------------------------------------
1 | # Goals of Interpretability {#goals}
2 |
3 | Interpretability is not an end in itself, but a means to an end.
4 | Which interpretability approach to use depends on your specific goals, and these goals deserve more discussion.[^also-my-bad]
5 | Inspired by @adadi2018peeking, I discuss three goals of interpretability: improve the model, justify the model and predictions, and discover insights.[^three-instead-of-four]
6 |
7 | ## Improving the model
8 |
9 | ::: {.callout-tip}
10 |
11 | ## Always evaluate performance
12 |
13 | When determining your interpretability goals, evaluate your model's performance metrics first.
14 | This can help you identify if your current goal should be model improvement.
15 |
16 | :::
17 |
18 | You can use interpretability methods to improve the model.
19 | In the [introduction](#intro), we talked about Clever Hans predictors, which refers to models that have learned to take "shortcuts," like relying on non-causal features to make predictions.
20 | The tricky thing about these shortcuts is that they often don't decrease model performance -- they might actually increase it.
21 | Examples include relying on snow in the background to classify whether a picture shows a wolf or a dog [@ribeiro2016why], or misleadingly predicting that asthma patients admitted to the emergency room are less likely to die of pneumonia [@caruana2015intelligible].[^pneumonia]
22 | Interpretability helps to **debug the model** by identifying when the model takes such unwanted shortcuts or makes other mistakes.
23 | Some of the bugs may be as simple as the wrong encoding of the target feature, or an error in feature engineering.
24 | Do feature effects contradict your domain knowledge?
25 | You may have switched target classes.
26 | A feature you know is important isn't used by the model according to your investigations?
27 | You may have made a mistake in data processing or feature engineering.
28 |
29 | I've also used interpretability methods in machine learning competitions to identify important features so that I can **get ideas for feature engineering**.
30 | For example, I participated in a competition to predict water supply, and through feature importance, I realized that the snow in the surrounding mountains was the most important feature.
31 | So I decided to try out alternative snow data sources that might make this feature even better, and invested time in feature engineering.
32 | In the end, it helped improve the model performance.
33 |
34 |
35 | ## Justify model and predictions
36 |
37 | Interpretable machine learning helps justify the model and its predictions to other people or entities.
38 | It's helpful to think of the stakeholders of a machine learning system [@tomsett2018interpretable]:
39 |
40 | - **Creators** build the system and train the model.
41 | - **Operators** interact with the system directly.
42 | - **Executors** make decisions based on the outputs.
43 | - **Decision subjects** are affected by the decisions.
44 | - **Auditors** audit and investigate the system.
45 | - **Data subjects** are people whose data the model is trained on.
46 |
47 | These stakeholders want justification of the model and its predictions and may require very different types of justification.
48 |
49 | A deliverable of the machine learning competition I participated in was to generate reports that explain the water supply forecasts.
50 | These reports and explanations are for hydrologists and officials who have to make decisions based on these water supply forecasts (Executors).
51 | Let's say the model predicts an unusually low water supply; it would mean that officials would have to issue drought contingency plans and adjust water allocations.
52 | Rather than blindly trusting the predictions, the decision maker may want to verify the predictions by looking at the explanations of **why** this particular prediction was made.
53 | And if the explanation conflicts with domain knowledge, the decision maker might question the forecast and investigate.
54 |
55 | In general, contesting a prediction made by a machine learning system requires interpretability of the system.
56 | Imagine that a machine learning system rejects a loan application.
57 | For a person (decision subject) to contest that rejection, there needs to be some justification for why that prediction was made.
58 | This concept is called recourse.
59 |
60 | Another example: If a company wants to build a medical device, it has to go through a lot of regulatory processes to show that the device is safe and effective.
61 | If the device relies on machine learning, things get more complicated since the company also has to show that the machine learning model works as intended.
62 | Interpretable machine learning is part of the solution to justify the model to the regulators (Auditors) who will either approve or reject the medical device.
63 |
64 | ## Discover insights
65 |
66 | Machine learning models are not only used for making predictions; they can also be used to make decisions or to study the relationship between features and the target.
67 | In both cases, predictions alone are not enough; we want to extract additional insights from the model.
68 |
69 | A churn prediction model predicts how likely a person is to cancel their mobile phone contract, for example.
70 | The marketing team may rely on the model to make decisions about marketing campaigns.
71 | But without knowing **why** a person is likely to churn, it's difficult to design an effective response.
72 |
73 |
74 | More and more scientists are also applying machine learning to their research questions.
75 | For example, @zhang2019california used random forests to predict orchard almond yields based on fertilizer use.
76 | Prediction is not enough: they also used interpretability methods to extract how the different features, including fertilizer, affect the predicted yield.
77 | You need interpretability to extract the learned relationships between the features and the prediction.
78 | Otherwise, all you have is a function to make predictions.
79 |
80 |
81 | ::: {.callout-tip}
82 |
83 | ## How to use machine learning in science
84 |
85 | ::: {layout="[5,1.7]"}
86 |
87 | ::: {#first-column}
88 |
89 | Using machine learning in science is a much deeper philosophical question and requires more than just thinking about interpretability.
90 | That's why Timo Freiesleben and I have written a book dedicated to justifying machine learning for science.
91 |
92 | You can read it for free here: [ml-science-book.com](https://ml-science-book.com/)
93 |
94 | :::
95 |
96 | ::: {#second-column}
97 |
98 | {width=90%}
99 |
100 | :::
101 |
102 | :::
103 |
104 | :::
105 |
106 |
107 | Interpretable machine learning is useful not only for learning about the data, but also for learning about the model.
108 | For example, if you want to learn about how convolutional neural networks work, you can use interpretability to study what concepts individual neurons react to.
109 |
110 |
111 | What are your goals in your machine learning project, and how can interpretability help you?
112 | Your goals will determine which interpretability approaches and methods to use.
113 | In the next chapter, we will take a look at the landscape of methods and discuss how they relate to your goals.
114 |
115 |
116 | [^also-my-bad]: When researchers propose new interpretable models or interpretability methods, they rarely discuss what *specific* goals they serve. I'm not excluding myself here: For example, I did not introduce this chapter on goals until the third edition of the book.
117 |
118 | [^three-instead-of-four]: The paper by @adadi2018peeking additionally introduced "control", which refers to debugging the model and finding errors, but I subsumed this under "improvement".
119 |
120 | [^pneumonia]: A model predicted that patients who came to the emergency room with pneumonia were less likely to die from pneumonia when they had asthma, despite asthma being known as a risk factor for pneumonia. Only because the researchers used an interpretable model did they discover that the model had learned this relationship. But it's an unwanted "shortcut": Asthma patients were treated earlier and more aggressively with antibiotics, so they were in fact less likely to develop severe pneumonia. The model learned the shortcut (asthma $\Rightarrow$ lowered risk of dying from pneumonia) because it lacked features about the later treatment.
121 |
--------------------------------------------------------------------------------
/manuscript/html/epub.css:
--------------------------------------------------------------------------------
1 | /* This defines styles and classes used in the book */
2 | @page {
3 | margin: 10px;
4 | }
5 | html, body, div, span, applet, object, iframe, h1, h2, h3, h4, h5, h6, p,
6 | blockquote, pre, a, abbr, acronym, address, big, cite, code, del, dfn, em, img,
7 | ins, kbd, q, s, samp, small, strike, strong, sub, sup, tt, var, b, u, i, center,
8 | fieldset, form, label, legend, table, caption, tbody, tfoot, thead, tr, th, td,
9 | article, aside, canvas, details, embed, figure, figcaption, footer, header,
10 | hgroup, menu, nav, output, ruby, section, summary, time, mark, audio, video, ol,
11 | ul, li, dl, dt, dd {
12 | margin: 0;
13 | padding: 0;
14 | border: 0;
15 | font-size: 100%;
16 | vertical-align: baseline;
17 | }
18 | html {
19 | line-height: 1.2;
20 | font-family: Georgia, serif;
21 | color: #1a1a1a;
22 | }
23 | p {
24 | text-indent: 0;
25 | margin: 1em 0;
26 | widows: 2;
27 | orphans: 2;
28 | }
29 | a, a:visited {
30 | color: #1a1a1a;
31 | }
32 | img {
33 | max-width: 100%;
34 | }
35 | sup {
36 | vertical-align: super;
37 | font-size: smaller;
38 | }
39 | sub {
40 | vertical-align: sub;
41 | font-size: smaller;
42 | }
43 | h1 {
44 | margin: 3em 0 0 0;
45 | font-size: 2em;
46 | page-break-before: always;
47 | line-height: 150%;
48 | }
49 | h2 {
50 | margin: 1.5em 0 0 0;
51 | font-size: 1.5em;
52 | line-height: 135%;
53 | }
54 | h3 {
55 | margin: 1.3em 0 0 0;
56 | font-size: 1.3em;
57 | }
58 | h4 {
59 | margin: 1.2em 0 0 0;
60 | font-size: 1.2em;
61 | }
62 | h5 {
63 | margin: 1.1em 0 0 0;
64 | font-size: 1.1em;
65 | }
66 | h6 {
67 | font-size: 1em;
68 | }
69 | h1, h2, h3, h4, h5, h6 {
70 | text-indent: 0;
71 | text-align: left;
72 | font-weight: bold;
73 | page-break-after: avoid;
74 | page-break-inside: avoid;
75 | }
76 |
77 | ol, ul {
78 | margin: 1em 0 0 1.7em;
79 | }
80 | li > ol, li > ul {
81 | margin-top: 0;
82 | }
83 | blockquote {
84 | margin: 1em 0 1em 1.7em;
85 | }
86 | code {
87 | font-family: Menlo, Monaco, 'Lucida Console', Consolas, monospace;
88 | font-size: 85%;
89 | margin: 0;
90 | hyphens: manual;
91 | }
92 | pre {
93 | margin: 1em 0;
94 | overflow: auto;
95 | }
96 | pre code {
97 | padding: 0;
98 | overflow: visible;
99 | overflow-wrap: normal;
100 | }
101 | .sourceCode {
102 | background-color: transparent;
103 | overflow: visible;
104 | }
105 | hr {
106 | background-color: #1a1a1a;
107 | border: none;
108 | height: 1px;
109 | margin: 1em 0;
110 | }
111 | table {
112 | margin: 1em 0;
113 | border-collapse: collapse;
114 | width: 100%;
115 | overflow-x: auto;
116 | display: block;
117 | }
118 | table caption {
119 | margin-bottom: 0.75em;
120 | }
121 | tbody {
122 | margin-top: 0.5em;
123 | border-top: 1px solid #1a1a1a;
124 | border-bottom: 1px solid #1a1a1a;
125 | }
126 | th, td {
127 | padding: 0.25em 0.5em 0.25em 0.5em;
128 | }
129 | th {
130 | border-top: 1px solid #1a1a1a;
131 | }
132 | header {
133 | margin-bottom: 4em;
134 | text-align: center;
135 | }
136 | #TOC li {
137 | list-style: none;
138 | }
139 | #TOC ul {
140 | padding-left: 1.3em;
141 | }
142 | #TOC > ul {
143 | padding-left: 0;
144 | }
145 | #TOC a:not(:hover) {
146 | text-decoration: none;
147 | }
148 | code {
149 | white-space: pre-wrap;
150 | }
151 | span.smallcaps {
152 | font-variant: small-caps;
153 | }
154 |
155 | /* This is the most compatible CSS, but it only allows two columns: */
156 | div.column {
157 | display: inline-block;
158 | vertical-align: top;
159 | width: 50%;
160 | }
161 | /* If you can rely on CSS3 support, use this instead: */
162 | /* div.columns {
163 | display: flex;
164 | gap: min(4vw, 1.5em);
165 | }
166 | div.column {
167 | flex: auto;
168 | overflow-x: auto;
169 | } */
170 |
171 | div.hanging-indent {
172 | margin-left: 1.5em;
173 | text-indent: -1.5em;
174 | }
175 | ul.task-list {
176 | list-style: none;
177 | }
178 | ul.task-list li input[type="checkbox"] {
179 | width: 0.8em;
180 | margin: 0 0.8em 0.2em -1.6em;
181 | vertical-align: middle;
182 | }
183 | .display.math {
184 | display: block;
185 | text-align: center;
186 | margin: 0.5rem auto;
187 | }
188 |
189 | /* For title, author, and date on the cover page */
190 | h1.title { }
191 | p.author { }
192 | p.date { }
193 |
194 | nav#toc ol, nav#landmarks ol {
195 | padding: 0;
196 | margin-left: 1em;
197 | }
198 | nav#toc ol li, nav#landmarks ol li {
199 | list-style-type: none;
200 | margin: 0;
201 | padding: 0;
202 | }
203 | a.footnote-ref {
204 | vertical-align: super;
205 | }
206 | em, em em em, em em em em em {
207 | font-style: italic;
208 | }
209 | em em, em em em em {
210 | font-style: normal;
211 | }
212 | q {
213 | quotes: "“" "”" "‘" "’";
214 | }
215 | @media screen { /* Workaround for iBooks issue; see #6242 */
216 | .sourceCode {
217 | overflow: visible !important;
218 | white-space: pre-wrap !important;
219 | }
220 | }
221 |
--------------------------------------------------------------------------------
/manuscript/html/purchase.html:
--------------------------------------------------------------------------------
1 |
2 |
120 |
121 |
181 |
--------------------------------------------------------------------------------
/manuscript/html/style.scss:
--------------------------------------------------------------------------------
1 | /*-- scss:defaults --*/
2 |
3 | $font-family-sans-serif: IBM Plex Sans;
4 | // $font-family-monospace: 'IBM Plex Mono';
5 |
6 | /*-- scss:rules --*/
7 |
8 | @import url('https://fonts.googleapis.com/css2?family=IBM+Plex+Sans:ital,wght@0,400;0,500;0,700;1,400&display=swap');
9 | @import url('https://fonts.googleapis.com/css2?family=IBM+Plex+Mono:ital,wght@0,400;0,500;0,700;1,400&display=swap');
10 |
11 |
12 | h2 {
13 | border: none;
14 | padding-bottom: 0;
15 | }
16 |
17 |
18 |
--------------------------------------------------------------------------------
/manuscript/ice.qmd:
--------------------------------------------------------------------------------
1 | # Individual Conditional Expectation (ICE) {#ice}
2 |
3 | {{< include _setup.qmd >}}
4 |
5 | Individual Conditional Expectation (ICE) plots display one line per instance that shows how the instance's prediction changes when a feature changes.
6 | An ICE plot [@goldstein2015peeking] visualizes this dependence of the prediction on the feature for *each* instance of the dataset separately.
7 | The values for a line (and one instance) can be computed by keeping all other features the same, creating variants of this instance by replacing the feature's value with values from a grid, and making predictions with the black box model for these newly created instances.
8 | The result is a set of points for an instance with the feature value from the grid and the respective predictions.
9 | In other words, ICE plots are all the [ceteris paribus curves](#ceteris-paribus) for a dataset in one plot.
10 |
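A minimal sketch of how a single ICE curve can be computed by hand, assuming a fitted model with a `predict()` method; a linear model on the built-in `mtcars` data stands in for the black box:

```r
fit = lm(mpg ~ wt + hp, data = mtcars)          # stand-in for the black box model
instance = mtcars[1, ]                          # the instance to explain
grid = seq(min(mtcars$wt), max(mtcars$wt), length.out = 20)

# Keep all other features fixed, vary only the feature of interest
variants = instance[rep(1, length(grid)), ]
variants$wt = grid
ice_curve = predict(fit, newdata = variants)    # one ICE line: grid vs. predictions
plot(grid, ice_curve, type = "l", xlab = "wt", ylab = "Predicted mpg")
```
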
11 | ## Examples
12 |
13 | @fig-ice-bike shows ICE plots for the [bike rental prediction](#bike-data).
14 | The underlying prediction model is a random forest.
15 | All curves seem to follow the same course, so there are no obvious interactions.
16 |
17 | ::: {layout-ncol=1}
18 |
19 | ```{r}
20 | #| label: fig-ice-bike
21 | #| fig-cap: "ICE plots of predicted bike rentals by temperature, humidity, and windspeed."
22 | #| fig-asp: 0.4
23 | #| out-width: 95%
24 | pred.bike = Predictor$new(bike_rf, bike_test)
25 |
26 | p1 = FeatureEffect$new(pred.bike, "temp", method = "ice")$plot() +
27 | scale_x_continuous("Temperature") +
28 | scale_y_continuous("Predicted bike rentals")
29 |
30 | p2 = FeatureEffect$new(pred.bike, "hum", method = "ice")$plot() +
31 | scale_x_continuous("Humidity") +
32 | scale_y_continuous("") +
33 | theme(axis.title.y = element_blank(),
34 | axis.text.y = element_blank())
35 |
36 | p3 = FeatureEffect$new(pred.bike, "windspeed", method = "ice")$plot() +
37 | scale_x_continuous("Windspeed") +
38 | scale_y_continuous("") +
39 | theme(axis.title.y = element_blank(),
40 | axis.text.y = element_blank())
41 |
42 | # Combine plots with reduced spacing
43 | (p1 | p2 | p3) + plot_layout(guides = "collect", widths = c(1, 1, 1))
44 |
45 |
46 | ```
47 |
48 | ```{r}
49 | #| label: fig-ice-bike-colored
50 | #| fig-cap: ICE curves for the random forest predicting bike rentals. Lines are colored by the season. Above the ICE plots are boxplots showing the distributions of humidity per season.
51 | #| out-width: 85%
52 | dat = FeatureEffect$new(pred.bike, "hum", method = "ice")$results  # raw ICE curve data
53 | bike2 = bike_test
54 | bike2$.id = 1:nrow(bike_test)  # instance ids for merging
55 |
56 | dat = merge(bike2[c(".id", "season")], dat, by = ".id")  # attach the season to each curve
57 | iceplot = ggplot(dat) +
58 | geom_line(aes(group=.id, x=hum, y=.value, color=season), alpha=0.3) +
59 | scale_y_continuous("Predicted bike rentals", limits=c(0, NA)) +
60 | scale_color_viridis(discrete=TRUE) +
61 | scale_x_continuous("Humidity")
62 |
63 | iceplot
64 | ```
65 |
66 | :::
67 |
68 |
69 | But we can also explore possible interactions by modifying the ICE plot.
70 | @fig-ice-bike-colored shows again the ICE plot for humidity, with the difference that the lines are now colored by the season.
71 | This shows a couple of things:
72 | First -- and that's not surprising -- different seasons have different "intercepts".
73 | This means that, for example, winter days have lower predictions and summer days the highest, independent of the humidity.
74 | But @fig-ice-bike-colored also shows that the effect of the humidity differs for the seasons:
75 | In winter, an increase in humidity only slightly reduces the predicted number of bike rentals.
76 | For summer, the predicted bike rentals stay more or less flat between 20% and 60% relative humidity and above 60% they drop by quite a bit.
77 | Humidity effects for spring and fall seem to be a mix of the "winter flatness" and the "summer jump".
78 | However, as indicated by the boxplots in @fig-ice-bike-colored, we shouldn't over-interpret very low humidity effects for summer and fall.
79 |
80 | ::: {.callout-tip}
81 |
82 | # Use transparency and color
83 |
84 | If lines overlap heavily in an ICE plot, you can try making them slightly transparent.
85 | If that doesn't help, you may be better off with a [partial dependence plot](#pdp).
86 | By coloring the lines based on another feature's value, you can study interactions.
87 |
88 | :::
89 |
90 |
91 |
92 | Let's go back to the [penguin classification task](#penguins) and see how the prediction of each instance is related to the feature `bill_length_mm`.
93 | We'll analyze a random forest that predicts the probability of a penguin being female given body measurements.
94 | @fig-ice-penguins is a rather ugly ICE plot.
95 | But sometimes that's the reality.
96 | The reason is that the model is rather confident for most penguins, so the predicted probabilities jump between 0 and 1.
97 |
98 | ```{r}
99 | #| label: fig-ice-penguins
100 | #| fig-cap: ICE plot of P(female) by bill length. Each line represents a penguin.
101 | #| out-width: 80%
102 | pred.penguins = Predictor$new(pengu_rf, penguins_test, class = "female")
103 | ice = FeatureEffect$new(pred.penguins, "bill_length_mm", method = "ice")$plot() +
104 | scale_color_discrete(guide='none') +
105 | scale_y_continuous('P(female)') +
106 | scale_x_continuous("Bill length in mm")
107 | ice
108 | ```
109 |
110 |
111 | ## Centered ICE plot
112 |
113 | There's a problem with ICE plots:
114 | Sometimes it can be hard to tell whether the ICE curves differ between data points because they start at different predictions.
115 | A simple solution is to center the curves at a certain point in the feature and display only the difference in the prediction to this point.
116 | The resulting plot is called the centered ICE plot (c-ICE).
117 | Anchoring the curves at the lower end of the feature is a good choice.
118 | Each curve is defined as:
119 |
120 | $$ICE^{(i)}_j(x_j) = \hat{f}(x_j, \mathbf{x}^{(i)}_{-j}) - \hat{f}(a, \mathbf{x}_{-j}^{(i)})$$
121 |
122 |
123 | where $\hat{f}$ is the fitted model, and $a$ is the anchor point.
124 |
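Continuing the manual ICE sketch from the beginning of this chapter, centering is just a subtraction; since the grid starts at the observed minimum of `wt`, the centered curve starts at zero:

```r
# Prediction at the anchor point a (here: the minimum of wt)
anchor = variants[1, ]
anchor$wt = min(mtcars$wt)
ice_centered = ice_curve - predict(fit, newdata = anchor)
```
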
125 | Let's have a look at a centered ICE plot for temperature for the bike rental prediction:
126 |
127 | ```{r}
128 | #| label: ice-bike-centered
129 | #| fig-cap: "Centered ICE plots of predicted number of bikes by temperature. The lines show the difference in prediction compared to the prediction with the temperature fixed at its observed minimum."
130 | #| out-width: 80%
131 | predictor = Predictor$new(bike_rf, data = bike_test)
132 | ytext1 = sprintf("Difference to prediction at temp = %.1f", min(bike$temp))
133 | ice1 = FeatureEffect$new(predictor, feature = "temp", center.at = min(bike$temp), method = "ice")$plot() +
134 | scale_y_continuous(ytext1)
135 | #ytext2 = sprintf("Different to prediction at hum = %.1f", min(bike$hum))
136 | #ice2 = FeatureEffect$new(predictor, feature = "hum", center.at = min(bike$hum), method = "ice")$plot() +
137 | # scale_y_continuous(ytext2)
138 | #ytext3 = sprintf("Different to prediction at windspeed = %.1f", min(bike$windspeed))
139 | #ice3 = FeatureEffect$new(predictor, feature = "windspeed", center.at = min(bike$windspeed), method = "ice")$plot() +
140 | # scale_y_continuous(ytext3)
141 | #(ice1 | ice2 | ice3)
142 | ice1
143 | ```
144 |
145 | The centered ICE plots make it easier to compare the curves of individual instances.
146 | This can be useful if we do not want to see the absolute change of a predicted value, but the difference in the prediction compared to a fixed point of the feature range.
147 |
148 |
149 | ## Derivative ICE plot
150 |
151 | Another way to make it visually easier to spot heterogeneity is to look at the individual derivatives of the prediction function with respect to a feature.
152 | The resulting plot is called the derivative ICE plot (d-ICE).
153 | The derivatives of a function (or curve) tell you whether changes occur, and in which direction they occur.
154 | With the derivative ICE plot, it's easy to spot ranges of feature values where the black box predictions change for (at least some) instances.
155 | If there is no interaction between the analyzed feature $X_j$ and the other features $X_{-j}$, then the prediction function can be expressed as:
156 |
157 | $$\hat{f}(\mathbf{x}) = \hat{f}(x_j, \mathbf{x}_{-j}) = g(x_j) + h(\mathbf{x}_{-j}), \quad\text{with}\quad\frac{\partial \hat{f}(\mathbf{x})}{\partial x_j} = g'(x_j)$$
158 |
159 | Without interactions, the individual partial derivatives should be the same for all instances.
160 | If they differ, it's due to interactions, and it becomes visible in the d-ICE plot.
161 | In addition to displaying the individual curves for the derivative of the prediction function with respect to feature $x_j$, showing the standard deviation of the derivatives helps to highlight regions of $x_j$ with heterogeneity in the estimated derivatives.
162 | The derivative ICE plot takes a long time to compute and is rather impractical.
163 |
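A rough sketch of a derivative ICE curve via finite differences, again continuing the manual ICE example from the beginning of this chapter:

```r
# Approximate the partial derivative of the ICE curve along the grid
d_ice = diff(ice_curve) / diff(grid)
plot(grid[-1], d_ice, type = "l", xlab = "wt", ylab = "Approx. partial derivative")
```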
164 |
165 | ## Strengths
166 |
167 | Individual conditional expectation curves are **intuitive to understand**.
168 | One line represents the predictions for one instance if we vary the feature of interest.
169 |
170 | ICE curves can **uncover heterogeneous relationships**.
171 |
172 | ## Limitations
173 |
174 | ICE curves **can only display one feature** meaningfully, because two features would require the drawing of several overlaying surfaces, and you would not see anything in the plot.
175 |
176 | ICE curves suffer from correlation: If the feature of interest is correlated with the other features, then **some points in the lines might be invalid data points** according to the joint feature distribution.
177 |
178 | If many ICE curves are drawn, the **plot can become overcrowded**, and you will not see anything.
179 | The solution: Either add some transparency to the lines or draw only a sample of the lines.
180 |
181 | In ICE plots it might not be easy to **see the average**.
182 | This has a simple solution:
183 | Combine individual conditional expectation curves with the [partial dependence plot](#pdp).
184 |
185 | ## Software and alternatives
186 |
187 | ICE plots are implemented in the R packages `iml` [@molnar2018iml] (used for these examples), `ICEbox`, and `pdp`.
188 | Another R package that does something very similar to ICE is `condvis`.
189 | In Python, you can use [PiML](https://selfexplainml.github.io/PiML-Toolbox/_build/html/index.html) [@sudjianto2023piml].
190 |
191 |
--------------------------------------------------------------------------------
/manuscript/images/a484.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/a484.jpg
--------------------------------------------------------------------------------
/manuscript/images/access-denied.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/access-denied.jpg
--------------------------------------------------------------------------------
/manuscript/images/access-denied.xcf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/access-denied.xcf
--------------------------------------------------------------------------------
/manuscript/images/activation-optim.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/activation-optim.jpg
--------------------------------------------------------------------------------
/manuscript/images/adversarial-1pixel.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/adversarial-1pixel.jpg
--------------------------------------------------------------------------------
/manuscript/images/adversarial-ostrich.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/adversarial-ostrich.jpg
--------------------------------------------------------------------------------
/manuscript/images/adversarial-panda.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/adversarial-panda.jpg
--------------------------------------------------------------------------------
/manuscript/images/adversarial-toaster.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/adversarial-toaster.jpg
--------------------------------------------------------------------------------
/manuscript/images/adversarial-turtle.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/adversarial-turtle.jpg
--------------------------------------------------------------------------------
/manuscript/images/agnostic-black-box.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/agnostic-black-box.jpg
--------------------------------------------------------------------------------
/manuscript/images/amazon-freq-bought-together.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/amazon-freq-bought-together.jpg
--------------------------------------------------------------------------------
/manuscript/images/anchors-process.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/anchors-process.jpg
--------------------------------------------------------------------------------
/manuscript/images/anchors-visualization.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/anchors-visualization.jpg
--------------------------------------------------------------------------------
/manuscript/images/anchors.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/anchors.jpg
--------------------------------------------------------------------------------
/manuscript/images/anchors1-1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/anchors1-1.jpg
--------------------------------------------------------------------------------
/manuscript/images/anchors2-1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/anchors2-1.jpg
--------------------------------------------------------------------------------
/manuscript/images/anchors3-1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/anchors3-1.jpg
--------------------------------------------------------------------------------
/manuscript/images/anchors4-1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/anchors4-1.jpg
--------------------------------------------------------------------------------
/manuscript/images/arch-compare.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/arch-compare.jpg
--------------------------------------------------------------------------------
/manuscript/images/big-picture.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/big-picture.jpg
--------------------------------------------------------------------------------
/manuscript/images/big-picture.xcf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/big-picture.xcf
--------------------------------------------------------------------------------
/manuscript/images/black-box.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/black-box.jpg
--------------------------------------------------------------------------------
/manuscript/images/burnt-earth.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/burnt-earth.jpg
--------------------------------------------------------------------------------
/manuscript/images/burnt-earth.xcf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/burnt-earth.xcf
--------------------------------------------------------------------------------
/manuscript/images/by-nc-sa.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/by-nc-sa.jpg
--------------------------------------------------------------------------------
/manuscript/images/cfexp-nsgaII.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/cfexp-nsgaII.jpg
--------------------------------------------------------------------------------
/manuscript/images/cheatsheet-logistic-regression.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/cheatsheet-logistic-regression.jpg
--------------------------------------------------------------------------------
/manuscript/images/cheatsheet-shap.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/cheatsheet-shap.jpg
--------------------------------------------------------------------------------
/manuscript/images/cnn features-1.xcf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/cnn features-1.xcf
--------------------------------------------------------------------------------
/manuscript/images/cnn-features.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/cnn-features.jpg
--------------------------------------------------------------------------------
/manuscript/images/cooks-analyzed-1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/cooks-analyzed-1.jpg
--------------------------------------------------------------------------------
/manuscript/images/cover-shap-book.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/cover-shap-book.jpg
--------------------------------------------------------------------------------
/manuscript/images/cover-sidepanel.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/cover-sidepanel.jpg
--------------------------------------------------------------------------------
/manuscript/images/cover-sml-science.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/cover-sml-science.jpg
--------------------------------------------------------------------------------
/manuscript/images/cover.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/cover.jpg
--------------------------------------------------------------------------------
/manuscript/images/cp-ice-pdp.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/cp-ice-pdp.jpg
--------------------------------------------------------------------------------
/manuscript/images/culmen_depth.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/culmen_depth.jpg
--------------------------------------------------------------------------------
/manuscript/images/dissection-dog-exemplary.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/dissection-dog-exemplary.jpg
--------------------------------------------------------------------------------
/manuscript/images/dissection-dogs.jpeg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/dissection-dogs.jpeg
--------------------------------------------------------------------------------
/manuscript/images/dissection-network.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/dissection-network.jpg
--------------------------------------------------------------------------------
/manuscript/images/doctor-840127_1280.xcf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/doctor-840127_1280.xcf
--------------------------------------------------------------------------------
/manuscript/images/doge-stuck.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/doge-stuck.jpg
--------------------------------------------------------------------------------
/manuscript/images/doge-stuck.xcf:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/doge-stuck.xcf
--------------------------------------------------------------------------------
/manuscript/images/favicon.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/favicon.jpg
--------------------------------------------------------------------------------
/manuscript/images/feature-visualization-units.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/feature-visualization-units.jpg
--------------------------------------------------------------------------------
/manuscript/images/graph.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/graph.jpg
--------------------------------------------------------------------------------
/manuscript/images/handwritten-prototypes.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/handwritten-prototypes.jpg
--------------------------------------------------------------------------------
/manuscript/images/hospital.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/hospital.jpg
--------------------------------------------------------------------------------
/manuscript/images/iml.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/iml.jpg
--------------------------------------------------------------------------------
/manuscript/images/interpretable-box.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/interpretable-box.jpg
--------------------------------------------------------------------------------
/manuscript/images/learn-one-rule.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/learn-one-rule.jpg
--------------------------------------------------------------------------------
/manuscript/images/learner.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/learner.jpg
--------------------------------------------------------------------------------
/manuscript/images/lime-images-package-example-1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/lime-images-package-example-1.jpg
--------------------------------------------------------------------------------
/manuscript/images/lter_penguins.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/lter_penguins.jpg
--------------------------------------------------------------------------------
/manuscript/images/original-images-classification.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/original-images-classification.jpg
--------------------------------------------------------------------------------
/manuscript/images/pen.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/pen.jpg
--------------------------------------------------------------------------------
/manuscript/images/potato-chips.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/potato-chips.jpg
--------------------------------------------------------------------------------
/manuscript/images/programing-ml.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/programing-ml.jpg
--------------------------------------------------------------------------------
/manuscript/images/ramen.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/ramen.jpg
--------------------------------------------------------------------------------
/manuscript/images/rulefit.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/rulefit.jpg
--------------------------------------------------------------------------------
/manuscript/images/shap-clustering.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/shap-clustering.jpg
--------------------------------------------------------------------------------
/manuscript/images/shap-dependence-interaction.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/shap-dependence-interaction.jpg
--------------------------------------------------------------------------------
/manuscript/images/shap-dependence.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/shap-dependence.jpg
--------------------------------------------------------------------------------
/manuscript/images/shap-explain-1.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/shap-explain-1.jpg
--------------------------------------------------------------------------------
/manuscript/images/shap-explain-2.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/shap-explain-2.jpg
--------------------------------------------------------------------------------
/manuscript/images/shap-importance-extended.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/shap-importance-extended.jpg
--------------------------------------------------------------------------------
/manuscript/images/shap-importance.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/shap-importance.jpg
--------------------------------------------------------------------------------
/manuscript/images/shap-simplified-features.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/shap-simplified-features.jpg
--------------------------------------------------------------------------------
/manuscript/images/shap-superpixel.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/shap-superpixel.jpg
--------------------------------------------------------------------------------
/manuscript/images/shapl-importance.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/shapl-importance.jpg
--------------------------------------------------------------------------------
/manuscript/images/shapley-coalitions.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/shapley-coalitions.jpg
--------------------------------------------------------------------------------
/manuscript/images/shapley-instance-intervention.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/shapley-instance-intervention.jpg
--------------------------------------------------------------------------------
/manuscript/images/shapley-instance.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/shapley-instance.jpg
--------------------------------------------------------------------------------
/manuscript/images/smoothgrad.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/smoothgrad.jpg
--------------------------------------------------------------------------------
/manuscript/images/specific-black-box.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/specific-black-box.jpg
--------------------------------------------------------------------------------
/manuscript/images/spheres.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/spheres.jpg
--------------------------------------------------------------------------------
/manuscript/images/taxonomy.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/taxonomy.jpg
--------------------------------------------------------------------------------
/manuscript/images/tcav.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/tcav.jpg
--------------------------------------------------------------------------------
/manuscript/images/trippy.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/trippy.jpg
--------------------------------------------------------------------------------
/manuscript/images/units.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/units.jpg
--------------------------------------------------------------------------------
/manuscript/images/vanilla.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/manuscript/images/vanilla.jpg
--------------------------------------------------------------------------------
/manuscript/impressum.qmd:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/manuscript/index.qmd:
--------------------------------------------------------------------------------
1 | # About the Book {-}
2 |
3 | ## Summary
4 |
5 | Machine learning is part of our products, processes, and research.
6 | But **computers usually don't explain their predictions**, which can cause many problems, ranging from trust issues to undetected bugs.
7 | This book is about making machine learning models and their decisions interpretable.
8 |
9 | After exploring the concepts of interpretability, you will learn about simple, **interpretable models** such as decision trees and linear regression.
10 | The focus of the book is on model-agnostic methods for **interpreting black box models**.
11 | Some model-agnostic methods like LIME and Shapley values can be used to explain individual predictions, while other methods like permutation feature importance and accumulated local effects can be used to get insights about the more general relations between features and predictions.
12 | In addition, the book presents methods specific to deep neural networks.
13 |
14 | All interpretation methods are explained in depth and discussed critically.
15 | How do they work?
16 | What are their strengths and weaknesses?
17 | How do you interpret them?
18 | This book will enable you to select and correctly apply the interpretation method that is most suitable for your machine learning application.
19 | Reading the book is recommended for machine learning practitioners, data scientists, statisticians, and anyone else interested in making machine learning models interpretable.
20 |
21 | ## Why I wrote the book
22 |
23 |
24 | This book began as a side project while I was working as a statistician in clinical research.
25 | On my free day, I explored topics that interested me, and interpretable machine learning eventually caught my attention.
26 | Expecting plenty of resources on interpreting machine learning models, I was surprised to find only scattered research papers and blog posts, with no comprehensive guide.
27 | This motivated me to create the resource I wished existed -- a book to deepen my own understanding and share insights with others.
28 | Today this book is a go-to resource for interpreting machine learning models.
29 | Researchers have cited the book thousands of times, students have messaged me that it was essential to their theses, instructors use it in their classes, and data scientists in industry rely on it for their daily work and recommend it to their colleagues.
30 | The book has also been the foundation of my own career; first, it inspired me to do a PhD on interpretable machine learning, and later it encouraged me to become a self-employed writer, educator, and consultant.
31 |
32 | ## Who this book is for
33 |
34 | This book is for practitioners looking for an overview of techniques to make machine learning models more interpretable.
35 | It’s also valuable for students, teachers, researchers, and anyone interested in the topic.
36 | A basic understanding of machine learning and university-level math will help you follow the theory, but the intuitive explanations at the start of each chapter should be accessible without a math background.
37 |
38 | ## What's new in the 3rd edition?
39 |
40 | The 3rd edition is both a small and a big update.
41 | It's a small update because I only added two new method chapters, namely [LOFO](#lofo) and [Ceteris Paribus](#ceteris-paribus), and two introductory chapters: [Methods Overview](#overview) and [Goals of Interpretability](#goals).
42 | However, I also made bigger changes that are more subtle, but which I believe improve the book.
43 | I reorganized the introductory part so that it's leaner, yet more insightful.
44 | Further, I gave the data examples more depth (e.g., studying correlations and doing more nuanced analysis), and replaced the cancer dataset with the more accessible Palmer penguin dataset.
45 | To make the book more practical, I introduced tip and warning boxes to help you interpret machine learning models the right way.
46 | A huge change in the 3rd edition was also cleaning up the book's repository and rendering the book with Quarto instead of bookdown.
47 | For you as the reader, this is mainly visible in the appearance of the web version, but it also makes the book much easier for me to maintain, which will benefit the Interpretable Machine Learning book in the long run.
48 | I also fixed a lot of small things, which you can see in the [book repository's README](https://github.com/christophM/interpretable-ml-book), section "Changelog".
49 |
50 | ## About the author
51 |
52 | Hi!
53 | My name is Christoph Molnar.
54 | I write and teach about machine learning, specifically topics that go beyond merely predictive performance.
55 | I studied statistics, worked for a few years as a data scientist, did my PhD on interpretable machine learning, and now I'm a writer and also [offer workshops and consulting](https://christophmolnar.com/).
56 | To stay up to date with my work on machine learning, you can subscribe to my newsletter [Mindful Modeler](https://mindfulmodeler.substack.com/).
57 |
58 | ::: {.content-visible when-format="html"}
59 |
60 | 
61 |
62 | This book is licensed under the [CC BY-NC-SA 4.0](http://creativecommons.org/licenses/by-nc-sa/4.0/) license.
63 |
64 |
65 | :::
66 |
--------------------------------------------------------------------------------
/manuscript/intro.qmd:
--------------------------------------------------------------------------------
1 | # Introduction {#intro}
2 |
3 |
4 | "What's 2 + 5?" asked teacher Wilhelm van Osten.
5 | The answer, of course, was 7.
6 | The crowd that had gathered to witness this spectacle was amazed.
7 | Because it wasn't a human who answered, but a horse called "Clever Hans".
8 | Clever Hans could do math -- or so it seemed.
9 | 2 + 5?
10 | That's seven taps with the horse's foot and not one more.
11 | Quite impressive for a horse.
12 |
13 | And indeed, Clever Hans was very clever, as later investigations showed.
14 | But its skills were not in math, but in reading social cues.
15 | It turned out that an important success factor was that the human asking Hans knew the answer.
16 | Hans relied on the tiniest changes in the human's body language and facial expressions to stop tapping at the right time.
17 |
18 | ## Don't blindly trust model performance
19 |
20 | In machine learning, we have our own versions of this clever horse: Clever Hans Predictors, a term coined by @lapuschkinUnmaskingCleverHans2019.
21 | Some examples:
22 |
23 | - A machine learning model trained to detect whales learned to rely on artifacts in audio files instead of basing the classification on the audio content [@kaggle2013challenge].
24 | - An image classifier learned to use text on images instead of visual features [@lapuschkinUnmaskingCleverHans2019].
25 | - A wolf versus dog classifier relied on snow in the background instead of image regions that showed the animals [@ribeiro2016why].
26 |
27 |
28 | In all these examples, the flaws didn't lower the predictive performance on the test set.
29 | So it's not surprising that people are wary, even of well-performing models.
30 | They want to look inside the models, to make sure they are not taking shortcuts.
31 | And there are many other reasons to make models interpretable.
32 | For example, scientists are using machine learning in their work.
33 | In a survey asking scientists for their biggest concerns about using machine learning, the top answer was “Leads to more reliance on pattern recognition without understanding” [@vannoordenAIScienceWhat2023].
34 | This lack of understanding is not unique to science.
35 | If you work in marketing and build a churn model, you want to predict not only who is likely to churn, but also understand why.
36 | Otherwise, how would the marketing team know what the right response is?
37 | The team could send everyone a voucher, but what if the reason for the high churn probability is that customers are annoyed by too many emails?
38 | Good predictive performance alone wouldn't be enough to make full use of the churn model.
39 |
40 | Further, many data scientists and statisticians have told me that one reason they are using "simpler models" is that they couldn't convince their boss to use a "black box model".
41 | But what if the complex models make better predictions?
42 | Wouldn't it be great if you could have both good performance **and** interpretability?
43 |
44 | If you want to solve trust issues, gain insights into your models, and debug them better, you are reading the right book.
45 | Interpretable machine learning offers the tools to extract these insights from models.
46 |
47 | ## A young field with old roots
48 |
49 |
50 | Linear regression models were already used at the beginning of the 19th century [@legendre1806nouvelles; @gauss1877theoria].
52 | Statistical modeling grew up around the linear regression model, and today we have many more options, such as generalized additive models and LASSO, to name some popular model classes.
53 | In classic statistics, we typically model distributions and rely on further assumptions that allow us to draw conclusions about the world.
54 | To do that, interpretability is key.
55 | For example, statisticians who model the effect of drinking alcohol on the risk of cardiovascular problems need to be able to extract that insight from the model.
56 | This is typically done by keeping the model interpretable and having a coefficient that can be interpreted as the effect of a feature on the outcome.
57 |
58 | Machine learning has a different modeling approach.
59 | It's more task-driven and prediction-focused, and the emphasis is on algorithms rather than distributions.
60 | Typically, machine learning produces more complex models.
61 | Foundational work in machine learning began in the mid-20th century, and later developments expanded the field further in the latter half of the century.
62 | Neural networks, for example, go back to the 1960s [@schmidhuber2015deep], and rule-based machine learning, which is part of interpretable machine learning, has been an active research area since the mid-20th century.
63 | While not the main focus, interpretability has always been a concern in machine learning, and researchers have suggested ways to improve it:
64 | An example would be the random forest [@breiman2001random], which already came with a built-in feature importance measure.
65 |
66 | Interpretable Machine Learning, or Explainable AI, really exploded as a field around 2015 [@molnar2020interpretable].
67 | The subfield of model-agnostic interpretability in particular, which offers methods that work for any model, gained a lot of attention.
68 | New methods for the interpretation of machine learning models are still being published at breakneck speed.
69 | To keep up with everything that is published would be madness and simply impossible.
70 | That's why you will not find the most novel and fancy methods in this book, but established methods and basic concepts of machine learning interpretability.
71 | These basics prepare you for making machine learning models interpretable.
72 | Internalizing the basic concepts also empowers you to better understand and evaluate any new paper on interpretability published on the pre-print server arxiv.org in the last 5 minutes since you began reading this book (I might be exaggerating the publication rate).
73 |
74 |
75 | ## How to read the book
76 |
77 | You don't have to read the book cover to cover, since Interpretable Machine Learning is more of a reference book with most chapters describing one method.
78 | If you are new to interpretability, I recommend first reading the chapters on [Interpretability](#interpretability), [Goals](#goals), and [Methods Overview](#overview) to understand what interpretability is all about and to have a "map" where you can place each method.
79 |
80 | The book is organized into the following parts:
81 |
82 | - The introductory chapters, including interpretability definitions and methods overview
83 | - Interpretable models
84 | - Local model-agnostic methods
85 | - Global model-agnostic methods
86 | - Methods for neural networks
87 | - Outlook
88 | - Machine learning terminology
89 |
90 | Each method chapter follows a similar structure:
91 | The first paragraph summarizes the method, followed by an intuitive explanation that doesn't rely on math.
92 | Then we look into the theory of the method to get a deeper understanding of how it works, including math and algorithms.
93 | I believe that a new method is best understood using examples.
94 | Therefore, each method is applied to real data.
95 | Some people say that statisticians are very critical people.
96 | In my case this is true, because each chapter contains a critical discussion of the pros and cons of the respective interpretation method.
97 | This book is not an advertisement for the methods, but it should help you decide whether a method is a good fit for your project or not.
98 | In the last section of each chapter, I list available software implementations.
99 |
100 | I hope you will enjoy the read!
101 |
--------------------------------------------------------------------------------
/manuscript/latex/before_body.tex:
--------------------------------------------------------------------------------
1 | \thispagestyle{empty}
2 |
3 | \newcommand{\authorMail}{science-book@christophmolnar.com}
4 | \newcommand{\authorAddress}{%
5 |
6 | Christoph Molnar \\
7 | c/o DRA \\
8 | Bayerstraße 77c \\
9 | 80335 München, Germany \\
10 | }
11 | \newcommand{\isbn}{}
12 | \newcommand{\edition}{Third Edition}
13 | \newcommand{\publicationYear}{2025}
14 | % =========================================================================
15 | % Page with publishing details
16 | % =========================================================================
17 |
18 | \thispagestyle{empty}
19 | \begin{center}
20 | {
21 | \bfseries \sffamily \LARGE {{< meta title >}}\par
22 | \bfseries \small {{< meta subtitle >}}\par
23 | }
24 | \copyright~\publicationYear~\textit{Christoph Molnar}, Munich, Germany\\
25 |
26 | \vspace*{\baselineskip}
27 |
28 | This book is published under a CC BY-NC-SA 4.0 license.
29 |
30 |
31 | ISBN 978-3-911578-03-5 (paperback) \\
32 | ISBN 978-3-911578-04-2 (ebook) \\
33 |
34 | \publicationYear, \edition
35 |
36 | \textsc{}
37 |
38 | Publisher: Self-published
39 |
40 | \authorAddress
41 |
42 | \vspace*{1cm}
43 |
44 | \textit{commit id: \input{latex/hash.tex}}
45 |
46 | \end{center}
47 |
48 | % =========================================================================
49 | % Dedication / Empty page before outline
50 | % =========================================================================
51 | \newpage
52 | \thispagestyle{empty}
53 | \mbox{}
54 | \newpage
55 |
56 |
--------------------------------------------------------------------------------
/manuscript/latex/preamble.tex:
--------------------------------------------------------------------------------
1 | \usepackage{hyperref}
2 | % Decide for a font
3 | %\usepackage{Alegreya}
4 | %\usepackage[scale=.7]{sourcecodepro}
5 | % Make stuff less floaty
6 | \renewcommand{\textfraction}{0.05}
7 | \renewcommand{\topfraction}{0.8}
8 | \renewcommand{\bottomfraction}{0.8}
9 | \renewcommand{\floatpagefraction}{0.75}
10 |
11 | % creates a hanging indentation for figure captions
12 | \usepackage[format=plain]{caption}
13 | \captionsetup{
14 |   font={normalsize, sf},      % Normal font size and sans-serif style
15 |   labelfont={bf},             % Bold label
16 | textfont={it}, % Italicize the text
17 | justification=raggedright, % Left-align the caption
18 | singlelinecheck=false % Prevent centering for single-line captions
19 | }
20 |
21 | % for \argmin
22 | \usepackage{amsmath}
23 | \DeclareMathOperator*{\argmax}{arg\,max}
24 | \DeclareMathOperator*{\argmin}{arg\,min}
25 |
26 | % Header and footer configuration
27 | \usepackage[automark,headsepline]{scrlayer-scrpage}
28 | % Use scrpage2 instead of scrlayer-scrpage if you want nicer chapter boxes, see main/main.tex. --- WILL NOT WORK WITH TEXLIVE 2019! ----
29 | %\usepackage[automark,headsepline]{scrpage2}
30 | \pagestyle{scrheadings}
31 | %\ihead[\headmark]{\headmark}\ohead{\pagemark}
32 | % alternating chapter / section titles at the top of the page
33 | %\automark[section]{chapter}
34 | % Make heading of each page italics and small capitals.
35 | %\renewcommand*{\headfont}{\itshape\scshape}
36 | % Name of the chapter (\chapapp), number of the chapter (\thechapter) and no period (\autodot)
37 | %\renewcommand*{\chaptermarkformat}{\chapapp~\thechapter\autodot\enskip}
38 | % Removes page numbers from bottom
39 | %\ofoot{}
40 |
41 |
42 | % Pages for publishing, like empty pages
43 | \frontmatter
44 |
45 |
--------------------------------------------------------------------------------
/manuscript/math-terms.qmd:
--------------------------------------------------------------------------------
1 | ## Math Terms
2 |
3 | Math terms used throughout this book:
4 |
5 | | **Math Term** | **Meaning** |
6 | |----------------------|-----------------------------------------------------------------------------|
7 | | $X_j$ | Random variable corresponding to feature $j$. |
8 | | $X$ | Set of all random variables. |
9 | | $\mathbb{P}$ | Probability measure. |
10 | | $\mathbb{P}(X_j = c)$| Probability that random variable $X_j$ takes on the value $c$. |
11 | | $\mathbf{x}_j$ | Vector of values for feature $j$ across multiple data instances. |
12 | | $\mathbf{x}^{(i)}$ | Feature vector of the $i$-th instance. |
13 | | $x^{(i)}_j$ | Value of feature $j$ for the $i$-th instance. |
14 | | $y^{(i)}$ | Target value for the $i$-th instance. |
15 | | $\mathbf{X}$ | Feature matrix, where rows correspond to instances and columns to features.|
16 | | $\mathbf{y}$ | Vector of target values, one for each instance. |
17 | | $n$ | Number of data instances (rows in $\mathbf{X}$). |
18 | | $p$ | Number of features (columns in $\mathbf{X}$). |
19 |
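20 | To see how the notation fits together, consider a small generic illustration (not tied to any dataset in the book) with $n = 2$ instances and $p = 2$ features:
21 |
22 | $$
23 | \mathbf{X} = \begin{pmatrix} x^{(1)}_1 & x^{(1)}_2 \\ x^{(2)}_1 & x^{(2)}_2 \end{pmatrix}, \qquad \mathbf{y} = \begin{pmatrix} y^{(1)} \\ y^{(2)} \end{pmatrix}
24 | $$
25 |
26 | The $i$-th row of $\mathbf{X}$ is the feature vector $\mathbf{x}^{(i)}$ of instance $i$, and the $j$-th column is the feature vector $\mathbf{x}_j$.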
--------------------------------------------------------------------------------
/manuscript/neural-networks.qmd:
--------------------------------------------------------------------------------
1 |
2 |
--------------------------------------------------------------------------------
/manuscript/r-packages.qmd:
--------------------------------------------------------------------------------
1 | ## R packages used
2 |
3 | {{< include _setup.qmd >}}
4 |
5 | ```{r}
6 | #| label: create-references-for-r-packages
7 | pkgs <- cite_packages(output = "table", out.dir = ".")
8 | knitr::kable(pkgs, booktabs=TRUE)
9 | ```
10 |
11 |
12 |
--------------------------------------------------------------------------------
/manuscript/references.qmd:
--------------------------------------------------------------------------------
1 | # References
2 |
3 | ::: {#refs}
4 | :::
5 |
6 |
7 |
8 |
--------------------------------------------------------------------------------
/manuscript/storytime.qmd:
--------------------------------------------------------------------------------
1 | ## Story Time {#storytime}
2 |
3 | Each of the following short stories is an exaggerated call for interpretable machine learning.
4 | The format is inspired by Jack Clark's Tech Tales in his [Import AI Newsletter](https://jack-clark.net/).
5 | If you like this kind of story or if you are interested in AI, I recommend that you sign up.
6 |
7 | ### Lightning Never Strikes Twice
8 |
9 | **2030: A medical lab in Switzerland**
10 |
11 | {width=75% align="center"}
12 |
13 | "It's definitely not the worst way to die!" Tom concluded, trying to find something positive in the tragedy.
14 | He removed the pump from the intravenous pole.
15 | "He just died for the wrong reasons," Lena added.
16 | "And certainly with the wrong morphine pump!
17 | Just creating more work for us!" Tom complained while unscrewing the back plate of the pump.
18 | After removing all the screws, he lifted the plate and put it aside.
19 | He plugged a cable into the diagnostic port.
20 | "You weren't just complaining about having a job, were you?" Lena gave him a mocking smile.
21 | "Of course not. Never!" he exclaimed with a sarcastic undertone.
22 |
23 | He booted the pump's computer.
24 | Lena plugged the other end of the cable into her tablet.
25 | "All right, diagnostics are running," she announced.
26 | "I'm really curious about what went wrong."
27 | "It certainly shot our John Doe into Nirvana.
28 | That high concentration of this morphine stuff.
29 | Man. I mean ... that's a first, right?
30 | Normally a broken pump gives off too little of the sweet stuff or nothing at all.
31 | But never, you know, like that crazy shot," Tom explained.
32 | "I know. You don't have to convince me ... Hey, look at that." Lena held up her tablet.
33 | "Do you see this peak here? That's the potency of the painkillers mix.
34 | Look! This line shows the reference level.
35 | The poor guy had a mixture of painkillers in his blood system that could kill him 17 times over.
36 | Injected by our pump here.
37 | And here ..." she swiped, "here you can see the moment of the patient's demise."
38 | "So, any idea what happened, boss?" Tom asked his supervisor.
39 | "Hm ... The sensors seem to be fine.
40 | Heart rate, oxygen levels, glucose, ... The data were collected as expected.
41 | Some missing values in the blood oxygen data, but that's not unusual.
42 | Look here.
43 | The sensors have also detected the patient's slowing heart rate and extremely low cortisol levels caused by the morphine derivate and other pain blocking agents."
44 | She continued to swipe through the diagnostics report.
45 | Tom stared captivated at the screen.
46 | It was his first investigation of a real device failure.
47 |
48 | "Ok, here is our first piece of the puzzle.
49 | The system failed to send a warning to the hospital's communication channel.
50 | The warning was triggered, but rejected at protocol level.
51 | It could be our fault, but it could also be the fault of the hospital.
52 | Please send the logs over to the IT team," Lena told Tom.
53 | Tom nodded with his eyes still fixed on the screen.
54 | Lena continued:
55 | "It's odd.
56 | The warning should also have caused the pump to shut down.
57 | But it obviously failed to do so.
58 | That must be a bug.
59 | Something the quality team missed.
60 | Something really bad.
61 | Maybe it's related to the protocol issue."
62 | "So, the emergency system of the pump somehow broke down, but why did the pump go full bananas and inject so much painkiller into John Doe?" Tom wondered.
63 | "Good question.
64 | You're right.
65 | Protocol emergency failure aside, the pump shouldn't have administered that amount of medication at all.
66 | The algorithm should have stopped much earlier on its own, given the low level of cortisol and other warning signs," Lena explained.
67 | "Maybe some bad luck, like a one in a million thing, like being hit by a lightning?" Tom asked her.
68 | "No, Tom.
69 | If you had read the documentation I sent you, you would have known that the pump was first trained in animal experiments, then later on humans, to learn to inject the perfect amount of painkillers based on the sensory input.
70 | The algorithm of the pump might be opaque and complex, but it's not random.
71 | That means that in the same situation the pump would behave exactly the same way again.
72 | Our patient would die again.
73 | A combination or undesired interaction of the sensory inputs must have triggered the erroneous behavior of the pump.
74 | That's why we have to dig deeper and find out what happened here," Lena explained.
75 |
76 | "I see ...," Tom replied, lost in thought.
77 | "Wasn't the patient going to die soon anyway? Because of cancer or something?"
78 | Lena nodded while she read the analysis report.
79 | Tom got up and went to the window.
80 | He looked outside, his eyes fixed on a point in the distance.
81 | "Maybe the machine did him a favor, you know, in freeing him from the pain.
82 | No more suffering.
83 | Maybe it just did the right thing.
84 | Like a lightning bolt, but, you know, a good one.
85 | I mean like the lottery, but not random.
86 | But for a reason.
87 | If I were the pump, I would have done the same."
88 | She finally lifted her head and looked at him.
89 | He continued to look at something outside.
90 | Both were silent for a few moments.
91 | Lena lowered her head again and continued the analysis.
92 | "No, Tom. It's a bug... Just a damn bug."
93 |
94 | ### Trust Fall
95 |
96 | **2050: A subway station in Germany**
97 |
98 | {width=75% align="center"}
99 |
100 | She rushed to the subway station.
101 | Her mind was already at work.
102 | The tests for the new neural architecture should be completed by now.
103 | She was leading the redesign of the government's "Tax Affinity Prediction System for Individual Entities", which predicts whether a person will hide money from the tax office.
104 | Her team had come up with an elegant piece of engineering.
105 | If successful, the system would not only serve the tax office, but also feed into other systems such as the counter-terrorism alarm system and the commercial registry.
106 | One day, the government could even integrate the predictions into the Civic Trust Score.
107 | The Civic Trust Score estimates how trustworthy a person is.
108 | The estimate affects every part of your daily life, such as getting a loan or how long you have to wait for a new passport.
109 | As she descended the escalator, she imagined what an integration of her team's system into the Civic Trust Score System might look like.
110 |
111 | She routinely wiped her hand over the RFID reader without reducing her walking speed.
112 | Her mind was occupied, but a dissonance of sensory expectations and reality rang alarm bells in her brain.
113 |
114 | Too late.
115 |
116 | Nose first, she ran into the subway entrance gate and fell butt-first to the ground.
117 | The door was supposed to open, ... but it did not.
118 | Dumbfounded, she stood up and looked at the screen next to the gate.
119 | "Please try another time," suggested a friendly looking smiley on the screen.
120 | A person passed by and, ignoring her, wiped his hand over the reader.
121 | The door opened and he went through.
122 | The door closed again.
123 | She wiped her nose.
124 | It hurt, but at least it did not bleed.
125 | She tried to open the door, but was rejected again.
126 | It was strange.
127 | Maybe her public transport account did not have sufficient tokens.
128 | She looked at her smartwatch to check the account balance.
129 |
130 | "Login denied. Please contact your Citizens Advice Bureau!" her watch informed her.
131 |
132 | A feeling of nausea hit her like a fist to the stomach.
133 | She suspected what had happened.
134 | To confirm her theory, she started the mobile game "Sniper Guild", a first-person shooter.
135 | The app closed again immediately, which confirmed her theory.
136 | She became dizzy and sat down on the floor again.
137 |
138 | There was only one possible explanation:
139 | Her Civic Trust Score had dropped.
140 | Substantially.
141 | A small drop meant minor inconveniences, such as not getting first class flights or having to wait a little longer for official documents.
142 | A low trust score was rare and meant that you were classified as a threat to society.
143 | One measure in dealing with these people was to keep them away from public places such as the subway.
144 | The government restricted the financial transactions of subjects with low Civic Trust Scores.
145 | They also began to actively monitor the behavior of such subjects on social media and even went as far as to restrict certain content, such as violent games.
146 | It became exponentially more difficult to increase one's Civic Trust Score the lower it was.
147 | People with a very low score usually never recovered.
148 |
149 | She could not think of any reason why her score should have fallen.
150 | The score was based on machine learning.
151 | The Civic Trust Score System worked like a well-oiled engine that ran society.
152 | The performance of the Trust Score System was always closely monitored.
153 | Machine learning had become much better since the beginning of the century.
154 | It had become so efficient that decisions made by the Trust Score System could no longer be disputed.
155 | An infallible system.
156 |
157 | She laughed in despair.
158 | Infallible system.
159 | If only.
160 | The system had rarely failed.
161 | But it had failed.
162 | She must be one of those special cases;
163 | an error of the system;
164 | from now on an outcast.
165 | Nobody dared to question the system.
166 | It was too integrated into the government, into society itself, to be questioned.
167 | In the few remaining democratic countries it was forbidden to form anti-democratic movements, not because they were inherently malicious, but because they would destabilize the current system.
168 | The same logic applied to the now more common algocracies.
169 | Critiquing the algorithms was forbidden because of the danger it posed to the status quo.
170 |
171 | Algorithmic trust was the fabric of the social order.
172 | For the common good, rare false trust scores were tacitly accepted.
173 | Hundreds of other prediction systems and databases fed into the score, making it impossible to know what caused the drop in her score.
174 | She felt like a big dark hole was opening in and under her.
175 | With horror she looked into the void.
176 |
177 |
178 | ### Fermi's Paperclips
179 |
180 | **Year 612 AMS (after Mars settlement): A museum on Mars**
181 |
182 | {width=75% align="center"}
183 |
184 |
185 | "History is boring," Xola whispered to her friend.
186 | Xola, a blue-haired girl, was lazily chasing one of the projector drones humming in the room with her left hand.
187 | "History is important," the teacher said with an upset voice, looking at the girls.
188 | Xola blushed.
189 | She did not expect her teacher to overhear her.
190 |
191 | "Xola, what did you just learn?" the teacher asked her.
192 | "That the ancient people used up all resources from Earther Planet and then died?" she asked carefully.
193 | "No. They made the climate hot and it wasn't people, it was computers and machines. And it's Planet Earth, not Earther Planet," added another girl named Lin.
194 | Xola nodded in agreement.
195 | With a touch of pride, the teacher smiled and nodded.
196 | "You're both right. Do you know why it happened?"
197 | "Because people were short-sighted and greedy?" Xola asked.
198 | "People could not stop their machines!" Lin blurted out.
199 |
200 | "Again, you are both right," the teacher decided,
201 | "but it's much more complicated than that.
202 | Most people at the time were not aware of what was happening.
203 | Some saw the drastic changes, but could not reverse them.
204 | The most famous piece from this period is a poem by an anonymous author.
205 | It best captures what happened at that time.
206 | Listen carefully!"
207 |
208 |
209 | The teacher started the poem.
210 | A dozen of the small drones repositioned themselves in front of the children and began to project the video directly into their eyes.
211 | It showed a person in a suit standing in a forest with only tree stumps left.
212 | He began to talk:
213 |
214 | *The machines compute; the machines predict.*
215 |
216 | *We march on as we are part of it.*
217 |
218 | *We chase an optimum as trained.*
219 |
220 | *The optimum is one-dimensional, local and unconstrained.*
221 |
222 |
223 | *Silicon and flesh, chasing exponentiality.*
224 |
225 | *Growth is our mentality.*
226 |
227 | *When all rewards are collected,*
228 |
229 | *and side-effects neglected;*
230 |
231 | *When all the coins are mined,*
232 |
233 | *and nature has fallen behind;*
234 |
235 | *We'll be in trouble,*
236 |
237 | *After all, exponential growth is a bubble.*
238 |
239 |
240 | *The tragedy of the commons unfolding,*
241 |
242 | *Exploding,*
243 |
244 | *Before our eyes.*
245 |
246 |
247 | *Cold calculations and icy greed,*
248 |
249 | *Fill the earth with heat.*
250 |
251 | *Everything is dying,*
252 |
253 | *And we are complying.*
254 |
255 |
256 | *Like horses with blinders we race the race of our own creation,*
257 |
258 | *Towards the Great Filter of civilization.*
259 |
260 | *And so we march on relentlessly.*
261 |
262 | *As we are part of the machine.*
263 |
264 | *Embracing entropy.*
265 |
266 | "A dark memory," the teacher said to break the silence in the room.
267 | "It'll be uploaded to your library.
268 | Your homework is to memorize it by next week."
269 | Xola sighed.
270 | She managed to catch one of the little drones.
271 | The drone was warm from the CPU and the engines.
272 | Xola liked how it warmed her hands.
273 |
--------------------------------------------------------------------------------
/manuscript/translations.qmd:
--------------------------------------------------------------------------------
1 | # Translations {#translations}
2 |
3 | **Interested in translating the book?**
4 |
5 | This book is licensed under the [Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License](http://creativecommons.org/licenses/by-nc-sa/4.0/).
6 | This means that you are allowed to translate it and put it online.
7 | You have to mention me as the original author and you are not allowed to sell the book.
8 |
9 | If you are interested in translating the book, you can write me a message, and I can link your translation here.
10 | My address is chris@christophmolnar.com.
11 |
12 | **List of translations**
13 |
14 | **Bahasa Indonesia**
15 |
16 | - [A complete translation by Hatma Suryotrisongko and Smart City & Cybersecurity Laboratory, Information Technology, ITS](https://www.its.ac.id/it/id/interpretable-machine-learning/).
17 |
18 |
19 | **Chinese**
20 |
21 | - [Complete translation of 2nd edition](https://blog.csdn.net/jiazhen/article/details/126824573) by Jiazhen from CSDN, an online community of programmers.
22 | - [Complete translations](https://github.com/MingchaoZhu/InterpretableMLBook) by [Mingchao Zhu](https://github.com/MingchaoZhu). [Ebook and print versions](https://www.phei.com.cn/module/goods/wssd_content.jsp?bookid=57710) of this translation are available.
23 | - [Translation of most chapters](https://blog.csdn.net/wizardforcel/article/details/98992150) by CSDN.
24 | - [Translation of some chapters](https://zhuanlan.zhihu.com/p/63408696). The website also includes questions and answers from various users.
25 |
26 | **French**
27 |
28 | - [Translation (in progress)](https://nicolasguillard.github.io/interpretable-ml-book-fr/) by Nicolas Guillard.
29 |
30 | **Japanese**
31 |
32 | - [Complete translation](https://hacarus.github.io/interpretable-ml-book-ja/index.html) by Ryuji Masui and team HACARUS.
33 |
34 | **Korean**
35 |
36 | - [Complete Korean translation](https://tootouch.github.io/IML/taxonomy_of_interpretability_methods/) by [TooTouch](https://tootouch.github.io/)
37 |
38 | - [Partial Korean translation](https://subinium.github.io/IML/) by [An Subin](https://subinium.github.io/)
39 |
40 |
41 | **Spanish**
42 |
43 | - [Full Spanish translation](https://fedefliguer.github.io/AAI/) by [Federico Fliguer](https://www.linkedin.com/in/federico-fliguer/?originalSubdomain=ar)
44 |
45 | **Turkish**
46 |
47 | - [Full Turkish translation](https://ozancans-organization.gitbook.io/yorumlanabilir-makine-ogrenmesi) by [Ozancan Özdemir](https://ozancanozdemir.github.io/) (2nd edition)
48 |
49 |
50 | **Vietnamese**
51 |
52 | - [A complete translation](https://github.com/giangnguyen2412/InterpretableMLBook-Vietnamese) by Giang Nguyen, Duy-Tung Nguyen, Hung-Quang Nguyen, Tri Le and Hoang Nguyen.
53 |
54 |
--------------------------------------------------------------------------------
/manuscript/what-is-machine-learning.qmd:
--------------------------------------------------------------------------------
1 | # Machine Learning Terms
2 |
3 | To avoid confusion due to ambiguity, here are some definitions of terms used in this book:
4 |
5 | An **Algorithm** is a set of rules that a machine follows to achieve a particular goal [@merriam_algorithm_2017].
6 | An algorithm can be considered as a recipe that defines the inputs, the output, and all the steps needed to get from the inputs to the output.
7 | Cooking recipes are algorithms where the ingredients are the inputs, the cooked food is the output, and the preparation and cooking steps are the algorithm instructions.
8 |
9 |
10 | **Machine Learning** is a set of methods that allow computers to learn from data to make and improve predictions (for example, predicting cancer, weekly sales, credit default).
11 | Machine learning is a paradigm shift from "normal programming", where all instructions must be explicitly given to the computer, to "indirect programming", which takes place by providing data.
12 |
13 | {width=85%}
14 |
15 | A **Learner** or **Machine Learning Algorithm** is the program used to learn a machine learning model from data.
16 | Another name is "inducer" (e.g., "tree inducer").
17 |
18 |
19 | A **Machine Learning Model** is the learned program that maps inputs to predictions.
20 | This can be a set of weights for a linear model or for a neural network.
21 | Other names for the rather unspecific word "model" are "predictor" or - depending on the task - "classifier" or "regression model".
22 | In formulas, the trained machine learning model is called $\hat{f}$ or $\hat{f}(\mathbf{x})$.
23 |
24 | {width=60%}
25 |
26 |
27 | A **Black Box Model** is a system that does not reveal its internal mechanisms.
28 | In machine learning, the term "black box" describes models that cannot be understood by looking at their parameters (e.g., a neural network).
29 | The opposite of a black box is sometimes referred to as a **White Box** and is called an interpretable model in this book.
30 | Model-agnostic methods for interpretability treat machine learning models as black boxes, even if they are not.
31 |
32 |
33 | {width=85%}
34 |
35 |
36 | **Interpretable Machine Learning** refers to methods and models that make the behavior and predictions of machine learning systems understandable to humans.
37 |
38 |
39 | A **Dataset** is a table with the data from which the machine learns.
40 | The dataset contains the features and the target to predict.
41 | When used to induce a model, the dataset is called training data.
42 |
43 | An **Instance** is a row in the dataset.
44 | Other names for 'instance' are: (data) point, example, observation.
45 | An instance consists of the feature values $\mathbf{x}^{(i)}$ and, if known, the target outcome $y^{(i)}$.
46 |
47 | The **Features** are the inputs used for prediction or classification.
48 | A feature is a column in the dataset.
49 | Throughout the book, features are assumed to be interpretable, meaning it is easy to understand what they mean, like the temperature on a given day or the height of a person.
50 | The interpretability of the features is a big assumption.
51 | But if it is hard to understand the input features, it is even harder to understand what the model does.
52 | The matrix with all features is called $\mathbf{X}$, and $\mathbf{x}^{(i)}$ denotes the feature vector of a single instance.
53 | The vector of a single feature across all instances is $\mathbf{x}_j$, and the value of feature $j$ for instance $i$ is $x^{(i)}_j$.
54 |
55 | The **Target** is the information the machine learns to predict.
56 | In mathematical formulas, the target is usually called $y$ or $y^{(i)}$ for a single instance.
57 |
58 | A **Machine Learning Task** is the combination of a dataset with features and a target.
59 | Depending on the type of the target, the task could be classification, regression, survival analysis, clustering, or outlier detection.
60 |
61 | The **Prediction** is the machine learning model's "guess" for the target value based on the given features.
62 | In this book, the model prediction is denoted by $\hat{f}(\mathbf{x}^{(i)})$ or $\hat{y}$.
63 |
64 |
65 |
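66 | To tie several of these terms together (dataset, features, target, learner, model, prediction), here is a minimal sketch in Python, using scikit-learn purely as a hypothetical illustration; it is not code from the book:
67 |
68 | ```python
69 | # Minimal sketch of the pipeline: dataset -> learner -> model -> prediction.
70 | # Assumes scikit-learn is installed; the library choice is illustrative.
71 | from sklearn.datasets import load_iris
72 | from sklearn.tree import DecisionTreeClassifier
73 |
74 | # Dataset: feature matrix X (instances x features) and target vector y
75 | X, y = load_iris(return_X_y=True)
76 |
77 | learner = DecisionTreeClassifier(max_depth=2)  # the learner (inducer)
78 | model = learner.fit(X, y)                      # the learned model
79 | # Note: fit() returns the fitted estimator itself, so after training,
80 | # 'learner' and 'model' refer to the same object.
81 |
82 | print(model.predict(X[:5]))  # predictions for the first five instances
83 | ```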
--------------------------------------------------------------------------------
/pkg/sbrl_1.2.tar.gz:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/christophM/interpretable-ml-book/c3b7569830afddeea6e255b62b80d0aeaa7790e3/pkg/sbrl_1.2.tar.gz
--------------------------------------------------------------------------------
/scripts/dl-feature-attribution/activation-maximization.py:
--------------------------------------------------------------------------------
1 | # code from here: https://gist.github.com/saurabhpal97/158988f112e2e3b6067d25c5f6499ef3#file-activation_max-py
2 |
3 | #importing the required modules
4 | from vis.visualization import visualize_activation
5 | from vis.utils import utils
6 | from keras import activations
7 | from keras import applications
8 | import matplotlib.pyplot as plt
10 |
11 | plt.rcParams['figure.figsize'] = (18,6)
12 | # Create a VGG16 model including the fully connected layers so that we can
13 | # visualize the patterns for an individual output category
14 | from keras.applications import VGG16
15 | model = VGG16(weights='imagenet',include_top=True)
16 |
17 | #finding out the layer index using layer name
18 | # find_layer_idx accepts the model and the name of a layer and returns the index of that layer
19 | layer_idx = utils.find_layer_idx(model,'predictions')
20 | #changing the activation of the layer to linear
21 | model.layers[layer_idx].activation = activations.linear
22 | #applying modifications to the model
23 | model = utils.apply_modifications(model)
24 | #Indian elephant
25 | img3 = visualize_activation(model,layer_idx,filter_indices=385,max_iter=5000,verbose=True)
26 | plt.imshow(img3)
27 | plt.show()
--------------------------------------------------------------------------------
/scripts/dl-feature-attribution/edge-detection.py:
--------------------------------------------------------------------------------
1 | import cv2
2 | import os
3 | import numpy as np
4 | from matplotlib import pyplot as plt
5 | import imp
6 |
7 | base_dir = os.path.dirname(__file__)
8 | utils = imp.load_source("utils", os.path.join(base_dir, "utils.py"))
9 | # load the example image and resize it to 224x224
10 | img = utils.load_image(
11 | os.path.join(base_dir, "..", "..", "manuscript", "images", "dog_and_book.jpeg"), 224)
12 | img = np.uint8(img)
13 | edges = cv2.Canny(img, 200, 400)
14 | edges = np.max(edges) - edges
15 | plt.imshow(edges,cmap = 'gray')
16 | # hide the axes and set the title
17 | plt.axis('off')
18 | plt.title("Canny Edge Detector")
19 | plt.savefig("dog_and_book_edge.png", bbox_inches = "tight")
20 |
21 |
--------------------------------------------------------------------------------
/scripts/dl-feature-attribution/feature-attribution-dl.py:
--------------------------------------------------------------------------------
1 | # Code from here: https://github.com/albermax/innvestigate/blob/master/examples/notebooks/imagenet_compare_methods.ipynb
2 | import keras
3 | import keras.backend
4 | import imp
5 | import matplotlib.pyplot as plt
6 | import numpy as np
7 | import os
8 | import innvestigate
9 | import innvestigate.utils
10 | import keras.applications.vgg16 as vgg16
11 | from keras.applications.vgg16 import decode_predictions
12 |
13 | model, preprocess = vgg16.VGG16(), vgg16.preprocess_input
14 | base_dir = os.path.dirname(__file__)
15 | utils = imp.load_source("utils", os.path.join(base_dir, "utils.py"))
16 | imgnetutils = imp.load_source("utils_imagenet", os.path.join(base_dir, "utils_imagenet.py"))
17 |
18 | def inverse_graymap(X):
19 | return imgnetutils.graymap(np.max(X) - X)
20 |
21 |
22 | # Methods we use and some properties.
23 | methods = [
24 | # NAME OPT.PARAMS POSTPROC FXN TITLE
25 | # Show input.
26 | ("input", {}, imgnetutils.image, "Input"),
27 | # Function
28 | ("gradient", {"postprocess": "abs"}, inverse_graymap, "Gradient"),
29 | ("smoothgrad", {"augment_by_n": 64, "postprocess": "square"}, inverse_graymap, "SmoothGrad"),
30 | # Signal
31 | #("deconvnet", {}, imgnetutils.bk_proj, "Deconvnet"),
32 | #("guided_backprop", {}, imgnetutils.bk_proj, "Guided Backprop"),
33 | #("pattern.net", {}, imgnetutils.bk_proj, "PatternNet"),
34 | # Interaction
35 | #("deep_taylor", {}, imgnetutils.heatmap, "Deep Taylor"),
36 | #("pattern.attribution", {}, imgnetutils.heatmap, "PatternAttribution"),
37 | #("input_t_gradient", {}, imgnetutils.heatmap, "Input * Gradient"),
38 | #("integrated_gradients", {"steps": 64}, imgnetutils.heatmap, "Integrated Gradients"),
39 | #("lrp.z", {}, imgnetutils.heatmap, "LRP-Z"),
40 | #("lrp.epsilon", {"epsilon": 1}, imgnetutils.heatmap, "LRP-Epsilon"),
41 | #("lrp.sequential_preset_a_flat",{"epsilon": 1}, imgnetutils.heatmap, "LRP-PresetAFlat"),
42 | #("lrp.sequential_preset_b_flat",{"epsilon": 1}, imgnetutils.heatmap, "LRP-PresetBFlat"),
43 | ]
44 |
45 | if __name__ == "__main__":
46 | # Load an image.
47 | image = utils.load_image(
48 | os.path.join(base_dir, "..", "..", "manuscript", "images", "dog_and_book.jpeg"), 224)
49 |
50 | # Get model
51 | yhat = model.predict(preprocess(image[None]))
52 | label = decode_predictions(yhat)
53 | label = label[0][0]
54 | print('%s (%.2f%%)' % (label[1], label[2]*100))
55 | # Strip softmax layer
56 | model = innvestigate.utils.model_wo_softmax(model)
57 | for method in methods:
58 | print(method[0])
59 | analyzer = innvestigate.create_analyzer(method[0],
60 | model,
61 | **method[1])
62 | if method[0] == "input":
63 | a = image[None]
64 | a = (a - a.min())/ (a.max() - a.min())
65 | else:
66 | x = preprocess(image[None])
67 | # use preprocessing from other script
68 | a = analyzer.analyze(x)
69 | a = imgnetutils.postprocess(a, "BGRtoRGB", False)
70 | a = method[2](a)
71 | plt.imshow(a[0], cmap="seismic", clim=(-1, 1))
72 | plt.axis('off')
73 | plt.title(method[3])
74 | plt.savefig("dog_and_book_" + method[0] + ".png", bbox_inches = "tight")
75 |
76 |
--------------------------------------------------------------------------------
/scripts/dl-feature-attribution/requirements.txt:
--------------------------------------------------------------------------------
1 | absl-py==0.11.0
2 | astor==0.8.1
3 | attrs==20.2.0
4 | certifi==2020.6.20
5 | cycler==0.10.0
6 | future==0.18.2
7 | gast==0.4.0
8 | google-pasta==0.2.0
9 | grpcio==1.33.2
10 | h5py==2.10.0
11 | imageio==2.9.0
12 | importlib-metadata==2.0.0
13 | iniconfig==1.1.1
14 | innvestigate==1.0.8
15 | Keras==2.2.4
16 | Keras-Applications==1.0.8
17 | Keras-Preprocessing==1.1.2
18 | kiwisolver==1.3.0
19 | Markdown==3.3.3
20 | matplotlib==3.3.2
21 | numpy==1.19.3
22 | packaging==20.4
23 | Pillow==8.0.1
24 | pkg-resources==0.0.0
25 | pluggy==0.13.1
26 | protobuf==3.13.0
27 | py==1.9.0
28 | pyparsing==2.4.7
29 | pytest==6.1.2
30 | python-dateutil==2.8.1
31 | PyYAML==5.3.1
32 | scipy==1.5.3
33 | six==1.15.0
34 | tensorboard==1.14.0
35 | tensorflow==1.14.0
36 | tensorflow-estimator==1.14.0
37 | termcolor==1.1.0
38 | tf-keras-vis==0.5.3
39 | toml==0.10.1
40 | Werkzeug==1.0.1
41 | wrapt==1.12.1
42 | zipp==3.4.0
43 |
--------------------------------------------------------------------------------
/scripts/dl-feature-attribution/utils.py:
--------------------------------------------------------------------------------
1 | # Begin: Python 2/3 compatibility header small
2 | # Get Python 3 functionality:
3 | from __future__ import\
4 | absolute_import, print_function, division, unicode_literals
5 | from future.utils import raise_with_traceback, raise_from
6 | # catch exception with: except Exception as e
7 | from builtins import range, map, zip, filter
8 | from io import open
9 | import six
10 | # End: Python 2/3 compatibility header small
11 |
12 | import matplotlib.pyplot as plt
13 | import numpy as np
14 | import os
15 | import PIL.Image
16 | import shutil
17 |
18 |
19 | ###############################################################################
20 | # Download utilities
21 | ###############################################################################
22 |
23 |
24 | def download(url, filename):
25 | if not os.path.exists(filename):
26 | print("Download: %s ---> %s" % (url, filename))
27 | response = six.moves.urllib.request.urlopen(url)
28 | with open(filename, 'wb') as out_file:
29 | shutil.copyfileobj(response, out_file)
30 |
31 |
32 | ###############################################################################
33 | # Plot utility
34 | ###############################################################################
35 |
36 |
37 | def load_image(path, size):
38 | ret = PIL.Image.open(path)
39 | ret = ret.resize((size, size))
40 | ret = np.asarray(ret, dtype=np.uint8).astype(np.float32)
41 | if ret.ndim == 2:
42 | ret.resize((size, size, 1))
43 | ret = np.repeat(ret, 3, axis=-1)
44 | return ret
45 |
46 |
47 | def get_imagenet_data(size=224):
48 | base_dir = os.path.dirname(__file__)
49 |
50 | # ImageNet 2012 validation set images?
51 | with open(os.path.join(base_dir, "images", "ground_truth_val2012")) as f:
52 | ground_truth_val2012 = {x.split()[0]: int(x.split()[1])
53 | for x in f.readlines() if len(x.strip()) > 0}
54 | with open(os.path.join(base_dir, "images", "synset_id_to_class")) as f:
55 | synset_to_class = {x.split()[1]: int(x.split()[0])
56 | for x in f.readlines() if len(x.strip()) > 0}
57 | with open(os.path.join(base_dir, "images", "imagenet_label_mapping")) as f:
58 | image_label_mapping = {int(x.split(":")[0]): x.split(":")[1].strip()
59 | for x in f.readlines() if len(x.strip()) > 0}
60 |
61 | def get_class(f):
62 | # File from ImageNet 2012 validation set
63 | ret = ground_truth_val2012.get(f, None)
64 | if ret is None:
65 | # File from ImageNet training sets
66 | ret = synset_to_class.get(f.split("_")[0], None)
67 | if ret is None:
68 | # Random JPEG file
69 | ret = "--"
70 | return ret
71 |
72 | images = [(load_image(os.path.join(base_dir, "images", f), size),
73 | get_class(f))
74 | for f in os.listdir(os.path.join(base_dir, "images"))
75 | if f.lower().endswith(".jpg") or f.lower().endswith(".jpeg")]
76 | return images, image_label_mapping
77 |
78 |
79 | def plot_image_grid(grid,
80 | row_labels_left,
81 | row_labels_right,
82 | col_labels,
83 | file_name=None,
84 | figsize=None,
85 | dpi=224):
86 | n_rows = len(grid)
87 | n_cols = len(grid[0])
88 | if figsize is None:
89 | figsize = (n_cols, n_rows+1)
90 |
91 | plt.clf()
92 | plt.rc("font", family="sans-serif")
93 |
94 | plt.figure(figsize=figsize)
95 | for r in range(n_rows):
96 | for c in range(n_cols):
97 | ax = plt.subplot2grid(shape=[n_rows+1, n_cols], loc=[r+1, c])
98 | # TODO controlled color mapping wrt all grid entries,
99 | # or individually. make input param
100 | if grid[r][c] is not None:
101 | ax.imshow(grid[r][c], interpolation='none')
102 | else:
103 | for spine in plt.gca().spines.values():
104 | spine.set_visible(False)
105 | ax.set_xticks([])
106 | ax.set_yticks([])
107 |
108 | # column labels
109 | if not r:
110 | if col_labels != []:
111 | ax.set_title(col_labels[c],
112 | rotation=22.5,
113 | horizontalalignment='left',
114 | verticalalignment='bottom')
115 |
116 | # row labels
117 | if not c:
118 | if row_labels_left != []:
119 | txt_left = [l+'\n' for l in row_labels_left[r]]
120 | ax.set_ylabel(
121 | ''.join(txt_left),
122 | rotation=0,
123 | verticalalignment='center',
124 | horizontalalignment='right',
125 | )
126 |
127 | if c == n_cols-1:
128 | if row_labels_right != []:
129 | txt_right = [l+'\n' for l in row_labels_right[r]]
130 | ax2 = ax.twinx()
131 | ax2.set_xticks([])
132 | ax2.set_yticks([])
133 | ax2.set_ylabel(
134 | ''.join(txt_right),
135 | rotation=0,
136 | verticalalignment='center',
137 | horizontalalignment='left'
138 | )
139 |
140 | if file_name is None:
141 | plt.show()
142 | else:
143 | print('Saving figure to {}'.format(file_name))
144 | plt.savefig(file_name, orientation='landscape', dpi=dpi)
145 |
--------------------------------------------------------------------------------
/scripts/dl-feature-attribution/utils_imagenet.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 |
3 | import innvestigate
4 | import innvestigate.utils as iutils
5 | import innvestigate.utils.visualizations as ivis
6 |
7 |
8 | def preprocess(X, net):
9 | X = X.copy()
10 | X = net["preprocess_f"](X)
11 | return X
12 |
13 |
14 | def postprocess(X, color_conversion, channels_first):
15 | X = X.copy()
16 | X = iutils.postprocess_images(
17 | X, color_coding=color_conversion, channels_first=channels_first)
18 | return X
19 |
20 |
21 | def image(X):
22 | X = X.copy()
23 | return ivis.project(X, absmax=255.0, input_is_postive_only=True)
24 |
25 |
26 | def bk_proj(X):
27 | X = ivis.clip_quantile(X, 1)
28 | return ivis.project(X)
29 |
30 |
31 | def heatmap(X):
32 | #X = ivis.gamma(X, minamp=0, gamma=0.95)
33 | return ivis.heatmap(X)
34 |
35 |
36 | def graymap(X):
37 | return ivis.graymap(np.abs(X), input_is_positive_only=True)
38 |
--------------------------------------------------------------------------------
/scripts/get-SpamTube-dataset.R:
--------------------------------------------------------------------------------
1 | get.ycomments.data = function(data_dir){
2 | ycomments.file = sprintf('%s/TubeSpam.csv', data_dir)
3 | if (!file.exists(ycomments.file)) {
4 |     download.spam.data(data_dir)
5 | }
6 | read.csv(ycomments.file, stringsAsFactors = FALSE)
7 | }
8 |
9 |
10 |
11 | # Download the YouTube spam comment datasets
12 | download.spam.data = function(data_dir){
13 | urls = sprintf('http://lasid.sor.ufscar.br/labeling/datasets/%i/download/', 9:13)
14 | ycomments = lapply(urls, read.csv, stringsAsFactors=FALSE)
15 | ycomments = do.call('rbind', ycomments)
16 | cleanFun <- function(htmlString) {
17 | return(gsub("<.*?>", "", htmlString))
18 | }
19 | ycomments$CONTENT = cleanFun(ycomments$CONTENT)
20 | # Convert to ASCII
21 | ycomments$CONTENT = iconv(ycomments$CONTENT, "UTF-8", "ASCII", sub="")
22 | write.csv( x = ycomments, file = sprintf('%s/TubeSpam.csv', data_dir),row.names=FALSE)
23 | }
24 |
25 | data_dir = "../data/"
26 |
27 | ycomments = get.ycomments.data(data_dir)
28 | save(ycomments, file = sprintf('%sycomments.RData', data_dir))
29 |
30 |
--------------------------------------------------------------------------------
/scripts/get-bike-sharing-dataset.R:
--------------------------------------------------------------------------------
1 | library(lubridate)
2 | data_dir <- "../data/"
3 |
4 | day_diff <- function(date1, date2) {
5 | as.numeric(difftime(as.Date(date1), as.Date(date2), units = "days"))
6 | }
7 |
8 | bike = read.csv(sprintf('%s/bike-sharing-daily.csv', data_dir), stringsAsFactors = FALSE)
9 | # See http://archive.ics.uci.edu/ml/datasets/Bike+Sharing+Dataset
10 |
11 | bike$weekday = factor(bike$weekday, levels=0:6, labels = c('SUN', 'MON', 'TUE', 'WED', 'THU', 'FRI', 'SAT'))
12 | bike$holiday = factor(bike$holiday, levels = c(0,1), labels = c('N', 'Y'))
13 | bike$workday = factor(bike$workingday, levels = c(0,1), labels = c('N', 'Y'))
14 | bike$workingday = NULL
15 | bike$season = factor(bike$season, levels = 1:4, labels = c('WINTER', 'SPRING', 'SUMMER', 'FALL'))
16 | bike$weather = factor(bike$weathersit, levels = 1:3, labels = c('GOOD', 'MISTY', 'BAD'))
17 | bike$weathersit = NULL
18 | bike$mnth = factor(bike$mnth, levels = 1:12, labels = c('JAN', 'FEB', 'MAR', 'APR', 'MAY', 'JUN', 'JUL', 'AUG', 'SEP', 'OCT', 'NOV', 'DEC'))
19 | bike$yr[bike$yr == 0] <- 2011
20 | bike$yr[bike$yr == 1] <- 2012
21 | bike$yr <- factor(bike$yr)
22 | bike$days_since_2011 = day_diff(bike$dteday, min(as.Date(bike$dteday)))
23 | bike <- bike[order(bike$days_since_2011), ]
24 | bike$cnt_2d_bfr <- c(NA, NA, head(bike$cnt, -2))
25 |
26 | # denormalize weather features:
27 | # temp : Normalized temperature in Celsius. The values are derived via (t-t_min)/(t_max-t_min), t_min=-8, t_max=+39 (only in hourly scale)
28 | bike$temp = bike$temp * (39 - (-8)) + (-8)
29 | # atemp: Normalized feeling temperature in Celsius. The values are derived via (t-t_min)/(t_max-t_min), t_min=-16, t_max=+50 (only in hourly scale)
30 | bike$atemp = bike$atemp * (50 - (-16)) + (-16)
31 |
32 | # windspeed: Normalized wind speed. The values are divided by 67 (max)
33 | bike$windspeed = 67 * bike$windspeed
34 | # hum: Normalized humidity. The values are divided by 100 (max)
35 | bike$hum = 100 * bike$hum
36 | #dplyr::select(bike, -instant, -dteday, -registered, -casual, -atemp)
37 |
38 | # Drop the first two days (their lagged cnt_2d_bfr values are NA)
39 | bike = na.omit(bike)
40 |
41 | bike = bike[bike$hum != 0,]
42 |
43 | save(bike, file = sprintf('%sbike.RData', data_dir))
44 | write.csv(bike,file = sprintf('%sbike.csv', data_dir), row.names = FALSE)
45 |
46 |
--------------------------------------------------------------------------------
/scripts/grad-cam/.gitignore:
--------------------------------------------------------------------------------
1 | venv
2 |
--------------------------------------------------------------------------------
/scripts/grad-cam/keras-vis.py:
--------------------------------------------------------------------------------
1 | import keras
2 | import keras.backend
3 | import imp
4 | import matplotlib.pyplot as plt
5 | from matplotlib import gridspec
6 | import numpy as np
7 | import os
8 | import keras.applications.vgg16 as vgg16
9 | from keras.applications.vgg16 import decode_predictions
10 | from tensorflow.keras.preprocessing.image import load_img
11 | from tensorflow.keras import backend as K
12 | from tf_keras_vis.saliency import Saliency
13 | from tf_keras_vis.utils import normalize
14 | import tensorflow as tf
15 | from tf_keras_vis.gradcam import Gradcam
16 |
17 |
18 | model, preprocess = vgg16.VGG16(), vgg16.preprocess_input
19 | base_dir = os.path.dirname(__file__)
20 | image_path = os.path.join(base_dir, "..", "..", "manuscript", "images")
21 | # Load an image.
22 | img1 = load_img(
23 | os.path.join(image_path,"dog_and_book.jpeg"), target_size=(224,224))
24 | img2 = load_img(
25 | os.path.join(image_path, "ramen.jpg"), target_size=(224,224))
26 | img3 = load_img(
27 | os.path.join(image_path, "octopus.jpeg"), target_size=(224,224))
28 |
29 |
30 |
31 | images = np.asarray([np.array(img1), np.array(img2), np.array(img3)])
32 | image = preprocess(images)
33 |
34 | # Top prediction is 'Italian_greyhound', with index 171 according to
35 | # ~/.keras/models/imagenet_class_index.json
36 | # For the ramen it's soup bowl: 809
37 | # Make sure to execute before applying model_modifier
38 | # And this is then how to get the probabilities and top classes:
39 | # decode_predictions(model.predict(image), top = 1)
40 | # Out[23]:
41 | # [[('n02091032', 'Italian_greyhound', 0.35211313)],
42 | # [('n04263257', 'soup_bowl', 0.49959907)],
43 | # [('n02526121', 'eel', 0.69820803)]]
44 |
45 |
46 | nrows = 1
47 | ncols = 3
48 | f, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(10, 5))
49 | f.subplots_adjust(wspace=0, hspace=0)
50 |
51 | fs = 10
52 | ax[0].set_title("Greyhound (vanilla)", fontsize=fs)
53 | ax[0].imshow(images[0])
54 |
55 | ax[1].set_title("Soup Bowl (vanilla)", fontsize=fs)
56 | ax[1].imshow(images[1])
57 |
58 | ax[2].set_title("Eel (vanilla)", fontsize=fs)
59 | ax[2].imshow(images[2])
60 |
61 | for i in range(0, ncols):
62 | ax[i].set_xticks([])
63 | ax[i].set_yticks([])
64 |
65 |
66 | plt.tight_layout()
67 | plt.savefig(os.path.join(image_path, 'original-images-classification.png'), bbox_inches='tight')
68 |
69 |
70 |
71 | def loss(output):
72 | # Italian_greyhound, soup bowl, eel
73 |     return(output[0][171], output[1][809], output[2][390])
74 |
75 | def model_modifier(m):
76 | m.layers[-1].activation = tf.keras.activations.linear
77 | return m
78 |
79 | saliency = Saliency(model,
80 | model_modifier=model_modifier,
81 | clone=False)
82 |
83 | # Saliency maps
84 |
85 | # Vanilla gradient
86 | saliency_map = saliency(loss, image)
87 | saliency_map_vanilla = normalize(saliency_map)
88 |
89 |
90 | # SmoothGrad
91 | saliency_map = saliency(loss, image,
92 | # TODO: Increase
93 | smooth_samples=30,
94 | smooth_noise=0.2)
95 | saliency_map_smooth = normalize(saliency_map)
96 |
97 |
98 | # Generate heatmap with GradCAM
99 | # Create Gradcam object
100 | gradcam = Gradcam(model,
101 | model_modifier=model_modifier,
102 | clone=False)
103 |
104 | cam = gradcam(loss,
105 | image,
106 | penultimate_layer=-1, # model.layers number
107 | )
108 | cam = normalize(cam)
109 |
110 | # Single image as example for chapter start
111 | fig = plt.figure()
112 | plt.imshow(saliency_map_vanilla[0], cmap = 'jet')
113 | plt.axis("off")
114 | plt.savefig(os.path.join(image_path, 'vanilla.png'))
115 |
116 |
117 | # 3x3 grid: vanilla / SmoothGrad / Grad-CAM rows for the three images
118 | #gs = gridspec.GridSpec(nrows=3, ncols=3, width_ratios=[1,1,1],
119 | # wspace=0.0, hspace=0.0, top=0.95, bottom=0.05,
120 | # right=0.845)
121 | nrows = 3
122 | ncols = 3
123 | f, ax = plt.subplots(nrows=nrows, ncols=ncols, figsize=(10, 10))
124 | f.subplots_adjust(wspace=0, hspace=0)
125 | fs = 10
126 |
127 | ax[0][0].set_title("Greyhound (vanilla)", fontsize=fs)
128 | ax[0][0].imshow(saliency_map_vanilla[0], cmap = 'jet')
129 |
130 | ax[0][1].set_title("Soup Bowl (vanilla)", fontsize=fs)
131 | ax[0][1].imshow(saliency_map_vanilla[1], cmap = 'jet')
132 |
133 | ax[0][2].set_title("Eel (vanilla)", fontsize=fs)
134 | ax[0][2].imshow(saliency_map_vanilla[2], cmap = 'jet')
135 |
136 | ax[1][0].set_title("Greyhound (Smoothgrad)", fontsize=fs)
137 | ax[1][0].imshow(saliency_map_smooth[0], cmap = 'jet')
138 |
139 | ax[1][1].set_title("Soup Bowl (Smoothgrad)", fontsize=fs)
140 | ax[1][1].imshow(saliency_map_smooth[1], cmap = 'jet')
141 |
142 | ax[1][2].set_title("Eel (Smoothgrad)", fontsize=fs)
143 | ax[1][2].imshow(saliency_map_smooth[2], cmap = 'jet')
144 |
145 | ax[2][0].set_title("Greyhound (Grad-Cam)", fontsize=fs)
146 | ax[2][0].imshow(cam[0], cmap = 'jet')
147 |
148 | ax[2][1].set_title("Soup Bowl (Grad-Cam)", fontsize=fs)
149 | ax[2][1].imshow(cam[1], cmap = 'jet')
150 |
151 | ax[2][2].set_title("Eel (Grad-Cam)", fontsize=fs)
152 | ax[2][2].imshow(cam[2], cmap = 'jet')
153 |
154 | for i in range(0, nrows):
155 | for j in range(0, ncols):
156 | ax[i][j].set_xticks([])
157 | ax[i][j].set_yticks([])
158 |
159 | plt.tight_layout()
160 | plt.savefig(os.path.join(image_path, 'smoothgrad.png'), bbox_inches='tight')
161 |
162 |
163 |
--------------------------------------------------------------------------------
/scripts/grad-cam/requirements.txt:
--------------------------------------------------------------------------------
1 | absl-py==0.11.0
2 | astor==0.8.1
3 | astunparse==1.6.3
4 | backcall==0.2.0
5 | cached-property==1.5.2
6 | cachetools==4.1.1
7 | certifi==2020.11.8
8 | chardet==3.0.4
9 | cycler==0.10.0
10 | decorator==4.4.2
11 | gast==0.3.3
12 | google-auth==1.23.0
13 | google-auth-oauthlib==0.4.2
14 | google-pasta==0.2.0
15 | grpcio==1.33.2
16 | h5py==2.10.0
17 | idna==2.10
18 | imageio==2.9.0
19 | importlib-metadata==2.0.0
20 | ipython==7.16.1
21 | ipython-genutils==0.2.0
22 | jedi==0.17.2
23 | Keras==2.4.3
24 | Keras-Applications==1.0.8
25 | Keras-Preprocessing==1.1.2
26 | keras-vis==0.4.1
27 | kiwisolver==1.3.1
28 | Markdown==3.3.3
29 | matplotlib==3.3.3
30 | networkx==2.5
31 | numpy==1.18.5
32 | oauthlib==3.1.0
33 | opt-einsum==3.3.0
34 | parso==0.7.1
35 | pexpect==4.8.0
36 | pickleshare==0.7.5
37 | Pillow==8.0.1
38 | pkg-resources==0.0.0
39 | prompt-toolkit==3.0.8
40 | protobuf==3.13.0
41 | ptyprocess==0.6.0
42 | pyasn1==0.4.8
43 | pyasn1-modules==0.2.8
44 | Pygments==2.7.2
45 | pyparsing==2.4.7
46 | python-dateutil==2.8.1
47 | PyWavelets==1.1.1
48 | PyYAML==5.3.1
49 | requests==2.25.0
50 | requests-oauthlib==1.3.0
51 | rsa==4.6
52 | scikit-image==0.17.2
53 | scipy==1.5.4
54 | six==1.15.0
55 | tensorboard==2.4.0
56 | tensorboard-plugin-wit==1.7.0
57 | tensorflow==2.3.1
58 | tensorflow-estimator==2.3.0
59 | termcolor==1.1.0
60 | tf-keras-vis==0.5.3
61 | tifffile==2020.9.3
62 | traitlets==4.3.3
63 | urllib3==1.26.2
64 | wcwidth==0.2.5
65 | Werkzeug==1.0.1
66 | wrapt==1.12.1
67 | zipp==3.4.0
68 |
--------------------------------------------------------------------------------
/scripts/imagenet_classifier.R:
--------------------------------------------------------------------------------
1 | # https://cran.rstudio.com/web/packages/keras/vignettes/applications.html
2 |
3 | library(keras)
4 |
5 | # instantiate the model
6 | model <- application_mobilenet(weights = 'imagenet')
7 | #model <- application_resnet50(weights = 'imagenet')
8 |
9 | # load the image
10 | # img_path <- "F:/Data/pets_data/cats/20160211_200107000_iOS.jpg"
11 | #img_path <- "F:/Data/pets_data/dogs/IMG_20170920_200039_286.jpg"
12 | img_path <- "/run/media/verena/SAMSUNG/Data/pets_data/dogs/IMG_20170920_200039_286.jpg"
13 | img <- image_load(img_path, target_size = c(224,224))
14 | x <- image_to_array(img)
15 |
16 | # ensure we have a 4d tensor with a single element in the batch dimension,
17 | # then preprocess the input for prediction
18 | x <- array_reshape(x, c(1, dim(x)))
19 | x <- imagenet_preprocess_input(x)
20 |
21 | # make predictions then decode and print them
22 | preds <- model %>% predict(x)
23 | imagenet_decode_predictions(preds, top = 3)[[1]]
24 |
25 | library(lime)
26 | library(abind)
27 |
28 | img_preprocess <- function(x) {
29 | arrays <- lapply(x, function(path) {
30 | img <- image_load(path, target_size = c(224,224))
31 | x <- image_to_array(img)
32 | x <- array_reshape(x, c(1, dim(x)))
33 | x <- imagenet_preprocess_input(x)
34 | })
35 | do.call(abind, c(arrays, list(along = 1)))
36 | }
37 |
38 | # Create an explainer (lime recognises the path as an image)
39 | explainer <- lime(img_path, as_classifier(model), img_preprocess)
40 |
41 |
42 | # Explain the model (can take a long time depending on your system)
43 | explanation <- explain(img_path, explainer, n_labels = 2, n_features = 3, n_superpixels = 10)
44 |
45 | library(microbenchmark)
46 | mb <- microbenchmark(
47 | explanation1 <- explain(img_path, explainer, n_labels = 2, n_features = 3, n_superpixels = 10),
48 | explanation2 <- explain(img_path, explainer, n_labels = 1, n_features = 3, n_superpixels = 10),
49 | explanation3 <- explain(img_path, explainer, n_labels = 2, n_features = 10, n_superpixels = 10),
50 | explanation4 <- explain(img_path, explainer, n_labels = 2, n_features = 3, n_superpixels = 20)
51 | )
52 | # explanation$label <- imagenet_decode_predictions(explanation$label_prob)
53 |
54 | plot_explanations(explanation1)
55 |
--------------------------------------------------------------------------------
/scripts/mmd/MMD-critic/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # Data generated by this tool.
7 | data/
8 |
9 | # C extensions
10 | *.so
11 |
12 | # Distribution / packaging
13 | .Python
14 | build/
15 | develop-eggs/
16 | dist/
17 | downloads/
18 | eggs/
19 | .eggs/
20 | lib/
21 | lib64/
22 | parts/
23 | sdist/
24 | var/
25 | wheels/
26 | *.egg-info/
27 | .installed.cfg
28 | *.egg
29 | MANIFEST
30 |
31 | # PyInstaller
32 | # Usually these files are written by a python script from a template
33 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
34 | *.manifest
35 | *.spec
36 |
37 | # Installer logs
38 | pip-log.txt
39 | pip-delete-this-directory.txt
40 |
41 | # Unit test / coverage reports
42 | htmlcov/
43 | .tox/
44 | .coverage
45 | .coverage.*
46 | .cache
47 | nosetests.xml
48 | coverage.xml
49 | *.cover
50 | .hypothesis/
51 |
52 | # Translations
53 | *.mo
54 | *.pot
55 |
56 | # Django stuff:
57 | *.log
58 | local_settings.py
59 |
60 | # Flask stuff:
61 | instance/
62 | .webassets-cache
63 |
64 | # Scrapy stuff:
65 | .scrapy
66 |
67 | # Sphinx documentation
68 | docs/_build/
69 |
70 | # PyBuilder
71 | target/
72 |
73 | # Jupyter Notebook
74 | .ipynb_checkpoints
75 |
76 | # pyenv
77 | .python-version
78 |
79 | # celery beat schedule file
80 | celerybeat-schedule
81 |
82 | # SageMath parsed files
83 | *.sage.py
84 |
85 | # Environments
86 | .env
87 | .venv
88 | env/
89 | venv/
90 | ENV/
91 | env.bak/
92 | venv.bak/
93 |
94 | # Spyder project settings
95 | .spyderproject
96 | .spyproject
97 |
98 | # Rope project settings
99 | .ropeproject
100 |
101 | # mkdocs documentation
102 | /site
103 |
104 | # mypy
105 | .mypy_cache/
106 |
--------------------------------------------------------------------------------
/scripts/mmd/MMD-critic/Helper.py:
--------------------------------------------------------------------------------
1 | # maintained by rajivak@utexas.edu
2 | from __future__ import absolute_import
3 | from __future__ import division
4 | from __future__ import print_function
5 | import os
6 |
7 | import numpy as np
8 |
9 | def format_numsel(numsel):
10 | ss = ''
11 | for i,j in enumerate(numsel):
12 | ss = ss + " %d:%d " %(i,j)
13 | return ss
14 |
15 | def get_train_testindices(n, ntest, seed):
16 | np.random.seed(seed)
17 | testindices = np.random.choice(n,ntest,replace=False)
18 | trainindices = np.setdiff1d( range(n), testindices)
19 | return trainindices, testindices
20 |
21 | def exit(str):
22 | print(str)
23 |     raise SystemExit(1)  # plain exit(1) would recurse into this shadowing function
24 |
25 |
26 | def dir_exists(filename):
27 | """Creates the directory of a file if the directory does not exist.
28 |
29 | Raises:
30 | IOError: If the directory could not be created (and the directory does not
31 |       exist). This may be due to, for instance, permission issues or a race
32 |       condition in which the directory is created right before makedirs runs.
33 | """
34 | dir = os.path.dirname(filename)
35 | if not os.path.exists(dir):
36 | os.makedirs(dir)
37 |
--------------------------------------------------------------------------------
/scripts/mmd/MMD-critic/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2017 Been Kim
4 |
5 | Permission is hereby granted, free of charge, to any person obtaining a copy
6 | of this software and associated documentation files (the "Software"), to deal
7 | in the Software without restriction, including without limitation the rights
8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
9 | copies of the Software, and to permit persons to whom the Software is
10 | furnished to do so, subject to the following conditions:
11 |
12 | The above copyright notice and this permission notice shall be included in all
13 | copies or substantial portions of the Software.
14 |
15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
21 | SOFTWARE.
22 |
--------------------------------------------------------------------------------
/scripts/mmd/MMD-critic/README:
--------------------------------------------------------------------------------
1 | Run run_digits.py to reproduce the numbers in the paper:
2 |
3 | ===========================================================================
4 | Examples are not Enough, Learn to Criticize! Criticism for Interpretability.
5 | ===========================================================================
6 | Been Kim*, Rajiv Khanna*, Sanmi Koyejo*. NIPS 2016
7 |
8 | paper:
9 |
10 | http://people.csail.mit.edu/beenkim/papers/KIM2016NIPS_MMD.pdf
11 |
12 |
13 | @inproceedings{kim2016MMD,
14 | title={Examples are not Enough, Learn to Criticize! Criticism for Interpretability},
15 | author={Kim, Been and Khanna, Rajiv and Koyejo, Sanmi},
16 | booktitle={Advances in Neural Information Processing Systems},
17 | year={2016}
18 | }
19 |
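20 | A minimal invocation (the two flags below are the argparse options defined in
21 | run_digits.py, shown with their default values):
22 |
23 |     python run_digits.py --data_directory ./data --output_directory ./figs/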
--------------------------------------------------------------------------------
/scripts/mmd/MMD-critic/classify.py:
--------------------------------------------------------------------------------
1 | # maintained by rajivak@utexas.edu
2 | from __future__ import absolute_import
3 | from __future__ import division
4 | from __future__ import print_function
5 | import numpy as np
6 | from sklearn.neighbors import KNeighborsClassifier
7 |
8 | # simple class to build 1NN classifier and classify using it
9 | class Classifier:
10 | model=None
11 |
12 | def __init__(self):
13 | pass
14 |
15 | def build_model(self, trainX, trainy):
16 | print("building model using %d points " %len(trainy))
17 | self.model = KNeighborsClassifier(n_neighbors=1)
18 | self.model.fit(trainX, trainy)
19 |
20 | def classify(self, testX, testy):
21 |
22 | print("classifying %d points " %len(testy))
23 | predy = self.model.predict(testX)
24 |
25 | ncorrect = np.sum(predy == testy)
26 | return 1.0 - ncorrect/(len(predy) + 0.0)
27 |
--------------------------------------------------------------------------------
/scripts/mmd/MMD-critic/data.py:
--------------------------------------------------------------------------------
1 | # maintained by rajivak@utexas.edu
2 | from __future__ import absolute_import
3 | from __future__ import division
4 | from __future__ import print_function
5 | from sklearn.datasets import load_svmlight_file
6 | import numpy as np
7 | import math  # used by Data.rbf
8 | from sklearn.metrics.pairwise import rbf_kernel
9 |
10 |
11 | # class to load and handle data
12 | class Data:
13 | X = None # n * d
14 | y = None # n
15 | gamma = None
16 | kernel = None # n* n
17 |
18 | def __init__(self):
19 | pass
20 |
21 | # only to perform cross validation for picking gamma
22 | def splittraintest(self, testpercent):
23 | ntest = int(np.shape(self.X)[0] * testpercent/100.0)
24 | testindices = np.random.choice(np.shape(self.X)[0], ntest, replace=False)
25 | self.testX = self.X[testindices, :]
26 | self.testy = self.y[testindices]
27 | trainindices = np.setdiff1d(np.arange(np.shape(self.X)[0]), testindices)
28 | self.X = self.X[trainindices,:]
29 | self.y = self.y[trainindices]
30 |
31 |
32 | def subset(self, i):
33 |         return np.where(self.y == i)[0]
34 |
35 | def load_data(self, X, y, gamma=None, docalkernel=False, savefile=None, testfile=None, dobin=False):
36 | self.X = X
37 | if dobin:
38 | bins = [-1.0, -0.67, -0.33, 0, 0.33, 0.67, 1.0]
39 | # bins = [-1.0, 0, 1.0]
40 | binned = np.digitize(self.X, bins )
41 | self.X=np.array([bins[binned[i, j] - 1] for i in range(np.shape(self.X)[0]) for j in range(np.shape(self.X)[1])]).reshape(np.shape(self.X))
42 |
43 | self.y = y
44 | if testfile is not None:
45 | dat2 = load_svmlight_file(testfile)
46 | self.testX = dat2[0].todense()
47 | if dobin:
48 | bins = [-1.0, -0.67, -0.33, 0, 0.33, 0.67, 1.0]
49 | binned = np.digitize(self.testX, bins)
50 | self.testX = np.array([bins[binned[i, j] - 1] for i in range(np.shape(self.testX)[0]) for j in range(np.shape(self.testX)[1])]).reshape(np.shape(self.testX))
51 |
52 | self.testy = dat2[1]
53 | # print(np.shape(self.X))
54 |
55 | self.gamma = gamma
56 | self.kernel = rbf_kernel(self.X, gamma=gamma)
57 |
58 | def load_svmlight(self, filename, gamma=None, docalkernel=False, savefile=None, testfile=None, dobin=False):
59 | data = load_svmlight_file(filename)
60 | self.load_data(data[0].todense(), data[1], gamma, docalkernel, savefile, testfile, dobin)
61 |
62 | def calculate_kernel(self, g=None):
63 | if g is None:
64 | if self.gamma is None:
65 | print("gamma not provided!")
66 | exit(1)
67 | else:
68 | self.kernel = rbf_kernel(self.X, gamma=self.gamma)
69 | else:
70 | self.kernel = rbf_kernel(self.X, gamma=g)
71 |
72 | # only calculate distance within class. across class, distance = 0
73 | def calculate_kernel_individual(self, g=None):
74 | touseg = g
75 | if touseg is None:
76 | touseg = self.gamma
77 | if touseg is None:
78 | print("gamma not provided!")
79 | exit(1)
80 | self.kernel = np.zeros((np.shape(self.X)[0], np.shape(self.X)[0]) )
81 | sortind = np.argsort(self.y)
82 | self.X = self.X[sortind, :]
83 | self.y = self.y[sortind]
84 |
85 |         for i in np.arange(10):  # assumes 10 classes, labeled 1..10 (USPS digits)
86 | j = i+1
87 | ind = np.where(self.y == j)[0]
88 | startind = np.min(ind)
89 | endind = np.max(ind)+1
90 |             self.kernel[startind:endind, startind:endind] = rbf_kernel(self.X[startind:endind, :], gamma=touseg)
91 |
92 |
93 | def loadstate(self,filename):
94 | temp = np.load(filename)
95 | self.X = temp['X']
96 | self.y = temp['y']
97 | self.gamma = temp['gamma']
98 | self.kernel = temp['kernel']
99 |
100 | def setgamma(self, newgamma):
101 | if self.kernel is not None:
102 | temp = np.log(self.kernel)
103 | temp = temp * newgamma/self.gamma
104 | self.kernel = np.exp(temp)
105 | self.gamma = newgamma
106 | if self.kernel is None:
107 | self.calculate_kernel()
108 |
109 | def savestate(self, outpfile):
110 | np.savez(file=outpfile, X=self.X, y=self.y, gamma=self.gamma, kernel=self.kernel)
111 |
112 |     def rbf(self, v1, v2):
113 | dd = v1 - v2
114 | res = - self.gamma * np.dot(dd,dd)
115 | return math.exp(res)
116 |
117 | def getsim(self, i, j):
118 |         if self.kernel is not None:
119 |             return self.kernel[i, j]
120 |         else:
121 |             return self.rbf(self.X[i, :], self.X[j, :])
122 |
123 |
124 | if __name__ == "__main__":
125 | import matplotlib.pyplot as plt
126 | file = 'data/usps'
127 | data=load_svmlight_file(file)
128 | X = data[0].todense()
129 | print(data[1])
130 | plt.imshow(X[2,:].reshape((16,16)))
131 | plt.show()
132 |
133 |
134 |
135 |
136 |
--------------------------------------------------------------------------------
/scripts/mmd/MMD-critic/mmd.py:
--------------------------------------------------------------------------------
1 | # maintained by rajivak@utexas.edu
2 | from __future__ import absolute_import
3 | from __future__ import division
4 | from __future__ import print_function
5 | import numpy as np
6 | # from mpi4py import MPI
7 | import sys
8 | import math
9 |
10 |
11 | ##############################################################################################################################
12 | # function to select criticisms
13 | # ARGS:
14 | # K: Kernel matrix
15 | # selectedprotos: prototypes already selected
16 | # m : number of criticisms to be selected
17 | # reg: regularizer type.
18 | # is_K_sparse: True means K is a pre-computed csc sparse matrix; False means it is a dense matrix.
19 | # RETURNS: indices selected as criticisms
20 | ##############################################################################################################################
21 | def select_criticism_regularized(K, selectedprotos, m, reg='logdet', is_K_sparse=True):
22 |
23 | n = np.shape(K)[0]
24 | if reg in ['None','logdet','iterative']:
25 | pass
26 | else:
27 | print("wrong regularizer :" + reg)
28 | exit(1)
29 | options = dict()
30 |
31 | selected = np.array([], dtype=int)
32 | candidates2 = np.setdiff1d(range(n), selectedprotos)
33 | inverse_of_prev_selected = None # should be a matrix
34 |
35 | if is_K_sparse:
36 | colsum = np.array(K.sum(0)).ravel()/n
37 | else:
38 | colsum = np.sum(K, axis=0)/n
39 |
40 | for i in range(m):
41 | maxx = -sys.float_info.max
42 | argmax = -1
43 | candidates = np.setdiff1d(candidates2, selected)
44 |
45 | s1array = colsum[candidates]
46 |
47 | temp = K[selectedprotos, :][:, candidates]
48 | if is_K_sparse:
49 | s2array = temp.sum(0)
50 | else:
51 | s2array = np.sum(temp, axis=0)
52 |
53 | s2array = s2array / (len(selectedprotos))
54 |
55 | s1array = np.abs(s1array - s2array)
56 | if reg == 'logdet':
57 | if inverse_of_prev_selected is not None: # first call has been made already
58 | temp = K[selected, :][:, candidates]
59 | if is_K_sparse:
60 | temp2 = temp.transpose().dot(inverse_of_prev_selected)
61 | regularizer = temp.transpose().multiply(temp2)
62 | regcolsum = regularizer.sum(1).ravel()# np.sum(regularizer, axis=0)
63 | regularizer = np.abs(K.diagonal()[candidates] - regcolsum)
64 |
65 | else:
66 | # hadamard product
67 | temp2 = np.array(np.dot(inverse_of_prev_selected, temp))
68 | regularizer = temp2 * temp
69 | regcolsum = np.sum(regularizer, axis=0)
70 | regularizer = np.log(np.abs(np.diagonal(K)[candidates] - regcolsum))
71 | s1array = s1array + regularizer
72 | else:
73 | if is_K_sparse:
74 | s1array = s1array - np.log(np.abs(K.diagonal()[candidates]))
75 | else:
76 | s1array = s1array - np.log(np.abs(np.diagonal(K)[candidates]))
77 | argmax = candidates[np.argmax(s1array)]
78 | maxx = np.max(s1array)
79 |
80 | selected = np.append(selected, argmax)
81 | if reg == 'logdet':
82 | KK = K[selected,:][:,selected]
83 | if is_K_sparse:
84 | KK = KK.todense()
85 |
86 | inverse_of_prev_selected = np.linalg.inv(KK) # shortcut
87 | if reg == 'iterative':
88 | selectedprotos = np.append(selectedprotos, argmax)
89 |
90 | return selected
91 |
92 | ##############################################################################################################################
93 | # Function choose m of all rows by MMD as per kernelfunc
94 | # ARGS:
95 | # K : kernel matrix
96 | # candidate_indices : array of potential choices for selections, returned values are chosen from these indices
97 | # m: number of selections to be made
98 | # is_K_sparse: True means K is a pre-computed csc sparse matrix; False means it is a dense matrix.
99 | # RETURNS: subset of candidate_indices which are selected as prototypes
100 | ##############################################################################################################################
101 |
102 | def greedy_select_protos(K, candidate_indices, m, is_K_sparse=False):
103 |
104 | if len(candidate_indices) != np.shape(K)[0]:
105 | K = K[:,candidate_indices][candidate_indices,:]
106 |
107 | n = len(candidate_indices)
108 |
109 | # colsum = np.array(K.sum(0)).ravel() # same as rowsum
110 | if is_K_sparse:
111 | colsum = 2*np.array(K.sum(0)).ravel() / n
112 | else:
113 | colsum = 2*np.sum(K, axis=0) / n
114 |
115 | selected = np.array([], dtype=int)
116 | value = np.array([])
117 | for i in range(m):
118 | maxx = -sys.float_info.max
119 | argmax = -1
120 | candidates = np.setdiff1d(range(n), selected)
121 |
122 | s1array = colsum[candidates]
123 | if len(selected) > 0:
124 | temp = K[selected, :][:, candidates]
125 | if is_K_sparse:
126 | # s2array = temp.sum(0) *2
127 | s2array = temp.sum(0) * 2 + K.diagonal()[candidates]
128 |
129 | else:
130 | s2array = np.sum(temp, axis=0) *2 + np.diagonal(K)[candidates]
131 |
132 | s2array = s2array/(len(selected) + 1)
133 |
134 | s1array = s1array - s2array
135 |
136 | else:
137 | if is_K_sparse:
138 | s1array = s1array - (np.abs(K.diagonal()[candidates]))
139 | else:
140 | s1array = s1array - (np.abs(np.diagonal(K)[candidates]))
141 |
142 | argmax = candidates[np.argmax(s1array)]
143 | # print("max %f" %np.max(s1array))
144 |
145 | selected = np.append(selected, argmax)
146 | # value = np.append(value,maxx)
147 | KK = K[selected, :][:, selected]
148 | if is_K_sparse:
149 | KK = KK.todense()
150 |
151 | inverse_of_prev_selected = np.linalg.inv(KK) # shortcut
152 |
153 | return candidate_indices[selected]
154 |
--------------------------------------------------------------------------------
/scripts/mmd/MMD-critic/run_digits.py:
--------------------------------------------------------------------------------
1 | # maintained by rajivak@utexas.edu
2 | from __future__ import absolute_import
3 | from __future__ import division
4 | from __future__ import print_function
5 | import argparse
6 | import os
7 | from data import Data
8 | from mmd import select_criticism_regularized, greedy_select_protos
9 | import matplotlib.pyplot as plt
10 | from pylab import *
11 | from matplotlib import gridspec
12 | from classify import Classifier
13 | #from mpi4py import MPI
14 | import Helper
15 |
16 |
17 | DATA_DIRECTORY = os.path.join(os.getcwd(), 'data')
18 |
19 |
20 | ##############################################################################################################################
21 | # plotter function to draw the selected prototypes/criticisms
22 | # ARGS :
23 | # xx : the matrix of selected pictures, each row is the representation of the digit picture
24 | # selectedy : true classification of the picture, only used to print in order
25 | # fileprefix: path prefix
26 | # printselectionnumbers : if True, the number of selected digits of each type is also written to the output file.
27 | # RETURNS: nothing
28 | ##############################################################################################################################
29 | def plotfigs2(xx, selectedy, fileprefix=None, printselectionnumbers = False):
30 | num_selected = np.array([0] * 10)
31 | for ii in range(10):
32 | num_selected[ii] = len(np.where(selectedy == (ii + 1))[0])
33 | print(ii, num_selected[ii])
34 |
35 | totm = np.shape(xx)[0]
36 | print("number of images being printed %d" %totm)
37 | perpic_m = 60
38 | begin_at = 0
39 | counter = 0
40 | perrow = 10
41 |
42 | while counter < int(totm/perpic_m) + 1:
43 |
44 | counter += 1
45 | print("counter %d " % counter)
46 |
47 | offset = 0
48 | if begin_at == 0:
49 | offset = 5 # for text about number of protos/crits of each type
50 | if not printselectionnumbers:
51 | offset = 0
52 |
53 | # m=m+offset # for num_selected
54 | gs = gridspec.GridSpec(int(perpic_m/perrow)+int(offset/perrow),
55 | int(perrow), wspace=0.0, hspace=0.0)
56 | fig = plt.figure()
57 |
58 | if begin_at == 0 and printselectionnumbers:
59 | ax=fig.add_subplot(gs[0,:])
60 | ax.text(0.1,0.5,Helper.format_numsel(num_selected))
61 | ax.axis('off')
62 |
63 | endd = begin_at + offset+ perpic_m
64 | if endd-offset > totm:
65 | endd = totm +offset
66 | print(" begin %d, end %d" %(begin_at + offset, endd))
67 | for i in np.array(range(begin_at + offset, endd)):
68 | ax = fig.add_subplot(gs[int(i - begin_at)])
69 | #ax.imshow(xx[i - offset, :].reshape((16, 16)), cmap="Greys_r")
70 | ax.imshow(xx[int(i - offset), :].reshape((16, 16)))
71 | ax.axis('off')
72 |
73 |         if fileprefix is not None:
74 |             file = fileprefix + str(counter) + '.png'
75 |             # print("saving file")
76 |             plt.savefig(file, dpi=2000)
77 |
78 | begin_at += perpic_m
79 |
80 |
81 |
82 | ##############################################################################################################################
83 | # this function selects prototypes/criticisms and outputs the respective pictures. Also does a 1-NN classification test
84 | # ARGS:
85 | # filename: the path to usps file
86 | # gamma: parameter for the kernel exp( - gamma * \| x1 - x2 \|_2 )
87 | # ktype: kernel type, 0 for global, 1 for local
88 | # outfig: path where selected prototype pictures are outputted, can be None when outputting of pictures is skipped
89 | # critoutfig: path where selected criticism pictures are outputted, can be None
90 | # testfile : path to the test usps.t
91 | # RETURNS: returns indices of selected prototypes, criticisms and the built data structure that contains the loaded usps dataset
92 | ##############################################################################################################################
93 | def run(filename, gamma, m, k, ktype, outfig, critoutfig,testfile):
94 |
95 | digitsdat = Data()
96 | digitsdat.load_svmlight(filename, gamma=gamma, docalkernel=False, savefile=None, testfile=testfile, dobin=False)
97 |
98 | if ktype == 0:
99 | digitsdat.calculate_kernel()
100 | print("Running Kernel type : global ")
101 | else:
102 | digitsdat.calculate_kernel_individual()
103 | print("Running Kernel type : local ")
104 |
105 |
106 |
107 | # selected = greedy_parallel(digitsdat.kernel, m)
108 | # print(np.sort(selected))
109 | selected = greedy_select_protos(digitsdat.kernel, np.array(range(np.shape(digitsdat.kernel)[0])), m)
110 | # print(np.sort(selected))
111 | # critselected = select_criticism(digitsdat.kernel, selected, k)
112 | selectedy = digitsdat.y[selected]
113 | sortedindx = np.argsort(selectedy)
114 | critselected= None
115 |
116 | if outfig is not None:
117 | plotfigs2(digitsdat.X[selected[sortedindx], :], selectedy[sortedindx], outfig)
118 |
119 |
120 | if k > 0:
121 | critselected = select_criticism_regularized(digitsdat.kernel, selected, k, is_K_sparse=False, reg='logdet')
122 |
123 | critselectedy = digitsdat.y[critselected]
124 | critsortedindx = np.argsort(critselectedy)
125 |
126 | if critoutfig is not None:
127 |         plotfigs2(digitsdat.X[critselected[critsortedindx], :], critselectedy[critsortedindx], critoutfig + 'logdet')
128 |
129 | return selected, critselected, digitsdat
130 |
131 | #########################################################################################################################
132 | # build a 1 NN classifier based on selected prototypes, test it against testfile
133 | # ARGS:
134 | # digitsdat : Data() structure already built. should also have built the kernels and loaded the test file as well.
135 | # selected : the indices of selected prototypes, in order of their selection (the order matters for all_test_m to be viable).
136 | # all_test_m : array of number of prototypes to be used to build classifier. Since the selections are greedy, one can select for 5000 prototypes,
137 | # and test for num_prototypes = 10, 100, 1000, 4000, etc.
138 | ##############################################################################################################################
139 | def test_1NN(digitsdat, selected, all_test_m):
140 |
141 | for testm in all_test_m:
142 |
143 | classifier = Classifier()
144 | classifier.build_model(digitsdat.X[selected[0:testm], :], digitsdat.y[ selected[0:testm]])
145 | print("m=%d error=%f" % ( testm, classifier.classify(digitsdat.testX, digitsdat.testy)))
146 |
147 | # uncomment for stats on how many protos were selected for each type of digit.
148 | #num_selected = np.array([0] * 10)
149 |
150 | #for ii in range(10):
151 | # num_selected[ii] = len(np.where(selectedy == (ii + 1))[0])
152 | # print(ii, num_selected[ii])
153 |
154 |
155 | #########################################################################################################################
156 | #########################################################################################################################
157 | #########################################################################################################################
158 | # start here
159 | def main(
160 | data_prefix,
161 | output_prefix,
162 | gamma,
163 | m,
164 | alltestm,
165 | kerneltype,
166 | do_output_pics):
167 | ioff()
168 |
169 | outfig = None
170 | critoutfig = None
171 |
172 | k = 0 # number of criticisms
173 |
174 | if do_output_pics == 1:
175 | outfig = os.path.join(output_prefix, 'images/%d/protos' % m)
176 | critoutfig = os.path.join(output_prefix, 'images/%d/crit' % m)
177 |
178 | Helper.dir_exists(outfig)
179 |
180 | selected, critselected, digitsdat = run(
181 | os.path.join(data_prefix, 'usps'),
182 | gamma,
183 | m,
184 | k,
185 | kerneltype,
186 | outfig,
187 | critoutfig,
188 | os.path.join(data_prefix, 'usps.t'))
189 |
190 | test_1NN(digitsdat, selected, alltestm)
191 |
192 | print("...done")
193 |
194 |
195 | if __name__ == '__main__':
196 | parser = argparse.ArgumentParser()
197 | parser.add_argument(
198 | "--data_directory",
199 | type=str,
200 | default=DATA_DIRECTORY,
201 | help="The directory that contains data such as the usps file.")
202 | parser.add_argument(
203 | "--output_directory",
204 | type=str,
205 | default="./figs/",
206 | help="The directory in which to output data.")
207 | FLAGS, unparsed = parser.parse_known_args()
208 |
209 | data_prefix = FLAGS.data_directory
210 | output_prefix = os.path.join(FLAGS.output_directory, "data")
211 | gamma = 0.026 # kernel parameter, obtained after cross validation
212 |
213 | #m= 4433 # total number of prototypes to select
214 | #alltestm = np.array([4433, 3772, 3135, 2493, 1930, 1484, 1145, 960, 828, 715, 643, 584, 492, 410, 329, 286, 219, 185, 130, 110]) # test using these number of prototypes
215 |
216 | m = 50 # total number of prototypes to select
217 | alltestm = np.array(
218 | [410, 329, 286, 219, 185, 130,
219 | 110]) # test using these number of prototypes
220 |
221 | do_output_pics = 1
222 | kernel_type = 1 # 1 for local, 0 for global
223 |
224 | main(data_prefix, output_prefix, gamma, m, alltestm, kernel_type, do_output_pics)
225 |
226 |
227 |
228 |
229 |
230 |
--------------------------------------------------------------------------------
/scripts/redirect.py:
--------------------------------------------------------------------------------
1 |
2 | redirects = {
3 | "properties.html": "interpretability.html#properties",
4 | "explanation.html": "interpretability.html#explanation",
5 | "bike-data.html": "data.html#bike-data",
6 | "spam-data.html": "data.html#spam-data",
7 | "cervical.html": "data.html#penguins",
8 | "terminology.html": "what-is-machine-learning.html",
9 | "global-methods.html": "overview.html#global-methods",
10 | "example-based.html": "overview.html#example-based",
11 | "agnostic.html": "overview.html#agnostic",
12 | "neural-networks.html": "overview.html#neural-networks",
13 | "simple.html": "overview.html#simple",
14 | "preface-by-the-author.html": "index.html",
15 | "other-interpretable.html": "overview.html",
16 | "scope-of-interpretability.html": "overview.html",
17 | "taxonomy-of-interpretability-methods.html": "overview.html",
18 | "evaluation-of-interpretability.html": "evaluation.html",
19 | "interpretability-importance.html": "interpretability.html",
20 | }
21 |
22 | import os
23 |
24 | output_dir = "./_book/" # Change to "_site" if needed
25 | os.makedirs(output_dir, exist_ok=True)
26 |
27 | html_template = """<!DOCTYPE html>
28 | <html>
29 | <head>
30 | <meta charset="utf-8">
31 | <meta http-equiv="refresh" content="0; url={new}">
32 | <link rel="canonical" href="{new}">
33 | </head>
34 | <body>
35 | <p>If you are not redirected, <a href="{new}">click here</a>.</p>
36 | </body>
37 | </html>"""
38 |
39 | for old, new in redirects.items():
40 | with open(os.path.join(output_dir, old), "w") as f:
41 | f.write(html_template.format(new=new))
42 |
43 | print("Redirect pages generated.")
44 |
--------------------------------------------------------------------------------
/scripts/shap/.gitignore:
--------------------------------------------------------------------------------
1 | shap
2 |
--------------------------------------------------------------------------------
/scripts/shap/requirements.txt:
--------------------------------------------------------------------------------
1 | attrs==19.1.0
2 | backcall==0.1.0
3 | bleach==3.1.4
4 | cycler==0.10.0
5 | decorator==4.4.0
6 | defusedxml==0.6.0
7 | entrypoints==0.3
8 | imageio==2.5.0
9 | ipykernel==5.1.2
10 | ipython==7.7.0
11 | ipython-genutils==0.2.0
12 | ipywidgets==7.5.1
13 | jedi==0.14.1
14 | Jinja2==2.10.1
15 | joblib==0.13.2
16 | jsonschema==3.0.2
17 | jupyter-client==5.3.1
18 | jupyter-core==4.5.0
19 | kiwisolver==1.1.0
20 | MarkupSafe==1.1.1
21 | mistune==0.8.4
22 | nbconvert==5.5.0
23 | nbformat==4.4.0
24 | networkx==2.3
25 | notebook==6.1.5
26 | numpy==1.17.0
27 | pandas==0.25.0
28 | pandocfilters==1.4.2
29 | parso==0.5.1
30 | pexpect==4.7.0
31 | pickleshare==0.7.5
32 | Pillow==6.2.0
33 | pkg-resources==0.0.0
34 | prometheus-client==0.7.1
35 | prompt-toolkit==2.0.9
36 | ptyprocess==0.6.0
37 | Pygments==2.4.2
38 | pyparsing==2.4.2
39 | pyrsistent==0.15.4
40 | python-dateutil==2.8.0
41 | pytz==2019.2
42 | PyWavelets==1.0.3
43 | pyzmq==18.0.2
44 | scikit-image==0.15.0
45 | scikit-learn==0.21.3
46 | scipy==1.3.0
47 | Send2Trash==1.5.0
48 | shap==0.29.3
49 | six==1.12.0
50 | terminado==0.8.2
51 | testpath==0.4.2
52 | tornado==6.0.3
53 | tqdm==4.32.2
54 | traitlets==4.3.2
55 | wcwidth==0.1.7
56 | webencodings==0.5.1
57 | widgetsnbextension==3.5.1
58 |
--------------------------------------------------------------------------------