├── .conda
├── conda_build_config.yaml
└── meta.yaml
├── .github
└── workflows
│ ├── python-package-TF2.17.yml
│ ├── python-package-TF2.5.yml
│ ├── python-package-cron.yml
│ ├── python-package.yml
│ ├── python-publish.yml
│ └── sphinx-publish.yml
├── .gitignore
├── CITATION.cff
├── LICENSE
├── MANIFEST.in
├── README.md
├── VERSION
├── dockerfiles
├── cpu.Dockerfile
└── gpu.Dockerfile
├── docs
├── Makefile
├── api
│ ├── tf_keras_vis.activation_maximization.callbacks.md
│ ├── tf_keras_vis.activation_maximization.input_modifiers.md
│ ├── tf_keras_vis.activation_maximization.md
│ ├── tf_keras_vis.activation_maximization.regularizers.md
│ ├── tf_keras_vis.gradcam.md
│ ├── tf_keras_vis.gradcam_plus_plus.md
│ ├── tf_keras_vis.layercam.md
│ ├── tf_keras_vis.md
│ ├── tf_keras_vis.saliency.md
│ ├── tf_keras_vis.scorecam.md
│ ├── tf_keras_vis.utils.md
│ ├── tf_keras_vis.utils.model_modifiers.md
│ └── tf_keras_vis.utils.scores.md
├── conf.py
├── examples
│ ├── attentions.ipynb
│ ├── images
│ │ ├── activation_maximization.0.gif
│ │ ├── bear.jpg
│ │ ├── faster-scorecam.png
│ │ ├── goldfish.jpg
│ │ ├── gradcam.png
│ │ ├── gradcam_plus_plus.png
│ │ ├── input-images.png
│ │ ├── layercam.png
│ │ ├── scorecam.png
│ │ ├── smoothgrad.png
│ │ ├── soldiers.jpg
│ │ ├── vanilla-saliency.png
│ │ ├── visualize-dense-layer.png
│ │ └── visualize-filters.png
│ ├── index.md
│ ├── visualize_conv_filters.ipynb
│ └── visualize_dense_layer.ipynb
├── images
├── index.md
├── license.md
└── quick-start.md
├── examples
├── README.md
├── attentions.ipynb
├── images
├── visualize_conv_filters.ipynb
└── visualize_dense_layer.ipynb
├── setup.cfg
├── setup.py
├── tests
├── __init__.py
└── tf_keras_vis
│ ├── __init__.py
│ ├── activation_maximization
│ ├── __init__.py
│ ├── activation_maximization_test.py
│ ├── callbacks_test.py
│ └── input_modifiers_test.py
│ ├── attentions_test.py
│ ├── conftest.py
│ ├── gradcam_plus_plus_test.py
│ ├── gradcam_test.py
│ ├── saliency_test.py
│ ├── scorecam_test.py
│ ├── tf_keras_vis_test.py
│ └── utils
│ ├── __init__.py
│ ├── model_modifiers_test.py
│ ├── scores_test.py
│ └── utils_test.py
└── tf_keras_vis
├── __init__.py
├── activation_maximization
├── __init__.py
├── callbacks.py
├── input_modifiers.py
└── regularizers.py
├── gradcam.py
├── gradcam_plus_plus.py
├── layercam.py
├── saliency.py
├── scorecam.py
└── utils
├── __init__.py
├── callbacks.py
├── input_modifiers.py
├── losses.py
├── model_modifiers.py
├── regularizers.py
├── scores.py
└── test.py
/.conda/conda_build_config.yaml:
--------------------------------------------------------------------------------
1 | python:
2 | - "3.7"
3 | - "3.8"
4 | - "3.9"
5 | - "3.10"
--------------------------------------------------------------------------------
/.conda/meta.yaml:
--------------------------------------------------------------------------------
1 | {% set data = load_setup_py_data() %}
2 |
3 | package:
4 | name: tf-keras-vis
5 | version: {{ data['version'] }}
6 |
7 | source:
8 | path: ../
9 |
10 | build:
11 | number: 0
12 | script: python setup.py install --single-version-externally-managed --record=record.txt
13 |
14 | requirements:
15 | build:
16 | - python >=3.7,<3.11
17 | - setuptools
18 | {% for package in data['install_requires'] %}
19 | - {{ package.lower() }}
20 | {% endfor %}
21 |
22 | run:
23 | {% for package in data['install_requires'] %}
24 | - {{ package.lower() }}
25 | {% endfor %}
26 | - tensorflow
27 |
28 | test:
29 | imports:
30 | - tf_keras_vis
31 |
32 | about:
33 | home: {{ data['url'] }}
34 | license_file: LICENSE
35 | description: {{ data['description'] }}
36 |
--------------------------------------------------------------------------------
/.github/workflows/python-package-TF2.17.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3 |
4 | name: Python package TF2.17
5 |
6 | on:
7 | push:
8 | branches: [ master ]
9 | pull_request:
10 |
11 | env:
12 | TF_KERAS_VIS_MAX_STEPS: 3
13 |
14 | jobs:
15 | build:
16 | runs-on: ubuntu-24.04
17 | strategy:
18 | fail-fast: false
19 | matrix:
20 | python-version: ['3.8', '3.9', '3.10']
21 | tensorflow: ['2.6.0', '2.7.0', '2.8.0', '2.9.0', '2.10.0', '2.11.0', '2.12.0', '2.13.0', '2.14.0', '2.15.0', '2.16.0', '2.17.0']
22 | include:
23 | - python-version: '3.11'
24 | tensorflow: '2.12.0'
25 | - python-version: '3.11'
26 | tensorflow: '2.13.0'
27 | - python-version: '3.11'
28 | tensorflow: '2.14.0'
29 | - python-version: '3.11'
30 | tensorflow: '2.15.0'
31 | - python-version: '3.11'
32 | tensorflow: '2.16.0'
33 | - python-version: '3.11'
34 | tensorflow: '2.17.0'
35 | - python-version: '3.12'
36 | tensorflow: '2.16.0'
37 | - python-version: '3.12'
38 | tensorflow: '2.17.0'
39 |       exclude:
40 |         - python-version: '3.8'
41 |           tensorflow: '2.14.0'
42 |         - python-version: '3.8'
43 |           tensorflow: '2.15.0'
44 |         - python-version: '3.8'
45 |           tensorflow: '2.16.0'
46 |         - python-version: '3.8'
47 |           tensorflow: '2.17.0'
48 |         - python-version: '3.10'
49 |           tensorflow: '2.6.0'
50 |         - python-version: '3.10'
51 |           tensorflow: '2.7.0'
48 |
49 | steps:
50 | - uses: actions/checkout@v4
51 | - name: Set up Python ${{ matrix.python-version }}
52 | uses: actions/setup-python@v5
53 | with:
54 | python-version: ${{ matrix.python-version }}
55 | - name: Upgrade pip
56 | run: |
57 | python -m pip install --upgrade pip
58 | - name: Install dependencies
59 | run: |
60 | python -m pip install --upgrade -e .[develop,examples,numpy1x] tensorflow~=${{ matrix.tensorflow }}
61 | - name: Test with pytest
62 | run: |
63 | pytest -n auto --dist loadscope
64 | timeout-minutes: 60
65 | - name: Test attentions.ipynb
66 | run: |
67 | jupyter-nbconvert --ExecutePreprocessor.timeout=600 --to notebook --execute examples/attentions.ipynb
68 | - name: Test visualize_dense_layer.ipynb
69 | run: |
70 | jupyter-nbconvert --ExecutePreprocessor.timeout=600 --to notebook --execute examples/visualize_dense_layer.ipynb
71 | - name: Test visualize_conv_filters.ipynb
72 | run: |
73 | jupyter-nbconvert --ExecutePreprocessor.timeout=600 --to notebook --execute examples/visualize_conv_filters.ipynb
74 |
--------------------------------------------------------------------------------
/.github/workflows/python-package-TF2.5.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3 |
4 | name: Python package TF2.5
5 |
6 | on:
7 | push:
8 | branches: [ master ]
9 | pull_request:
10 |
11 | env:
12 | TF_KERAS_VIS_MAX_STEPS: 3
13 |
14 | jobs:
15 | build:
16 | runs-on: ubuntu-22.04
17 | strategy:
18 | fail-fast: false
19 | matrix:
20 | python-version: ['3.7', '3.8']
21 | tensorflow: ['2.0.0', '2.1.0', '2.2.0', '2.3.0', '2.4.0', '2.5.0']
22 | include:
23 | - python-version: '3.9'
24 | tensorflow: '2.5.0'
25 | exclude:
26 | - python-version: '3.8'
27 | tensorflow: '2.0.0'
28 | - python-version: '3.8'
29 | tensorflow: '2.1.0'
30 | steps:
31 | - uses: actions/checkout@v4
32 | - name: Set up Python ${{ matrix.python-version }}
33 | uses: actions/setup-python@v5
34 | with:
35 | python-version: ${{ matrix.python-version }}
36 | - name: Upgrade pip
37 | run: |
38 | python -m pip install --upgrade pip
39 | - name: Install dependencies
40 | run: |
41 | python -m pip install --upgrade -e .[develop,examples,numpy1x,protobuf3] tensorflow~=${{ matrix.tensorflow }}
42 | - name: Test with pytest
43 | run: |
44 | pytest -n auto --dist loadscope
45 | timeout-minutes: 60
46 | - name: Test attentions.ipynb
47 | run: |
48 | jupyter-nbconvert --ExecutePreprocessor.timeout=600 --to notebook --execute examples/attentions.ipynb
49 | - name: Test visualize_dense_layer.ipynb
50 | run: |
51 | jupyter-nbconvert --ExecutePreprocessor.timeout=600 --to notebook --execute examples/visualize_dense_layer.ipynb
52 | - name: Test visualize_conv_filters.ipynb
53 | run: |
54 | jupyter-nbconvert --ExecutePreprocessor.timeout=600 --to notebook --execute examples/visualize_conv_filters.ipynb
55 |
--------------------------------------------------------------------------------
/.github/workflows/python-package-cron.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3 |
4 | name: Python package run by Cron
5 |
6 | on:
7 | push:
8 | branches: [ master ]
9 | pull_request:
10 | schedule:
11 | - cron: "30 1 * * *"
12 |
13 | env:
14 | TF_KERAS_VIS_MAX_STEPS: 3
15 |
16 | jobs:
17 | build:
18 | runs-on: ubuntu-latest
19 | strategy:
20 | fail-fast: false
21 | matrix:
22 | python-version: ['3.9', '3.10', '3.11', '3.12']
23 | tensorflow: [current, pre, nightly]
24 | steps:
25 | - uses: actions/checkout@v4
26 | - name: Set up Python ${{ matrix.python-version }}
27 | uses: actions/setup-python@v5
28 | with:
29 | python-version: ${{ matrix.python-version }}
30 | - name: Upgrade pip
31 | run: |
32 | python -m pip install --upgrade pip
33 | - name: Install dependencies and current Tensorflow
34 | if: matrix.tensorflow == 'current'
35 | run: |
36 | python -m pip install --upgrade -e .[develop,examples] --upgrade tensorflow
37 | - name: Install dependencies and pre-released Tensorflow
38 | if: matrix.tensorflow == 'pre'
39 | run: |
40 | python -m pip install --upgrade -e .[develop,examples] --upgrade --pre tensorflow
41 | - name: Install dependencies and nightly-built Tensorflow
42 | if: matrix.tensorflow == 'nightly'
43 | run: |
44 | python -m pip install --upgrade -e .[develop,examples] --upgrade tf-nightly
45 | - name: Test with pytest
46 | run: |
47 | pytest -n auto --dist loadscope
48 | timeout-minutes: 60
49 | - name: Test attentions.ipynb
50 | run: |
51 | jupyter-nbconvert --ExecutePreprocessor.timeout=600 --to notebook --execute examples/attentions.ipynb
52 | - name: Test visualize_dense_layer.ipynb
53 | run: |
54 | jupyter-nbconvert --ExecutePreprocessor.timeout=600 --to notebook --execute examples/visualize_dense_layer.ipynb
55 | - name: Test visualize_conv_filters.ipynb
56 | run: |
57 | jupyter-nbconvert --ExecutePreprocessor.timeout=600 --to notebook --execute examples/visualize_conv_filters.ipynb
58 |
--------------------------------------------------------------------------------
/.github/workflows/python-package.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3 |
4 | name: Python package
5 |
6 | on:
7 | push:
8 | branches: [ master ]
9 | pull_request:
10 |
11 | env:
12 | TF_KERAS_VIS_MAX_STEPS: 3
13 |
14 | jobs:
15 | build:
16 | runs-on: ubuntu-latest
17 | strategy:
18 | fail-fast: false
19 | matrix:
20 | python-version: ['3.9', '3.10', '3.11', '3.12']
21 |         tensorflow: ['2.18.0', '2.19.0']
22 |         numpy: ['numpy1x', 'numpy2x']
23 | steps:
24 | - uses: actions/checkout@v4
25 | - name: Set up Python ${{ matrix.python-version }}
26 | uses: actions/setup-python@v5
27 | with:
28 | python-version: ${{ matrix.python-version }}
29 | - name: Upgrade pip
30 | run: |
31 | python -m pip install --upgrade pip
32 | - name: Install dependencies
33 | run: |
34 |           python -m pip install --upgrade -e .[develop,examples,${{ matrix.numpy }}] tensorflow~=${{ matrix.tensorflow }}
35 | - name: Test with pytest
36 | run: |
37 | pytest -n auto --dist loadscope
38 | timeout-minutes: 60
39 | - name: Test attentions.ipynb
40 | run: |
41 | jupyter-nbconvert --ExecutePreprocessor.timeout=600 --to notebook --execute examples/attentions.ipynb
42 | - name: Test visualize_dense_layer.ipynb
43 | run: |
44 | jupyter-nbconvert --ExecutePreprocessor.timeout=600 --to notebook --execute examples/visualize_dense_layer.ipynb
45 | - name: Test visualize_conv_filters.ipynb
46 | run: |
47 | jupyter-nbconvert --ExecutePreprocessor.timeout=600 --to notebook --execute examples/visualize_conv_filters.ipynb
48 |
--------------------------------------------------------------------------------
/.github/workflows/python-publish.yml:
--------------------------------------------------------------------------------
1 | # This workflows will upload a Python Package using Twine when a release is created
2 | # For more information see: https://help.github.com/en/actions/language-and-framework-guides/using-python-with-github-actions#publishing-to-package-registries
3 |
4 | name: Upload Python Package
5 |
6 | on:
7 | release:
8 | types: [published]
9 |
10 | jobs:
11 | deploy:
12 |
13 | runs-on: ubuntu-latest
14 |
15 | steps:
16 | - uses: actions/checkout@v4
17 | - name: Set up Python
18 | uses: actions/setup-python@v5
19 | with:
20 |           python-version: '3.9'
21 | - name: Install dependencies
22 | run: |
23 | python -m pip install --no-cache-dir --upgrade pip
24 | python -m pip install --no-cache-dir --upgrade setuptools wheel twine
25 | - name: Build and publish
26 | env:
27 | TWINE_USERNAME: ${{ secrets.PYPI_USERNAME }}
28 | TWINE_PASSWORD: ${{ secrets.PYPI_PASSWORD }}
29 | run: |
30 | python setup.py sdist bdist_wheel
31 | twine upload dist/*
32 |
--------------------------------------------------------------------------------
/.github/workflows/sphinx-publish.yml:
--------------------------------------------------------------------------------
1 | # This workflow will install Python dependencies, run tests and lint with a variety of Python versions
2 | # For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3 |
4 | name: Sphinx publish
5 |
6 | on:
7 | push:
8 | branches: [ master ]
9 |
10 | env:
11 | GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
12 | ACCESS_KEY: ${{ secrets.DOCUMENT_PUBLISHING }}
13 |
14 | jobs:
15 | build-and-publish:
16 | runs-on: ubuntu-latest
17 | steps:
18 | - uses: actions/checkout@v4
19 | - name: Set up Python
20 | uses: actions/setup-python@v5
21 | with:
22 |           python-version: '3.9'
23 | - name: Install pandoc
24 | run: |
25 | sudo apt update
26 | sudo apt install -y pandoc
27 | - name: Install dependencies
28 | run: |
29 | python -m pip install --upgrade pip
30 | python -m pip install --upgrade -e .[docs] tensorflow
31 | - name: Clone the documentation project
32 | run: |
33 | git clone -n https://${GITHUB_ACTOR}:${GITHUB_TOKEN}@github.com/keisen/tf-keras-vis-docs.git docs/_build
34 | - name: Build
35 | run: |
36 | cd docs
37 | make html
38 | - name: Configuration
39 | run: |
40 | cd docs/_build
41 | echo "$ACCESS_KEY" > ~/access_key.pem
42 | chmod 600 ~/access_key.pem
43 | git config --local user.email k.keisen@gmail.com
44 | git config --local user.name keisen
45 | git config remote.origin.url "git@github.com:keisen/tf-keras-vis-docs.git"
46 | - name: Publish files
47 | env:
48 | GIT_SSH_COMMAND: ssh -i ~/access_key.pem -o StrictHostKeyChecking=no -F /dev/null
49 | run: |
50 | cd docs/_build
51 | git add .
52 | if ! git diff --cached --quiet; then
53 | git commit -m "Auto commit"
54 | git push -f origin master
55 | fi
56 |
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------
1 | # Byte-compiled / optimized / DLL files
2 | __pycache__/
3 | *.py[cod]
4 | *$py.class
5 |
6 | # C extensions
7 | *.so
8 |
9 | # Distribution / packaging
10 | .Python
11 | build/
12 | develop-eggs/
13 | dist/
14 | downloads/
15 | eggs/
16 | .eggs/
17 | lib/
18 | lib64/
19 | parts/
20 | sdist/
21 | var/
22 | wheels/
23 | *.egg-info/
24 | .installed.cfg
25 | *.egg
26 | MANIFEST
27 |
28 | # PyInstaller
29 | # Usually these files are written by a python script from a template
30 | # before PyInstaller builds the exe, so as to inject date/other infos into it.
31 | *.manifest
32 | *.spec
33 |
34 | # Installer logs
35 | pip-log.txt
36 | pip-delete-this-directory.txt
37 |
38 | # Unit test / coverage reports
39 | htmlcov/
40 | .tox/
41 | .coverage
42 | .coverage.*
43 | .cache
44 | nosetests.xml
45 | coverage.xml
46 | *.cover
47 | .hypothesis/
48 | .pytest_cache/
49 |
50 | # Translations
51 | *.mo
52 | *.pot
53 |
54 | # Django stuff:
55 | *.log
56 | local_settings.py
57 | db.sqlite3
58 |
59 | # Flask stuff:
60 | instance/
61 | .webassets-cache
62 |
63 | # Scrapy stuff:
64 | .scrapy
65 |
66 | # Sphinx documentation
67 | # docs/_build/
68 | /docs/build
69 |
70 | # PyBuilder
71 | target/
72 |
73 | # Jupyter Notebook
74 | .ipynb_checkpoints
75 |
76 | # pyenv
77 | .python-version
78 |
79 | # celery beat schedule file
80 | celerybeat-schedule
81 |
82 | # SageMath parsed files
83 | *.sage.py
84 |
85 | # Environments
86 | .env
87 | .venv
88 | env/
89 | venv/
90 | ENV/
91 | env.bak/
92 | venv.bak/
93 |
94 | # Spyder project settings
95 | .spyderproject
96 | .spyproject
97 |
98 | # Rope project settings
99 | .ropeproject
100 |
101 | # mkdocs documentation
102 | # /site
103 |
104 | # mypy
105 | .mypy_cache/
106 |
107 | # Add by keisen
108 | .DS_Store
109 | *.swp
110 | *.Identifier
111 | core*
112 | /docs/_build/
113 | /docs/examples/sandbox.ipynb
114 | /docs/examples/workspace.ipynb
115 | /docs/examples/Untitled.ipynb
116 | /dockerfiles/docker-compose.yml
117 | DraftReleaseNote.md
118 | /temp
119 | /.vscode
120 | /test.sh
121 |
--------------------------------------------------------------------------------
/CITATION.cff:
--------------------------------------------------------------------------------
1 | cff-version: 1.2.0
2 | message: "If you use this software, please cite it as below."
3 | authors:
4 | - family-names: Kubota
5 | given-names: Yasuhiro
6 | email: "k.keisen@gmail.com"
7 | title: "tf-keras-vis"
8 | repository: "https://github.com/keisen/tf-keras-vis"
9 | url: "https://keisen.github.io/tf-keras-vis-docs/"
10 | type: software
11 | version: 0.8.8
12 | date-released: "2024-04-17"
13 | license-url: "https://github.com/keisen/tf-keras-vis/blob/master/LICENSE"
14 | references:
15 | - authors:
16 | - family-names: Kotikalapudi
17 | given-names: Raghavendra
18 | title: "keras-vis"
19 | repository: "https://github.com/raghakot/keras-vis"
20 | url: "https://raghakot.github.io/keras-vis/"
21 | type: software
22 | version: 0.4.1
23 |
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
1 | MIT License
2 |
3 | Copyright (c) 2016 Raghavendra Kotikalapudi
4 | Copyright (c) 2019 keisen(Yasuhiro Kubota) and contributors
5 |
6 | Permission is hereby granted, free of charge, to any person obtaining a copy
7 | of this software and associated documentation files (the "Software"), to deal
8 | in the Software without restriction, including without limitation the rights
9 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 | copies of the Software, and to permit persons to whom the Software is
11 | furnished to do so, subject to the following conditions:
12 |
13 | The above copyright notice and this permission notice shall be included in all
14 | copies or substantial portions of the Software.
15 |
16 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
19 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
22 | SOFTWARE.
23 |
--------------------------------------------------------------------------------
/MANIFEST.in:
--------------------------------------------------------------------------------
1 | include VERSION
2 | recursive-exclude tests *
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # [tf-keras-vis](https://keisen.github.io/tf-keras-vis-docs/)
2 |
3 |
4 |
5 | [](https://pepy.tech/project/tf-keras-vis)
6 | [](https://badge.fury.io/py/tf-keras-vis)
7 | [](https://badge.fury.io/py/tf-keras-vis)
8 | [](https://github.com/keisen/tf-keras-vis/actions/workflows/python-package.yml)
9 | [](https://opensource.org/licenses/MIT)
10 | [](https://keisen.github.io/tf-keras-vis-docs/)
11 |
12 |
13 |
14 |
15 |
16 |
17 |
18 | ## Web documents
19 |
20 | https://keisen.github.io/tf-keras-vis-docs/
21 |
22 |
23 | ## Overview
24 |
25 |
26 |
27 | tf-keras-vis is a visualization toolkit for debugging `keras.Model` in Tensorflow2.0+.
28 | Currently supported methods for visualization include:
29 |
30 | * Feature Visualization
31 | - ActivationMaximization ([web](https://distill.pub/2017/feature-visualization/), [github](https://github.com/raghakot/keras-vis))
32 | * Class Activation Maps
33 | - GradCAM ([paper](https://arxiv.org/pdf/1610.02391v1.pdf))
34 | - GradCAM++ ([paper](https://arxiv.org/pdf/1710.11063.pdf))
35 | - ScoreCAM ([paper](https://arxiv.org/pdf/1910.01279.pdf), [github](https://github.com/haofanwang/Score-CAM))
36 | - Faster-ScoreCAM ([github](https://github.com/tabayashi0117/Score-CAM/blob/master/README.md#faster-score-cam))
37 | - LayerCAM ([paper](http://mftp.mmcheng.net/Papers/21TIP_LayerCAM.pdf), [github](https://github.com/PengtaoJiang/LayerCAM)) :new::zap:
38 | * Saliency Maps
39 | - Vanilla Saliency ([paper](https://arxiv.org/pdf/1312.6034.pdf))
40 | - SmoothGrad ([paper](https://arxiv.org/pdf/1706.03825.pdf))
41 |
42 | tf-keras-vis is designed to be light-weight, flexible, and easy to use.
43 | All visualizations have the features as follows:
44 |
45 | * Support **N-dim image inputs**; that is, it supports not only 2D pictures but also higher-dimensional data such as 3D images.
46 | * Support **batch-wise** processing, so it can efficiently process multiple input images at once.
47 | * Support models that have either **multiple inputs** or **multiple outputs**, or both.
48 | * Support the **mixed-precision** model.
49 |
50 | And in ActivationMaximization,
51 |
52 | * Support Optimizers that are built into Keras.
53 |
54 |
55 |
56 | ### Visualizations
57 |
58 |
59 |
60 | #### Dense Unit
61 |
62 |
63 |
64 | #### Convolutional Filter
65 |
66 |
67 |
68 | #### Class Activation Map
69 |
70 |
71 |
72 | The images above are generated by `GradCAM++`.
73 |
74 | #### Saliency Map
75 |
76 |
77 |
78 | The images above are generated by `SmoothGrad`.
79 |
80 |
81 |
82 | ## Usage
83 |
84 | ### ActivationMaximization (Visualizing Convolutional Filter)
85 |
86 |
87 |
88 | ```python
89 | import tensorflow as tf
90 | from keras.applications import VGG16
91 | from matplotlib import pyplot as plt
92 | from tf_keras_vis.activation_maximization import ActivationMaximization
93 | from tf_keras_vis.activation_maximization.callbacks import Progress
94 | from tf_keras_vis.activation_maximization.input_modifiers import Jitter, Rotate2D
95 | from tf_keras_vis.activation_maximization.regularizers import TotalVariation2D, Norm
96 | from tf_keras_vis.utils.model_modifiers import ExtractIntermediateLayer, ReplaceToLinear
97 | from tf_keras_vis.utils.scores import CategoricalScore
98 |
99 | # Create the visualization instance.
100 | # All visualization classes accept a model and model-modifier, which, for example,
101 | # replaces the activation of last layer to linear function so on, in constructor.
102 | activation_maximization = \
103 | ActivationMaximization(VGG16(),
104 | model_modifier=[ExtractIntermediateLayer('block5_conv3'),
105 | ReplaceToLinear()],
106 | clone=False)
107 |
108 | # You can use Score class to specify visualizing target you want.
109 | # And add regularizers or input-modifiers as needed.
110 | activations = \
111 | activation_maximization(CategoricalScore(FILTER_INDEX),
112 | steps=200,
113 | input_modifiers=[Jitter(jitter=16), Rotate2D(degree=1)],
114 | regularizers=[TotalVariation2D(weight=1.0),
115 | Norm(weight=0.3, p=1)],
116 |                             optimizer=tf.keras.optimizers.RMSprop(1.0, 0.999),
117 | callbacks=[Progress()])
118 |
119 | ## Since v0.6.0, calling `astype()` is NOT necessary.
120 | # activations = activations[0].astype(np.uint8)
121 |
122 | # Render
123 | plt.imshow(activations[0])
124 | ```
125 |
126 |
127 |
128 | ### Gradcam++
129 |
130 |
131 |
132 | ```python
133 | import numpy as np
134 | from matplotlib import pyplot as plt
135 | from matplotlib import cm
136 | from tf_keras_vis.gradcam_plus_plus import GradcamPlusPlus
137 | from tf_keras_vis.utils.model_modifiers import ReplaceToLinear
138 | from tf_keras_vis.utils.scores import CategoricalScore
139 |
140 | # Create GradCAM++ object
141 | gradcam = GradcamPlusPlus(YOUR_MODEL_INSTANCE,
142 | model_modifier=ReplaceToLinear(),
143 | clone=True)
144 |
145 | # Generate cam with GradCAM++
146 | cam = gradcam(CategoricalScore(CATEGORICAL_INDEX),
147 | SEED_INPUT)
148 |
149 | ## Since v0.6.0, calling `normalize()` is NOT necessary.
150 | # cam = normalize(cam)
151 |
152 | plt.imshow(SEED_INPUT_IMAGE)
153 | heatmap = np.uint8(cm.jet(cam[0])[..., :3] * 255)
154 | plt.imshow(heatmap, cmap='jet', alpha=0.5) # overlay
155 | ```
156 |
157 |
158 |
159 | Please see the guides below for more details:
160 |
161 | ### Getting Started Guides
162 |
163 |
164 |
165 | * [Saliency and CAMs](https://keisen.github.io/tf-keras-vis-docs/examples/attentions.html)
166 | * [Visualize Dense Layer](https://keisen.github.io/tf-keras-vis-docs/examples/visualize_dense_layer.html)
167 | * [Visualize Convolutional Filer](https://keisen.github.io/tf-keras-vis-docs/examples/visualize_conv_filters.html)
168 |
169 |
170 |
171 | **[NOTES]**
172 | If you have ever used [keras-vis](https://github.com/raghakot/keras-vis), you may feel that tf-keras-vis is similar to keras-vis.
173 | Actually, tf-keras-vis is derived from keras-vis, and both provide almost the same visualization methods.
174 | But please note that the tf-keras-vis APIs do NOT have compatibility with keras-vis.
175 |
176 |
177 | ## Requirements
178 |
179 |
180 |
181 | * Python 3.7+
182 | * Tensorflow 2.0+
183 |
184 |
185 |
186 |
187 | ## Installation
188 |
189 |
190 |
191 | * PyPI
192 |
193 | ```bash
194 | $ pip install tf-keras-vis tensorflow
195 | ```
196 |
197 | * Source (for development)
198 |
199 | ```bash
200 | $ git clone https://github.com/keisen/tf-keras-vis.git
201 | $ cd tf-keras-vis
202 | $ pip install -e .[develop] tensorflow
203 | ```
204 |
205 |
206 |
207 | ## Use Cases
208 |
209 |
210 |
211 | * [chitra](https://github.com/aniketmaurya/chitra)
212 | * A Deep Learning Computer Vision library for easy data loading, model building and model interpretation with GradCAM/GradCAM++.
213 |
214 |
215 | ## Known Issues
216 |
217 | * With InceptionV3, ActivationMaximization doesn't work well; that is, it might generate meaninglessly blurred images.
218 | * With cascading models, Gradcam and Gradcam++ don't work well; that is, some errors might occur. So we recommend using FasterScoreCAM in this case.
219 | * `channels-first` models and data is unsupported.
220 |
221 |
222 | ## ToDo
223 |
224 | * Guides
225 | * Visualizing multiple attention or activation images at once utilizing batch-system of model
226 | * Define various score functions
227 | * Visualizing attentions with multiple inputs models
228 | * Visualizing attentions with multiple outputs models
229 | * Advanced score functions
230 | * Tuning Activation Maximization
231 | * Visualizing attentions for N-dim image inputs
232 | * We're going to add some methods such as below
233 | - Deep Dream
234 | - Style transfer
235 |
--------------------------------------------------------------------------------
/VERSION:
--------------------------------------------------------------------------------
1 | 0.8.8
--------------------------------------------------------------------------------
/dockerfiles/cpu.Dockerfile:
--------------------------------------------------------------------------------
1 | FROM tensorflow/tensorflow:2.4.1
2 |
3 | # Default ENV Settings
4 | ARG TF_KERAS_VIS_VERSION=0.6.0
5 | ARG JUPYTER_ALLOW_IP="0.0.0.0"
6 | ARG JUPYTER_TOKEN=""
7 |
8 | # Setting for jupyter
9 | RUN export JUPYTER_HOME=/etc/jupyter && \
10 | export JUPYTER_CONF=$JUPYTER_HOME/jupyter_notebook_config.py && \
11 | mkdir -p $JUPYTER_HOME && \
12 | touch $JUPYTER_CONF && \
13 | echo 'c.NotebookApp.allow_root = True' >> $JUPYTER_CONF && \
14 | echo "c.NotebookApp.ip = '$JUPYTER_ALLOW_IP'" >> $JUPYTER_CONF && \
15 | echo "c.NotebookApp.token = '$JUPYTER_TOKEN'" >> $JUPYTER_CONF && \
16 | echo "c.NotebookApp.terminado_settings = \
17 | {'shell_command': ['/bin/bash']}" >> $JUPYTER_CONF
18 |
19 | # Install essential python libraries
20 | RUN pip install --no-cache-dir \
21 | tf-keras-vis[develop,examples]==$TF_KERAS_VIS_VERSION
22 |
23 | CMD jupyter lab
--------------------------------------------------------------------------------
/dockerfiles/gpu.Dockerfile:
--------------------------------------------------------------------------------
1 | FROM tensorflow/tensorflow:2.4.1-gpu
2 |
3 | ARG TF_KERAS_VIS_VERSION=0.6.0
4 | ARG JUPYTER_ALLOW_IP="0.0.0.0"
5 | ARG JUPYTER_TOKEN=""
6 |
7 | # Setting for jupyter
8 | RUN export JUPYTER_HOME=/etc/jupyter && \
9 | export JUPYTER_CONF=$JUPYTER_HOME/jupyter_notebook_config.py && \
10 | mkdir -p $JUPYTER_HOME && \
11 | touch $JUPYTER_CONF && \
12 | echo 'c.NotebookApp.allow_root = True' >> $JUPYTER_CONF && \
13 | echo "c.NotebookApp.ip = '$JUPYTER_ALLOW_IP'" >> $JUPYTER_CONF && \
14 | echo "c.NotebookApp.token = '$JUPYTER_TOKEN'" >> $JUPYTER_CONF && \
15 | echo "c.NotebookApp.terminado_settings = \
16 | {'shell_command': ['/bin/bash']}" >> $JUPYTER_CONF
17 |
18 | # Install essential python libraries
19 | RUN pip install --no-cache-dir \
20 | tf-keras-vis[develop,examples]==$TF_KERAS_VIS_VERSION
21 |
22 | CMD jupyter lab
--------------------------------------------------------------------------------
/docs/Makefile:
--------------------------------------------------------------------------------
1 | # Makefile for Sphinx documentation
2 | #
3 |
4 | # You can set these variables from the command line.
5 | SPHINXOPTS ?=
6 | SPHINXBUILD ?= sphinx-build
7 | PAPER ?=
8 | SOURCEDIR = .
9 | BUILDDIR = _build
10 |
11 | # Internal variables.
12 | PAPEROPT_a4 = -D latex_elements.papersize=a4paper
13 | PAPEROPT_letter = -D latex_elements.papersize=letterpaper
14 | # $(O) is meant as a shortcut for $(SPHINXOPTS)
15 | ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(O) $(SOURCEDIR)
16 | # the i18n builder cannot share the environment and doctrees with the others
17 | I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) $(O) $(SOURCEDIR)
18 |
19 | .PHONY: help
20 | help:
21 | 	@echo "Please use \`make <target>' where <target> is one of"
22 | @echo " html to make standalone HTML files"
23 | @echo " dirhtml to make HTML files named index.html in directories"
24 | @echo " singlehtml to make a single large HTML file"
25 | @echo " pickle to make pickle files"
26 | @echo " json to make JSON files"
27 | @echo " htmlhelp to make HTML files and an HTML help project"
28 | @echo " qthelp to make HTML files and a qthelp project"
29 | @echo " applehelp to make an Apple Help Book"
30 | @echo " devhelp to make HTML files and a Devhelp project"
31 | @echo " epub to make an epub"
32 | @echo " latex to make LaTeX files (you can set PAPER=a4 or PAPER=letter)"
33 | @echo " latexpdf to make LaTeX files and then PDFs out of them"
34 | @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx"
35 | @echo " lualatexpdf to make LaTeX files and run them through lualatex"
36 | @echo " xelatexpdf to make LaTeX files and run them through xelatex"
37 | @echo " text to make text files"
38 | @echo " man to make manual pages"
39 | @echo " texinfo to make Texinfo files"
40 | @echo " info to make Texinfo files and run them through makeinfo"
41 | @echo " gettext to make PO message catalogs"
42 | @echo " changes to make an overview of all changed/added/deprecated items"
43 | @echo " xml to make Docutils-native XML files"
44 | @echo " pseudoxml to make pseudoxml-XML files for display purposes"
45 | @echo " linkcheck to check all external links for integrity"
46 | @echo " doctest to run all doctests embedded in the documentation (if enabled)"
47 | @echo " coverage to run coverage check of the documentation (if enabled)"
48 | @echo " dummy to check syntax errors of document sources"
49 | @echo " clean to remove everything in the build directory"
50 |
51 | .PHONY: clean
52 | clean:
53 | rm -rf $(BUILDDIR)/*
54 |
55 | .PHONY: latexpdf
56 | latexpdf:
57 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
58 | @echo "Running LaTeX files through pdflatex..."
59 | $(MAKE) -C $(BUILDDIR)/latex all-pdf
60 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
61 |
62 | .PHONY: latexpdfja
63 | latexpdfja:
64 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
65 | @echo "Running LaTeX files through platex and dvipdfmx..."
66 | $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja
67 | @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex."
68 |
69 | .PHONY: lualatexpdf
70 | lualatexpdf:
71 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
72 | @echo "Running LaTeX files through lualatex..."
73 | $(MAKE) PDFLATEX=lualatex -C $(BUILDDIR)/latex all-pdf
74 | @echo "lualatex finished; the PDF files are in $(BUILDDIR)/latex."
75 |
76 | .PHONY: xelatexpdf
77 | xelatexpdf:
78 | $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex
79 | @echo "Running LaTeX files through xelatex..."
80 | $(MAKE) PDFLATEX=xelatex -C $(BUILDDIR)/latex all-pdf
81 | @echo "xelatex finished; the PDF files are in $(BUILDDIR)/latex."
82 |
83 | .PHONY: info
84 | info:
85 | $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo
86 | @echo "Running Texinfo files through makeinfo..."
87 | make -C $(BUILDDIR)/texinfo info
88 | @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo."
89 |
90 | .PHONY: gettext
91 | gettext:
92 | $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale
93 |
94 | # Catch-all target: route all unknown targets to Sphinx
95 | .PHONY: Makefile
96 | %: Makefile
97 | $(SPHINXBUILD) -b "$@" $(ALLSPHINXOPTS) "$(BUILDDIR)/"
98 |
--------------------------------------------------------------------------------
/docs/api/tf_keras_vis.activation_maximization.callbacks.md:
--------------------------------------------------------------------------------
1 | Callbacks
2 | ========================================================
3 |
4 |
5 | ```{eval-rst}
6 | .. automodule:: tf_keras_vis.activation_maximization.callbacks
7 | :members:
8 | :show-inheritance:
9 | ```
10 |
--------------------------------------------------------------------------------
/docs/api/tf_keras_vis.activation_maximization.input_modifiers.md:
--------------------------------------------------------------------------------
1 | Input Modifiers
2 | ===============================================================
3 |
4 |
5 | ```{eval-rst}
6 | .. automodule:: tf_keras_vis.activation_maximization.input_modifiers
7 | :members:
8 | :show-inheritance:
9 | ```
10 |
--------------------------------------------------------------------------------
/docs/api/tf_keras_vis.activation_maximization.md:
--------------------------------------------------------------------------------
1 | ActivationMaximization
2 | ===============================================
3 |
4 | * Convolutional Filters
5 |
6 | 
7 |
8 |
9 | * Dense Units
10 |
11 | 
12 |
13 | * Visualizing Dense Unit
14 |
15 | 
16 |
17 |
18 | ```{note}
19 | The ActivationMaximization class is optimized for the VGG16 model in Keras.
20 | So, when you visualize with other models,
21 | you **MUST** tune the parameters of ActivationMaximization.
22 | ```
23 |
24 | ```{toctree}
25 | :maxdepth: 1
26 | :hidden:
27 |
28 | tf_keras_vis.activation_maximization.callbacks
29 | tf_keras_vis.activation_maximization.input_modifiers
30 | tf_keras_vis.activation_maximization.regularizers
31 | ```
32 |
33 | ```{eval-rst}
34 | .. automodule:: tf_keras_vis.activation_maximization
35 | :members:
36 | :show-inheritance:
37 | ```
38 |
39 |
--------------------------------------------------------------------------------
/docs/api/tf_keras_vis.activation_maximization.regularizers.md:
--------------------------------------------------------------------------------
1 | Regularizers
2 | ===========================================================
3 |
4 |
5 | ```{eval-rst}
6 | .. automodule:: tf_keras_vis.activation_maximization.regularizers
7 | :members:
8 | :show-inheritance:
9 | ```
10 |
--------------------------------------------------------------------------------
/docs/api/tf_keras_vis.gradcam.md:
--------------------------------------------------------------------------------
1 | GradCAM
2 | =============================
3 |
4 | 
5 |
6 | ```{eval-rst}
7 | .. automodule:: tf_keras_vis.gradcam
8 | :members:
9 | :show-inheritance:
10 | ```
11 |
--------------------------------------------------------------------------------
/docs/api/tf_keras_vis.gradcam_plus_plus.md:
--------------------------------------------------------------------------------
1 | GradCAM++
2 | =========================================
3 |
4 | 
5 |
6 |
7 | ```{eval-rst}
8 | .. automodule:: tf_keras_vis.gradcam_plus_plus
9 | :members:
10 | :show-inheritance:
11 | ```
12 |
--------------------------------------------------------------------------------
/docs/api/tf_keras_vis.layercam.md:
--------------------------------------------------------------------------------
1 | LayerCAM
2 | =============================
3 |
4 | ```{eval-rst}
5 | .. automodule:: tf_keras_vis.layercam
6 | :members:
7 | :show-inheritance:
8 | ```
9 |
--------------------------------------------------------------------------------
/docs/api/tf_keras_vis.md:
--------------------------------------------------------------------------------
1 | API
2 | ==============
3 |
4 | Feature Visualization
5 | ----------------------
6 |
7 | ```{toctree}
8 | :maxdepth: 1
9 |
10 | tf_keras_vis.activation_maximization
11 | ```
12 |
13 | Attentions
14 | -----------
15 |
16 | ```{toctree}
17 | :maxdepth: 1
18 |
19 | tf_keras_vis.saliency
20 | ```
21 |
22 |
23 | ### Class Activation Map
24 |
25 | ```{toctree}
26 | :maxdepth: 1
27 |
28 | tf_keras_vis.gradcam
29 | tf_keras_vis.gradcam_plus_plus
30 | tf_keras_vis.scorecam
31 | tf_keras_vis.layercam
32 | ```
33 |
34 | Utilities
35 | ----------
36 |
37 | ```{toctree}
38 | :maxdepth: 1
39 |
40 | tf_keras_vis.utils.model_modifiers
41 | tf_keras_vis.utils.scores
42 | tf_keras_vis.utils
43 | ```
44 |
45 | Model Visualization Class
46 | -------------------------
47 |
48 |
49 | ```{eval-rst}
50 | .. automodule:: tf_keras_vis
51 | :members:
52 | :show-inheritance:
53 | ```
54 |
--------------------------------------------------------------------------------
/docs/api/tf_keras_vis.saliency.md:
--------------------------------------------------------------------------------
1 | Vanilla Saliency and SmoothGrad
2 | ===============================
3 |
4 | * Vanilla Saliency
5 |
6 | 
7 |
8 |
9 | * SmoothGrad
10 |
11 | 
12 |
13 |
14 | ```{eval-rst}
15 | .. automodule:: tf_keras_vis.saliency
16 | :members:
17 | :show-inheritance:
18 | ```
19 |
--------------------------------------------------------------------------------
/docs/api/tf_keras_vis.scorecam.md:
--------------------------------------------------------------------------------
1 | ScoreCAM
2 | ==============================
3 |
4 | 
5 |
6 | ```{eval-rst}
7 | .. automodule:: tf_keras_vis.scorecam
8 | :members:
9 | :show-inheritance:
10 | ```
11 |
--------------------------------------------------------------------------------
/docs/api/tf_keras_vis.utils.md:
--------------------------------------------------------------------------------
1 | Functions
2 | ============================
3 |
4 |
5 | ```{eval-rst}
6 | .. automodule:: tf_keras_vis.utils
7 | :members:
8 | :show-inheritance:
9 | ```
10 |
--------------------------------------------------------------------------------
/docs/api/tf_keras_vis.utils.model_modifiers.md:
--------------------------------------------------------------------------------
1 | Model Modifiers
2 | ============================================
3 |
4 |
5 | ```{eval-rst}
6 | .. automodule:: tf_keras_vis.utils.model_modifiers
7 | :members:
8 | :show-inheritance:
9 | ```
10 |
--------------------------------------------------------------------------------
/docs/api/tf_keras_vis.utils.scores.md:
--------------------------------------------------------------------------------
1 | Scores
2 | ==================================
3 |
4 | The Scores are used to specify the things you want to visualize with Saliency, X-CAMs and ActivationMaximization.
5 | Here, we will introduce the fundamental score usage with the models below.
6 |
7 | * Categorical classification model
8 | * Binary classification model
9 | * Regression model
10 |
11 | If you want just to see the API specification, please skip to
12 | [Classes section](tf_keras_vis.utils.scores.html#module-tf_keras_vis.utils.scores).
13 |
14 |
15 | ## Categorical classification model
16 |
17 | We expect the output shape of the categorical classification model to be `(batch_size, num_of_classes)`. That is, the `softmax` activation function is applied to the output layer of the model.
18 |
19 | The `output` variable below is assigned the model output value. The code snippet below means that it will return the values corresponding to the 20th category.
20 |
21 | ```python
22 | def score_function(output): # output shape is (batch_size, num_of_classes)
23 | return output[:, 20]
24 | ```
25 |
26 | The function below means the same as the above. When you don't need to implement additional code to process the output value, we recommend you utilize the [CategoricalScore](tf_keras_vis.utils.scores.html#module-tf_keras_vis.utils.scores.CategoricalScore).
27 |
28 | ```python
29 | from tf_keras_vis.utils.scores import CategoricalScore
30 |
31 | score = CategoricalScore(20)
32 | ```
33 |
34 | If you want to visualize multiple categories at once, you can define the score function as follows. The code snippet below means that it will return three values, for the 20th, the 48th and the 128th categories respectively.
35 |
36 | ```python
37 | def score_function(output): # output shape is (batch_size, num_of_classes)
38 |     return (output[0, 20], output[1, 48], output[2, 128])
39 | ```
40 |
41 | ```{note}
42 | Please note that the length of the values returned by the score function MUST be identical to `batch_size` (the number of samples).
43 | ```
44 |
45 | Of course, you can also use [CategoricalScore](tf_keras_vis.utils.scores.html#module-tf_keras_vis.utils.scores.CategoricalScore).
46 |
47 | ```python
48 | from tf_keras_vis.utils.scores import CategoricalScore
49 |
50 | score = CategoricalScore([20, 48, 128])
51 | ```
52 |
53 |
54 | ## Binary classification task
55 |
56 | We expect the output shape of the binary classification model to be `(batch_size, 1)` and the output value range to be `[0, 1]`. That is, the `sigmoid` activation function is applied to the output layer of the model.
57 |
58 | In categorical classification, the score functions just return the values corresponding to the things you want to visualize. However, in binary classification, you need to be aware of whether the value you want to visualize is 0.0 or 1.0 (False or True).
59 |
60 | ### 1.0 (True)
61 |
62 | Like the categorical classification, it just returns the value as follows.
63 |
64 | ```python
65 | def score_function(output): # output shape is (batch_size, 1)
66 | return output[:, 0]
67 | ```
68 |
69 | ### 0.0 (False)
70 |
71 | The smaller the model output value, the larger the score value should be, so you need to multiply it by `-1.0`.
72 |
73 | ```python
74 | def score_function(output): # output shape is (batch_size, 1)
75 | return -1.0 * output[:, 0]
76 | ```
77 |
78 |
79 | ### Utilizing BinaryScore class
80 |
81 | Of course, we recommend you utilize the BinaryScore class as follows.
82 |
83 | ```python
84 | from tf_keras_vis.utils.scores import BinaryScore
85 | score = BinaryScore(0.0) # or BinaryScore(False)
86 | ```
87 |
88 | or
89 |
90 | ```python
91 | from tf_keras_vis.utils.scores import BinaryScore
92 | score = BinaryScore(1.0) # or BinaryScore(True)
93 | ```
94 |
95 |
96 | ## Regression task
97 |
98 | We expect the output shape of the regression model to be `(batch_size, 1)` like binary classification; however, there is no limitation on the output value range. That is, the `linear` activation function is applied to the output layer of the model.
99 |
100 | In a regression task, we need to consider how what we want to visualize contributes to the model output.
101 | Here, we introduce a simple approach for each of the three situations below.
102 |
103 | 1. Increase the output value
104 | 2. Decrease the output value
105 | 3. Maintain the output value at ...
106 |
107 |
108 | ### 1. Increase the output value
109 |
110 | It just returns the value like the categorical classification.
111 |
112 | ```python
113 | def score_function(output):
114 | return output[:, 0]
115 | ```
116 |
117 | ### 2. Decrease the output value
118 |
119 | The smaller the model output value, the larger the score value should be,
120 | so you need to multiply it by `-1.0`.
121 |
122 | ```python
123 | def score_function(output):
124 | return -1.0 * output[:, 0]
125 | ```
126 |
127 | ### 3. Maintain the output value at ...
128 |
129 | The closer the model output value is to the target value, the larger the score value should be, so you need to calculate `abs(1.0 / (target_value - model_output_value))`.
130 | For example, suppose the target value is 0.0, the score function should be as follows.
131 |
132 | ```python
133 | def score_function(output):
134 | return tf.math.abs(1.0 / (output[:, 0] + keras.backend.epsilon()))
135 | ```
136 |
137 |
138 | Classes
139 | -----------------
140 |
141 | ```{eval-rst}
142 | .. automodule:: tf_keras_vis.utils.scores
143 | :members:
144 | :show-inheritance:
145 | ```
146 |
--------------------------------------------------------------------------------
/docs/conf.py:
--------------------------------------------------------------------------------
1 | # Configuration file for the Sphinx documentation builder.
2 | #
3 | # This file only contains a selection of the most common options. For a full
4 | # list see the documentation:
5 | # https://www.sphinx-doc.org/en/master/usage/configuration.html
6 |
7 | # -- Path setup --------------------------------------------------------------
8 |
9 | # If extensions (or modules to document with autodoc) are in another directory,
10 | # add these directories to sys.path here. If the directory is relative to the
11 | # documentation root, use os.path.abspath to make it absolute, like shown here.
12 | #
13 | import os
14 | import sys
15 |
16 | sys.path.insert(0, os.path.abspath('..'))
17 |
18 | # -- Project information -----------------------------------------------------
19 |
20 | project = 'tf-keras-vis'
21 | author = 'keisen(Yasuhiro Kubota)'
22 | copyright = f'2022, {author}'
23 |
24 | # The short X.Y version
25 | with open("../VERSION") as f:
26 | version = f.read().strip()
27 |
28 | # The full version, including alpha/beta/rc tags
29 | release = f"v{version}"
30 |
31 | # -- General configuration ---------------------------------------------------
32 |
33 | # Add any Sphinx extension module names here, as strings. They can be
34 | # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
35 | # ones.
36 | extensions = [
37 | 'sphinx.ext.autodoc',
38 | 'sphinx.ext.doctest',
39 | 'sphinx.ext.intersphinx',
40 | 'sphinx.ext.todo',
41 | 'sphinx.ext.coverage',
42 | 'sphinx.ext.imgmath',
43 | 'sphinx.ext.mathjax',
44 | 'sphinx.ext.ifconfig',
45 | 'sphinx.ext.viewcode',
46 | 'sphinx.ext.githubpages',
47 | 'sphinx.ext.napoleon',
48 | 'myst_parser',
49 | 'nbsphinx',
50 | ]
51 |
52 | # Add any paths that contain templates here, relative to this directory.
53 | templates_path = ['_templates']
54 |
55 | # The language for content autogenerated by Sphinx. Refer to documentation
56 | # for a list of supported languages.
57 | #
58 | # This is also used if you do content translation via gettext catalogs.
59 | # Usually you set "language" from the command line for these cases.
60 | language = 'en'
61 |
62 | # List of patterns, relative to source directory, that match files and
63 | # directories to ignore when looking for source files.
64 | # This pattern also affects html_static_path and html_extra_path.
65 | exclude_patterns = ['.ipynb_checkpoints/*', '_build/*']
66 |
67 | source_suffix = {
68 | '.rst': 'restructuredtext',
69 | '.txt': 'markdown',
70 | '.md': 'markdown',
71 | }
72 |
73 | # -- Options for HTML output -------------------------------------------------
74 |
75 | # The theme to use for HTML and HTML Help pages. See the documentation for
76 | # a list of builtin themes.
77 | #
78 | html_theme = 'sphinx_rtd_theme'
79 |
80 | # Add any paths that contain custom static files (such as style sheets) here,
81 | # relative to this directory. They are copied after the builtin static files,
82 | # so a file named "default.css" will overwrite the builtin "default.css".
83 | html_static_path = []
84 |
85 | html_show_sourcelink = False
86 |
87 | # -- Extension configuration -------------------------------------------------
88 |
89 | autodoc_default_options = {
90 | 'members': True,
91 | 'member-order': 'groupwise',
92 | 'special-members': '__call__',
93 | 'inherited-members': True,
94 | 'show-inheritance': True,
95 | 'no-undoc-members': True,
96 | }
97 | autoclass_content = "both"
98 | autodoc_class_signature = "mixed"
99 | autodoc_member_order = "bysource"
100 | autodoc_typehints = "description"
101 | autodoc_typehints_description_target = "documented"
102 | autodoc_type_aliases = {}
103 | autodoc_preserve_defaults = True
104 | autodoc_inherit_docstrings = False
105 |
106 | napoleon_numpy_docstring = False
107 | napoleon_use_admonition_for_references = True
108 |
109 | pygments_style = "sphinx"
110 |
111 | myst_update_mathjax = False
112 |
113 | # -- Options for intersphinx extension ---------------------------------------
114 |
115 | # Example configuration for intersphinx: refer to the Python standard library.
116 | intersphinx_mapping = {
117 | 'python': ('https://docs.python.org/3', None),
118 | 'numpy': ('http://docs.scipy.org/doc/numpy/', None),
119 | 'scipy': ('http://docs.scipy.org/doc/scipy/reference/', None),
120 | }
121 |
122 | # -- Options for todo extension ----------------------------------------------
123 |
124 | # If true, `todo` and `todoList` produce output, else they produce nothing.
125 | todo_include_todos = True
126 |
--------------------------------------------------------------------------------
/docs/examples/images/activation_maximization.0.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keisen/tf-keras-vis/8bb8392ae5894c91acdfacaa898088c6b0c7a522/docs/examples/images/activation_maximization.0.gif
--------------------------------------------------------------------------------
/docs/examples/images/bear.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keisen/tf-keras-vis/8bb8392ae5894c91acdfacaa898088c6b0c7a522/docs/examples/images/bear.jpg
--------------------------------------------------------------------------------
/docs/examples/images/faster-scorecam.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keisen/tf-keras-vis/8bb8392ae5894c91acdfacaa898088c6b0c7a522/docs/examples/images/faster-scorecam.png
--------------------------------------------------------------------------------
/docs/examples/images/goldfish.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keisen/tf-keras-vis/8bb8392ae5894c91acdfacaa898088c6b0c7a522/docs/examples/images/goldfish.jpg
--------------------------------------------------------------------------------
/docs/examples/images/gradcam.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keisen/tf-keras-vis/8bb8392ae5894c91acdfacaa898088c6b0c7a522/docs/examples/images/gradcam.png
--------------------------------------------------------------------------------
/docs/examples/images/gradcam_plus_plus.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keisen/tf-keras-vis/8bb8392ae5894c91acdfacaa898088c6b0c7a522/docs/examples/images/gradcam_plus_plus.png
--------------------------------------------------------------------------------
/docs/examples/images/input-images.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keisen/tf-keras-vis/8bb8392ae5894c91acdfacaa898088c6b0c7a522/docs/examples/images/input-images.png
--------------------------------------------------------------------------------
/docs/examples/images/layercam.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keisen/tf-keras-vis/8bb8392ae5894c91acdfacaa898088c6b0c7a522/docs/examples/images/layercam.png
--------------------------------------------------------------------------------
/docs/examples/images/scorecam.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keisen/tf-keras-vis/8bb8392ae5894c91acdfacaa898088c6b0c7a522/docs/examples/images/scorecam.png
--------------------------------------------------------------------------------
/docs/examples/images/smoothgrad.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keisen/tf-keras-vis/8bb8392ae5894c91acdfacaa898088c6b0c7a522/docs/examples/images/smoothgrad.png
--------------------------------------------------------------------------------
/docs/examples/images/soldiers.jpg:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keisen/tf-keras-vis/8bb8392ae5894c91acdfacaa898088c6b0c7a522/docs/examples/images/soldiers.jpg
--------------------------------------------------------------------------------
/docs/examples/images/vanilla-saliency.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keisen/tf-keras-vis/8bb8392ae5894c91acdfacaa898088c6b0c7a522/docs/examples/images/vanilla-saliency.png
--------------------------------------------------------------------------------
/docs/examples/images/visualize-dense-layer.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keisen/tf-keras-vis/8bb8392ae5894c91acdfacaa898088c6b0c7a522/docs/examples/images/visualize-dense-layer.png
--------------------------------------------------------------------------------
/docs/examples/images/visualize-filters.png:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keisen/tf-keras-vis/8bb8392ae5894c91acdfacaa898088c6b0c7a522/docs/examples/images/visualize-filters.png
--------------------------------------------------------------------------------
/docs/examples/index.md:
--------------------------------------------------------------------------------
1 | Examples
2 | =========
3 |
4 | ```{toctree}
5 | :maxdepth: 2
6 |
7 | attentions
8 | visualize_conv_filters
9 | visualize_dense_layer
10 | ```
11 |
--------------------------------------------------------------------------------
/docs/images:
--------------------------------------------------------------------------------
1 | examples/images
--------------------------------------------------------------------------------
/docs/index.md:
--------------------------------------------------------------------------------
1 | Welcome to tf-keras-vis!
2 | ========================================
3 |
4 | ```{include} ../README.md
5 | :start-after:
6 | :end-before:
7 | ```
8 |
9 | Gallery
10 | --------
11 |
12 | ### Dense Units
13 |
14 | 
15 |
16 | ### Convolutional Filters
17 |
18 | 
19 |
20 |
21 | ### GradCAM
22 |
23 | 
24 |
25 | ### GradCAM++
26 |
27 | 
28 |
29 | ### ScoreCAM
30 |
31 | 
32 |
33 | ### Vanilla Saliency
34 |
35 | 
36 |
37 | ### SmoothGrad
38 |
39 | 
40 |
41 |
42 |
43 | What's tf-keras-vis
44 | --------------------
45 |
46 | ```{include} ../README.md
47 | :start-after:
48 | :end-before:
49 | ```
50 |
51 | Table of Contents
52 | ------------------
53 |
54 | ```{toctree}
55 | :hidden:
56 |
57 | HOME
58 | ```
59 |
60 | ```{toctree}
61 | :maxdepth: 1
62 |
63 | quick-start
64 | examples/index
65 | api/tf_keras_vis
66 | license
67 | ```
68 |
--------------------------------------------------------------------------------
/docs/license.md:
--------------------------------------------------------------------------------
1 | LICENSE
2 | =======
3 |
4 | ```{eval-rst}
5 | .. literalinclude:: ../LICENSE
6 | ```
7 |
8 | ```{toctree}
9 | :includehidden:
10 | ```
11 |
--------------------------------------------------------------------------------
/docs/quick-start.md:
--------------------------------------------------------------------------------
1 | Quick Start
2 | =====================
3 |
4 | Requirements
5 | ------------
6 |
7 | ```{include} ../README.md
8 | :start-after:
9 | :end-before:
10 | ```
11 |
12 |
13 | Installation
14 | ------------
15 |
16 | We recommend installing tf-keras-vis with `pip` as follows.
17 | However, if you want to develop or debug tf-keras-vis, you can also install it directly from the source code.
18 |
19 | ```{include} ../README.md
20 | :start-after:
21 | :end-before:
22 | ```
23 |
24 |
25 | Usage
26 | -----
27 |
28 | ### ActivationMaximization
29 |
30 | ```{include} ../README.md
31 | :start-after:
32 | :end-before:
33 | ```
34 |
35 |
36 | ### Gradcam++
37 |
38 | ```{include} ../README.md
39 | :start-after:
40 | :end-before:
41 | ```
42 |
43 |
44 | Next steps
45 | -----------
46 |
47 | We recommend you to read the example notebooks below.
48 |
49 | ```{include} ../README.md
50 | :start-after:
51 | :end-before:
52 | ```
53 |
54 | And, in addition to the above, knowing how to use the Score class will help you.
55 |
56 | * [The Score API](api/tf_keras_vis.utils.scores)
57 |
58 |
59 | ```{toctree}
60 | :maxdepth: 1
61 | ```
62 |
--------------------------------------------------------------------------------
/examples/README.md:
--------------------------------------------------------------------------------
1 | # Note!
2 |
3 | All example notebooks were moved to [the document folder](../docs).
4 |
5 | * If you want to **READ** the notebooks, please access [the web documents](https://keisen.github.io/tf-keras-vis-docs/examples/index.html).
6 | * If you want to **RUN** the notebooks, please access [docs/examples folder](../docs/examples).
7 |
8 | Please note that this folder is no longer maintained and will be removed in the future.
9 |
--------------------------------------------------------------------------------
/examples/attentions.ipynb:
--------------------------------------------------------------------------------
1 | ../docs/examples/attentions.ipynb
--------------------------------------------------------------------------------
/examples/images:
--------------------------------------------------------------------------------
1 | ../docs/examples/images
--------------------------------------------------------------------------------
/examples/visualize_conv_filters.ipynb:
--------------------------------------------------------------------------------
1 | ../docs/examples/visualize_conv_filters.ipynb
--------------------------------------------------------------------------------
/examples/visualize_dense_layer.ipynb:
--------------------------------------------------------------------------------
1 | ../docs/examples/visualize_dense_layer.ipynb
--------------------------------------------------------------------------------
/setup.cfg:
--------------------------------------------------------------------------------
1 | [metadata]
2 | description-file = README.md
3 |
4 | [flake8]
5 | exclude =
6 | __pycache__
7 | .git
8 | .github
9 | .ipynb_checkpoints
10 | .pytest_cache
11 | .vscode
12 | dockerfiles
13 | docs
14 | examples
15 | max-line-length = 99
16 | max-complexity = 15
17 | docstring-convention = google
18 |
19 | [yapf]
20 | based_on_style = pep8
21 | column_limit = 99
22 | spaces_before_comment = 2
23 |
24 | [isort]
25 | skip =
26 | __pycache__
27 | .git
28 | .github
29 | .ipynb_checkpoints
30 | .pytest_cache
31 | .vscode
32 | dockerfiles
33 | docs
34 | examples
35 | line_length = 99
36 |
37 | [tool:pytest]
38 | addopts =
39 | -v
40 | --durations=10
41 | --cache-clear
42 | --cov=tf_keras_vis/
43 | --cov-report=term-missing
44 | --pycodestyle
45 | env =
46 | TF_KERAS_VIS_MAX_STEPS=3
47 |
48 | [pycodestyle]
49 | max-line-length = 99
50 | exclude = docs/*
51 |
52 | [coverage:run]
53 | omit =
54 | docs/*
55 | */test.py
56 | tests/*
57 | __pycache__/*
58 | tf_keras_vis/utils/callbacks.py
59 | tf_keras_vis/utils/losses.py
60 | tf_keras_vis/utils/input_modifiers.py
61 |
62 | [coverage:report]
63 | exclude_lines =
64 | continue
65 | pass
66 | pragma: no cover
67 | raise NotImplementedError
68 | if __name__ == .__main__.:
69 | version
70 |
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
1 | from setuptools import find_packages, setup
2 |
3 | with open("README.md", "r") as f:  # README becomes the PyPI long description (markdown, see content type below).
4 |     long_description = f.read()
5 |
6 | with open("VERSION") as f:  # Single source of truth for the package version; strip the trailing newline.
7 |     version = f.read().strip()
8 |
9 | setup(
10 |     name="tf-keras-vis",
11 |     version=version,
12 |     author="keisen (Yasuhiro Kubota)",
13 |     author_email="k.keisen@gmail.com",
14 |     description="Neural network visualization toolkit for tf.keras",
15 |     long_description=long_description,
16 |     long_description_content_type="text/markdown",
17 |     url="https://github.com/keisen/tf-keras-vis",
18 |     packages=find_packages(),  # NOTE(review): no include/exclude filter — picks up every package dir, including tests/; confirm intended.
19 |     classifiers=[
20 |         "Intended Audience :: Developers",
21 |         "Intended Audience :: Education",
22 |         "Intended Audience :: Science/Research",
23 |         "License :: OSI Approved :: MIT License",
24 |         "Operating System :: OS Independent",
25 |         "Programming Language :: Python :: 3",
26 |         "Programming Language :: Python :: 3 :: Only",
27 |         "Programming Language :: Python :: 3.7",
28 |         "Programming Language :: Python :: 3.8",
29 |         "Programming Language :: Python :: 3.9",
30 |         "Programming Language :: Python :: 3.10",
31 |         "Programming Language :: Python :: 3.11",
32 |         "Programming Language :: Python :: 3.12",
33 |         "Topic :: Education",
34 |         "Topic :: Scientific/Engineering",
35 |         "Topic :: Scientific/Engineering :: Artificial Intelligence",
36 |         "Topic :: Scientific/Engineering :: Image Recognition",
37 |         "Topic :: Scientific/Engineering :: Visualization",
38 |         "Topic :: Software Development",
39 |         "Topic :: Software Development :: Libraries",
40 |         "Topic :: Software Development :: Libraries :: Python Modules",
41 |     ],
42 |     python_requires='>=3.7',  # Matches the lowest version advertised in the classifiers above.
43 |     install_requires=[
44 |         'scipy',
45 |         'pillow',
46 |         'deprecated',
47 |         'imageio',
48 |         'packaging',
49 |         'importlib-metadata; python_version < "3.8"',  # Backport of importlib.metadata (stdlib from 3.8 on).
50 |     ],
51 |     extras_require={  # Optional dependency groups: pip install tf-keras-vis[develop] etc.
52 |         'develop': [
53 |             'flake8',
54 |             'flake8-docstrings',
55 |             'isort',
56 |             'yapf',
57 |             'pytest',
58 |             'pytest-pycodestyle',
59 |             'pytest-cov',
60 |             'pytest-env',
61 |             'pytest-xdist',
62 |         ],
63 |         'docs': [
64 |             'sphinx',
65 |             'sphinx-autobuild',
66 |             'sphinx-rtd-theme',
67 |             'myst-parser',
68 |             'nbsphinx',
69 |             'pandoc',
70 |         ],
71 |         'examples': [
72 |             'jupyterlab',
73 |             'jupyterlab_code_formatter',
74 |             'matplotlib',
75 |         ],
76 |         'protobuf3': ['protobuf~=3.20.0'],  # Pin groups for CI matrices against protobuf 3 / numpy 1.x / numpy 2.x.
77 |         'numpy1x': ['numpy~=1.0'],
78 |         'numpy2x': ['numpy~=2.0'],
79 |     },
80 |     include_package_data=True,  # Ship non-.py files declared in MANIFEST.in.
81 | )
82 |
--------------------------------------------------------------------------------
/tests/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keisen/tf-keras-vis/8bb8392ae5894c91acdfacaa898088c6b0c7a522/tests/__init__.py
--------------------------------------------------------------------------------
/tests/tf_keras_vis/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keisen/tf-keras-vis/8bb8392ae5894c91acdfacaa898088c6b0c7a522/tests/tf_keras_vis/__init__.py
--------------------------------------------------------------------------------
/tests/tf_keras_vis/activation_maximization/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keisen/tf-keras-vis/8bb8392ae5894c91acdfacaa898088c6b0c7a522/tests/tf_keras_vis/activation_maximization/__init__.py
--------------------------------------------------------------------------------
/tests/tf_keras_vis/activation_maximization/callbacks_test.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import pytest
4 |
5 | from tf_keras_vis.activation_maximization import ActivationMaximization # noqa: E501
6 | from tf_keras_vis.activation_maximization.callbacks import GifGenerator2D, PrintLogger, Progress
7 | from tf_keras_vis.utils.scores import CategoricalScore
8 |
9 |
10 | class TestPrintLogger():
11 |
12 | def test__init__(self):
13 | interval = 999
14 | logger = PrintLogger(interval)
15 | assert logger.interval == interval
16 |
17 | @pytest.mark.usefixtures("mixed_precision")
18 | def test__call__(self, conv_model):
19 | activation_maximization = ActivationMaximization(conv_model)
20 | result = activation_maximization(CategoricalScore(1), steps=1, callbacks=PrintLogger(1))
21 | assert result.shape == (1, 8, 8, 3)
22 |
23 | @pytest.mark.usefixtures("mixed_precision")
24 | def test__call__without_regularization(self, conv_model):
25 | activation_maximization = ActivationMaximization(conv_model)
26 | result = activation_maximization(CategoricalScore(1),
27 | steps=1,
28 | regularizers=None,
29 | callbacks=PrintLogger(1))
30 | assert result.shape == (1, 8, 8, 3)
31 |
32 |
33 | class TestProgress():
34 |
35 | @pytest.mark.usefixtures("mixed_precision")
36 | def test__call__(self, multiple_outputs_model):
37 | activation_maximization = ActivationMaximization(multiple_outputs_model)
38 | result = activation_maximization(
39 | [CategoricalScore(0), CategoricalScore(0)], callbacks=Progress())
40 | assert result.shape == (1, 8, 8, 3)
41 |
42 | @pytest.mark.usefixtures("mixed_precision")
43 | def test__call__without_regularizers(self, conv_model):
44 | activation_maximization = ActivationMaximization(conv_model)
45 | result = activation_maximization(CategoricalScore(0),
46 | regularizers=None,
47 | callbacks=Progress())
48 | assert result.shape == (1, 8, 8, 3)
49 |
50 |
51 | class TestGifGenerator2D():
52 |
53 | def test__init__(self, tmpdir):
54 | path = tmpdir.mkdir("tf-keras-vis").join("test.gif")
55 | generator = GifGenerator2D(path)
56 | assert generator.path == path
57 |
58 | @pytest.mark.usefixtures("mixed_precision")
59 | def test__call__(self, tmpdir, conv_model):
60 | path = tmpdir.mkdir("tf-keras-vis").join("test.gif")
61 | activation_maximization = ActivationMaximization(conv_model)
62 | assert not os.path.isfile(path)
63 | result = activation_maximization(CategoricalScore(0), callbacks=GifGenerator2D(str(path)))
64 | assert os.path.isfile(path)
65 | assert result.shape == (1, 8, 8, 3)
66 |
--------------------------------------------------------------------------------
/tests/tf_keras_vis/activation_maximization/input_modifiers_test.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import tensorflow as tf
3 |
4 | from tf_keras_vis import keras
5 | from tf_keras_vis.activation_maximization.input_modifiers import Rotate, Rotate2D, Scale
6 | from tf_keras_vis.utils.test import NO_ERROR, assert_raises, dummy_sample
7 |
8 |
9 | class TestRotate():
10 | @pytest.mark.parametrize("degree", [0, 1, 3, 0.0, 1.0, 3.0])
11 | @pytest.mark.parametrize("axes,expected_error", [
12 | (None, NO_ERROR),
13 | ((None,), ValueError),
14 | ((0,), ValueError),
15 | ((0, 1), NO_ERROR),
16 | ([0, 1], NO_ERROR),
17 | ((0.0, 1.0), TypeError),
18 | ((0, 1, 2), ValueError),
19 | ])
20 | def test__init__(self, degree, axes, expected_error):
21 | with assert_raises(expected_error):
22 | if axes is None:
23 | instance = Rotate(degree=degree)
24 | else:
25 | instance = Rotate(axes=axes, degree=degree)
26 | assert instance.axes == axes
27 | assert instance.degree == float(degree)
28 |
29 |
30 | class TestRotate2D():
31 | @pytest.mark.parametrize("degree", [0, 1, 3, 0.0, 1.0, 3.0])
32 | def test__init__(self, degree):
33 | instance = Rotate2D(degree=degree)
34 | assert instance.axes == (1, 2)
35 | assert instance.degree == float(degree)
36 |
37 |
38 | class TestScale():
39 | @pytest.mark.parametrize(
40 | "seed_input", [dummy_sample(
41 | (1, 8, 8, 3)), tf.constant(dummy_sample((1, 8, 8, 3)))])
42 | def test__call__(self, seed_input):
43 | result = Scale()(seed_input)
44 | assert result.shape == seed_input.shape
45 |
--------------------------------------------------------------------------------
/tests/tf_keras_vis/attentions_test.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 | import tensorflow as tf
4 | from packaging.version import parse as version
5 |
6 | from tf_keras_vis import keras
7 | from tf_keras_vis.gradcam import Gradcam
8 | from tf_keras_vis.gradcam_plus_plus import GradcamPlusPlus
9 | from tf_keras_vis.layercam import Layercam
10 | from tf_keras_vis.saliency import Saliency
11 | from tf_keras_vis.scorecam import Scorecam
12 | from tf_keras_vis.utils.scores import BinaryScore, CategoricalScore
13 | from tf_keras_vis.utils.test import (NO_ERROR, assert_raises, dummy_sample, mock_conv_model,
14 | mock_conv_model_with_float32_output, mock_multiple_io_model,
15 | score_with_list, score_with_tensor, score_with_tuple)
16 |
17 |
18 | @pytest.fixture(scope='function', params=[Gradcam, GradcamPlusPlus, Scorecam, Layercam])
19 | def xcam(request):
20 | global Xcam
21 | Xcam = request.param
22 | yield
23 | Xcam = None
24 |
25 |
26 | @pytest.fixture(scope='function')
27 | def saliency():
28 | global Xcam
29 | Xcam = Saliency
30 | yield
31 | Xcam = None
32 |
33 |
34 | class TestXcam():
35 | @pytest.mark.parametrize("scores,expected_error", [
36 | (None, ValueError),
37 | (CategoricalScore(0), NO_ERROR),
38 | (score_with_tuple, NO_ERROR),
39 | (score_with_list, NO_ERROR),
40 | (score_with_tensor, NO_ERROR),
41 | ([None], ValueError),
42 | ([CategoricalScore(0)], NO_ERROR),
43 | ([score_with_tuple], NO_ERROR),
44 | ([score_with_list], NO_ERROR),
45 | ([score_with_tensor], NO_ERROR),
46 | ])
47 | @pytest.mark.usefixtures("xcam", "saliency", "mixed_precision")
48 | def test__call__if_score_is_(self, scores, expected_error, conv_model):
49 | cam = Xcam(conv_model)
50 | with assert_raises(expected_error):
51 | result = cam(scores, dummy_sample((1, 8, 8, 3)))
52 | assert result.shape == (1, 8, 8)
53 |
54 | @pytest.mark.parametrize("seed_input,expected,expected_error", [
55 | (None, None, ValueError),
56 | (dummy_sample((8,)), None, ValueError),
57 | (dummy_sample((8, 8, 3)), (1, 8, 8), NO_ERROR),
58 | ([dummy_sample((8, 8, 3))], [(1, 8, 8)], NO_ERROR),
59 | (dummy_sample((1, 8, 8, 3)), (1, 8, 8), NO_ERROR),
60 | ([dummy_sample((1, 8, 8, 3))], [(1, 8, 8)], NO_ERROR),
61 | ])
62 | @pytest.mark.usefixtures("xcam", "saliency", "mixed_precision")
63 | def test__call__if_seed_input_is_(self, seed_input, expected, expected_error, conv_model):
64 | cam = Xcam(conv_model)
65 | with assert_raises(expected_error):
66 | result = cam(CategoricalScore(0), seed_input)
67 | if type(expected) is list:
68 | assert type(result) is list
69 | expected = expected[0]
70 | result = result[0]
71 | assert result.shape == expected
72 |
73 | @pytest.mark.parametrize("penultimate_layer,seek_penultimate_conv_layer,expected_error", [
74 | (None, True, NO_ERROR),
75 | (-1, True, NO_ERROR),
76 | (-1.0, True, ValueError),
77 | ('dense_1', True, NO_ERROR),
78 | ('dense_1', False, ValueError),
79 | (1, False, NO_ERROR),
80 | (1, True, NO_ERROR),
81 | ('conv_1', True, NO_ERROR),
82 | (0, True, ValueError),
83 | ('input_1', True, ValueError),
84 | (CategoricalScore(0), True, ValueError),
85 | (mock_conv_model().layers[-1], False, ValueError),
86 | ])
87 | @pytest.mark.usefixtures("xcam", "mixed_precision")
88 | def test__call__if_penultimate_layer_is(self, penultimate_layer, seek_penultimate_conv_layer,
89 | expected_error, conv_model):
90 | cam = Xcam(conv_model)
91 | with assert_raises(expected_error):
92 | result = cam(CategoricalScore(0),
93 | dummy_sample((1, 8, 8, 3)),
94 | penultimate_layer=penultimate_layer,
95 | seek_penultimate_conv_layer=seek_penultimate_conv_layer)
96 | assert result.shape == (1, 8, 8)
97 |
98 | @pytest.mark.usefixtures("xcam", "mixed_precision")
99 | def test__call__if_expand_cam_is_False(self, conv_model):
100 | cam = Xcam(conv_model)
101 | result = cam(CategoricalScore(0), dummy_sample((1, 8, 8, 3)), expand_cam=False)
102 | assert result.shape == (1, 6, 6)
103 |
104 | @pytest.mark.parametrize("score_class", [BinaryScore, CategoricalScore])
105 | @pytest.mark.parametrize("modifier_enabled", [False, True])
106 | @pytest.mark.parametrize("clone_enabled", [False, True])
107 | @pytest.mark.parametrize("batch_size", [0, 1, 5])
108 | @pytest.mark.usefixtures("xcam", "saliency", "mixed_precision")
109 | def test__call__with_categorical_score(self, score_class, modifier_enabled, clone_enabled,
110 | batch_size, conv_model, conv_sigmoid_model):
111 | # Release v.0.6.0@dev(May 22 2021):
112 | # Add this case to test Scorecam with CAM class.
113 | def model_modifier(model):
114 | model.layers[-1].activation = keras.activations.linear
115 |
116 | if score_class is BinaryScore:
117 | model = conv_sigmoid_model
118 | else:
119 | model = conv_model
120 |
121 | score_targets = np.random.randint(0, 1, max(batch_size, 1))
122 | score = score_class(list(score_targets))
123 |
124 | seed_input_shape = (8, 8, 3)
125 | if batch_size > 0:
126 | seed_input_shape = (batch_size,) + seed_input_shape
127 | seed_input = dummy_sample(seed_input_shape)
128 |
129 | cam = Xcam(model,
130 | model_modifier=model_modifier if modifier_enabled else None,
131 | clone=clone_enabled)
132 | result = cam(score, seed_input=seed_input)
133 | if modifier_enabled and clone_enabled:
134 | assert model is not cam.model
135 | else:
136 | assert model is cam.model
137 | assert result.shape == (max(batch_size, 1), 8, 8)
138 |
139 | @pytest.mark.parametrize("expand_cam", [False, True])
140 | @pytest.mark.usefixtures("xcam", "mixed_precision")
141 | def test__call__with_expand_cam(self, expand_cam, conv_model):
142 | cam = Xcam(conv_model)
143 | result = cam(CategoricalScore(0), [dummy_sample((1, 8, 8, 3))], expand_cam=expand_cam)
144 | if expand_cam:
145 | assert result[0].shape == (1, 8, 8)
146 | else:
147 | assert result.shape == (1, 6, 6)
148 |
149 |
150 | class TestXcamWithMultipleInputsModel():
151 | @pytest.mark.parametrize("scores,expected_error", [
152 | (None, ValueError),
153 | (CategoricalScore(0), NO_ERROR),
154 | (score_with_tuple, NO_ERROR),
155 | (score_with_list, NO_ERROR),
156 | ([None], ValueError),
157 | ([CategoricalScore(0)], NO_ERROR),
158 | ([score_with_tuple], NO_ERROR),
159 | ([score_with_list], NO_ERROR),
160 | ])
161 | @pytest.mark.usefixtures("xcam", "saliency", "mixed_precision")
162 | def test__call__if_score_is_(self, scores, expected_error, multiple_inputs_model):
163 | cam = Xcam(multiple_inputs_model)
164 | with assert_raises(expected_error):
165 | result = cam(scores, [dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))])
166 | assert len(result) == 2
167 | assert result[0].shape == (1, 8, 8)
168 | assert result[1].shape == (1, 10, 10)
169 |
170 | @pytest.mark.parametrize("seed_input,expected_error", [
171 | (None, ValueError),
172 | (dummy_sample((1, 8, 8, 3)), ValueError),
173 | ([dummy_sample((1, 8, 8, 3))], ValueError),
174 | ([dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))], NO_ERROR),
175 | ])
176 | @pytest.mark.usefixtures("xcam", "saliency", "mixed_precision")
177 | def test__call__if_seed_input_is_(self, seed_input, expected_error, multiple_inputs_model):
178 | cam = Xcam(multiple_inputs_model)
179 | with assert_raises(expected_error):
180 | result = cam(CategoricalScore(0), seed_input)
181 | assert result[0].shape == (1, 8, 8)
182 | assert result[1].shape == (1, 10, 10)
183 |
184 | @pytest.mark.parametrize("expand_cam", [False, True])
185 | @pytest.mark.usefixtures("xcam", "mixed_precision")
186 | def test__call__with_expand_cam(self, expand_cam, multiple_inputs_model):
187 | cam = Xcam(multiple_inputs_model)
188 | result = cam(CategoricalScore(0),
189 | [dummy_sample(
190 | (1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))],
191 | expand_cam=expand_cam)
192 | if expand_cam:
193 | assert result[0].shape == (1, 8, 8)
194 | assert result[1].shape == (1, 10, 10)
195 | else:
196 | assert result.shape == (1, 8, 8)
197 |
198 |
199 | class TestXcamWithMultipleOutputsModel():
200 | @pytest.mark.parametrize("scores,expected_error", [
201 | (None, ValueError),
202 | ([None], ValueError),
203 | (CategoricalScore(0), ValueError),
204 | ([CategoricalScore(0)], ValueError),
205 | ([None, None], ValueError),
206 | ([CategoricalScore(0), None], ValueError),
207 | ([None, CategoricalScore(0)], ValueError),
208 | ([CategoricalScore(0), BinaryScore(0)], NO_ERROR),
209 | ([score_with_tuple, score_with_tuple], NO_ERROR),
210 | ([score_with_list, score_with_list], NO_ERROR),
211 | ])
212 | @pytest.mark.usefixtures("xcam", "saliency", "mixed_precision")
213 | def test__call__if_score_is_(self, scores, expected_error, multiple_outputs_model):
214 | cam = Xcam(multiple_outputs_model)
215 | with assert_raises(expected_error):
216 | result = cam(scores, dummy_sample((1, 8, 8, 3)))
217 | assert result.shape == (1, 8, 8)
218 |
219 | @pytest.mark.parametrize("seed_input,expected,expected_error", [
220 | (None, None, ValueError),
221 | (dummy_sample((8,)), None, ValueError),
222 | (dummy_sample((8, 8, 3)), (1, 8, 8), NO_ERROR),
223 | ([dummy_sample((8, 8, 3))], [(1, 8, 8)], NO_ERROR),
224 | (dummy_sample((1, 8, 8, 3)), (1, 8, 8), NO_ERROR),
225 | ([dummy_sample((1, 8, 8, 3))], [(1, 8, 8)], NO_ERROR),
226 | ])
227 | @pytest.mark.usefixtures("xcam", "saliency", "mixed_precision")
228 | def test__call__if_seed_input_is_(self, seed_input, expected, expected_error,
229 | multiple_outputs_model):
230 | cam = Xcam(multiple_outputs_model)
231 | with assert_raises(expected_error):
232 | result = cam([CategoricalScore(0), BinaryScore(0)], seed_input)
233 | if type(expected) is list:
234 | assert type(result) is list
235 | expected = expected[0]
236 | result = result[0]
237 | assert result.shape == expected
238 |
239 | @pytest.mark.parametrize("expand_cam", [False, True])
240 | @pytest.mark.usefixtures("xcam", "mixed_precision")
241 | def test__call__with_expand_cam(self, expand_cam, multiple_outputs_model):
242 | cam = Xcam(multiple_outputs_model)
243 | result = cam([CategoricalScore(0), BinaryScore(0)], [dummy_sample((1, 8, 8, 3))],
244 | expand_cam=expand_cam)
245 | if expand_cam:
246 | assert result[0].shape == (1, 8, 8)
247 | else:
248 | assert result.shape == (1, 6, 6)
249 |
250 |
251 | class TestXcamWithMultipleIOModel():
252 | @pytest.mark.parametrize("scores,expected_error", [
253 | (None, ValueError),
254 | ([None], ValueError),
255 | (CategoricalScore(0), ValueError),
256 | ([CategoricalScore(0)], ValueError),
257 | ([None, None], ValueError),
258 | ([CategoricalScore(0), None], ValueError),
259 | ([None, BinaryScore(0)], ValueError),
260 | ([CategoricalScore(0), BinaryScore(0)], NO_ERROR),
261 | ([score_with_tuple, score_with_tuple], NO_ERROR),
262 | ([score_with_list, score_with_list], NO_ERROR),
263 | ])
264 | @pytest.mark.usefixtures("xcam", "saliency", "mixed_precision")
265 | def test__call__if_score_is_(self, scores, expected_error, multiple_io_model):
266 | cam = Xcam(multiple_io_model)
267 | with assert_raises(expected_error):
268 | result = cam(scores, [dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))])
269 | assert result[0].shape == (1, 8, 8)
270 | assert result[1].shape == (1, 10, 10)
271 |
272 | @pytest.mark.parametrize("seed_input,expected_error", [
273 | (None, ValueError),
274 | (dummy_sample((1, 8, 8, 3)), ValueError),
275 | ([dummy_sample((1, 8, 8, 3))], ValueError),
276 | ([dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))], NO_ERROR),
277 | ])
278 | @pytest.mark.usefixtures("xcam", "saliency", "mixed_precision")
279 | def test__call__if_seed_input_is_(self, seed_input, expected_error, multiple_io_model):
280 | cam = Xcam(multiple_io_model)
281 | with assert_raises(expected_error):
282 | result = cam([CategoricalScore(0), BinaryScore(0)], seed_input)
283 | assert result[0].shape == (1, 8, 8)
284 | assert result[1].shape == (1, 10, 10)
285 |
286 | @pytest.mark.parametrize("expand_cam", [False, True])
287 | @pytest.mark.usefixtures("xcam", "mixed_precision")
288 | def test__call__with_expand_cam(self, expand_cam, multiple_io_model):
289 | cam = Xcam(multiple_io_model)
290 | result = cam([CategoricalScore(0), BinaryScore(0)],
291 | [dummy_sample(
292 | (1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))],
293 | expand_cam=expand_cam)
294 | if expand_cam:
295 | assert result[0].shape == (1, 8, 8)
296 | assert result[1].shape == (1, 10, 10)
297 | else:
298 | assert result.shape == (1, 8, 8)
299 |
300 |
301 | class TestXcamWithDenseModel():
302 | @pytest.mark.usefixtures("xcam", "saliency", "mixed_precision")
303 | def test__call__(self, dense_model):
304 | cam = Xcam(dense_model)
305 | with assert_raises(ValueError):
306 | result = cam(CategoricalScore(0), dummy_sample((1, 8, 8, 3)))
307 | assert result.shape == (1, 8, 8)
308 |
309 |
310 | @pytest.mark.skipif(version(tf.version.VERSION) < version("2.4.0"),
311 | reason="This test is enabled when tensorflow version is 2.4.0+.")
312 | class TestMixedPrecision():
313 | @pytest.mark.usefixtures("xcam", "saliency")
314 | def test__call__with_single_io(self, tmpdir):
315 | # Create and save lower precision model
316 | keras.mixed_precision.set_global_policy('mixed_float16')
317 | model = mock_conv_model()
318 | self._test_for_single_io(model)
319 | path = str(tmpdir.mkdir("tf-keras-vis").join("single_io.keras"))
320 | model.save(path)
321 | # Load and test lower precision model on lower precision environment
322 | model = keras.models.load_model(path)
323 | self._test_for_single_io(model)
324 | # Load and test lower precision model on full precision environment
325 | keras.mixed_precision.set_global_policy('float32')
326 | model = keras.models.load_model(path)
327 | self._test_for_single_io(model)
328 |
329 | @pytest.mark.usefixtures("xcam", "saliency")
330 | def test__call__with_float32_output_model(self, tmpdir):
331 | # Create and save lower precision model
332 | keras.mixed_precision.set_global_policy('mixed_float16')
333 | model = mock_conv_model_with_float32_output()
334 | self._test_for_single_io(model)
335 | path = str(tmpdir.mkdir("tf-keras-vis").join("float32_output.keras"))
336 | model.save(path)
337 | # Load and test lower precision model on lower precision environment
338 | model = keras.models.load_model(path)
339 | self._test_for_single_io(model)
340 | # Load and test lower precision model on full precision environment
341 | keras.mixed_precision.set_global_policy('float32')
342 | model = keras.models.load_model(path)
343 | self._test_for_single_io(model)
344 |
345 | def _test_for_single_io(self, model):
346 | cam = Xcam(model)
347 | result = cam(CategoricalScore(0), dummy_sample((1, 8, 8, 3)))
348 | assert result.shape == (1, 8, 8)
349 |
350 | @pytest.mark.usefixtures("xcam", "saliency")
351 | def test__call__with_multiple_io(self, tmpdir):
352 | # Create and save lower precision model
353 | keras.mixed_precision.set_global_policy('mixed_float16')
354 | model = mock_multiple_io_model()
355 | self._test_for_multiple_io(model)
356 | path = str(tmpdir.mkdir("tf-keras-vis").join("multiple_io.keras"))
357 | model.save(path)
358 | # Load and test lower precision model on lower precision environment
359 | model = keras.models.load_model(path)
360 | self._test_for_multiple_io(model)
361 | # Load and test lower precision model on full precision environment
362 | keras.mixed_precision.set_global_policy('float32')
363 | model = keras.models.load_model(path)
364 | self._test_for_multiple_io(model)
365 |
366 | def _test_for_multiple_io(self, model):
367 | cam = Xcam(model)
368 | result = cam([CategoricalScore(0), CategoricalScore(0)],
369 | [dummy_sample(
370 | (1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))])
371 | assert result[0].shape == (1, 8, 8)
372 | assert result[1].shape == (1, 10, 10)
373 |
--------------------------------------------------------------------------------
/tests/tf_keras_vis/conftest.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import pytest
4 | import tensorflow as tf
5 | from packaging.version import parse as version
6 |
7 | from tf_keras_vis import keras
8 | from tf_keras_vis.utils.test import (mock_conv_model, mock_conv_model_with_sigmoid_output,
9 | mock_dense_model, mock_multiple_inputs_model,
10 | mock_multiple_io_model, mock_multiple_outputs_model)
11 |
12 |
13 | def _get_supported_policies():
14 | if version(tf.version.VERSION) < version("2.4.0"):
15 | return ["float32"]
16 | else:
17 | return ["float32", "mixed_float16"]
18 |
19 |
20 | def _source_of_models():
21 | return [None] + _get_supported_policies()
22 |
23 |
24 | def _save_and_load(model, source, path):
25 | if source is None:
26 | return model
27 |
28 | if version(tf.version.VERSION) >= version("2.16.0rc0"):
29 | path = path[:-3] + '.keras'
30 |
31 | if source == "mixed_float16":
32 | policy = keras.mixed_precision.global_policy()
33 | keras.mixed_precision.set_global_policy(source)
34 | try:
35 | model.save(path)
36 | finally:
37 | keras.mixed_precision.set_global_policy(policy)
38 | else:
39 | model.save(path)
40 | return keras.models.load_model(path)
41 |
42 |
43 | @pytest.fixture(scope='function', params=_get_supported_policies())
44 | def mixed_precision(request):
45 | if version(tf.version.VERSION) >= version("2.4.0"):
46 | keras.mixed_precision.set_global_policy(request.param)
47 | yield
48 | if version(tf.version.VERSION) >= version("2.4.0"):
49 | keras.mixed_precision.set_global_policy("float32")
50 |
51 |
52 | @pytest.fixture(scope='function', params=_source_of_models())
53 | def dense_model(request, tmpdir):
54 | return _save_and_load(mock_dense_model(), request.param,
55 | os.path.join(tmpdir, 'dense-model.keras'))
56 |
57 |
58 | @pytest.fixture(scope='function', params=_source_of_models())
59 | def conv_model(request, tmpdir):
60 | return _save_and_load(mock_conv_model(), request.param,
61 | os.path.join(tmpdir, 'conv-model.keras'))
62 |
63 |
64 | @pytest.fixture(scope='function', params=_source_of_models())
65 | def conv_sigmoid_model(request, tmpdir):
66 | return _save_and_load(mock_conv_model_with_sigmoid_output(), request.param,
67 | os.path.join(tmpdir, 'conv-model-with-sigmoid-output.keras'))
68 |
69 |
70 | @pytest.fixture(scope='function', params=_source_of_models())
71 | def multiple_inputs_model(request, tmpdir):
72 | return _save_and_load(mock_multiple_inputs_model(), request.param,
73 | os.path.join(tmpdir, 'multiple-inputs-model.keras'))
74 |
75 |
76 | @pytest.fixture(scope='function', params=_source_of_models())
77 | def multiple_outputs_model(request, tmpdir):
78 | return _save_and_load(mock_multiple_outputs_model(), request.param,
79 | os.path.join(tmpdir, 'multiple-outputs-model.keras'))
80 |
81 |
82 | @pytest.fixture(scope='function', params=_source_of_models())
83 | def multiple_io_model(request, tmpdir):
84 | return _save_and_load(mock_multiple_io_model(), request.param,
85 | os.path.join(tmpdir, 'multiple-io-model.keras'))
86 |
--------------------------------------------------------------------------------
/tests/tf_keras_vis/gradcam_plus_plus_test.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import tensorflow as tf
3 |
4 | from tf_keras_vis import keras
5 | from tf_keras_vis.gradcam_plus_plus import GradcamPlusPlus
6 | from tf_keras_vis.utils.scores import CategoricalScore
7 | from tf_keras_vis.utils.test import dummy_sample
8 |
9 |
10 | class TestGradcamPlusPlus():
11 | @pytest.mark.parametrize("gradient_modifier",
12 | [None, (lambda cam: keras.activations.relu(cam))])
13 | @pytest.mark.parametrize("activation_modifier",
14 | [None, (lambda cam: keras.activations.relu(cam))])
15 | @pytest.mark.usefixtures("mixed_precision")
16 | def test__call__if_activation_modifier_is_(self, gradient_modifier, activation_modifier,
17 | conv_model):
18 | cam = GradcamPlusPlus(conv_model)
19 | result = cam(CategoricalScore(0),
20 | dummy_sample((1, 8, 8, 3)),
21 | gradient_modifier=gradient_modifier,
22 | activation_modifier=activation_modifier)
23 | assert result.shape == (1, 8, 8)
24 |
--------------------------------------------------------------------------------
/tests/tf_keras_vis/gradcam_test.py:
--------------------------------------------------------------------------------
1 | class TestGradcam():
2 | pass
3 |
--------------------------------------------------------------------------------
/tests/tf_keras_vis/saliency_test.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import tensorflow as tf
3 | from packaging.version import parse as version
4 |
5 | from tf_keras_vis import keras
6 | from tf_keras_vis.saliency import Saliency
7 | from tf_keras_vis.utils.scores import BinaryScore, CategoricalScore
8 | from tf_keras_vis.utils.test import (dummy_sample, mock_conv_model,
9 | mock_conv_model_with_float32_output, mock_multiple_io_model)
10 |
11 | if version(tf.version.VERSION) >= version("2.4.0"):
12 | from tensorflow.keras.mixed_precision import set_global_policy
13 |
14 |
15 | class TestSaliency():
16 | @pytest.mark.parametrize("keepdims,expected", [
17 | (False, (1, 8, 8)),
18 | (True, (1, 8, 8, 3)),
19 | ])
20 | @pytest.mark.usefixtures("mixed_precision")
21 | def test__call__if_keepdims_is_(self, keepdims, expected, conv_model):
22 | saliency = Saliency(conv_model)
23 | result = saliency(CategoricalScore(0), dummy_sample((1, 8, 8, 3)), keepdims=keepdims)
24 | assert result.shape == expected
25 |
26 | @pytest.mark.parametrize("smooth_samples", [1, 3, 100])
27 | @pytest.mark.usefixtures("mixed_precision")
28 | def test__call__if_smoothing_is_active(self, smooth_samples, conv_model):
29 | saliency = Saliency(conv_model)
30 | result = saliency(CategoricalScore(0),
31 | dummy_sample((1, 8, 8, 3)),
32 | smooth_samples=smooth_samples)
33 | assert result.shape == (1, 8, 8)
34 |
35 | @pytest.mark.usefixtures("mixed_precision")
36 | def test__call__if_model_has_only_dense_layers(self, dense_model):
37 | saliency = Saliency(dense_model)
38 | result = saliency(CategoricalScore(0), dummy_sample((8,)), keepdims=True)
39 | assert result.shape == (1, 8)
40 |
41 |
42 | class TestSaliencyWithMultipleInputsModel():
43 | @pytest.mark.parametrize("keepdims,expected", [
44 | (False, [(1, 8, 8), (1, 10, 10)]),
45 | (True, [(1, 8, 8, 3), (1, 10, 10, 3)]),
46 | ])
47 | @pytest.mark.usefixtures("mixed_precision")
48 | def test__call__if_keepdims_is_(self, keepdims, expected, multiple_inputs_model):
49 | saliency = Saliency(multiple_inputs_model)
50 | result = saliency(
51 | CategoricalScore(0), [dummy_sample(
52 | (1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))],
53 | keepdims=keepdims)
54 | assert len(result) == 2
55 | assert result[0].shape == expected[0]
56 | assert result[1].shape == expected[1]
57 |
58 | @pytest.mark.parametrize("smooth_samples", [1, 3, 100])
59 | @pytest.mark.usefixtures("mixed_precision")
60 | def test__call__if_smoothing_is_active(self, smooth_samples, multiple_inputs_model):
61 | saliency = Saliency(multiple_inputs_model)
62 | result = saliency(
63 | CategoricalScore(0), [dummy_sample(
64 | (1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))],
65 | smooth_samples=smooth_samples)
66 | assert len(result) == 2
67 | assert result[0].shape == (1, 8, 8)
68 | assert result[1].shape == (1, 10, 10)
69 |
70 |
71 | class TestSaliencyWithMultipleOutputsModel():
72 | pass
73 |
74 |
75 | class TestSaliencyWithMultipleIOModel():
76 | pass
77 |
78 |
79 | @pytest.mark.skipif(version(tf.version.VERSION) < version("2.4.0"),
80 | reason="This test is enabled when tensorflow version is 2.4.0+.")
81 | class TestMixedPrecision():
82 | def test__call__with_single_io(self, tmpdir):
83 | # Create and save lower precision model
84 | keras.mixed_precision.set_global_policy('mixed_float16')
85 | model = mock_conv_model()
86 | self._test_for_single_io(model)
87 | path = str(tmpdir.mkdir("tf-keras-vis").join("single_io.keras"))
88 | model.save(path)
89 | # Load and test lower precision model on lower precision environment
90 | model = keras.models.load_model(path)
91 | self._test_for_single_io(model)
92 | # Load and test lower precision model on full precision environment
93 | keras.mixed_precision.set_global_policy('float32')
94 | model = keras.models.load_model(path)
95 | self._test_for_single_io(model)
96 |
97 | def test__call__with_float32_output_model(self, tmpdir):
98 | # Create and save lower precision model
99 | keras.mixed_precision.set_global_policy('mixed_float16')
100 | model = mock_conv_model_with_float32_output()
101 | self._test_for_single_io(model)
102 | path = str(tmpdir.mkdir("tf-keras-vis").join("float32_output.keras"))
103 | model.save(path)
104 | # Load and test lower precision model on lower precision environment
105 | model = keras.models.load_model(path)
106 | self._test_for_single_io(model)
107 | # Load and test lower precision model on full precision environment
108 | keras.mixed_precision.set_global_policy('float32')
109 | model = keras.models.load_model(path)
110 | self._test_for_single_io(model)
111 |
112 | def _test_for_single_io(self, model):
113 | saliency = Saliency(model)
114 | result = saliency(CategoricalScore(0), dummy_sample((1, 8, 8, 3)))
115 | assert result.shape == (1, 8, 8)
116 |
117 | def test__call__with_multiple_io(self, tmpdir):
118 | # Create and save lower precision model
119 | keras.mixed_precision.set_global_policy('mixed_float16')
120 | model = mock_multiple_io_model()
121 | self._test_for_multiple_io(model)
122 | path = str(tmpdir.mkdir("tf-keras-vis").join("multiple_io.keras"))
123 | model.save(path)
124 | # Load and test lower precision model on lower precision environment
125 | model = keras.models.load_model(path)
126 | self._test_for_multiple_io(model)
127 | # Load and test lower precision model on full precision environment
128 | keras.mixed_precision.set_global_policy('float32')
129 | model = keras.models.load_model(path)
130 | self._test_for_multiple_io(model)
131 |
132 | def _test_for_multiple_io(self, model):
133 | saliency = Saliency(model)
134 | result = saliency(
135 | [CategoricalScore(0), BinaryScore(0)],
136 | [dummy_sample((1, 8, 8, 3)), dummy_sample((1, 10, 10, 3))])
137 | assert len(result) == 2
138 | assert result[0].shape == (1, 8, 8)
139 | assert result[1].shape == (1, 10, 10)
140 |
--------------------------------------------------------------------------------
/tests/tf_keras_vis/scorecam_test.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 | import tensorflow as tf
4 |
5 | from tf_keras_vis import keras
6 | from tf_keras_vis.scorecam import Scorecam
7 | from tf_keras_vis.utils.scores import CategoricalScore
8 | from tf_keras_vis.utils.test import (NO_ERROR, assert_raises, dummy_sample, score_with_list,
9 | score_with_tensor, score_with_tuple)
10 |
11 |
class TestScorecam():
    """Tests for Scorecam on a single-input CNN."""

    @pytest.mark.parametrize("max_N,expected_error", [
        (-100, NO_ERROR),
        (-1, NO_ERROR),
        (0, NO_ERROR),
        (1, NO_ERROR),
        (3, NO_ERROR),
        (100, ValueError),
    ])
    @pytest.mark.usefixtures("mixed_precision")
    def test__call__if_max_N_is_(self, max_N, expected_error, conv_model):
        # A max_N of 100 exceeds what the model provides and must raise.
        with assert_raises(expected_error):
            heatmap = Scorecam(conv_model)(CategoricalScore(0),
                                           dummy_sample((2, 8, 8, 3)),
                                           max_N=max_N)
            assert heatmap.shape == (2, 8, 8)

    @pytest.mark.parametrize("scores,expected_error", [
        (None, ValueError),
        (CategoricalScore(0), NO_ERROR),
        (score_with_tuple, NO_ERROR),
        (score_with_list, NO_ERROR),
        (score_with_tensor, NO_ERROR),
        (lambda x: np.mean(x), ValueError),
        (lambda x: tf.reshape(x, (-1,)), ValueError),
        ([None], ValueError),
        ([CategoricalScore(0)], NO_ERROR),
        ([score_with_tuple], NO_ERROR),
        ([score_with_list], NO_ERROR),
        ([score_with_tensor], NO_ERROR),
        ([lambda x: np.mean(x)], ValueError),
        ([lambda x: tf.reshape(x, (-1,))], ValueError),
    ])
    @pytest.mark.usefixtures("mixed_precision")
    def test__call__if_score_is_(self, scores, expected_error, conv_model):
        # Scores must be callables yielding per-sample values; anything else raises.
        scorecam = Scorecam(conv_model)
        with assert_raises(expected_error):
            heatmap = scorecam(scores, dummy_sample((2, 8, 8, 3)))
            assert heatmap.shape == (2, 8, 8)
50 |
--------------------------------------------------------------------------------
/tests/tf_keras_vis/tf_keras_vis_test.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 | import tensorflow as tf
4 | from packaging.version import parse as version
5 |
6 | from tf_keras_vis import ModelVisualization, keras
7 | from tf_keras_vis.utils.model_modifiers import ReplaceToLinear
8 | from tf_keras_vis.utils.test import dummy_sample
9 |
10 |
class MockVisualizer(ModelVisualization):
    """Minimal concrete subclass used to exercise ModelVisualization's base behavior."""
    def __call__(self):
        # No-op: these tests only check the constructor and helper methods.
        pass
14 |
15 |
class TestModelVisualization():
    """Tests for ModelVisualization's constructor and score-reduction helper."""

    @pytest.mark.parametrize("modifier,clone,expected_same,expected_activation", [
        (None, False, True, keras.activations.softmax),
        (None, True, True, keras.activations.softmax),
        ('not-return', False, True, keras.activations.linear),
        ('not-return', True, False, keras.activations.linear),
        ('return', False, True, keras.activations.linear),
        ('return', True, False, keras.activations.linear),
    ])
    @pytest.mark.usefixtures("mixed_precision")
    def test__init__(self, modifier, clone, expected_same, expected_activation, conv_model):
        """Cloning must occur iff a modifier is given and clone=True; weights are preserved."""
        if modifier == 'return':
            # Modifier that returns the modified model.
            mock = MockVisualizer(conv_model, model_modifier=ReplaceToLinear(), clone=clone)
        elif modifier == 'not-return':
            # BUGFIX: this branch previously duplicated the 'return' branch, so the
            # "modifier mutates in place and returns None" path was never exercised.
            def replace_to_linear_in_place(model):
                model.layers[-1].activation = keras.activations.linear
                # Intentionally returns None.

            mock = MockVisualizer(conv_model,
                                  model_modifier=replace_to_linear_in_place,
                                  clone=clone)
        else:
            mock = MockVisualizer(conv_model, clone=clone)
        assert (mock.model is conv_model) == expected_same
        assert mock.model.layers[-1].activation == expected_activation
        # Weights must be copied to the clone (or untouched when not cloned).
        assert np.array_equal(mock.model.get_weights()[0], conv_model.get_weights()[0])

    @pytest.mark.parametrize("score,expected_shape", [
        (dummy_sample((2, 32, 32, 3)), (2,)),
        ((dummy_sample((32, 32, 3)), dummy_sample((32, 32, 3))), (2,)),
        ([dummy_sample((32, 32, 3)), dummy_sample((32, 32, 3))], (2,)),
        (tf.constant(dummy_sample((2, 32, 32, 3))), (2,)),
        ((tf.constant(dummy_sample((32, 32, 3))), tf.constant(dummy_sample((32, 32, 3)))), (2,)),
        ([tf.constant(dummy_sample((32, 32, 3))),
          tf.constant(dummy_sample((32, 32, 3)))], (2,)),
    ])
    @pytest.mark.usefixtures("mixed_precision")
    def test_mean_score_value(self, score, expected_shape, conv_model):
        """_mean_score_value must reduce every non-batch axis to one value per sample."""
        actual = MockVisualizer(conv_model)._mean_score_value(score)
        assert actual.shape == expected_shape
50 |
--------------------------------------------------------------------------------
/tests/tf_keras_vis/utils/__init__.py:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/keisen/tf-keras-vis/8bb8392ae5894c91acdfacaa898088c6b0c7a522/tests/tf_keras_vis/utils/__init__.py
--------------------------------------------------------------------------------
/tests/tf_keras_vis/utils/model_modifiers_test.py:
--------------------------------------------------------------------------------
1 | import pytest
2 | import tensorflow as tf
3 | from packaging.version import parse as version
4 |
5 | from tf_keras_vis import keras
6 | from tf_keras_vis.activation_maximization import ActivationMaximization
7 | from tf_keras_vis.saliency import Saliency
8 | from tf_keras_vis.utils.model_modifiers import (ExtractIntermediateLayer, GuidedBackpropagation,
9 | ReplaceToLinear)
10 | from tf_keras_vis.utils.scores import CategoricalScore
11 | from tf_keras_vis.utils.test import (NO_ERROR, assert_raises, dummy_sample, mock_conv_model,
12 | mock_multiple_outputs_model)
13 |
14 |
class TestReplaceToLinear():
    """Tests that ReplaceToLinear swaps every output activation for linear."""

    @pytest.mark.parametrize("model", [mock_conv_model(), mock_multiple_outputs_model()])
    @pytest.mark.usefixtures("mixed_precision")
    def test__call__(self, model):
        has_multiple_outputs = len(model.outputs) > 1
        # Precondition: output activations are not linear before modification.
        assert model.get_layer(name='output_1').activation != keras.activations.linear
        if has_multiple_outputs:
            assert model.get_layer(name='output_2').activation != keras.activations.linear
        instance = ActivationMaximization(model, model_modifier=ReplaceToLinear())
        # The model must have been cloned and all outputs replaced with linear.
        assert instance.model != model
        assert instance.model.get_layer(name='output_1').activation == keras.activations.linear
        if has_multiple_outputs:
            assert instance.model.get_layer(name='output_2').activation == keras.activations.linear
            instance([CategoricalScore(0), CategoricalScore(0)])
        else:
            instance([CategoricalScore(0)])
30 |
31 |
class TestExtractIntermediateLayer():
    """Tests that ExtractIntermediateLayer rewires the model output to a hidden layer."""

    @pytest.mark.parametrize("model", [mock_conv_model(), mock_multiple_outputs_model()])
    @pytest.mark.parametrize("layer,expected_error", [
        (None, TypeError),
        (1, NO_ERROR),
        ('conv_1', NO_ERROR),
    ])
    @pytest.mark.usefixtures("mixed_precision")
    def test__call__(self, model, layer, expected_error):
        self._assert_output_shape(model, (None, 2))
        with assert_raises(expected_error):
            instance = ActivationMaximization(model,
                                              model_modifier=ExtractIntermediateLayer(layer))
            assert instance.model != model
            # The new model output must be the intermediate feature map.
            self._assert_output_shape(instance.model, (None, 6, 6, 6))
            instance([CategoricalScore(0)])

    def _assert_output_shape(self, model, expected):
        # TF < 2.15 exposes TensorShape objects; compare via as_list() there.
        if version(tf.version.VERSION) < version("2.15"):
            assert model.outputs[0].shape.as_list() == list(expected)
        else:
            assert model.outputs[0].shape == expected
54 |
55 |
class TestExtractIntermediateLayerForGradcam():
    # TODO(review): placeholder — no coverage for the Gradcam-specific
    # intermediate-layer extraction yet.
    pass
58 |
59 |
class TestExtractGuidedBackpropagation():
    """Tests that GuidedBackpropagation rewrites only the conv layer's activation."""

    @pytest.mark.usefixtures("mixed_precision")
    def test__call__(self, conv_model):
        instance = Saliency(conv_model, model_modifier=GuidedBackpropagation())
        guided_model = instance.model
        assert guided_model != conv_model
        # conv_1's activation is replaced...
        guided_conv_activation = guided_model.get_layer('conv_1').activation
        assert guided_conv_activation != conv_model.get_layer('conv_1').activation
        # ...while dense_1's activation is left untouched.
        guided_dense_activation = guided_model.get_layer('dense_1').activation
        assert guided_dense_activation == conv_model.get_layer('dense_1').activation
        instance(CategoricalScore(0), dummy_sample((1, 8, 8, 3)))
71 |
--------------------------------------------------------------------------------
/tests/tf_keras_vis/utils/scores_test.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import pytest
3 | import tensorflow as tf
4 |
5 | from tf_keras_vis.utils.scores import BinaryScore, CategoricalScore, InactiveScore
6 | from tf_keras_vis.utils.test import NO_ERROR, assert_raises, dummy_sample
7 |
8 |
class TestInactiveScore():
    """Tests that InactiveScore yields all-zero values with the input's shape."""

    @pytest.mark.parametrize("output,expected_shape,expected_error", [
        (dummy_sample((1, 1)), (1, 1), NO_ERROR),
        (dummy_sample((10, 5)), (10, 5), NO_ERROR),
        (dummy_sample((1, 224, 224, 3)), (1, 224, 224, 3), NO_ERROR),
    ])
    def test__call__(self, output, expected_shape, expected_error):
        with assert_raises(expected_error):
            scores = InactiveScore()(output)
            # Every value is zeroed out while the shape is preserved.
            assert np.all(scores == 0.0)
            assert scores.shape == expected_shape
20 |
21 |
class TestBinaryScore():
    """Tests for BinaryScore construction and invocation."""

    @pytest.mark.parametrize("target_values,expected,expected_error", [
        (None, None, ValueError),
        (0, [False], NO_ERROR),
        (1, [True], NO_ERROR),
        (100, [True], NO_ERROR),
        (-1, [True], NO_ERROR),
        (1.0, [True], NO_ERROR),
        ([], None, ValueError),
        ([None], None, ValueError),
        ([0, 0], [False, False], NO_ERROR),
        ([0, 1, 0], [False, True, False], NO_ERROR),
        ([-1, 0], [True, False], NO_ERROR),
    ])
    def test__init__(self, target_values, expected, expected_error):
        # Non-zero values coerce to True; None or empty collections are rejected.
        with assert_raises(expected_error):
            assert BinaryScore(target_values).target_values == expected

    @pytest.mark.parametrize("target_values,output,expected,expected_error", [
        (False, [[1, 1, 0], [1, 0, 1]], [-1], ValueError),
        (False, [[1]], [-1], NO_ERROR),
        (False, [[0]], [0], NO_ERROR),
        (True, [[1]], [1], NO_ERROR),
        (True, [[0]], [0], NO_ERROR),
        (True, [[0], [1], [0]], [0, 1, 0], NO_ERROR),
        (False, [[0], [1], [0]], [0, -1, 0], NO_ERROR),
        ([True, False, True], [[0], [1], [0]], [0, -1, 0], NO_ERROR),
        ([False, True, False], [[0], [1], [0]], [0, 1, 0], NO_ERROR),
    ])
    def test__call__(self, target_values, output, expected, expected_error):
        # A False target negates the model output; outputs must be (N, 1).
        score = BinaryScore(target_values)
        model_output = tf.constant(output, tf.float32)
        with assert_raises(expected_error):
            assert tf.math.reduce_all(score(model_output) == expected)
58 |
59 |
class TestCategoricalScore():
    """Tests for CategoricalScore construction and invocation."""

    @pytest.mark.parametrize("indices,expected,expected_error", [
        (None, None, ValueError),
        (5, [5], NO_ERROR),
        ((1, ), [1], NO_ERROR),
        ([3], [3], NO_ERROR),
        ([], None, ValueError),
        ([None], None, ValueError),
        ([2, None], None, ValueError),
        ((0, 8, 3), [0, 8, 3], NO_ERROR),
        ([0, 8, 3], [0, 8, 3], NO_ERROR),
    ])
    def test__init__(self, indices, expected, expected_error):
        # Scalars and sequences are normalized to lists; None anywhere is rejected.
        with assert_raises(expected_error):
            assert CategoricalScore(indices).indices == expected

    @pytest.mark.parametrize("indices,output_shape,expected_error", [
        (2, (1, ), ValueError),
        (2, (1, 2), ValueError),
        (2, (1, 4, 1), ValueError),
        (2, (1, 4, 3), NO_ERROR),
        (2, (2, 4, 3), NO_ERROR),
        (2, (8, 32, 32, 3), NO_ERROR),
    ])
    def test__call__(self, indices, output_shape, expected_error):
        # The last axis must be large enough to contain the requested index.
        score = CategoricalScore(indices)
        model_output = tf.constant(dummy_sample(output_shape), tf.float32)
        with assert_raises(expected_error):
            assert score(model_output).shape == output_shape[0:1]
91 |
--------------------------------------------------------------------------------
/tests/tf_keras_vis/utils/utils_test.py:
--------------------------------------------------------------------------------
1 | import os
2 |
3 | import pytest
4 | import tensorflow as tf
5 | from tf_keras_vis import keras
6 | from packaging.version import parse as version
7 |
8 | from tf_keras_vis.utils import MAX_STEPS, find_layer, get_num_of_steps_allowed, num_of_gpus
9 |
10 |
class TestUtils():
    """Tests for tf_keras_vis.utils helper functions."""

    @pytest.mark.parametrize("env,steps,expected", [
        (None, -1, -1),
        (None, 0, 0),
        (None, 1, 1),
        (None, 2, 2),
        (None, 100, 100),
        (1, -1, -1),
        (1, 0, 0),
        (1, 1, 1),
        (1, 2, 1),
        (1, 100, 1),
        (2, -1, -1),
        (2, 0, 0),
        (2, 1, 1),
        (2, 2, 2),
        (2, 100, 2),
    ])
    def test_get_num_of_steps_allowed(self, env, steps, expected):
        """Positive step counts are capped by the MAX_STEPS env var when it is set."""
        _env = os.environ.get(MAX_STEPS)
        try:
            if env is None:
                os.environ.pop(MAX_STEPS, None)
            else:
                os.environ[MAX_STEPS] = str(env)
            assert get_num_of_steps_allowed(steps) == expected
        finally:
            # BUGFIX: previously the variable was only restored when it had been set
            # before the test, so when it had NOT been set the test's value leaked
            # into the environment of subsequent tests. Restore both states.
            if _env is None:
                os.environ.pop(MAX_STEPS, None)
            else:
                os.environ[MAX_STEPS] = _env

    def _patch_device_listing(self, monkeypatch, physical_devices, logical_devices):
        # Stub TF's device-listing APIs (which moved from tf.config.experimental
        # to tf.config in TF 2.1) with canned results.
        def list_physical_devices(name):
            return physical_devices

        def list_logical_devices(name):
            return logical_devices

        if version(tf.version.VERSION) < version("2.1.0"):
            config = tf.config.experimental
        else:
            config = tf.config
        monkeypatch.setattr(config, "list_physical_devices", list_physical_devices)
        monkeypatch.setattr(config, "list_logical_devices", list_logical_devices)

    def test_num_of_gpus_if_no_gpus(self, monkeypatch):
        """num_of_gpus must report (0, 0) when TF lists no GPU devices."""
        self._patch_device_listing(monkeypatch, None, None)
        a, b, = num_of_gpus()
        assert a == 0
        assert b == 0

    def test_num_of_gpus(self, monkeypatch):
        """num_of_gpus must count physical and logical GPU devices separately."""
        self._patch_device_listing(monkeypatch, ['dummy-a', 'dummy-b'],
                                   ['a1', 'a2', 'b1', 'b2'])
        a, b, = num_of_gpus()
        assert a == 2
        assert b == 4

    @pytest.mark.parametrize("offset_of_child_layer", [
        False,
        True,
    ])
    # @pytest.mark.skipif(version(tf.version.VERSION) >= version("2.16.0rc0"),
    #                     reason="https://github.com/tensorflow/tensorflow/issues/64393")  # FIXME
    def test_find_layer(self, offset_of_child_layer, conv_model):
        """find_layer must locate a layer nested inside a child model, with or without offset."""
        model = keras.Sequential([
            keras.layers.Conv2D(3, 3, padding='same', input_shape=(8, 8, 3)),
            conv_model,
            keras.layers.Dense(1),
        ])
        offset = conv_model.layers[-1] if offset_of_child_layer else None
        actual = find_layer(model, lambda _l: _l.name == 'conv_1', offset=offset)
        assert conv_model.get_layer(name='conv_1') == actual
97 |
--------------------------------------------------------------------------------
/tf_keras_vis/__init__.py:
--------------------------------------------------------------------------------
try:
    # Prefer the stdlib importlib.metadata (Python 3.8+); fall back to the
    # importlib_metadata backport on older interpreters.
    from importlib.metadata import version as _version
except ImportError:  # pragma: no cover
    from importlib_metadata import version as _version

# Read the package version from installed metadata instead of hard-coding it.
__version__ = _version("tf-keras-vis")

import os

# Select the TensorFlow backend for Keras unless the user already chose one.
# NOTE: this must run before `keras` is imported below.
if "KERAS_BACKEND" not in os.environ:
    os.environ["KERAS_BACKEND"] = "tensorflow"

import tensorflow as tf
from packaging.version import parse as version

# TF < 2.16 bundles Keras as tf.keras; newer TF versions use standalone `keras`.
# The rest of the package imports `keras` from this module.
if version(tf.version.VERSION) < version('2.16.0'):
    import tensorflow.keras as keras
else:
    import keras

from abc import ABC, abstractmethod
from typing import Union

import numpy as np

from .utils import listify
27 |
28 |
class ModelVisualization(ABC):
    """Visualization class that analyze the model for debugging.
    """
    def __init__(self, model, model_modifier=None, clone=True) -> None:
        """
        Args:
            model: A `keras.Model` instance. When `model_modifier` is NOT None, this model will
                be cloned with `keras.models.clone_model` function and then will be modified by
                `model_modifier` according to needs.
            model_modifier: A :obj:`tf_keras_vis.utils.model_modifiers.ModelModifier` instance,
                a function or a list of them. We recommend to apply
                `tf_keras_vis.utils.model_modifiers.ReplaceToLinear` to all visualizations (except
                :obj:`tf_keras_vis.scorecam.Scorecam`) when the model output is softmax. Defaults
                to None.
            clone: A bool that indicates whether or not it clones the `model`. When False, the
                model won't be cloned. Note that, although when True, the model won't be clone if
                `model_modifier` is None. Defaults to True.
        """
        self.model = model
        model_modifiers = listify(model_modifier)
        if len(model_modifiers) > 0:
            if clone:
                # Clone (and copy weights) so the caller's model is left untouched.
                self.model = keras.models.clone_model(self.model)
                self.model.set_weights(model.get_weights())
            for modifier in model_modifiers:
                # A modifier may return a new model, or mutate `self.model` in
                # place and return None.
                new_model = modifier(self.model)
                if new_model is not None:
                    self.model = new_model

    @abstractmethod
    def __call__(self) -> Union[np.ndarray, list]:
        """Analyze the model.

        Raises:
            NotImplementedError: This method must be overwritten.

        Returns:
            Visualized image(s) or something(s).
        """
        raise NotImplementedError()

    def _get_scores_for_multiple_outputs(self, score):
        """Listify `score` and validate it matches the model's outputs.

        Raises:
            ValueError: When a score is not callable or the count doesn't match
                the number of model outputs.
        """
        scores = listify(score)
        # Loop variable renamed: previously it shadowed the `score` argument.
        for score_function in scores:
            if not callable(score_function):
                raise ValueError(f"Score object must be callable! [{score_function}]")
        if len(scores) != len(self.model.outputs):
            raise ValueError(f"The model has {len(self.model.outputs)} outputs, "
                             f"but the number of score-functions you passed is {len(scores)}.")
        return scores

    def _get_seed_inputs_for_multiple_inputs(self, seed_input):
        """Listify `seed_input`, convert to tensors, add batch dims, and validate ranks.

        Raises:
            ValueError: When the number of seed inputs or a seed input's rank
                doesn't match the model's inputs.
        """
        seed_inputs = listify(seed_input)
        if len(seed_inputs) != len(self.model.inputs):
            raise ValueError(
                f"The model has {len(self.model.inputs)} inputs, "
                f"but the number of seed-inputs tensors you passed is {len(seed_inputs)}.")
        seed_inputs = (x if tf.is_tensor(x) else tf.constant(x) for x in seed_inputs)
        # Prepend a batch dimension when a seed input was passed without one.
        seed_inputs = (tf.expand_dims(x, axis=0) if len(x.shape) == len(tensor.shape[1:]) else x
                       for x, tensor in zip(seed_inputs, self.model.inputs))
        seed_inputs = list(seed_inputs)
        for i, (x, tensor) in enumerate(zip(seed_inputs, self.model.inputs)):
            if len(x.shape) != len(tensor.shape):
                raise ValueError(
                    f"seed_input's shape is invalid. model-input index: {i},"
                    f" model-input shape: {tensor.shape}, seed_input shape: {x.shape}.")
        return seed_inputs

    def _calculate_scores(self, outputs, score_functions):
        """Apply each score function to its model output and reduce to per-sample means."""
        score_values = (func(output) for output, func in zip(outputs, score_functions))
        score_values = [self._mean_score_value(value) for value in score_values]
        return score_values

    def _mean_score_value(self, score):
        """Convert `score` to a tensor and average over all non-batch axes."""
        if not tf.is_tensor(score):
            # isinstance replaces the non-idiomatic `type(score) in [list, tuple]`.
            if isinstance(score, (list, tuple)):
                if len(score) > 0 and tf.is_tensor(score[0]):
                    # A list/tuple of tensors is stacked along a new batch axis.
                    score = tf.stack(score, axis=0)
                else:
                    score = tf.constant(score)
            else:
                score = tf.constant(score)
        score = tf.math.reduce_mean(score, axis=tuple(range(score.ndim))[1:])
        return score
114 |
--------------------------------------------------------------------------------
/tf_keras_vis/activation_maximization/__init__.py:
--------------------------------------------------------------------------------
1 | import warnings
2 | from collections import defaultdict
3 | from typing import Union
4 |
5 | import numpy as np
6 | import tensorflow as tf
7 |
8 | from .. import ModelVisualization, keras
9 | from ..utils import get_input_names, get_num_of_steps_allowed, is_mixed_precision, listify
10 | from ..utils.regularizers import LegacyRegularizer
11 | from .callbacks import managed_callbacks
12 | from .input_modifiers import Jitter, Rotate2D
13 | from .regularizers import Norm, TotalVariation2D
14 |
15 |
16 | class ActivationMaximization(ModelVisualization):
17 | """ActivationMaximization.
18 | """
    def __call__(
        self,
        score,
        seed_input=None,
        input_range=(0, 255),
        input_modifiers=[Jitter(jitter=4), Rotate2D(degree=1)],
        regularizers=[TotalVariation2D(weight=1.0),
                      Norm(weight=0.3, p=1)],
        steps=200,
        optimizer=None,  # When None, the default is tf.optimizers.RMSprop(1.0, 0.999)
        gradient_modifier=None,
        callbacks=None,
        training=False,
        unconnected_gradients=tf.UnconnectedGradients.NONE,
        activation_modifiers=None) -> Union[np.ndarray, list]:
        """Generate the model inputs that maximize the output of the given `score` functions.

        By default, this method is optimized to visualize `keras.application.VGG16` model.
        So if you want to visualize other models, you have to tune the parameters of this method.

        Args:
            score: A :obj:`tf_keras_vis.utils.scores.Score` instance, function or a list of them.
                For example of the Score instance to specify visualizing target::

                    scores = CategoricalScore([1, 294, 413])

                The code above means the same with the one below::

                    score = lambda outputs: (outputs[0][1], outputs[1][294], outputs[2][413])

                When the model has multiple outputs, you MUST pass a list of
                Score instances or functions. For example::

                    from tf_keras_vis.utils.scores import CategoricalScore, InactiveScore
                    score = [
                        CategoricalScore([1, 23]),  # For 1st model output
                        InactiveScore(),  # For 2nd model output
                        ...
                    ]

            seed_input: A tf.Tensor, :obj:`numpy.ndarray` or a list of them to input in the model.
                When `None`, the seed_input value will be automatically generated from a uniform
                distribution. If you want to visualize multiple images (i.e., batch_size > 1),
                you have to pass a seed_input object. For example::

                    seed_input = tf.random.uniform((samples, ..., channels), low, high)

                Furthermore, if the model has multiple inputs and you want multiple images,
                you have to do as follows::

                    seed_input = [
                        tf.random.uniform((samples, ..., channels), low, high),  # 1st input
                        tf.random.uniform((samples, ..., channels), low, high),  # 2nd input
                        ...
                    ]

                Defaults to None.
            input_range: A tuple of two int values or a list of them. The tuple indicates
                `(min, max)` values that is range of the result of this method. If the model has
                multiple inputs, you can use different input ranges for each model input by
                passing list of tuples. For example::

                    input_range = [
                        (0, 255),  # The 1st model input's range
                        (-1.0, 1.0),  # The 2nd model input's range
                        ...
                    ]

                When `None` or `(None, None)` tuple, the input tensor
                (i.e., the result of this method) will be not applied any limitation.
                Defaults to (0, 255).
            input_modifiers: A :obj:`tf_keras_vis.activation_maximization.input_modifiers.
                InputModifier` instance, a function, a list of them when the model has a single
                input. For example::

                    input_modifiers = [Jitter(jitter=8), Rotate(degree=3), Scale(high=1.1)]

                When the model has multiple inputs, you have to pass a dictionary
                that contains the lists of input modifiers for each model inputs::

                    input_modifiers = {
                        'input_1': [Jitter(jitter=8), Rotate(degree=3), Scale(high=1.1)],
                        'input_2': [Jitter(jitter=8)],
                        ...
                    }

                Or you could also pass a list of lists of input modifiers for each model inputs as
                follows::

                    input_modifiers = [
                        [Jitter(jitter=8), Rotate(degree=3), Scale(high=1.1)],  # For 1st input
                        [Jitter(jitter=8)],  # For 2nd input
                        ...
                    ]

                Defaults to [Jitter(jitter=4), Rotate(degree=1)].
            regularizers: A :obj:`tf_keras_vis.utils.regularizers.Regularizer` instance,
                a list of regularizers or a list that has lists of regularizers for each input.
                For example::

                    regularizers = [TotalVariation2D(weight=1.0), Norm(weight=0.3, p=1)]

                > Please notice that `regularizes` does NOT accept function object like
                `input_modifiers`.

                When the model has multiple inputs, you have to pass a dictionary
                that contains the lists of regularizers for each model inputs::

                    regularizers = {
                        'input_1': [TotalVariation2D(weight=1.0), Norm(weight=0.3, p=1)],
                        'input_2': [Norm(weight=1.0, p=2)],
                        ...
                    }

                Or you could also pass a list of lists of regularizers for each model inputs as
                follows::

                    regularizers = [
                        [TotalVariation2D(weight=1.0), Norm(weight=0.3, p=1)],  # For 1st input
                        [Norm(weight=1.0, p=2)],  # For 2nt input
                        ...
                    ]

                Defaults to [TotalVariation2D(weight=1.0), Norm(weight=0.3, p=1)].
            steps: The number of gradient descent iterations. Defaults to 200.
            optimizer: A `tf.optimizers.Optimizer` instance. When None, it will be automatically
                created. Defaults to `tf.optimizers.RMSprop(learning_rate=1.0, rho=0.999)`.
            gradient_modifier: A function to modify gradients.
                Defaults to None.
            callbacks: A :obj:`tf_keras_vis.activation_maximization.callbacks.Callback` instance
                or a list of them. Defaults to None.
            training: A bool that indicates whether the model's training-mode on or off.
                Defaults to False.
            unconnected_gradients: Specifies the gradient value returned when the given input
                tensors are unconnected.
                Defaults to tf.UnconnectedGradients.NONE.
            activation_modifiers: A function or a dictionary of them (the key is input layer's
                name). When the model has multiple inputs, you have to pass a dictionary::

                    activation_modifiers = {
                        'input_1': lambda x: ...,
                        'input_2': lambda x: ...,
                        ...
                    }

                This functions will be executed before returning the result. Defaults to None.
        Returns:
            An :obj:`numpy.ndarray` when the model has a single input.
            When the model has multiple inputs, a list of :obj:`numpy.ndarray`.

        Raises:
            :obj:`ValueError`: When there is any invalid arguments.
        """
        # Snapshot the call arguments for managed_callbacks. This must run first,
        # before any new local variables are created below, because it captures
        # locals() at this point.
        arguments = dict(
            (k, v) for k, v in locals().items() if k != 'self' and not k.startswith('_'))

        # Check model (whether it runs under a mixed-precision policy)
        mixed_precision_model = is_mixed_precision(self.model)

        # optimizer (wrapped with a loss-scale optimizer when mixed precision)
        optimizer = self._get_optimizer(optimizer, mixed_precision_model)

        # scores (one callable per model output)
        scores = self._get_scores_for_multiple_outputs(score)

        # Get initial seed-inputs
        input_ranges = self._get_input_ranges(input_range)
        seed_inputs = self._get_seed_inputs(seed_input, input_ranges)

        # input_modifiers (normalized to {input_layer_name: [modifier, ...]})
        input_modifiers = self._get_input_modifiers(input_modifiers)

        # regularizers
        regularizers = self._get_regularizers(regularizers)

        # activation_modifiers
        activation_modifiers = self._get_activation_modifiers(activation_modifiers)

        with managed_callbacks(**arguments) as callbacks:
            input_values = seed_inputs
            input_variables = [tf.Variable(X) for X in input_values]
            for step in range(get_num_of_steps_allowed(steps)):
                # Modify input values (e.g., jitter/rotate) before each step
                for i, name in enumerate(get_input_names(self.model)):
                    for modifier in input_modifiers[name]:
                        input_values[i] = modifier(input_values[i])

                # Copy input values to variables
                for V, X in zip(input_variables, input_values):
                    V.assign(X)

                with tf.GradientTape(watch_accessed_variables=False) as tape:
                    tape.watch(input_variables)
                    input_values = [V.value() for V in input_variables]
                    # Calculate scores
                    outputs = self.model(input_values, training=training)
                    outputs = listify(outputs)
                    score_values = self._calculate_scores(outputs, scores)
                    # Calculate regularization
                    regularization_values, regularized_score_values = \
                        self._calculate_regularization(regularizers, input_values, score_values)
                    # Scale loss to avoid gradient underflow under mixed precision.
                    # Keras 2's LossScaleOptimizer exposes get_scaled_loss();
                    # Keras 3's exposes scale_loss() instead.
                    if mixed_precision_model:
                        if 'get_scaled_loss' in dir(optimizer):
                            regularized_score_values = [
                                optimizer.get_scaled_loss(score_value)
                                for score_value in regularized_score_values
                            ]
                        else:
                            regularized_score_values = [
                                optimizer.scale_loss(score_value)
                                for score_value in regularized_score_values
                            ]
                # Calculate gradients and Update variables
                grads = tape.gradient(regularized_score_values,
                                      input_variables,
                                      unconnected_gradients=unconnected_gradients)
                grads = listify(grads)
                # Undo the loss scaling on the gradients (Keras 2 API only).
                if mixed_precision_model and 'get_unscaled_gradients' in dir(optimizer):
                    grads = optimizer.get_unscaled_gradients(grads)
                if gradient_modifier is not None:
                    grads = [gradient_modifier(g) for g in grads]
                optimizer.apply_gradients(zip(grads, input_variables))

                # Update input values
                input_values = [V.value() for V in input_variables]

                # Calculate clipped values (restricted to input_ranges and
                # post-processed by activation_modifiers)
                clipped_value = self._clip_and_modify(input_values, input_ranges,
                                                      activation_modifiers)

                # Execute callbacks
                for callback in callbacks:
                    callback(step,
                             clipped_value,
                             grads,
                             score_values,
                             outputs,
                             regularizations=regularization_values,
                             overall_score=regularized_score_values)

            clipped_value = [x if isinstance(x, np.ndarray) else x.numpy() for x in clipped_value]
            # Unwrap the list when the model has a single input and the caller
            # didn't pass seed inputs as a list.
            if len(self.model.inputs) == 1 and (seed_input is None
                                                or not isinstance(seed_input, list)):
                clipped_value = clipped_value[0]
            return clipped_value
265 |
266 | def _calculate_regularization(self, regularizers, seed_inputs, score_values):
267 | if isinstance(regularizers, list):
268 | regularization_values = [
269 | (regularizer.name, regularizer(seed_inputs)) for regularizer in regularizers
270 | ]
271 | else:
272 | regularization_values = (
273 | [(name, regularizer(seed_inputs[i]))
274 | for name, regularizer in regularizers[input_layer_name].items()]
275 | for i, input_layer_name in enumerate(get_input_names(self.model)))
276 | regularization_values = sum(regularization_values, [])
277 | regularized_score_values = [-1.0 * score_value for score_value in score_values]
278 | regularized_score_values += [value for _, value in regularization_values]
279 | return regularization_values, regularized_score_values
280 |
281 | def _get_optimizer(self, optimizer, mixed_precision_model):
282 | if optimizer is None:
283 | optimizer = tf.optimizers.RMSprop(learning_rate=1.0, rho=0.999)
284 | if mixed_precision_model:
285 | try:
286 | # Wrap optimizer
287 | optimizer = keras.mixed_precision.LossScaleOptimizer(optimizer)
288 | except ValueError as e:
289 | raise ValueError(
290 | "The same `optimizer` instance should be NOT used twice or more."
291 | " You can be able to avoid this error by creating new optimizer instance"
292 | " each calling __call__().") from e
293 | return optimizer
294 |
295 | def _get_input_ranges(self, input_range):
296 | input_ranges = listify(input_range,
297 | return_empty_list_if_none=False,
298 | convert_tuple_to_list=False)
299 | if len(input_ranges) == 1 and len(self.model.inputs) > 1:
300 | input_ranges = input_ranges * len(self.model.inputs)
301 | input_ranges = [(None, None) if r is None else r for r in input_ranges]
302 | for i, r in enumerate(input_ranges):
303 | if len(r) != 2:
304 | raise ValueError(
305 | "The length of input range tuple must be 2 (Or it is just `None`, not tuple), "
306 | f"but you passed {r} as `input_ranges[{i}]`.")
307 | a, b = r
308 | if None not in r and type(a) is not type(b):
309 | raise TypeError(
310 | "The type of low and high values in the input range must be the same, "
311 | f"but you passed {r} are {type(a)} and {type(b)} ")
312 | return input_ranges
313 |
    def _get_seed_inputs(self, seed_inputs, input_ranges):
        """Build the initial values to optimize, one tensor per model input.

        When `seed_inputs` is empty, random values are drawn uniformly from
        `input_ranges`; an unbounded side of a range is replaced with a value
        derived from the bounded side, or with (0., 1.) when both are None.
        The resulting tensors are cast to each input's dtype and given a batch
        dimension when missing.
        """
        # Prepare seed_inputs
        seed_inputs = listify(seed_inputs)
        if len(seed_inputs) == 0:
            # Replace None to 0.0-1.0 or any properly value
            input_ranges = ((0., 1.) if low is None and high is None else (low, high)
                            for low, high in input_ranges)
            input_ranges = ((high - np.abs(high / 2.0), high) if low is None else (low, high)
                            for low, high in input_ranges)
            input_ranges = ((low, low + np.abs(low * 2.0)) if high is None else (low, high)
                            for low, high in input_ranges)
            # Prepare input_shape
            input_shapes = (input_tensor.shape[1:] for input_tensor in self.model.inputs)
            # Generate seed-inputs
            seed_inputs = (tf.random.uniform(shape, low, high)
                           for (low, high), shape in zip(input_ranges, input_shapes))
        # Convert numpy to tf-tensor
        seed_inputs = (tf.cast(tf.constant(X), dtype=input_tensor.dtype)
                       for X, input_tensor in zip(seed_inputs, self.model.inputs))
        # Do expand_dims when an seed_input doesn't have the dim for samples
        seed_inputs = (tf.expand_dims(X, axis=0) if len(X.shape) < len(input_tensor.shape) else X
                       for X, input_tensor in zip(seed_inputs, self.model.inputs))
        seed_inputs = list(seed_inputs)
        if len(seed_inputs) != len(self.model.inputs):
            raise ValueError(
                "The lengths of seed_inputs and model's inputs don't match."
                f" seed_inputs: {len(seed_inputs)}, model's inputs: {len(self.model.inputs)}")
        return seed_inputs
342 |
    def _get_input_modifiers(self, input_modifier):
        """Normalize `input_modifier` into a defaultdict of input-layer name -> list of modifiers."""
        return self._get_callables_to_apply_to_each_input(input_modifier, "input modifiers")
345 |
    def _get_regularizers(self, regularizer):
        """Normalize `regularizer` for use during optimization.

        Returns either a list of legacy regularizers (deprecated API, with a
        DeprecationWarning) or a defaultdict mapping each input-layer name to
        {display name: regularizer}.
        """
        legacy_regularizers = self._get_legacy_regularizers(regularizer)
        if legacy_regularizers is not None:
            warnings.warn(
                "`tf_keras_vis.utils.regularizers.Regularizer` is deprecated. "
                "Use tf_keras_vis.activation_maximization.regularizers.Regularizer instead.",
                DeprecationWarning)
            return legacy_regularizers
        else:
            regularizers = self._get_callables_to_apply_to_each_input(regularizer, "regularizers")
            # Assign every regularizer a unique display name per input layer.
            regularizers = ((input_layer_name,
                             self._define_regularizer_names(regularizer_list, input_layer_name))
                            for input_layer_name, regularizer_list in regularizers.items())
            return defaultdict(dict, regularizers)
360 |
361 | def _define_regularizer_names(self, regularizers, input_layer_name):
362 | regularizers = (
363 | (f"regularizer-{i}", regularizer) for i, regularizer in enumerate(regularizers))
364 | regularizers = (((regularizer.name if hasattr(regularizer, 'name') else name), regularizer)
365 | for name, regularizer in regularizers)
366 | if len(get_input_names(self.model)) > 1:
367 | regularizers = (
368 | (f"{name}({input_layer_name})", regularizer) for name, regularizer in regularizers)
369 | return defaultdict(list, regularizers)
370 |
    def _get_legacy_regularizers(self, regularizer):
        """Detect legacy regularizer instances within `regularizer`.

        Returns:
            A list of legacy regularizers when `regularizer` is a legacy instance
            or a flat list consisting only of legacy instances, otherwise None.

        Raises:
            ValueError: When legacy regularizers appear inside nested (per-input)
                lists, or when legacy and new-style regularizers are mixed.
        """
        if isinstance(regularizer, dict):
            # Dict values may themselves be lists; normalize to a list of lists.
            _regularizer = [listify(r) for r in regularizer.values()]
        else:
            _regularizer = regularizer
        if isinstance(_regularizer, (tuple, list)):
            if any(isinstance(r, (tuple, list)) for r in _regularizer):
                # Nested lists mean per-input grouping, which the legacy API
                # does not support; reject any legacy instance found inside.
                has_legacy = ((isinstance(r, LegacyRegularizer)
                               for r in listify(_regularizers))
                              for _regularizers in _regularizer)
                has_legacy = (any(_legacy) for _legacy in has_legacy)
                if any(has_legacy):
                    raise ValueError(
                        "Legacy Regularizer instances (that inherits "
                        "`tf_keras_vis.utils.regularizers.LegacyRegularizer`) must be "
                        "passed to ActivationMaximization#__call__() "
                        "in the form of a instance or a list of instances. "
                        "Please modify the `regularizer` argument or "
                        "change the inheritance source to "
                        "`tf_keras_vis.activation_maximization.regularizers.Regularizer`"
                        f" regularizer: {regularizer}")
            else:
                # Flat list: all entries must agree on legacy vs new-style.
                has_legacy = [isinstance(r, LegacyRegularizer) for r in _regularizer]
                if all(has_legacy):
                    return _regularizer
                if any(has_legacy):
                    raise ValueError(
                        "the regularizer instance (that inherits "
                        "`tf_keras_vis.activation_maximization.regularizers.Regularizer`) "
                        "and legacy regularizer (that inherits "
                        "`tf_keras_vis.utils.regularizers.LegacyRegularizer` can NOT be mixed."
                        f" regularizer: {regularizer}")
        elif isinstance(_regularizer, LegacyRegularizer):
            return listify(_regularizer)
        return None
406 |
    def _get_callables_to_apply_to_each_input(self, callables, object_name):
        """Normalize `callables` into a defaultdict of input-layer name -> list.

        Accepts a single callable, a flat list (assigned to the 1st model
        input), a list of lists (one list per model input, in order), or a dict
        keyed by input-layer name. `object_name` is used only in error messages.

        Raises:
            ValueError: When a dict key is not a model input name, or when more
                per-input lists than model inputs are given.
        """
        keys = get_input_names(self.model)
        if isinstance(callables, dict):
            non_existent_keys = set(callables.keys()) - set(keys)
            if len(non_existent_keys) > 0:
                raise ValueError(
                    f"The model inputs are `{keys}`. However the {object_name} you passed have "
                    f"non existent input name: `{non_existent_keys}`")
            callables = ((k, listify(v)) for k, v in callables.items())
        else:
            callables = listify(callables)
            # A flat list (no nested lists) is treated as the callables for the
            # first model input only.
            if len(callables) == 0 or len(list(filter(lambda x: type(x) is list, callables))) == 0:
                callables = [callables]
            if len(callables) <= len(keys):
                callables = (listify(value_each_input) for value_each_input in callables)
                callables = zip(keys, callables)
            else:
                raise ValueError(f"The number of model's inputs are {len(keys)},"
                                 f" but you define {len(callables)} {object_name}.")
        return defaultdict(list, callables)
427 |
428 | def _get_activation_modifiers(self, activation_modifiers):
429 | if isinstance(activation_modifiers, dict):
430 | non_existent_names = set(activation_modifiers.keys()) - set(get_input_names(
431 | self.model))
432 | if len(non_existent_names) > 0:
433 | raise ValueError(f"The model inputs are `{get_input_names(self.model)}`. "
434 | "However the activation modifiers you passed have "
435 | f"non existent input names: `{non_existent_names}`")
436 | else:
437 | activation_modifiers = {get_input_names(self.model)[0]: activation_modifiers}
438 | return defaultdict(lambda: None, activation_modifiers)
439 |
    def _clip_and_modify(self, seed_inputs, input_ranges, activation_modifiers):
        """Clip the optimized values into their input ranges, cast them back to
        the model input dtypes, and apply per-input activation modifiers if any.
        """
        # Unbounded range sides fall back to the dtype's representable min/max.
        input_ranges = [(tf.as_dtype(input_tensor.dtype).min if low is None else low,
                         tf.as_dtype(input_tensor.dtype).max if high is None else high)
                        for (low, high), input_tensor in zip(input_ranges, self.model.inputs)]
        # `keras.backend.clip` only exists on some Keras versions; fall back to
        # `keras.ops.clip` otherwise.
        if 'clip' in dir(keras.backend):
            clipped_values = (keras.backend.clip(X, low, high)
                              for X, (low, high) in zip(seed_inputs, input_ranges))
        else:
            clipped_values = (
                keras.ops.clip(X, low, high) for X, (low, high) in zip(seed_inputs, input_ranges))
        clipped_values = (tf.cast(X, input_tensor.dtype)
                          for X, input_tensor in zip(clipped_values, self.model.inputs))
        if activation_modifiers is not None:
            # Pair each clipped value with the modifier registered for its input
            # name; entries without a modifier pass through unchanged.
            clipped_values = (
                (activation_modifiers[name], seed_input)
                for name, seed_input in zip(get_input_names(self.model), clipped_values))
            clipped_values = (seed_input if modifier is None else modifier(seed_input)
                              for modifier, seed_input in clipped_values)
        return list(clipped_values)
459 |
--------------------------------------------------------------------------------
/tf_keras_vis/activation_maximization/callbacks.py:
--------------------------------------------------------------------------------
1 | import warnings
2 | from abc import ABC
3 | from contextlib import contextmanager
4 | from inspect import signature
5 |
6 | import imageio
7 | import numpy as np
8 | import tensorflow as tf
9 | from deprecated import deprecated
10 | from PIL import Image, ImageDraw, ImageFont
11 |
12 | from .. import keras
13 | from ..utils import listify
14 |
15 |
class Callback(ABC):
    """Base class for objects that receive notifications during the
    optimization process of ActivationMaximization.
    """
    def on_begin(self, **kwargs) -> None:
        """Invoked once before the optimization loop starts.

        Args:
            kwargs: The parameters that was passed to
                :obj:`tf_keras_vis.activation_maximization.ActivationMaximization.__call__()`.
        """
        pass

    def __call__(self, i, values, grads, scores, model_outputs, **kwargs) -> None:
        """Invoked once per step, after the input values have been updated by
        gradient descent in
        :obj:`tf_keras_vis.activation_maximization.ActivationMaximization.__call__()`.

        Args:
            i: The current number of optimizer iteration.
            values: A list of tf.Tensor that indicates current `values`.
            grads: A list of tf.Tensor that indicates the gradients with respect to model input.
            scores: A list of tf.Tensor that indicates score values with respect to each the model
                outputs.
            model_outputs: A list of tf.Tensor that indicates the model outputs.
            regularizations: A list of tuples of (str, tf.Tensor) that indicates the regularizer
                values.
            overall_score: A list of tf.Tensor that indicates the overall scores that includes the
                scores and regularization values.
        """
        pass

    def on_end(self) -> None:
        """Invoked once after the optimization loop finishes."""
        pass
50 |
51 |
@deprecated(version='0.7.0', reason="Use `Progress` instead.")
class PrintLogger(Callback):
    """Deprecated callback that prints score and regularization values during
    optimization.

    Warnings:
        This class is now **deprecated**!
        Please use :obj:`tf_keras_vis.activation_maximization.callbacks.Progress` instead.
    """
    def __init__(self, interval=10):
        """
        Args:
            interval: Print a log line once every `interval` steps. Defaults to 10.
        """
        self.interval = interval

    def __call__(self, i, values, grads, scores, model_outputs, regularizations, **kwargs):
        step = i + 1
        if step % self.interval == 0:
            tf.print('Steps: {:03d}\tScores: {},\tRegularization: {}'.format(
                step, self._tolist(scores), self._tolist(regularizations)))

    def _tolist(self, ary):
        # Recursively convert tensors and numpy arrays into plain Python values.
        if isinstance(ary, (list, np.ndarray, np.generic)):
            return [self._tolist(e) for e in ary]
        if isinstance(ary, tuple):
            return tuple(self._tolist(e) for e in ary)
        if tf.is_tensor(ary):
            return ary.numpy().tolist()
        return ary
83 |
84 |
class GifGenerator2D(Callback):
    """Callback to construct a gif of optimized image.
    """
    def __init__(self, path) -> None:
        """
        Args:
            path: The file path to save gif. When the model has multiple
                inputs, an index is inserted before the `.gif` extension so
                that each input gets its own file.
        """
        self.path = path

    def on_begin(self, **kwargs) -> None:
        # One frame list per model input; lazily sized on the first step.
        self.data = None

    def __call__(self, i, values, *args, **kwargs) -> None:
        if self.data is None:
            self.data = [[] for _ in range(len(values))]
        for n, value in enumerate(values):
            value = value[0].numpy() if tf.is_tensor(value[0]) else value[0]
            img = Image.fromarray(value.astype(np.uint8))  # 1st image in the batch
            ImageDraw.Draw(img).text((10, 10), f"Step {i + 1}", font=ImageFont.load_default())
            self.data[n].append(np.asarray(img))

    def on_end(self) -> None:
        path = self.path if self.path.endswith(".gif") else f"{self.path}.gif"
        for i in range(len(self.data)):
            # Bug fix: previously every input was written to the SAME file, so
            # each iteration overwrote the previous one and only the gif for
            # the last input survived. Write one file per input instead.
            output_path = path if len(self.data) == 1 else f"{path[:-len('.gif')]}.{i}.gif"
            with imageio.get_writer(output_path, mode='I', loop=0) as writer:
                for data in self.data[i]:
                    writer.append_data(data)
113 |
114 |
class Progress(Callback):
    """Callback that renders a Keras progress bar with the score and
    regularization values during optimization.
    """
    def on_begin(self, steps=None, **kwargs) -> None:
        # `steps` is the total iteration count forwarded from __call__ kwargs.
        self.progbar = keras.utils.Progbar(steps)

    def __call__(self, i, values, grads, scores, model_outputs, regularizations, **kwargs) -> None:
        if len(scores) > 1:
            scores = [(f"Score[{j}]", score_value) for j, score_value in enumerate(scores)]
        else:
            scores = [("Score", score_value) for score_value in scores]
        # Bug fix: the regularization values were previously appended to
        # `scores` AND concatenated again in the update() call, so every
        # regularizer appeared twice in the progress bar. Append them once.
        self.progbar.update(i + 1, scores + regularizations)
128 |
129 |
@contextmanager
def managed_callbacks(callbacks=None, **kwargs):
    """Context manager that activates callbacks on entry and guarantees that
    their `on_end()` hooks run on exit, even when the body raises.
    """
    activated = []
    try:
        for callback in listify(callbacks):
            # Older callbacks declared `on_begin()` with no parameters; call
            # them without kwargs but warn about the deprecated signature.
            if len(signature(callback.on_begin).parameters) == 0:
                warnings.warn("`Callback#on_begin()` now must accept keyword arguments.",
                              DeprecationWarning)
                callback.on_begin()
            else:
                callback.on_begin(**kwargs)
            activated.append(callback)
        yield activated
        # Normal exit: finalize eagerly, removing entries so the finally block
        # below does not invoke on_end() a second time.
        while activated:
            activated.pop(0).on_end()
    finally:
        # Error exit: best-effort on_end() for callbacks not yet finalized.
        for callback in activated:
            try:
                callback.on_end()
            except Exception as e:
                tf.print("Exception args: ", e)
--------------------------------------------------------------------------------
/tf_keras_vis/activation_maximization/input_modifiers.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from typing import Union
3 |
4 | import numpy as np
5 | import tensorflow as tf
6 | from scipy.ndimage import rotate, zoom
7 |
8 |
class InputModifier(ABC):
    """Abstract base class for modifiers applied to the input value before each
    gradient descent step.
    """
    @abstractmethod
    def __call__(self, seed_input) -> Union[np.ndarray, tf.Tensor]:
        """Modify the input value before processing gradient descent.

        Args:
            seed_input: An :obj:`numpy.ndarray` or a tf.Tensor that indicates a value to input to
                model.

        Returns:
            An :obj:`numpy.ndarray` or a tf.Tensor; the modified value.

        Raises:
            NotImplementedError: This method must be overwritten.
        """
        raise NotImplementedError()
27 |
28 |
class Jitter(InputModifier):
    """An input modifier that introduces random jitter.
    Jitter has been shown to produce crisper activation maximization images.
    """
    def __init__(self, jitter=8) -> None:
        """
        Args:
            jitter: The amount of jitter to apply. Defaults to 8.
        """
        self.jitter = int(jitter)

    def __call__(self, seed_input) -> np.ndarray:
        ndim = len(seed_input.shape)
        if ndim < 3:
            raise ValueError("The dimensions of seed_input must be 3 or more "
                             f"(batch_size, ..., channels), but was {ndim}.")
        # Roll by a random offset along every axis except batch and channels.
        spatial_axes = tuple(range(1, ndim - 1))
        shifts = tuple(np.random.randint(-self.jitter, self.jitter, ndim - 2))
        return tf.roll(seed_input, shift=shifts, axis=spatial_axes)
49 |
50 |
class Rotate(InputModifier):
    """An input modifier that introduces random rotation.
    """
    def __init__(self, axes=(1, 2), degree=3.0) -> None:
        """
        Args:
            axes: The two axes that define the plane of rotation.
                Defaults to (1, 2).
            degree: The amount of rotation to apply. Defaults to 3.0.

        Raises:
            ValueError: When axes is not a list or tuple of length two.
            TypeError: When the elements of axes are not ints.
        """
        # isinstance (rather than exact type check) also accepts list/tuple
        # subclasses; the accepted inputs are otherwise unchanged.
        if not isinstance(axes, (list, tuple)) or len(axes) != 2:
            raise ValueError(f"`axes` must be a tuple of two int values, but it was {axes}.")
        if not isinstance(axes[0], int) or not isinstance(axes[1], int):
            raise TypeError(f"`axes` must be consist of ints, but it was {axes}.")
        self.axes = axes
        self.degree = float(degree)
        self.random_generator = np.random.default_rng()

    def __call__(self, seed_input) -> np.ndarray:
        ndim = len(seed_input.shape)
        if ndim < 4:
            raise ValueError("The dimensions of seed_input must be 4 or more "
                             f"(batch_size, ..., channels), but was {ndim}.")
        if tf.is_tensor(seed_input):
            seed_input = seed_input.numpy()
        # scipy's rotate works on float data; restore the dtype afterwards.
        _dtype = seed_input.dtype
        seed_input = seed_input.astype(np.float32)
        seed_input = rotate(seed_input,
                            self.random_generator.uniform(-self.degree, self.degree),
                            axes=self.axes,
                            reshape=False,
                            order=1,
                            mode='reflect',
                            prefilter=False)
        seed_input = seed_input.astype(_dtype)
        return seed_input
90 |
91 |
class Rotate2D(Rotate):
    """A convenience :obj:`Rotate` preset for 2D inputs, rotating within the
    height/width plane (axes 1 and 2).
    """
    def __init__(self, degree=3.0) -> None:
        """
        Args:
            degree: The amount of rotation to apply. Defaults to 3.0.
        """
        super().__init__(axes=(1, 2), degree=degree)
101 |
102 |
class Scale(InputModifier):
    """An input modifier that introduces random scaling.
    """
    def __init__(self, low=0.9, high=1.1) -> None:
        """
        Args:
            low (float, optional): Lower boundary of the zoom factor. Defaults to 0.9.
            high (float, optional): Higher boundary of the zoom factor. Defaults to 1.1.
        """
        self.low = low
        self.high = high
        self.random_generator = np.random.default_rng()

    def __call__(self, seed_input) -> np.ndarray:
        # Zoom the spatial axes by a random factor, then crop (zoom-in) or pad
        # (zoom-out) back to the original shape.
        ndim = len(seed_input.shape)
        if ndim < 3:
            raise ValueError("The dimensions of seed_input must be 3 or more "
                             f"(batch_size, ..., channels), but was {ndim}.")
        if tf.is_tensor(seed_input):
            seed_input = seed_input.numpy()
        shape = seed_input.shape
        # scipy's zoom works on float data; restore the dtype afterwards.
        _dtype = seed_input.dtype
        seed_input = seed_input.astype(np.float32)
        _factor = factor = self.random_generator.uniform(self.low, self.high)
        # Apply the factor only to spatial axes; batch and channels keep 1.
        factor *= np.ones(ndim - 2)
        factor = (1,) + tuple(factor) + (1,)
        seed_input = zoom(seed_input, factor, order=1, mode='reflect', prefilter=False)
        if _factor > 1.0:
            # Zoomed in: centrally crop back down to the original shape.
            indices = (self._central_crop_range(x, e) for x, e in zip(seed_input.shape, shape))
            indices = (slice(start, stop) for start, stop in indices)
            seed_input = seed_input[tuple(indices)]
        if _factor < 1.0:
            # Zoomed out: pad with the mean value back up to the original shape.
            pad_width = [self._pad_width(x, e) for x, e in zip(seed_input.shape, shape)]
            seed_input = np.pad(seed_input, pad_width, 'mean')
        seed_input = seed_input.astype(_dtype)
        return seed_input

    def _central_crop_range(self, x, e):
        # Start/stop indices that crop a length-x axis down to length e, centered.
        start = (x - e) // 2
        stop = start + e
        return start, stop

    def _pad_width(self, x, e):
        # Before/after pad widths that grow a length-x axis up to length e.
        diff = e - x
        before = diff // 2
        after = diff - before
        return before, after
150 |
--------------------------------------------------------------------------------
/tf_keras_vis/activation_maximization/regularizers.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 |
3 | import numpy as np
4 | import tensorflow as tf
5 |
6 |
class Regularizer(ABC):
    """Abstract base class for regularizers applied to the input values during
    activation maximization.
    """
    def __init__(self, name) -> None:
        """
        Args:
            name: Instance name.
        """
        self.name = name

    @abstractmethod
    def __call__(self, input_value) -> tf.Tensor:
        """Compute the regularization value for an input value.

        Args:
            input_value: A tf.Tensor that indicates the value to input to the model.

        Returns:
            tf.Tensor: Regularization value with respect to the input value.

        Raises:
            NotImplementedError: This method must be overwritten.
        """
        raise NotImplementedError()
31 |
32 |
class TotalVariation2D(Regularizer):
    """A regularizer that penalizes the Total Variation of a 2D input, which
    encourages spatially smooth images.
    """
    def __init__(self, weight=10.0, name='TotalVariation2D') -> None:
        """
        Args:
            weight: The coefficient applied to the Total Variation value.
                Defaults to 10.0.
            name: Instance name. Defaults to 'TotalVariation2D'.
        """
        super().__init__(name)
        self.weight = float(weight)

    def __call__(self, input_value) -> tf.Tensor:
        if len(input_value.shape) != 4:
            raise ValueError("seed_input's shape must be (batch_size, height, width, channels), "
                             f"but was {input_value.shape}.")
        # Normalize by the per-sample element count so the penalty does not
        # depend on the image size, then apply the weight.
        element_count = np.prod(input_value.shape[1:], dtype=np.float32)
        return tf.image.total_variation(input_value) / element_count * self.weight
55 |
56 |
class Norm(Regularizer):
    """A regularizer that penalizes the p-norm of the input value.
    """
    def __init__(self, weight=10., p=2, name='Norm') -> None:
        """
        Args:
            weight: The coefficient applied to the norm value. Defaults to 10.
            p: Order of the norm. Defaults to 2.
            name: Instance name. Defaults to 'Norm'.
        """
        super().__init__(name)
        self.weight = float(weight)
        self.p = int(p)

    def __call__(self, input_value) -> tf.Tensor:
        # Flatten each sample, take its p-norm, and normalize by the p-th root
        # of the element count so the value is size-independent.
        flattened = tf.reshape(input_value, (input_value.shape[0], -1))
        norm = tf.norm(flattened, ord=self.p, axis=1)
        norm /= (float(flattened.shape[1])**(1.0 / float(self.p)))
        return norm * self.weight
78 |
--------------------------------------------------------------------------------
/tf_keras_vis/gradcam.py:
--------------------------------------------------------------------------------
1 | from typing import Union
2 |
3 | import numpy as np
4 | import tensorflow as tf
5 | from scipy.ndimage.interpolation import zoom
6 |
7 | from . import ModelVisualization, keras
8 | from .utils import is_mixed_precision, normalize, zoom_factor
9 | from .utils.model_modifiers import ExtractIntermediateLayerForGradcam as ModelModifier
10 |
11 |
class Gradcam(ModelVisualization):
    """Grad-CAM

    References:
        * Grad-CAM: Why did you say that?
          Visual Explanations from Deep Networks via Gradient-based Localization
          (https://arxiv.org/pdf/1610.02391v1.pdf)
    """
    def __call__(self,
                 score,
                 seed_input,
                 penultimate_layer=None,
                 seek_penultimate_conv_layer=True,
                 gradient_modifier=None,
                 activation_modifier=lambda cam: keras.activations.relu(cam),
                 training=False,
                 expand_cam=True,
                 normalize_cam=True,
                 unconnected_gradients=tf.UnconnectedGradients.NONE) -> Union[np.ndarray, list]:
        """Generate gradient based class activation maps (CAM) by using positive gradient of
        penultimate_layer with respect to score.

        Args:
            score: A :obj:`tf_keras_vis.utils.scores.Score` instance, function or a list of them.
                For example of the Score instance to specify visualizing target::

                    scores = CategoricalScore([1, 294, 413])

                The code above means the same with the one below::

                    score = lambda outputs: (outputs[0][1], outputs[1][294], outputs[2][413])

                When the model has multiple outputs, you MUST pass a list of
                Score instances or functions. For example::

                    from tf_keras_vis.utils.scores import CategoricalScore, InactiveScore
                    score = [
                        CategoricalScore([1, 23]),  # For 1st model output
                        InactiveScore(),            # For 2nd model output
                        ...
                    ]

            seed_input: A tf.Tensor, :obj:`numpy.ndarray` or a list of them to input in the model.
                That's when the model has multiple inputs, you MUST pass a list of tensors.
            penultimate_layer: An index or name of the layer, or the keras.layers.Layer
                instance itself. When None, it means the same with `-1`. If the layer specified by
                this option is not `convolutional` layer, `penultimate_layer` will work as the
                offset to seek `convolutional` layer. Defaults to None.
            seek_penultimate_conv_layer: A bool that indicates whether or not seeks a penultimate
                layer when the layer specified by `penultimate_layer` is not `convolutional` layer.
                Defaults to True.
            gradient_modifier: A function to modify gradients. Defaults to None.
            activation_modifier: A function to modify the Class Activation Map (CAM). Defaults to
                `lambda cam: keras.activations.relu(cam)`.
            training: A bool that indicates whether the model's training-mode on or off. Defaults
                to False.
            expand_cam: True to resize CAM to the same as input image size. **Note!** When False,
                even if the model has multiple inputs, return only a CAM. Defaults to True.
            normalize_cam: When True, CAM will be normalized. Defaults to True.
            unconnected_gradients: Specifies the gradient value returned when the given input
                tensors are unconnected. Defaults to tf.UnconnectedGradients.NONE.

        Returns:
            An :obj:`numpy.ndarray` or a list of them. They are the Class Activation Maps (CAMs)
            that indicate the `seed_input` regions whose change would most contribute the score
            value.

        Raises:
            :obj:`ValueError`: When there is any invalid arguments.
        """

        # Preparing
        scores = self._get_scores_for_multiple_outputs(score)
        seed_inputs = self._get_seed_inputs_for_multiple_inputs(seed_input)

        # Processing gradcam
        # Build a model that additionally exposes the penultimate (conv) layer
        # output as its last output.
        model = ModelModifier(penultimate_layer, seek_penultimate_conv_layer)(self.model)

        with tf.GradientTape(watch_accessed_variables=False) as tape:
            tape.watch(seed_inputs)
            outputs = model(seed_inputs, training=training)
            # The last output is the penultimate layer's feature maps.
            outputs, penultimate_output = outputs[:-1], outputs[-1]
            score_values = self._calculate_scores(outputs, scores)
        # Gradients of the score values w.r.t. the penultimate feature maps.
        grads = tape.gradient(score_values,
                              penultimate_output,
                              unconnected_gradients=unconnected_gradients)

        # When mixed precision enabled
        if is_mixed_precision(model):
            grads = tf.cast(grads, dtype=model.variable_dtype)
            penultimate_output = tf.cast(penultimate_output, dtype=model.variable_dtype)

        cam = self._calculate_cam(grads, penultimate_output, gradient_modifier,
                                  activation_modifier)
        if not expand_cam:
            if normalize_cam:
                cam = normalize(cam)
            return cam

        # Visualizing
        # Upsample the CAM to the spatial size of each model input.
        factors = (zoom_factor(cam.shape, X.shape) for X in seed_inputs)
        cam = [zoom(cam, factor, order=1) for factor in factors]
        if normalize_cam:
            cam = [normalize(x) for x in cam]
        if len(self.model.inputs) == 1 and not isinstance(seed_input, list):
            cam = cam[0]
        return cam

    def _calculate_cam(self, grads, penultimate_output, gradient_modifier, activation_modifier):
        """Compute the CAM as the channel-weighted sum of the penultimate
        feature maps, where each channel's weight is its gradient averaged over
        all axes except batch and channels.
        """
        if gradient_modifier is not None:
            grads = gradient_modifier(grads)
        # Global average pooling of gradients over the spatial axes.
        weights = tf.math.reduce_mean(grads, axis=tuple(range(grads.ndim)[1:-1]), keepdims=True)
        cam = np.sum(np.multiply(penultimate_output, weights), axis=-1)
        if activation_modifier is not None:
            cam = activation_modifier(cam)
        return cam
128 |
129 |
130 | from tf_keras_vis.gradcam_plus_plus import GradcamPlusPlus # noqa: F401, E402
131 |
--------------------------------------------------------------------------------
/tf_keras_vis/gradcam_plus_plus.py:
--------------------------------------------------------------------------------
1 | from typing import Union
2 |
3 | import numpy as np
4 | import tensorflow as tf
5 | from scipy.ndimage.interpolation import zoom
6 |
7 | from . import ModelVisualization, keras
8 | from .utils import is_mixed_precision, normalize, zoom_factor
9 | from .utils.model_modifiers import ExtractIntermediateLayerForGradcam as ModelModifier
10 |
11 |
class GradcamPlusPlus(ModelVisualization):
    """Grad-CAM++

    References:
        * GradCAM++: Improved Visual Explanations for Deep Convolutional Networks
          (https://arxiv.org/pdf/1710.11063.pdf)
    """
    def __call__(self,
                 score,
                 seed_input,
                 penultimate_layer=None,
                 seek_penultimate_conv_layer=True,
                 gradient_modifier=None,
                 activation_modifier=lambda cam: keras.activations.relu(cam),
                 training=False,
                 expand_cam=True,
                 normalize_cam=True,
                 unconnected_gradients=tf.UnconnectedGradients.NONE) -> Union[np.ndarray, list]:
        """Generate gradient based class activation maps (CAM) by using positive gradient of
        penultimate_layer with respect to score.

        Args:
            score: A :obj:`tf_keras_vis.utils.scores.Score` instance, function or a list of them.
                For example of the Score instance to specify visualizing target::

                    scores = CategoricalScore([1, 294, 413])

                The code above means the same with the one below::

                    score = lambda outputs: (outputs[0][1], outputs[1][294], outputs[2][413])

                When the model has multiple outputs, you MUST pass a list of
                Score instances or functions. For example::

                    from tf_keras_vis.utils.scores import CategoricalScore, InactiveScore
                    score = [
                        CategoricalScore([1, 23]),  # For 1st model output
                        InactiveScore(),            # For 2nd model output
                        ...
                    ]

            seed_input: A tf.Tensor, :obj:`numpy.ndarray` or a list of them to input in the model.
                That's when the model has multiple inputs, you MUST pass a list of tensors.
            penultimate_layer: An index or name of the layer, or the keras.layers.Layer
                instance itself. When None, it means the same with `-1`. If the layer specified by
                this option is not `convolutional` layer, `penultimate_layer` will work as the
                offset to seek `convolutional` layer. Defaults to None.
            seek_penultimate_conv_layer: A bool that indicates whether or not seeks a penultimate
                layer when the layer specified by `penultimate_layer` is not `convolutional` layer.
                Defaults to True.
            gradient_modifier: A function to modify gradients. Defaults to None.
            activation_modifier: A function to modify the Class Activation Map (CAM). Defaults to
                `lambda cam: keras.activations.relu(cam)`.
            training: A bool that indicates whether the model's training-mode on or off. Defaults
                to False.
            expand_cam: True to resize CAM to the same as input image size. **Note!** When False,
                even if the model has multiple inputs, return only a CAM. Defaults to True.
            normalize_cam: When True, CAM will be normalized. Defaults to True.
            unconnected_gradients: Specifies the gradient value returned when the given input
                tensors are unconnected. Defaults to tf.UnconnectedGradients.NONE.

        Returns:
            An :obj:`numpy.ndarray` or a list of them. They are the Class Activation Maps (CAMs)
            that indicate the `seed_input` regions whose change would most contribute the score
            value.

        Raises:
            :obj:`ValueError`: When there is any invalid arguments.
        """

        # Preparing
        scores = self._get_scores_for_multiple_outputs(score)
        seed_inputs = self._get_seed_inputs_for_multiple_inputs(seed_input)

        # Processing gradcam
        # Build a model that additionally exposes the penultimate (conv) layer
        # output as its last output.
        model = ModelModifier(penultimate_layer, seek_penultimate_conv_layer)(self.model)

        with tf.GradientTape(watch_accessed_variables=False) as tape:
            tape.watch(seed_inputs)
            outputs = model(seed_inputs, training=training)
            # The last output is the penultimate layer's feature maps.
            outputs, penultimate_output = outputs[:-1], outputs[-1]
            score_values = self._calculate_scores(outputs, scores)
        # Gradients of the score values w.r.t. the penultimate feature maps.
        grads = tape.gradient(score_values,
                              penultimate_output,
                              unconnected_gradients=unconnected_gradients)

        # When mixed precision enabled
        if is_mixed_precision(model):
            grads = tf.cast(grads, dtype=model.variable_dtype)
            penultimate_output = tf.cast(penultimate_output, dtype=model.variable_dtype)
            score_values = [tf.cast(v, dtype=model.variable_dtype) for v in score_values]

        # Sum exp(score) over outputs and reshape so it broadcasts over grads.
        score_values = sum(tf.math.exp(o) for o in score_values)
        score_values = tf.reshape(score_values, score_values.shape + (1,) * (grads.ndim - 1))

        if gradient_modifier is not None:
            grads = gradient_modifier(grads)
        # Products of the gradients used by the Grad-CAM++ weighting scheme.
        first_derivative = score_values * grads
        second_derivative = first_derivative * grads
        third_derivative = second_derivative * grads

        # Feature-map activations summed over all axes but batch and channels.
        global_sum = tf.math.reduce_sum(penultimate_output,
                                        axis=tuple(np.arange(len(penultimate_output.shape))[1:-1]),
                                        keepdims=True)

        alpha_denom = second_derivative * 2.0 + third_derivative * global_sum
        # Guard: where second_derivative is 0 the numerator is 0 anyway, so add
        # 1.0 to the denominator there to avoid 0/0.
        alpha_denom = alpha_denom + tf.cast((second_derivative == 0.0), second_derivative.dtype)
        alphas = second_derivative / alpha_denom

        alpha_normalization_constant = tf.math.reduce_sum(alphas,
                                                          axis=tuple(
                                                              np.arange(len(alphas.shape))[1:-1]),
                                                          keepdims=True)
        # Same zero-guard for the normalization constant.
        alpha_normalization_constant = alpha_normalization_constant + tf.cast(
            (alpha_normalization_constant == 0.0), alpha_normalization_constant.dtype)
        alphas = alphas / alpha_normalization_constant

        if activation_modifier is None:
            weights = first_derivative
        else:
            weights = activation_modifier(first_derivative)
        # Channel weights: alpha-weighted gradients summed over spatial axes.
        deep_linearization_weights = weights * alphas
        deep_linearization_weights = tf.math.reduce_sum(
            deep_linearization_weights,
            axis=tuple(np.arange(len(deep_linearization_weights.shape))[1:-1]),
            keepdims=True)

        cam = tf.math.reduce_sum(deep_linearization_weights * penultimate_output, axis=-1)
        if activation_modifier is not None:
            cam = activation_modifier(cam)

        if not expand_cam:
            if normalize_cam:
                cam = normalize(cam)
            return cam

        # Visualizing
        # Upsample the CAM to the spatial size of each model input.
        factors = (zoom_factor(cam.shape, X.shape) for X in seed_inputs)
        cam = [zoom(cam, factor, order=1) for factor in factors]
        if normalize_cam:
            cam = [normalize(x) for x in cam]
        if len(self.model.inputs) == 1 and not isinstance(seed_input, list):
            cam = cam[0]
        return cam
156 |
--------------------------------------------------------------------------------
/tf_keras_vis/layercam.py:
--------------------------------------------------------------------------------
1 | from typing import Union
2 |
3 | import numpy as np
4 | import tensorflow as tf
5 |
6 | from . import keras
7 | from .gradcam import Gradcam
8 |
9 |
class Layercam(Gradcam):
    """LayerCAM

    References:
        * LayerCAM: Exploring Hierarchical Class Activation Maps for Localization
          (https://ieeexplore.ieee.org/document/9462463)
    """
    def __call__(self,
                 score,
                 seed_input,
                 penultimate_layer=None,
                 seek_penultimate_conv_layer=True,
                 gradient_modifier=lambda grads: keras.activations.relu(grads),
                 activation_modifier=lambda cam: keras.activations.relu(cam),
                 training=False,
                 expand_cam=True,
                 normalize_cam=True,
                 unconnected_gradients=tf.UnconnectedGradients.NONE) -> Union[np.ndarray, list]:
        """Generate class activation maps (CAM) using the positive gradients of
        `penultimate_layer` with respect to `score` (the LayerCAM algorithm).

        Args:
            score: A :obj:`tf_keras_vis.utils.scores.Score` instance, a function, or a list of
                them that specifies the visualization target. For example::

                    scores = CategoricalScore([1, 294, 413])

                which is equivalent to::

                    score = lambda outputs: (outputs[0][1], outputs[1][294], outputs[2][413])

                When the model has multiple outputs, you MUST pass one Score instance or
                function per output, e.g.::

                    from tf_keras_vis.utils.scores import CategoricalScore, InactiveScore
                    score = [
                        CategoricalScore([1, 23]),  # For 1st model output
                        InactiveScore(),            # For 2nd model output
                        ...
                    ]

            seed_input: A tf.Tensor, :obj:`numpy.ndarray` or a list of them to input in the
                model. When the model has multiple inputs, you MUST pass a list of tensors.
            penultimate_layer: An index, a name, or a keras.layers.Layer instance. None means
                the same as `-1`. When the resolved layer is not a `convolutional` layer, it
                works as the offset from which a `convolutional` layer is sought. Defaults to
                None.
            seek_penultimate_conv_layer: Whether to seek a penultimate layer when the layer
                given by `penultimate_layer` is not a `convolutional` layer. Defaults to True.
            gradient_modifier: A function applied to the gradients. Defaults to
                `lambda grads: keras.activations.relu(grads)`.
            activation_modifier: A function applied to the Class Activation Map (CAM).
                Defaults to `lambda cam: keras.activations.relu(cam)`.
            training: Whether to run the model in training mode. Defaults to False.
            expand_cam: When True, resize the CAM to the input image size. **Note!** When
                False, only a single CAM is returned even for multi-input models. Defaults to
                True.
            normalize_cam: When True, the CAM will be normalized. Defaults to True.
            unconnected_gradients: The gradient value returned when the given input tensors
                are unconnected. Defaults to tf.UnconnectedGradients.NONE.

        Returns:
            An :obj:`numpy.ndarray` or a list of them. The Class Activation Maps that
            indicate the `seed_input` regions whose change would most contribute to the
            score value.

        Raises:
            :obj:`ValueError`: When there is any invalid argument.
        """
        # Snapshot the call arguments before any new local variable is created,
        # then forward everything except `self` to Gradcam.__call__.
        captured = locals()
        forwarded = {
            name: value
            for name, value in captured.items() if name != 'self' and not name.startswith('_')
        }
        return super().__call__(**forwarded)

    def _calculate_cam(self, grads, penultimate_output, gradient_modifier, activation_modifier):
        """Combine activations with (modified) gradients into a LayerCAM map."""
        weighted_grads = grads if gradient_modifier is None else gradient_modifier(grads)
        # Element-wise weighting of the activations, reduced over the channel axis.
        cam = np.sum(np.multiply(penultimate_output, weighted_grads), axis=-1)
        return cam if activation_modifier is None else activation_modifier(cam)
93 |
--------------------------------------------------------------------------------
/tf_keras_vis/saliency.py:
--------------------------------------------------------------------------------
1 | from typing import Union
2 |
3 | import numpy as np
4 | import tensorflow as tf
5 |
6 | from . import ModelVisualization, keras
7 | from .utils import get_num_of_steps_allowed, listify, normalize
8 |
9 |
class Saliency(ModelVisualization):
    """Vanilla Saliency and Smooth-Grad

    References:
        * Vanilla Saliency: Deep Inside Convolutional Networks: Visualising Image Classification
            Models and Saliency Maps (https://arxiv.org/pdf/1312.6034)
        * SmoothGrad: removing noise by adding noise (https://arxiv.org/pdf/1706.03825)
    """
    def __call__(self,
                 score,
                 seed_input,
                 smooth_samples=0,
                 smooth_noise=0.20,
                 keepdims=False,
                 gradient_modifier=lambda grads: tf.abs(grads),
                 training=False,
                 normalize_map=True,
                 unconnected_gradients=tf.UnconnectedGradients.NONE) -> Union[np.ndarray, list]:
        """Generate an attention map that appears how output value changes with respect to a small
        change in input image pixels.

        Args:
            score: A :obj:`tf_keras_vis.utils.scores.Score` instance, function or a list of them.
                For example of the Score instance to specify visualizing target::

                    scores = CategoricalScore([1, 294, 413])

                The code above means the same with the one below::

                    score = lambda outputs: (outputs[0][1], outputs[1][294], outputs[2][413])

                When the model has multiple outputs, you MUST pass a list of
                Score instances or functions. For example::

                    from tf_keras_vis.utils.scores import CategoricalScore, InactiveScore
                    score = [
                        CategoricalScore([1, 23]),  # For 1st model output
                        InactiveScore(),            # For 2nd model output
                        ...
                    ]

            seed_input: A tf.Tensor, :obj:`numpy.ndarray` or a list of them to input in the model.
                That's when the model has multiple inputs, you MUST pass a list of tensors.
            smooth_samples (int, optional): The number of calculating gradients iterations. When
                over zero, this method will work as SmoothGrad. When zero, it will work as Vanilla
                Saliency. Defaults to 0.
            smooth_noise: Noise level. Defaults to 0.20.
            keepdims: A bool that indicates whether or not to keep the channels-dimension.
                Defaults to False.
            gradient_modifier: A function to modify gradients. Defaults to
                `lambda grads: tf.abs(grads)`.
            training: A bool that indicates whether the model's training-mode on or off. Defaults
                to False.
            normalize_map (bool, optional): When True, saliency map will be normalized.
                Defaults to True.
            unconnected_gradients: Specifies the gradient value returned when the given input
                tensors are unconnected. Defaults to tf.UnconnectedGradients.NONE.

        Returns:
            An :obj:`numpy.ndarray` or a list of them.
            They are the saliency maps that indicate the `seed_input` regions
            whose change would most contribute the score value.

        Raises:
            :obj:`ValueError`: When there is any invalid arguments.
        """

        # Preparing
        scores = self._get_scores_for_multiple_outputs(score)
        seed_inputs = self._get_seed_inputs_for_multiple_inputs(seed_input)
        # Processing saliency
        if smooth_samples > 0:
            # SmoothGrad: average the gradients over `smooth_samples` noisy copies of
            # each input. The steps below are lazy generator pipelines that are only
            # materialized by the final list()/comprehension that consumes them.
            smooth_samples = get_num_of_steps_allowed(smooth_samples)
            # (batch, ...) -> (smooth_samples * batch, ...)
            seed_inputs = (tf.tile(X, (smooth_samples,) + tuple(np.ones(X.ndim - 1, np.int32)))
                           for X in seed_inputs)
            # (smooth_samples * batch, ...) -> (smooth_samples, batch, ...)
            seed_inputs = (
                tf.reshape(X, (smooth_samples, -1) + tuple(X.shape[1:])) for X in seed_inputs)
            # Pair each tensor with the axes that span one sample (all but the two
            # leading axes).
            seed_inputs = ((X, tuple(range(X.ndim)[2:])) for X in seed_inputs)
            # Noise sigma is scaled by each sample's value range (max - min).
            seed_inputs = ((X, smooth_noise * (tf.math.reduce_max(X, axis=axis, keepdims=True) -
                                               tf.math.reduce_min(X, axis=axis, keepdims=True)))
                           for X, axis in seed_inputs)
            seed_inputs = (X + np.random.normal(0., sigma, X.shape) for X, sigma in seed_inputs)
            seed_inputs = list(seed_inputs)
            total = (np.zeros_like(X[0]) for X in seed_inputs)
            for i in range(smooth_samples):
                grads = self._get_gradients([X[i] for X in seed_inputs], scores, gradient_modifier,
                                            training, unconnected_gradients)
                # Accumulate lazily; `zip(total, grads)` binds the current objects
                # eagerly, so the chain is evaluated correctly below.
                total = (total + g for total, g in zip(total, grads))
            grads = [g / smooth_samples for g in total]
        else:
            # Vanilla Saliency: a single gradient computation on the raw inputs.
            grads = self._get_gradients(seed_inputs, scores, gradient_modifier, training,
                                        unconnected_gradients)
        # Visualizing
        if not keepdims:
            grads = [np.max(g, axis=-1) for g in grads]
        if normalize_map:
            grads = [normalize(g) for g in grads]
        if len(self.model.inputs) == 1 and not isinstance(seed_input, list):
            grads = grads[0]
        return grads

    def _get_gradients(self, seed_inputs, scores, gradient_modifier, training,
                       unconnected_gradients):
        # Compute the gradients of the score values w.r.t. each seed input, then
        # optionally post-process them with `gradient_modifier`.
        with tf.GradientTape(watch_accessed_variables=False, persistent=True) as tape:
            tape.watch(seed_inputs)
            outputs = self.model(seed_inputs, training=training)
            outputs = listify(outputs)
            score_values = self._calculate_scores(outputs, scores)
        grads = tape.gradient(score_values,
                              seed_inputs,
                              unconnected_gradients=unconnected_gradients)
        if gradient_modifier is not None:
            grads = [gradient_modifier(g) for g in grads]
        return grads
123 |
--------------------------------------------------------------------------------
/tf_keras_vis/scorecam.py:
--------------------------------------------------------------------------------
1 | from typing import Union
2 |
3 | import numpy as np
4 | import tensorflow as tf
5 | from scipy.ndimage.interpolation import zoom
6 |
7 | from . import ModelVisualization, keras
8 | from .utils import get_num_of_steps_allowed, is_mixed_precision, listify, normalize, zoom_factor
9 | from .utils.model_modifiers import ExtractIntermediateLayerForGradcam as ModelModifier
10 |
11 |
class Scorecam(ModelVisualization):
    """Score-CAM and Faster Score-CAM

    References:
        * Score-CAM: Score-Weighted Visual Explanations for Convolutional Neural Networks
            (https://arxiv.org/pdf/1910.01279.pdf)
        * Faster Score-CAM (https://github.com/tabayashi0117/Score-CAM#faster-score-cam)
    """
    def __call__(self,
                 score,
                 seed_input,
                 penultimate_layer=None,
                 seek_penultimate_conv_layer=True,
                 activation_modifier=lambda cam: keras.activations.relu(cam),
                 batch_size=32,
                 max_N=None,
                 training=False,
                 expand_cam=True,
                 normalize_cam=True) -> Union[np.ndarray, list]:
        """Generate score-weighted class activation maps (CAM) by using gradient-free
        visualization method.

        Args:
            score: A :obj:`tf_keras_vis.utils.scores.Score` instance, function or a list of them.
                For example of the Score instance to specify visualizing target::

                    scores = CategoricalScore([1, 294, 413])

                The code above means the same with the one below::

                    score = lambda outputs: (outputs[0][1], outputs[1][294], outputs[2][413])

                When the model has multiple outputs, you MUST pass a list of
                Score instances or functions. For example::

                    from tf_keras_vis.utils.scores import CategoricalScore, InactiveScore
                    score = [
                        CategoricalScore([1, 23]),  # For 1st model output
                        InactiveScore(),            # For 2nd model output
                        ...
                    ]

            seed_input: A tf.Tensor, :obj:`numpy.ndarray` or a list of them to input in the model.
                That's when the model has multiple inputs, you MUST pass a list of tensors.
            penultimate_layer: An index or name of the layer, or the keras.layers.Layer
                instance itself. When None, it means the same with `-1`. If the layer specified by
                this option is not `convolutional` layer, `penultimate_layer` will work as the
                offset to seek `convolutional` layer. Defaults to None.
            seek_penultimate_conv_layer: A bool that indicates whether or not seeks a penultimate
                layer when the layer specified by `penultimate_layer` is not `convolutional` layer.
                Defaults to True.
            activation_modifier: A function to modify the Class Activation Map (CAM). Defaults to
                `lambda cam: keras.activations.relu(cam)`.
            batch_size: The number of samples per batch. Defaults to 32.
            max_N: When None or under Zero, run as ScoreCAM. When not None and over Zero of
                Integer, run as Faster-ScoreCAM. Set larger number (or None), need more time to
                visualize CAM but to be able to get clearer attention images. Defaults to None.
            training: A bool that indicates whether the model's training-mode on or off. Defaults
                to False.
            expand_cam: True to resize CAM to the same as input image size. **Note!** When False,
                even if the model has multiple inputs, return only a CAM. Defaults to True.
            normalize_cam: When True, CAM will be normalized. Defaults to True.

        Returns:
            An :obj:`numpy.ndarray` or a list of them. They are the Class Activation Maps (CAMs)
            that indicate the `seed_input` regions whose change would most contribute the score
            value.

        Raises:
            :obj:`ValueError`: When there is any invalid arguments.
        """
        # Preparing
        scores = self._get_scores_for_multiple_outputs(score)
        seed_inputs = self._get_seed_inputs_for_multiple_inputs(seed_input)

        # Processing score-cam
        # The modified model outputs only the penultimate (conv) activations.
        model = ModelModifier(penultimate_layer, seek_penultimate_conv_layer, False)(self.model)
        penultimate_output = model(seed_inputs, training=training)

        if is_mixed_precision(self.model):
            penultimate_output = tf.cast(penultimate_output, self.model.variable_dtype)

        # For efficiently visualizing, extract maps that has a large variance.
        # This excellent idea is devised by tabayashi0117.
        # (see for details: https://github.com/tabayashi0117/Score-CAM#faster-score-cam)
        if max_N is None or max_N <= 0:
            max_N = get_num_of_steps_allowed(penultimate_output.shape[-1])
        elif max_N > 0 and max_N <= penultimate_output.shape[-1]:
            max_N = get_num_of_steps_allowed(max_N)
        else:
            raise ValueError(f"max_N must be 1 or more and {penultimate_output.shape[-1]} or less."
                             f" max_N: {max_N}")
        if max_N < penultimate_output.shape[-1]:
            # Keep only the `max_N` channels with the largest spatial std-dev.
            activation_map_std = tf.math.reduce_std(penultimate_output,
                                                    axis=tuple(
                                                        range(penultimate_output.ndim)[1:-1]),
                                                    keepdims=True)
            _, top_k_indices = tf.math.top_k(activation_map_std, max_N)
            top_k_indices, _ = tf.unique(tf.reshape(top_k_indices, (-1,)))
            penultimate_output = tf.gather(penultimate_output, top_k_indices, axis=-1)
        nsamples = penultimate_output.shape[0]
        channels = penultimate_output.shape[-1]

        # Upsampling activations
        input_shapes = [seed_input.shape for seed_input in seed_inputs]
        zoom_factors = (zoom_factor(penultimate_output.shape[1:-1], input_shape[1:-1])
                        for input_shape in input_shapes)
        zoom_factors = ((1,) + factor + (1,) for factor in zoom_factors)
        upsampled_activations = [
            zoom(penultimate_output, factor, order=1, mode='nearest') for factor in zoom_factors
        ]
        activation_shapes = [activation.shape for activation in upsampled_activations]

        # Normalizing activations
        min_activations = (np.min(activation,
                                  axis=tuple(range(activation.ndim)[1:-1]),
                                  keepdims=True) for activation in upsampled_activations)
        max_activations = (np.max(activation,
                                  axis=tuple(range(activation.ndim)[1:-1]),
                                  keepdims=True) for activation in upsampled_activations)
        normalized_activations = zip(upsampled_activations, min_activations, max_activations)
        normalized_activations = ((activation - _min) / (_max - _min + keras.backend.epsilon())
                                  for activation, _min, _max in normalized_activations)

        # (samples, h, w, c) -> (channels, samples, h, w, c)
        input_templates = (np.tile(seed_input, (channels,) + (1,) * len(seed_input.shape))
                           for seed_input in seed_inputs)
        # (samples, h, w, channels) -> (c, samples, h, w, channels)
        masks = (np.tile(mask, (input_shape[-1],) + (1,) * len(map_shape)) for mask, input_shape,
                 map_shape in zip(normalized_activations, input_shapes, activation_shapes))
        # (c, samples, h, w, channels) -> (channels, samples, h, w, c)
        masks = (np.transpose(mask,
                              (len(mask.shape) - 1,) + tuple(range(len(mask.shape)))[1:-1] + (0,))
                 for mask in masks)
        # Create masked inputs
        masked_seed_inputs = (np.multiply(input_template, mask)
                              for input_template, mask in zip(input_templates, masks))
        # (channels, samples, h, w, c) -> (channels * samples, h, w, c)
        masked_seed_inputs = [
            np.reshape(seed_input, (-1,) + seed_input.shape[2:])
            for seed_input in masked_seed_inputs
        ]

        # Predicting masked seed-inputs
        preds = self.model.predict(masked_seed_inputs, batch_size=batch_size)
        # (channels * samples, logits) -> (channels, samples, logits)
        preds = (np.reshape(prediction, (channels, nsamples, prediction.shape[-1]))
                 for prediction in listify(preds))

        # Calculating weights
        # Each channel's weight is the softmax-ed score of the prediction for the
        # corresponding masked input.
        weights = ([score(keras.activations.softmax(tf.constant(p)))
                    for p in prediction]
                   for score, prediction in zip(scores, preds))
        weights = ([self._validate_weight(s, nsamples) for s in w] for w in weights)
        weights = (np.array(w, dtype=np.float32) for w in weights)
        weights = (np.reshape(w, (channels, nsamples, -1)) for w in weights)
        weights = (np.mean(w, axis=2) for w in weights)
        weights = (np.transpose(w, (1, 0)) for w in weights)
        weights = np.array(list(weights), dtype=np.float32)
        # Sum the per-output weights across model outputs.
        weights = np.sum(weights, axis=0)

        # Generate cam
        # cam = K.batch_dot(penultimate_output, weights)
        cam = keras.layers.Dot(axes=-1)([penultimate_output, weights])
        if activation_modifier is not None:
            cam = activation_modifier(cam)
        cam = cam.numpy()

        if not expand_cam:
            if normalize_cam:
                cam = normalize(cam)
            return cam

        # Visualizing
        zoom_factors = (zoom_factor(cam.shape, X.shape) for X in seed_inputs)
        cam = [zoom(cam.astype(np.float32), factor, order=1) for factor in zoom_factors]
        if normalize_cam:
            cam = [normalize(x) for x in cam]
        if len(self.model.inputs) == 1 and not isinstance(seed_input, list):
            cam = cam[0]
        return cam

    def _validate_weight(self, score, nsamples):
        # Check that a score value has one entry per sample; scalar values are
        # only accepted when there is exactly one sample.
        invalid = False
        if tf.is_tensor(score) or isinstance(score, np.ndarray):
            invalid = (score.shape[0] != nsamples)
        elif isinstance(score, (list, tuple)):
            invalid = (len(score) != nsamples)
        else:
            invalid = (nsamples != 1)
        if invalid:
            raise ValueError(
                "Score function must return a Tensor, whose the first dimension is "
                "the same as the first dimension of seed_input or "
                ", a list or tuple, whose length is the first dimension of seed_input.")
        else:
            return score
211 |
212 |
213 | ScoreCAM = Scorecam
214 |
--------------------------------------------------------------------------------
/tf_keras_vis/utils/__init__.py:
--------------------------------------------------------------------------------
1 | import os
2 | from typing import Tuple
3 |
4 | import numpy as np
5 | import tensorflow as tf
6 | from deprecated import deprecated
7 | from packaging.version import parse as version
8 |
9 | from .. import keras
10 |
11 | MAX_STEPS = 'TF_KERAS_VIS_MAX_STEPS'
12 |
13 |
@deprecated(version='0.7.0')
def check_steps(steps):
    """Deprecated alias of :func:`get_num_of_steps_allowed`."""
    return get_num_of_steps_allowed(steps)
17 |
18 |
def get_num_of_steps_allowed(steps) -> int:
    """Cap `steps` by the TF_KERAS_VIS_MAX_STEPS environment variable when it is set."""
    limit = os.environ.get(MAX_STEPS)
    if limit is None:
        return steps
    return min(steps, int(limit))
21 |
22 |
def num_of_gpus() -> Tuple[int, int]:
    """Return the number of physical and logical gpus.

    Returns:
        Tuple[int, int]: A tuple of (physical gpu count, logical gpu count);
        (0, 0) when no physical GPU is available.
    """
    # The device-listing APIs moved out of `tf.config.experimental` in TF 2.1.0.
    if version(tf.version.VERSION) < version("2.1.0"):
        list_physical = tf.config.experimental.list_physical_devices
        list_logical = tf.config.experimental.list_logical_devices
    else:
        list_physical = tf.config.list_physical_devices
        list_logical = tf.config.list_logical_devices
    physical_gpus = list_physical('GPU')
    if not physical_gpus:
        return 0, 0
    # Logical devices are only enumerated when at least one physical GPU exists.
    return len(physical_gpus), len(list_logical('GPU'))
41 |
42 |
43 | def listify(value, return_empty_list_if_none=True, convert_tuple_to_list=True) -> list:
44 | """Ensures that the value is a list.
45 |
46 | If it is not a list, it creates a new list with `value` as an item.
47 |
48 | Args:
49 | value (object): A list or something else.
50 | return_empty_list_if_none (bool, optional): When True (default), None you passed as `value`
51 | will be converted to a empty list (i.e., `[]`). When False, None will be converted to
52 | a list that has an None (i.e., `[None]`). Defaults to True.
53 | convert_tuple_to_list (bool, optional): When True (default), a tuple you passed as `value`
54 | will be converted to a list. When False, a tuple will be unconverted
55 | (i.e., returning a tuple object that was passed as `value`). Defaults to True.
56 | Returns:
57 | list: A list. When `value` is a tuple and `convert_tuple_to_list` is False, a tuple.
58 | """
59 | if not isinstance(value, list):
60 | if value is None and return_empty_list_if_none:
61 | value = []
62 | elif isinstance(value, tuple) and convert_tuple_to_list:
63 | value = list(value)
64 | else:
65 | value = [value]
66 | return value
67 |
68 |
@deprecated(version='0.8.0', reason="Use `normalize()` instead.")
def standardize(array, value_range=(1., 0.)):
    """Deprecated alias of :func:`normalize`.

    Bug fix: `value_range` used to be ignored (a hard-coded ``(1., 0.)`` was always
    forwarded); it is now passed through to `normalize`.
    """
    return normalize(array, value_range=value_range)
72 |
73 |
def normalize(array, value_range=(1., 0.)) -> np.ndarray:
    """Normalize each sample of `array` into `value_range`.

    Min/max are computed per sample (over all axes except the first), so every
    sample is scaled independently.

    Args:
        array (np.ndarray): A tensor whose first axis is the samples axis.
        value_range (tuple, optional): `(high, low)` bounds that the normalized values
            are scaled into. Defaults to (1., 0.).

    Returns:
        np.ndarray: Normalized array.
    """
    high, low = value_range
    sample_axes = tuple(range(array.ndim)[1:])
    max_value = np.max(array, axis=sample_axes, keepdims=True)
    min_value = np.min(array, axis=sample_axes, keepdims=True)
    # Epsilon avoids division by zero for constant-valued samples.
    normalized_array = (array - min_value) / (max_value - min_value + keras.backend.epsilon())
    # Bug fix: `value_range` used to be accepted but ignored. Scaling into
    # [low, high] with the default (1., 0.) reproduces the old behavior exactly.
    return normalized_array * (high - low) + low
88 |
89 |
def find_layer(model, condition, offset=None, reverse=True) -> keras.layers.Layer:
    """Search `model` (including nested sub-models) for a layer satisfying `condition`.

    Args:
        model: A keras.Model instance to search.
        condition: A callable that takes a layer and returns True for a match.
        offset: A layer instance; matching only begins once this layer has been passed
            during iteration. When None, matching starts immediately.
        reverse: When True (default), iterate layers from output to input. Bug fix:
            this flag used to be accepted (and propagated recursively) but ignored;
            it now controls the iteration direction. Default behavior is unchanged.

    Returns:
        The first matching layer, or None when nothing matches.
    """
    found_offset = offset is None
    layers = reversed(model.layers) if reverse else model.layers
    for layer in layers:
        if not found_offset:
            found_offset = (layer == offset)
        if condition(layer) and found_offset:
            return layer
        # Recurse into nested models; once the offset has been seen, the nested
        # search no longer needs it.
        if isinstance(layer, keras.Model):
            if found_offset:
                result = find_layer(layer, condition, offset=None, reverse=reverse)
            else:
                result = find_layer(layer, condition, offset=offset, reverse=reverse)
            if result is not None:
                return result
    return None
105 |
106 |
def zoom_factor(from_shape, to_shape) -> Tuple[float, float]:
    """Return per-dimension scale factors mapping `from_shape` onto `to_shape`.

    Extra trailing dimensions of the longer shape are ignored (zip truncates).
    """
    factors = []
    for src_dim, dst_dim in zip(from_shape, to_shape):
        factors.append(float(dst_dim) / float(src_dim))
    return tuple(factors)
109 |
110 |
def is_mixed_precision(model) -> bool:
    """Check whether the model has any lower precision variable or not.

    Args:
        model (keras.Model): A model instance.

    Returns:
        bool: True when the model (or any nested model) computes in a lower
        precision dtype (float16 or bfloat16).
    """
    # Mixed precision policies were stabilized in TF 2.4; earlier versions are
    # treated as full precision.
    if version(tf.version.VERSION) >= version("2.4.0"):
        for layer in model.layers:
            # Bug fix: also accept bfloat16 (e.g., the `mixed_bfloat16` policy).
            # Checking only float16 missed it, inconsistently with
            # `lower_precision_dtype` below which tests both dtypes.
            if layer.compute_dtype in (tf.float16, tf.bfloat16):
                return True
            if isinstance(layer, keras.Model) and is_mixed_precision(layer):
                return True
    return False
126 |
127 |
@deprecated(version='0.7.0', reason="Unnecessary function.")
def lower_precision_dtype(model):
    """Return the compute dtype of the first lower-precision (float16/bfloat16) layer.

    Deprecated. Falls back to `model.dtype` when no lower-precision layer is found
    or when running on TensorFlow < 2.4.0.
    """
    if version(tf.version.VERSION) >= version("2.4.0"):
        for layer in model.layers:
            if (layer.compute_dtype in [tf.float16, tf.bfloat16]) or \
                    (isinstance(layer, keras.Model) and is_mixed_precision(layer)):
                return layer.compute_dtype
    return model.dtype  # pragma: no cover
136 |
137 |
def get_input_names(model):
    """Return the names of the model's input tensors.

    Falls back to `model.input_names` on TensorFlow < 2.4.0.
    """
    if version(tf.version.VERSION) < version("2.4.0"):
        return model.input_names
    return [tensor.name for tensor in model.inputs]
144 |
--------------------------------------------------------------------------------
/tf_keras_vis/utils/callbacks.py:
--------------------------------------------------------------------------------
# Deprecated shim: kept so old imports from `tf_keras_vis.utils.callbacks` keep
# working; the implementations moved to
# `tf_keras_vis.activation_maximization.callbacks`.
import warnings

warnings.warn(('`tf_keras_vis.utils.callbacks` module is deprecated. '
               'Please use `tf_keras_vis.activation_maximization.callbacks` instead.'),
              DeprecationWarning)

# Imports intentionally follow the warning (hence the E402 suppressions).
from ..activation_maximization.callbacks import Callback as OptimizerCallback # noqa: F401 E402
from ..activation_maximization.callbacks import GifGenerator2D # noqa: F401 E402
from ..activation_maximization.callbacks import GifGenerator2D as GifGenerator # noqa: F401 E402
from ..activation_maximization.callbacks import PrintLogger as Print # noqa: F401 E402
11 |
--------------------------------------------------------------------------------
/tf_keras_vis/utils/input_modifiers.py:
--------------------------------------------------------------------------------
# Deprecated shim: re-exports the input modifiers that moved to
# `tf_keras_vis.activation_maximization.input_modifiers`.
import warnings

warnings.warn(
    "`tf_keras_vis.utils.input_modifiers` module is deprecated. "
    "Please use `tf_keras_vis.activation_maximization.input_modifiers` instead.",
    DeprecationWarning)

from ..activation_maximization.input_modifiers import (  # noqa: E402,F401
    InputModifier, Jitter, Rotate, Rotate2D)
10 |
--------------------------------------------------------------------------------
/tf_keras_vis/utils/losses.py:
--------------------------------------------------------------------------------
# Deprecated shim: `Loss` classes were renamed to `Score`s and moved to
# `tf_keras_vis.utils.scores`; these aliases preserve the old names.
import warnings

warnings.warn(('`tf_keras_vis.utils.losses` module is deprecated. '
               'Please use `tf_keras_vis.utils.scores` instead.'), DeprecationWarning)

from .scores import BinaryScore as BinaryLoss # noqa: F401 E402
from .scores import CategoricalScore as CategoricalLoss # noqa: F401 E402
from .scores import InactiveScore as InactiveLoss # noqa: F401 E402
from .scores import Score as Loss # noqa: F401 E402
10 |
--------------------------------------------------------------------------------
/tf_keras_vis/utils/model_modifiers.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from typing import Union
3 |
4 | import tensorflow as tf
5 | from packaging.version import parse as version
6 |
7 | from .. import keras
8 |
9 | if version(tf.version.VERSION) >= version("2.16.0rc0"):
10 | from keras.src.layers.convolutional.base_conv import BaseConv as Conv
11 | elif version(tf.version.VERSION) >= version("2.13.0rc0"):
12 | from keras.src.layers.convolutional.base_conv import Conv
13 | elif version(tf.version.VERSION) >= version("2.9.0rc0"):
14 | from keras.layers.convolutional.base_conv import Conv
15 | elif version(tf.version.VERSION) < version("2.6.0rc0"):
16 | from tensorflow.python.keras.layers.convolutional import Conv
17 | else:
18 | from keras.layers.convolutional import Conv
19 |
20 | from . import find_layer
21 |
22 |
class ModelModifier(ABC):
    """Abstract class for defining a model modifier.

    A model modifier mutates or replaces a `keras.Model` before a visualization
    runs (e.g., swapping activations or truncating outputs).
    """
    @abstractmethod
    def __call__(self, model) -> Union[None, keras.Model]:
        """Implement modification to the model before processing gradient descent.

        Args:
            model: A model instance.

        Raises:
            NotImplementedError: This method must be overwritten.

        Returns: Modified model, or None when the model was modified in place.
        """
        raise NotImplementedError()
39 |
40 |
class ReplaceToLinear(ModelModifier):
    """A model modifier that swaps the activation function of every output layer
    for `keras.activations.linear`.

    Please note that this modifier must be set the end of modifiers list
    that is passed to `ModelVisualization#__init__()`. For example::

        # When visualizing `intermediate-1` layer.
        ActivationMaximization(YOUR_MODEL,
                               model_modifier=[ExtractIntermediateLayer("intermediate-1"),
                                               ReplaceToLinear()])
    """
    def __call__(self, model) -> None:
        # Resolve each output layer by name and overwrite its activation in place.
        for output_name in model.output_names:
            model.get_layer(name=output_name).activation = keras.activations.linear
57 |
58 |
class ExtractIntermediateLayer(ModelModifier):
    """A model modifier that constructs new model instance
    whose output layer is an intermediate layer of `model`.

    This modifier will be used to visualize the features of the model layer.
    """
    def __init__(self, index_or_name) -> None:
        """
        Args:
            index_or_name: A layer index (int) or layer name (str) identifying the
                layer to expose as the new model output.

        Raises:
            TypeError: When `index_or_name` is neither a string nor an integer.
        """
        if not isinstance(index_or_name, (str, int)):
            # Bug fix: the two message fragments used to be concatenated without a
            # separator, producing "...integer.index_or_name: ...".
            raise TypeError("The type of `index_or_name` must be a string or an integer. "
                            f"index_or_name: {index_or_name}")
        self.index_or_name = index_or_name

    def __call__(self, model) -> keras.Model:
        # `index_or_name` is guaranteed by __init__ to be an int or a str, so a
        # plain else-branch is safe here.
        if isinstance(self.index_or_name, int):
            target_layer = model.get_layer(index=self.index_or_name)
        else:
            target_layer = model.get_layer(name=self.index_or_name)
        return keras.Model(inputs=model.inputs, outputs=target_layer.output)
77 |
78 |
class GuidedBackpropagation(ModelModifier):
    """A model modifier that replaces the gradient calculation of activation functions to
    Guided calculation.

    For details on Guided back propagation, see the papers:

    References:
        * Striving for Simplicity: The All Convolutional Net (https://arxiv.org/pdf/1412.6806.pdf)
        * Grad-CAM: Why did you say that? Visual Explanations from Deep Networks via
            Gradient-based Localization (https://arxiv.org/pdf/1610.02391v1.pdf)

    Warnings:
        Please note that there is a discussion that Guided Backpropagation is not working well as
        model explanations.

        * Sanity Checks for Saliency Maps (https://arxiv.org/pdf/1810.03292.pdf)
        * Guided Grad-CAM is Broken! Sanity Checks for Saliency Maps
            (https://glassboxmedicine.com/2019/10/12/guided-grad-cam-is-broken-sanity-checks-for-saliency-maps/)
    """
    # NOTE(review): mutable default list — it is never mutated here, but all
    # instances constructed with the default share the same list object.
    def __init__(self, target_activations=[keras.activations.relu]) -> None:
        """
        Args:
            target_activations: Activation functions whose gradients should be replaced
                by the guided gradient. Defaults to `[keras.activations.relu]`.
        """
        self.target_activations = target_activations

    def _get_guided_activation(self, activation):
        # Wrap `activation` so that, on the backward pass, gradients only flow where
        # both the incoming gradient and the forward input are positive.
        @tf.custom_gradient
        def guided_activation(x):
            def grad(dy):
                # Zero out positions with a negative input or negative upstream gradient.
                return tf.cast(dy > 0, dy.dtype) * tf.cast(x > 0, dy.dtype) * dy

            return activation(x), grad

        return guided_activation

    def __call__(self, model) -> None:
        # Replace, in place, the activation of every layer whose activation is targeted.
        for layer in (layer for layer in model.layers if hasattr(layer, "activation")):
            if layer.activation in self.target_activations:
                layer.activation = self._get_guided_activation(layer.activation)
115 |
116 |
class ExtractIntermediateLayerForGradcam(ModelModifier):
    """A model modifier that rebuilds `model` so its outputs include the output tensor of a
    penultimate (typically convolutional) layer, as needed by the Gradcam family."""
    def __init__(self, penultimate_layer=None, seek_conv_layer=True, include_model_outputs=True):
        # penultimate_layer: a keras.layers.Layer instance, an int index, a str layer
        #   name, or None (treated as the last layer, index -1).
        # seek_conv_layer: when True, search backward from the resolved layer for the
        #   nearest `Conv` layer.
        # include_model_outputs: when True, the original model outputs are kept and the
        #   penultimate output is appended after them.
        self.penultimate_layer = penultimate_layer
        self.seek_conv_layer = seek_conv_layer
        self.include_model_outputs = include_model_outputs

    def __call__(self, model):
        # Resolve `penultimate_layer` into an actual Layer instance.
        _layer = self.penultimate_layer
        if not isinstance(_layer, keras.layers.Layer):
            if _layer is None:
                _layer = -1  # default: the last layer
            if isinstance(_layer, int) and _layer < len(model.layers):
                _layer = model.layers[_layer]
            elif isinstance(_layer, str):
                # NOTE: the lambda reads `_layer` while it is still the name string;
                # find_layer evaluates it before this reassignment takes effect.
                _layer = find_layer(model, lambda _l: _l.name == _layer)
            else:
                # Reached when _layer is an out-of-range int or an unsupported type.
                raise ValueError(f"Invalid argument. `penultimate_layer`={self.penultimate_layer}")
        if _layer is not None and self.seek_conv_layer:
            # Seek the nearest Conv layer at or before the resolved layer.
            _layer = find_layer(model, lambda _l: isinstance(_l, Conv), offset=_layer)
        if _layer is None:
            raise ValueError("Unable to determine penultimate `Conv` layer. "
                             f"`penultimate_layer`={self.penultimate_layer}")
        penultimate_output = _layer.output
        # Gradcam needs (samples, spatial..., channels); reject rank-2 or lower tensors.
        if len(penultimate_output.shape) < 3:
            raise ValueError(
                "Penultimate layer's output tensor MUST have "
                f"samples, spaces and channels dimensions. [{penultimate_output.shape}]")
        outputs = [penultimate_output]
        if self.include_model_outputs:
            outputs = model.outputs + outputs
        return keras.Model(inputs=model.inputs, outputs=outputs)
148 |
--------------------------------------------------------------------------------
/tf_keras_vis/utils/regularizers.py:
--------------------------------------------------------------------------------
1 | import warnings
2 | from abc import ABC, abstractmethod
3 |
4 | import numpy as np
5 | import tensorflow as tf
6 | from deprecated import deprecated
7 |
8 | from .. import keras
9 |
# Emit a module-level deprecation warning as soon as this legacy module is imported.
warnings.warn(('`tf_keras_vis.utils.regularizers` module is deprecated. '
               'Please use `tf_keras_vis.activation_maximization.regularizers` instead.'),
              DeprecationWarning)
13 |
14 |
@deprecated(version='0.7.0',
            reason="Please use `tf_keras_vis.activation_maximization.regularizers.Regularizer`"
            " class instead of this.")
class LegacyRegularizer(ABC):
    """Deprecated abstract base class for activation-maximization regularizers."""
    def __init__(self, name):
        # Human-readable identifier used by callers to label this regularizer.
        self.name = name

    @abstractmethod
    def __call__(self, inputs):
        raise NotImplementedError()
25 |
26 |
27 | Regularizer = LegacyRegularizer
28 |
29 |
@deprecated(version='0.7.0',
            reason="The class has a bug that the calculated value is incorrect (too small) "
            "when the `batch_size` is greater than one. So please use "
            "`tf_keras_vis.activation_maximization.regularizers.TotalVariation2D`"
            " class instead of this.")
class TotalVariation2D(LegacyRegularizer):
    """Deprecated Total Variation regularizer.

    Known bug, deliberately preserved for backward compatibility: the total variation is
    divided by ``np.prod(X.shape)``, which includes the batch dimension, so the result is
    too small when the batch size is greater than one.
    """
    def __init__(self, weight=10., name='TotalVariation2D'):
        super().__init__(name)
        # Scaling factor applied to the accumulated total variation.
        self.weight = weight

    def __call__(self, overall_inputs):
        tv = 0.
        for X in overall_inputs:
            # Buggy normalization (see class docstring): divides by the full shape product.
            tv += tf.image.total_variation(X) / np.prod(X.shape)
        return self.weight * tv
45 |
46 |
@deprecated(
    version='0.6.0',
    reason="Please use `tf_keras_vis.activation_maximization.regularizers.TotalVariation2D`"
    " class instead of this.")
class TotalVariation(TotalVariation2D):
    """Deprecated alias of TotalVariation2D kept only for backward compatibility."""
    def __init__(self, weight=10.):
        super().__init__(weight=weight, name='TotalVariation')  # pragma: no cover
54 |
55 |
@deprecated(version='0.7.0',
            reason="The class has a bug that the calculated value is incorrect (too small). "
            "So please use `tf_keras_vis.activation_maximization.regularizers.Norm`"
            " class instead of this.")
class Norm(LegacyRegularizer):
    """Deprecated p-norm regularizer.

    Known bug, deliberately preserved for backward compatibility: the per-sample norm is
    divided by the flattened feature count ``X.shape[1]``, which makes the calculated
    value too small (see the deprecation reason above).
    """
    def __init__(self, weight=10., p=2, name='Norm'):
        super().__init__(name)
        # Scaling factor applied to the accumulated norm.
        self.weight = weight
        # Order of the norm (e.g. 2 for the L2 norm).
        self.p = p

    def __call__(self, overall_inputs):
        norm = 0.
        for X in overall_inputs:
            # Flatten each sample to a vector, then take the p-norm per sample.
            X = tf.reshape(X, (X.shape[0], -1))
            norm += tf.norm(X, ord=self.p, axis=-1) / X.shape[1]
        return self.weight * norm
72 |
73 |
@deprecated(version='0.6.0',
            reason="Please use `tf_keras_vis.activation_maximization.regularizers.Norm`"
            " class instead of this.")
class L2Norm(Norm):
    """Deprecated alias of Norm with p=2, kept only for backward compatibility."""
    def __init__(self, weight=10.):
        super().__init__(weight=weight, p=2, name='L2Norm')  # pragma: no cover
80 |
--------------------------------------------------------------------------------
/tf_keras_vis/utils/scores.py:
--------------------------------------------------------------------------------
1 | from abc import ABC, abstractmethod
2 | from typing import Union
3 |
4 | import tensorflow as tf
5 |
6 | from . import listify
7 |
8 |
class Score(ABC):
    """Base class of all score functions consumed by the visualization modules."""
    def __init__(self, name=None) -> None:
        """
        Args:
            name: Instance name. Defaults to None.
        """
        self.name = name

    @abstractmethod
    def __call__(self, output) -> Union[tf.Tensor, list, tuple]:
        """Collect the score values used by the visualization modules.

        Args:
            output: A tf.Tensor that indicates a model output value.

        Raises:
            NotImplementedError: Subclasses must implement this method.

        Returns:
            Score values.
        """
        raise NotImplementedError()
33 |
34 |
class InactiveScore(Score):
    """A score function that deactivate model output passed to `__call__()`.

    With a multiple output model, you can use this
    if you want a output to be excluded from targets of calculating gradients.
    """
    def __init__(self) -> None:
        super().__init__('InactiveScore')

    def __call__(self, output) -> tf.Tensor:
        # Multiplying by zero keeps the tensor in the graph while nullifying it.
        return output * 0.0
46 |
47 |
class BinaryScore(Score):
    """A score function that collects the scores from model output
    which is for binary classification.
    """
    def __init__(self, target_values) -> None:
        """
        Args:
            target_values: A bool or a list of them.

        Raises:
            ValueError: When target_values is None or an empty list.
        """
        super().__init__('BinaryScore')
        self.target_values = listify(target_values, return_empty_list_if_none=False)
        if None in self.target_values:
            raise ValueError(f"Can't accept None value. target_values: {target_values}")
        # Coerce every entry to a plain bool before validating the count.
        self.target_values = list(map(bool, self.target_values))
        if len(self.target_values) == 0:
            raise ValueError(f"target_values is required. target_values: {target_values}")

    def __call__(self, output) -> tf.Tensor:
        # Accept only a (batch_size, 1) output tensor.
        if output.ndim != 2 or output.shape[1] != 1:
            raise ValueError(f"`output` shape must be (batch_size, 1), but was {output.shape}")
        flattened = tf.reshape(output, (-1,))
        values = self.target_values
        batch_size = flattened.shape[0]
        # Broadcast a single target value across the whole batch.
        if len(values) == 1 and len(values) < batch_size:
            values = values * batch_size
        # Map True -> +1 and False -> -1, then weight the raw outputs accordingly.
        signs = 2 * tf.constant(values, dtype=flattened.dtype) - 1.0
        return signs * flattened
76 |
77 |
class CategoricalScore(Score):
    """A score function that collects the scores from model output
    which is for categorical classification.
    """
    def __init__(self, indices) -> None:
        """
        Args:
            indices: An integer or a list of them.

        Raises:
            ValueError: When indices is None or an empty list.
        """
        super().__init__('CategoricalScore')
        self.indices = listify(indices, return_empty_list_if_none=False)
        if None in self.indices:
            raise ValueError(f"Can't accept None. indices: {indices}")
        if len(self.indices) == 0:
            raise ValueError(f"`indices` is required. indices: {indices}")

    def __call__(self, output) -> tf.Tensor:
        if output.ndim < 2:
            raise ValueError("`output` ndim must be 2 or more (batch_size, ..., channels), "
                             f"but was {output.ndim}")
        if output.shape[-1] <= max(self.indices):
            raise ValueError(
                f"Invalid index value. indices: {self.indices}, output.shape: {output.shape}")
        indices = self.indices
        # Broadcast a single index across the whole batch.
        if len(indices) == 1 and len(indices) < output.shape[0]:
            indices = indices * output.shape[0]
        # Pick the target channel for each sample, then average over any spatial dims.
        picked = tf.stack([output[i, ..., index] for i, index in enumerate(indices)], axis=0)
        return tf.math.reduce_mean(picked, axis=tuple(range(1, picked.ndim)))
111 |
--------------------------------------------------------------------------------
/tf_keras_vis/utils/test.py:
--------------------------------------------------------------------------------
1 | from contextlib import contextmanager
2 |
3 | import numpy as np
4 | import pytest
5 | import tensorflow as tf
6 |
7 | from .. import keras
8 | from ..activation_maximization.callbacks import Callback
9 |
10 |
def mock_dense_model():
    """Build a tiny dense softmax classifier used as a test fixture."""
    model_input = keras.layers.Input((8,), name='input_1')
    hidden = keras.layers.Dense(6, activation='relu', name='dense_1')(model_input)
    logits = keras.layers.Dense(2, name='dense_2')(hidden)
    probs = keras.layers.Activation('softmax', dtype=tf.float32, name='output_1')(logits)
    return keras.models.Model(inputs=model_input, outputs=probs)
17 |
18 |
def mock_conv_model_with_sigmoid_output():
    """Build a tiny convolutional binary classifier (sigmoid head) as a test fixture."""
    image = keras.layers.Input((8, 8, 3), name='input_1')
    features = keras.layers.Conv2D(6, 3, activation='relu', name='conv_1')(image)
    pooled = keras.layers.GlobalAveragePooling2D()(features)
    logit = keras.layers.Dense(1, name='dense_1')(pooled)
    prob = keras.layers.Activation('sigmoid', dtype=tf.float32, name='output_1')(logit)
    return keras.models.Model(inputs=image, outputs=prob)
26 |
27 |
def mock_conv_model():
    """Build a tiny convolutional softmax classifier as a test fixture."""
    image = keras.layers.Input((8, 8, 3), name='input_1')
    features = keras.layers.Conv2D(6, 3, activation='relu', name='conv_1')(image)
    pooled = keras.layers.GlobalAveragePooling2D()(features)
    logits = keras.layers.Dense(2, name='dense_1')(pooled)
    probs = keras.layers.Activation('softmax', dtype=tf.float32, name='output_1')(logits)
    return keras.models.Model(inputs=image, outputs=probs)
35 |
36 |
def mock_multiple_inputs_model():
    """Build a two-input convolutional softmax classifier as a test fixture."""
    image_a = keras.layers.Input((8, 8, 3), name='input_1')
    image_b = keras.layers.Input((10, 10, 3), name='input_2')
    # 'same' padding keeps 8x8; the valid conv reduces 10x10 to 8x8 so both match.
    features_a = keras.layers.Conv2D(6, 3, padding='same', activation='relu',
                                     name='conv_1')(image_a)
    features_b = keras.layers.Conv2D(6, 3, activation='relu', name='conv_2')(image_b)
    merged = keras.layers.Concatenate(axis=-1)([features_a, features_b])
    pooled = keras.layers.GlobalAveragePooling2D()(merged)
    logits = keras.layers.Dense(2, name='dense_1')(pooled)
    probs = keras.layers.Activation('softmax', dtype=tf.float32, name='output_1')(logits)
    return keras.models.Model(inputs=[image_a, image_b], outputs=probs)
47 |
48 |
def mock_multiple_outputs_model():
    """Build a single-input model with softmax and sigmoid heads as a test fixture."""
    image = keras.layers.Input((8, 8, 3), name='input_1')
    features = keras.layers.Conv2D(6, 3, activation='relu', name='conv_1')(image)
    pooled = keras.layers.GlobalAveragePooling2D()(features)
    softmax_head = keras.layers.Dense(2, name='dense_1')(pooled)
    sigmoid_head = keras.layers.Dense(1, name='dense_2')(pooled)
    softmax_head = keras.layers.Activation('softmax', dtype=tf.float32,
                                           name='output_1')(softmax_head)
    sigmoid_head = keras.layers.Activation('sigmoid', dtype=tf.float32,
                                           name='output_2')(sigmoid_head)
    return keras.models.Model(inputs=image, outputs=[softmax_head, sigmoid_head])
58 |
59 |
def mock_multiple_io_model():
    """Build a two-input model with softmax and sigmoid heads as a test fixture."""
    image_a = keras.layers.Input((8, 8, 3), name='input_1')
    image_b = keras.layers.Input((10, 10, 3), name='input_2')
    features_a = keras.layers.Conv2D(6, 3, padding='same', activation='relu',
                                     name='conv_1')(image_a)
    features_b = keras.layers.Conv2D(6, 3, activation='relu', name='conv_2')(image_b)
    merged = keras.layers.Concatenate(axis=-1)([features_a, features_b])
    pooled = keras.layers.GlobalAveragePooling2D()(merged)
    softmax_head = keras.layers.Dense(2, name='dense_1')(pooled)
    sigmoid_head = keras.layers.Dense(1, name='dense_2')(pooled)
    softmax_head = keras.layers.Activation('softmax', dtype=tf.float32,
                                           name='output_1')(softmax_head)
    sigmoid_head = keras.layers.Activation('sigmoid', dtype=tf.float32,
                                           name='output_2')(sigmoid_head)
    return keras.models.Model(inputs=[image_a, image_b], outputs=[softmax_head, sigmoid_head])
72 |
73 |
def mock_conv_model_with_float32_output():
    """Build a conv classifier whose final Dense layer runs in float32 with a built-in
    softmax (no separate Activation layer)."""
    image = keras.layers.Input((8, 8, 3), name='input_1')
    features = keras.layers.Conv2D(6, 3, activation='relu', name='conv_1')(image)
    pooled = keras.layers.GlobalAveragePooling2D()(features)
    probs = keras.layers.Dense(2, dtype=tf.float32, activation='softmax', name='dense_1')(pooled)
    return keras.models.Model(inputs=image, outputs=probs)
80 |
81 |
def dummy_sample(shape, dtype=np.float32):
    """Create a deterministic sample array for tests.

    Args:
        shape: Desired array shape (tuple of ints, or a single int).
        dtype: Element type of the returned array. Defaults to np.float32.

    Returns:
        An ndarray of the given shape whose elements are 0, 1, 2, ... in row-major order.
    """
    # np.arange replaces the original np.array(list(range(n))) round-trip through
    # a Python list; the resulting values, shape, and dtype are identical.
    return np.arange(np.prod(shape)).reshape(shape).astype(dtype)
88 |
89 |
def score_with_tensor(output):
    # Score-function fixture: selects the first channel of a batched output tensor.
    return output[:, 0]
92 |
93 |
def score_with_tuple(output):
    """Score-function fixture: first element of each model output, as a tuple."""
    return tuple([entry[0] for entry in output])
96 |
97 |
def score_with_list(output):
    """Score-function fixture: first element of each model output, as a list."""
    return [entry[0] for entry in output]
100 |
101 |
# Sentinel meaning "this test case is expected to raise no exception".
NO_ERROR = None


@contextmanager
def _does_not_raise():
    # No-op context manager used when no exception is expected.
    yield


def assert_raises(e):
    """Return a context manager expecting exception type `e`, or a no-op for NO_ERROR."""
    if e is not NO_ERROR:
        return pytest.raises(e)
    return _does_not_raise()
115 |
116 |
class MockCallback(Callback):
    """Callback test double that records which hooks were invoked and can be
    configured to raise ValueError from any hook."""
    def __init__(self,
                 raise_error_on_begin=False,
                 raise_error_on_call=False,
                 raise_error_on_end=False):
        # Flags flipped to True when the corresponding hook runs.
        self.on_begin_was_called = False
        self.on_call_was_called = False
        self.on_end_was_called = False
        # When True, the corresponding hook raises ValueError after recording the call.
        self.raise_error_on_begin = raise_error_on_begin
        self.raise_error_on_call = raise_error_on_call
        self.raise_error_on_end = raise_error_on_end

    def on_begin(self, **kwargs):
        self.on_begin_was_called = True
        # Keep the received kwargs so tests can inspect what the caller passed.
        self.kwargs = kwargs
        if self.raise_error_on_begin:
            raise ValueError('Test')

    def __call__(self, *args, **kwargs):
        self.on_call_was_called = True
        self.args = args
        self.kwargs = kwargs
        if self.raise_error_on_call:
            raise ValueError('Test')

    def on_end(self):
        self.on_end_was_called = True
        if self.raise_error_on_end:
            raise ValueError('Test')
146 |
147 |
class MockLegacyCallback(Callback):
    """Adapter that forwards new-style callback hooks to a wrapped legacy callback."""
    def __init__(self, callback):
        # The legacy callback instance being wrapped.
        self.callback = callback

    def on_begin(self):
        # NOTE: forwards without kwargs; the legacy on_begin takes no arguments.
        self.callback.on_begin()

    def __call__(self, *args, **kwargs):
        self.callback(*args, **kwargs)

    def on_end(self):
        self.callback.on_end()
160 |
--------------------------------------------------------------------------------