├── .gitignore ├── LICENSE ├── README.md ├── construct_viz.py ├── nn_viz_00.py ├── nn_viz_01.py ├── nn_viz_02.py ├── nn_viz_03.py ├── nn_viz_04.py ├── nn_viz_05.py ├── nn_viz_06.py ├── nn_viz_07.py ├── nn_viz_08.py ├── nn_viz_09.py ├── nn_viz_10.py ├── nn_viz_11.py ├── nn_viz_12.py ├── nn_viz_13.py ├── nn_viz_14.py ├── nn_viz_15.py ├── nn_viz_16.py ├── nn_viz_17.py ├── nn_viz_18.py ├── nn_viz_19.py ├── nn_viz_20.py ├── nn_viz_21.py ├── nn_viz_22.py ├── nn_viz_23.py ├── nn_viz_24.py ├── nn_viz_25.py ├── nn_viz_26.py ├── nn_viz_27.py ├── nn_viz_28.py ├── nn_viz_29.py ├── nn_viz_30.py ├── nn_viz_31.py ├── nn_viz_32.py └── nn_viz_33.py /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | db.sqlite3 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # Environments 85 | .env 86 | .venv 87 | env/ 88 | venv/ 89 | ENV/ 90 | env.bak/ 91 | venv.bak/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | .spyproject 96 | 97 | # Rope project settings 98 | .ropeproject 99 | 100 | # mkdocs documentation 101 | /site 102 | 103 | # mypy 104 | .mypy_cache/ 105 | 106 | *.png 107 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Brandon Rohrer 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Autoencoder Visualization 2 | 3 | Custom visualization of a deep autoencoder neural network using Matplotlib. 4 | 5 | For a step-by-step tour of this code, with tutorial and explanation, visit 6 | [the Neural Network Visualization course](https://end-to-end-machine-learning.teachable.com/p/neural-network-visualization/) 7 | at the [End to End Machine Learning online school](https://end-to-end-machine-learning.teachable.com/). 8 | -------------------------------------------------------------------------------- /nn_viz_00.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate a autoencoder neural network visualization 3 | """ 4 | 5 | 6 | def main(): 7 | print("All the visualization code goes here") 8 | 9 | 10 | if __name__ == "__main__": 11 | main() 12 | -------------------------------------------------------------------------------- /nn_viz_01.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate a autoencoder neural network visualization 3 | """ 4 | 5 | # Changing these adjusts the size and layout of the visualization 6 | FIGURE_WIDTH = 16 7 | FIGURE_HEIGHT = 9 8 | RIGHT_BORDER = 0.7 9 | LEFT_BORDER = 0.7 10 | TOP_BORDER = 0.8 11 | BOTTOM_BORDER = 0.6 12 | 13 | N_IMAGE_PIXEL_COLS = 64 14 | N_IMAGE_PIXEL_ROWS = 48 15 | N_NODES_BY_LAYER = [10, 7, 5, 8] 16 | 17 | INPUT_IMAGE_BOTTOM = 5 18 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 19 | ERROR_IMAGE_SCALE = 0.7 20 | ERROR_GAP_SCALE = 0.3 21 | BETWEEN_LAYER_SCALE = 0.8 22 | BETWEEN_NODE_SCALE = 0.4 23 | 24 | 25 | def main(): 26 | print(f"Node images are {N_IMAGE_PIXEL_ROWS}" 27 | + f" by {N_IMAGE_PIXEL_COLS} pixels") 28 | 29 | 30 | if __name__ == "__main__": 31 | main() 32 | -------------------------------------------------------------------------------- /nn_viz_02.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | 5 | # Changing these adjusts the size and layout of the visualization 6 | FIGURE_WIDTH = 16 7 | FIGURE_HEIGHT = 9 8 | RIGHT_BORDER = 0.7 9 | LEFT_BORDER = 0.7 10 | TOP_BORDER = 0.8 11 | BOTTOM_BORDER = 0.6 12 | 13 | N_IMAGE_PIXEL_COLS = 64 14 | N_IMAGE_PIXEL_ROWS = 48 15 | N_NODES_BY_LAYER = [10, 7, 5, 8] 16 | 17 | INPUT_IMAGE_BOTTOM = 5 18 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 19 | ERROR_IMAGE_SCALE = 0.7 20 | ERROR_GAP_SCALE = 0.3 21 | BETWEEN_LAYER_SCALE = 0.8 22 | BETWEEN_NODE_SCALE = 0.4 23 | 24 | 25 | def main(): 26 | """ 27 | Build a visualization of an image autoencoder neural network, 28 | piece by piece. 29 | 30 | A central data structure in this example is the collection of parameters 31 | that define how the image is laid out. It is a set of nested dictionaries. 32 | """ 33 | p = construct_parameters() 34 | print("parameters:") 35 | print(p) 36 | 37 | 38 | def construct_parameters(): 39 | """ 40 | Build a dictionary of parameters that describe the size and location 41 | of the elements of the visualization. 
This is a convenient way to pass 42 | the collection of them around . 43 | """ 44 | parameters = {} 45 | 46 | # The figure as a whole 47 | parameters["figure"] = { 48 | "height": FIGURE_HEIGHT, 49 | "width": FIGURE_WIDTH, 50 | } 51 | 52 | return parameters 53 | 54 | 55 | if __name__ == "__main__": 56 | main() 57 | -------------------------------------------------------------------------------- /nn_viz_03.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | 5 | # Changing these adjusts the size and layout of the visualization 6 | FIGURE_WIDTH = 16 7 | FIGURE_HEIGHT = 9 8 | RIGHT_BORDER = 0.7 9 | LEFT_BORDER = 0.7 10 | TOP_BORDER = 0.8 11 | BOTTOM_BORDER = 0.6 12 | 13 | N_IMAGE_PIXEL_COLS = 64 14 | N_IMAGE_PIXEL_ROWS = 48 15 | N_NODES_BY_LAYER = [10, 7, 5, 8] 16 | 17 | INPUT_IMAGE_BOTTOM = 5 18 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 19 | ERROR_IMAGE_SCALE = 0.7 20 | ERROR_GAP_SCALE = 0.3 21 | BETWEEN_LAYER_SCALE = 0.8 22 | BETWEEN_NODE_SCALE = 0.4 23 | 24 | 25 | def main(): 26 | """ 27 | Build a visualization of an image autoencoder neural network, 28 | piece by piece. 29 | 30 | A central data structure in this example is the collection of parameters 31 | that define how the image is laid out. It is a set of nested dictionaries. 32 | """ 33 | p = construct_parameters() 34 | print("parameters:") 35 | for key, value in p.items(): 36 | print(key, ":", value) 37 | 38 | 39 | def construct_parameters(): 40 | """ 41 | Build a dictionary of parameters that describe the size and location 42 | of the elements of the visualization. This is a convenient way to pass 43 | the collection of them around . 44 | """ 45 | # Enforce square pixels. Each pixel will have the same height and width. 46 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 47 | 48 | parameters = {} 49 | 50 | # The figure as a whole 51 | parameters["figure"] = { 52 | "height": FIGURE_HEIGHT, 53 | "width": FIGURE_WIDTH, 54 | } 55 | 56 | # The input and output images 57 | parameters["input"] = { 58 | "n_cols": N_IMAGE_PIXEL_COLS, 59 | "n_rows": N_IMAGE_PIXEL_ROWS, 60 | "aspect_ratio": aspect_ratio, 61 | "image": { 62 | "bottom": INPUT_IMAGE_BOTTOM, 63 | "height": INPUT_IMAGE_HEIGHT, 64 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 65 | } 66 | } 67 | 68 | return parameters 69 | 70 | 71 | if __name__ == "__main__": 72 | main() 73 | -------------------------------------------------------------------------------- /nn_viz_04.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | import numpy as np 5 | 6 | # Changing these adjusts the size and layout of the visualization 7 | FIGURE_WIDTH = 16 8 | FIGURE_HEIGHT = 9 9 | RIGHT_BORDER = 0.7 10 | LEFT_BORDER = 0.7 11 | TOP_BORDER = 0.8 12 | BOTTOM_BORDER = 0.6 13 | 14 | N_IMAGE_PIXEL_COLS = 64 15 | N_IMAGE_PIXEL_ROWS = 48 16 | N_NODES_BY_LAYER = [10, 7, 5, 8] 17 | 18 | INPUT_IMAGE_BOTTOM = 5 19 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 20 | ERROR_IMAGE_SCALE = 0.7 21 | ERROR_GAP_SCALE = 0.3 22 | BETWEEN_LAYER_SCALE = 0.8 23 | BETWEEN_NODE_SCALE = 0.4 24 | 25 | 26 | def main(): 27 | """ 28 | Build a visualization of an image autoencoder neural network, 29 | piece by piece. 30 | 31 | A central data structure in this example is the collection of parameters 32 | that define how the image is laid out. It is a set of nested dictionaries. 
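    For example, parameters["figure"]["width"] holds the width of the whole
    figure, and parameters["input"]["image"]["height"] holds the displayed
    height of the input image.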
33 | """ 34 | p = construct_parameters() 35 | print("parameters:") 36 | for key, value in p.items(): 37 | print(key, ":", value) 38 | 39 | 40 | def construct_parameters(): 41 | """ 42 | Build a dictionary of parameters that describe the size and location 43 | of the elements of the visualization. This is a convenient way to pass 44 | the collection of them around . 45 | """ 46 | # Enforce square pixels. Each pixel will have the same height and width. 47 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 48 | 49 | parameters = {} 50 | 51 | # The figure as a whole 52 | parameters["figure"] = { 53 | "height": FIGURE_HEIGHT, 54 | "width": FIGURE_WIDTH, 55 | } 56 | 57 | # The input and output images 58 | parameters["input"] = { 59 | "n_cols": N_IMAGE_PIXEL_COLS, 60 | "n_rows": N_IMAGE_PIXEL_ROWS, 61 | "aspect_ratio": aspect_ratio, 62 | "image": { 63 | "bottom": INPUT_IMAGE_BOTTOM, 64 | "height": INPUT_IMAGE_HEIGHT, 65 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 66 | } 67 | } 68 | 69 | # The network as a whole 70 | parameters["network"] = { 71 | "n_nodes": N_NODES_BY_LAYER, 72 | "n_layers": len(N_NODES_BY_LAYER), 73 | "max_nodes": np.max(N_NODES_BY_LAYER), 74 | } 75 | 76 | # Individual node images 77 | parameters["node_image"] = { 78 | "height": 0, 79 | "width": 0, 80 | } 81 | 82 | return parameters 83 | 84 | 85 | if __name__ == "__main__": 86 | main() 87 | -------------------------------------------------------------------------------- /nn_viz_05.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | import numpy as np 5 | 6 | # Changing these adjusts the size and layout of the visualization 7 | FIGURE_WIDTH = 16 8 | FIGURE_HEIGHT = 9 9 | RIGHT_BORDER = 0.7 10 | LEFT_BORDER = 0.7 11 | TOP_BORDER = 0.8 12 | BOTTOM_BORDER = 0.6 13 | 14 | N_IMAGE_PIXEL_COLS = 64 15 | N_IMAGE_PIXEL_ROWS = 48 16 | N_NODES_BY_LAYER = [10, 7, 5, 8] 17 | 18 | INPUT_IMAGE_BOTTOM = 5 19 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 20 | ERROR_IMAGE_SCALE = 0.7 21 | ERROR_GAP_SCALE = 0.3 22 | BETWEEN_LAYER_SCALE = 0.8 23 | BETWEEN_NODE_SCALE = 0.4 24 | 25 | 26 | def main(): 27 | """ 28 | Build a visualization of an image autoencoder neural network, 29 | piece by piece. 30 | 31 | A central data structure in this example is the collection of parameters 32 | that define how the image is laid out. It is a set of nested dictionaries. 33 | """ 34 | p = construct_parameters() 35 | print("parameters:") 36 | for key, value in p.items(): 37 | print(key, ":", value) 38 | 39 | 40 | def construct_parameters(): 41 | """ 42 | Build a dictionary of parameters that describe the size and location 43 | of the elements of the visualization. This is a convenient way to pass 44 | the collection of them around . 45 | """ 46 | # Enforce square pixels. Each pixel will have the same height and width. 
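    # With the default 64-by-48 pixel node images, aspect_ratio works out to
    # 64 / 48 = 4 / 3, so each image is drawn one third wider than it is tall.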
47 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 48 | 49 | parameters = {} 50 | 51 | # The figure as a whole 52 | parameters["figure"] = { 53 | "height": FIGURE_HEIGHT, 54 | "width": FIGURE_WIDTH, 55 | } 56 | 57 | # The input and output images 58 | parameters["input"] = { 59 | "n_cols": N_IMAGE_PIXEL_COLS, 60 | "n_rows": N_IMAGE_PIXEL_ROWS, 61 | "aspect_ratio": aspect_ratio, 62 | "image": { 63 | "bottom": INPUT_IMAGE_BOTTOM, 64 | "height": INPUT_IMAGE_HEIGHT, 65 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 66 | } 67 | } 68 | 69 | # The network as a whole 70 | parameters["network"] = { 71 | "n_nodes": N_NODES_BY_LAYER, 72 | "n_layers": len(N_NODES_BY_LAYER), 73 | "max_nodes": np.max(N_NODES_BY_LAYER), 74 | } 75 | 76 | # Individual node images 77 | parameters["node_image"] = { 78 | "height": 0, 79 | "width": 0, 80 | } 81 | 82 | parameters["error_image"] = { 83 | "left": 0, 84 | "bottom": 0, 85 | "width": parameters["input"]["image"]["width"] * ERROR_IMAGE_SCALE, 86 | "height": parameters["input"]["image"]["height"] * ERROR_IMAGE_SCALE, 87 | } 88 | 89 | parameters["gap"] = { 90 | "right_border": RIGHT_BORDER, 91 | "left_border": LEFT_BORDER, 92 | "bottom_border": BOTTOM_BORDER, 93 | "top_border": TOP_BORDER, 94 | "between_layer": 0, 95 | "between_layer_scale": BETWEEN_LAYER_SCALE, 96 | "between_node": 0, 97 | "between_node_scale": BETWEEN_NODE_SCALE, 98 | "error_gap_scale": ERROR_GAP_SCALE, 99 | } 100 | 101 | return parameters 102 | 103 | 104 | if __name__ == "__main__": 105 | main() 106 | -------------------------------------------------------------------------------- /nn_viz_06.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | import matplotlib 5 | matplotlib.use("agg") 6 | import matplotlib.pyplot as plt # noqa: E402 7 | import numpy as np # noqa: E402 8 | 9 | # Changing these adjusts the size and layout of the visualization 10 | FIGURE_WIDTH = 16 11 | FIGURE_HEIGHT = 9 12 | RIGHT_BORDER = 0.7 13 | LEFT_BORDER = 0.7 14 | TOP_BORDER = 0.8 15 | BOTTOM_BORDER = 0.6 16 | 17 | N_IMAGE_PIXEL_COLS = 64 18 | N_IMAGE_PIXEL_ROWS = 48 19 | N_NODES_BY_LAYER = [10, 7, 5, 8] 20 | 21 | INPUT_IMAGE_BOTTOM = 5 22 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 23 | ERROR_IMAGE_SCALE = 0.7 24 | ERROR_GAP_SCALE = 0.3 25 | BETWEEN_LAYER_SCALE = 0.8 26 | BETWEEN_NODE_SCALE = 0.4 27 | 28 | 29 | def main(): 30 | """ 31 | Build a visualization of an image autoencoder neural network, 32 | piece by piece. 33 | 34 | A central data structure in this example is the collection of parameters 35 | that define how the image is laid out. It is a set of nested dictionaries. 36 | """ 37 | p = construct_parameters() 38 | fig = create_background(p) 39 | save_nn_viz(fig, postfix="06_empty") 40 | 41 | 42 | def construct_parameters(): 43 | """ 44 | Build a dictionary of parameters that describe the size and location 45 | of the elements of the visualization. This is a convenient way to pass 46 | the collection of them around . 47 | """ 48 | # Enforce square pixels. Each pixel will have the same height and width. 
49 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 50 | 51 | parameters = {} 52 | 53 | # The figure as a whole 54 | parameters["figure"] = { 55 | "height": FIGURE_HEIGHT, 56 | "width": FIGURE_WIDTH, 57 | } 58 | 59 | # The input and output images 60 | parameters["input"] = { 61 | "n_cols": N_IMAGE_PIXEL_COLS, 62 | "n_rows": N_IMAGE_PIXEL_ROWS, 63 | "aspect_ratio": aspect_ratio, 64 | "image": { 65 | "bottom": INPUT_IMAGE_BOTTOM, 66 | "height": INPUT_IMAGE_HEIGHT, 67 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 68 | } 69 | } 70 | 71 | # The network as a whole 72 | parameters["network"] = { 73 | "n_nodes": N_NODES_BY_LAYER, 74 | "n_layers": len(N_NODES_BY_LAYER), 75 | "max_nodes": np.max(N_NODES_BY_LAYER), 76 | } 77 | 78 | # Individual node images 79 | parameters["node_image"] = { 80 | "height": 0, 81 | "width": 0, 82 | } 83 | 84 | parameters["error_image"] = { 85 | "left": 0, 86 | "bottom": 0, 87 | "width": parameters["input"]["image"]["width"] * ERROR_IMAGE_SCALE, 88 | "height": parameters["input"]["image"]["height"] * ERROR_IMAGE_SCALE, 89 | } 90 | 91 | parameters["gap"] = { 92 | "right_border": RIGHT_BORDER, 93 | "left_border": LEFT_BORDER, 94 | "bottom_border": BOTTOM_BORDER, 95 | "top_border": TOP_BORDER, 96 | "between_layer": 0, 97 | "between_layer_scale": BETWEEN_LAYER_SCALE, 98 | "between_node": 0, 99 | "between_node_scale": BETWEEN_NODE_SCALE, 100 | "error_gap_scale": ERROR_GAP_SCALE, 101 | } 102 | 103 | return parameters 104 | 105 | 106 | def create_background(p): 107 | fig = plt.figure( 108 | figsize=(p["figure"]["width"], p["figure"]["height"]), 109 | ) 110 | return fig 111 | 112 | 113 | def save_nn_viz(fig, postfix="0"): 114 | """ 115 | Generate a new filename for each step of the process. 116 | """ 117 | base_name = "nn_viz_" 118 | filename = base_name + postfix + ".png" 119 | fig.savefig(filename) 120 | 121 | 122 | if __name__ == "__main__": 123 | main() 124 | -------------------------------------------------------------------------------- /nn_viz_07.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | import matplotlib 5 | matplotlib.use("agg") 6 | import matplotlib.pyplot as plt # noqa: E402 7 | import numpy as np # noqa: E402 8 | 9 | # Choose a color palette 10 | BLUE = "#04253a" 11 | GREEN = "#4c837a" 12 | TAN = "#e1ddbf" 13 | 14 | # Changing these adjusts the size and layout of the visualization 15 | FIGURE_WIDTH = 16 16 | FIGURE_HEIGHT = 9 17 | RIGHT_BORDER = 0.7 18 | LEFT_BORDER = 0.7 19 | TOP_BORDER = 0.8 20 | BOTTOM_BORDER = 0.6 21 | 22 | N_IMAGE_PIXEL_COLS = 64 23 | N_IMAGE_PIXEL_ROWS = 48 24 | N_NODES_BY_LAYER = [10, 7, 5, 8] 25 | 26 | INPUT_IMAGE_BOTTOM = 5 27 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 28 | ERROR_IMAGE_SCALE = 0.7 29 | ERROR_GAP_SCALE = 0.3 30 | BETWEEN_LAYER_SCALE = 0.8 31 | BETWEEN_NODE_SCALE = 0.4 32 | 33 | 34 | def main(): 35 | """ 36 | Build a visualization of an image autoencoder neural network, 37 | piece by piece. 38 | 39 | A central data structure in this example is the collection of parameters 40 | that define how the image is laid out. It is a set of nested dictionaries. 41 | """ 42 | p = construct_parameters() 43 | fig = create_background(p) 44 | save_nn_viz(fig, postfix="07_background") 45 | 46 | 47 | def construct_parameters(): 48 | """ 49 | Build a dictionary of parameters that describe the size and location 50 | of the elements of the visualization. 
This is a convenient way to pass 51 | the collection of them around . 52 | """ 53 | # Enforce square pixels. Each pixel will have the same height and width. 54 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 55 | 56 | parameters = {} 57 | 58 | # The figure as a whole 59 | parameters["figure"] = { 60 | "height": FIGURE_HEIGHT, 61 | "width": FIGURE_WIDTH, 62 | } 63 | 64 | # The input and output images 65 | parameters["input"] = { 66 | "n_cols": N_IMAGE_PIXEL_COLS, 67 | "n_rows": N_IMAGE_PIXEL_ROWS, 68 | "aspect_ratio": aspect_ratio, 69 | "image": { 70 | "bottom": INPUT_IMAGE_BOTTOM, 71 | "height": INPUT_IMAGE_HEIGHT, 72 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 73 | } 74 | } 75 | 76 | # The network as a whole 77 | parameters["network"] = { 78 | "n_nodes": N_NODES_BY_LAYER, 79 | "n_layers": len(N_NODES_BY_LAYER), 80 | "max_nodes": np.max(N_NODES_BY_LAYER), 81 | } 82 | 83 | # Individual node images 84 | parameters["node_image"] = { 85 | "height": 0, 86 | "width": 0, 87 | } 88 | 89 | parameters["error_image"] = { 90 | "left": 0, 91 | "bottom": 0, 92 | "width": parameters["input"]["image"]["width"] * ERROR_IMAGE_SCALE, 93 | "height": parameters["input"]["image"]["height"] * ERROR_IMAGE_SCALE, 94 | } 95 | 96 | parameters["gap"] = { 97 | "right_border": RIGHT_BORDER, 98 | "left_border": LEFT_BORDER, 99 | "bottom_border": BOTTOM_BORDER, 100 | "top_border": TOP_BORDER, 101 | "between_layer": 0, 102 | "between_layer_scale": BETWEEN_LAYER_SCALE, 103 | "between_node": 0, 104 | "between_node_scale": BETWEEN_NODE_SCALE, 105 | "error_gap_scale": ERROR_GAP_SCALE, 106 | } 107 | 108 | return parameters 109 | 110 | 111 | def create_background(p): 112 | fig = plt.figure( 113 | edgecolor=TAN, 114 | facecolor=GREEN, 115 | figsize=(p["figure"]["width"], p["figure"]["height"]), 116 | linewidth=4, 117 | ) 118 | return fig 119 | 120 | 121 | def save_nn_viz(fig, postfix="0"): 122 | """ 123 | Generate a new filename for each step of the process. 124 | """ 125 | base_name = "nn_viz_" 126 | filename = base_name + postfix + ".png" 127 | fig.savefig( 128 | filename, 129 | edgecolor=fig.get_edgecolor(), 130 | facecolor=fig.get_facecolor(), 131 | ) 132 | 133 | 134 | if __name__ == "__main__": 135 | main() 136 | -------------------------------------------------------------------------------- /nn_viz_08.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | import matplotlib 5 | matplotlib.use("agg") 6 | import matplotlib.pyplot as plt # noqa: E402 7 | import numpy as np # noqa: E402 8 | 9 | # Choose a color palette 10 | BLUE = "#04253a" 11 | GREEN = "#4c837a" 12 | TAN = "#e1ddbf" 13 | DPI = 300 14 | 15 | # Changing these adjusts the size and layout of the visualization 16 | FIGURE_WIDTH = 16 17 | FIGURE_HEIGHT = 9 18 | RIGHT_BORDER = 0.7 19 | LEFT_BORDER = 0.7 20 | TOP_BORDER = 0.8 21 | BOTTOM_BORDER = 0.6 22 | 23 | N_IMAGE_PIXEL_COLS = 64 24 | N_IMAGE_PIXEL_ROWS = 48 25 | N_NODES_BY_LAYER = [10, 7, 5, 8] 26 | 27 | INPUT_IMAGE_BOTTOM = 5 28 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 29 | ERROR_IMAGE_SCALE = 0.7 30 | ERROR_GAP_SCALE = 0.3 31 | BETWEEN_LAYER_SCALE = 0.8 32 | BETWEEN_NODE_SCALE = 0.4 33 | 34 | 35 | def main(): 36 | """ 37 | Build a visualization of an image autoencoder neural network, 38 | piece by piece. 39 | 40 | A central data structure in this example is the collection of parameters 41 | that define how the image is laid out. It is a set of nested dictionaries. 
42 | """ 43 | p = construct_parameters() 44 | fig, ax_boss = create_background(p) 45 | save_nn_viz(fig, postfix="08_hi_res") 46 | 47 | 48 | def construct_parameters(): 49 | """ 50 | Build a dictionary of parameters that describe the size and location 51 | of the elements of the visualization. This is a convenient way to pass 52 | the collection of them around . 53 | """ 54 | # Enforce square pixels. Each pixel will have the same height and width. 55 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 56 | 57 | parameters = {} 58 | 59 | # The figure as a whole 60 | parameters["figure"] = { 61 | "height": FIGURE_HEIGHT, 62 | "width": FIGURE_WIDTH, 63 | } 64 | 65 | # The input and output images 66 | parameters["input"] = { 67 | "n_cols": N_IMAGE_PIXEL_COLS, 68 | "n_rows": N_IMAGE_PIXEL_ROWS, 69 | "aspect_ratio": aspect_ratio, 70 | "image": { 71 | "bottom": INPUT_IMAGE_BOTTOM, 72 | "height": INPUT_IMAGE_HEIGHT, 73 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 74 | } 75 | } 76 | 77 | # The network as a whole 78 | parameters["network"] = { 79 | "n_nodes": N_NODES_BY_LAYER, 80 | "n_layers": len(N_NODES_BY_LAYER), 81 | "max_nodes": np.max(N_NODES_BY_LAYER), 82 | } 83 | 84 | # Individual node images 85 | parameters["node_image"] = { 86 | "height": 0, 87 | "width": 0, 88 | } 89 | 90 | parameters["error_image"] = { 91 | "left": 0, 92 | "bottom": 0, 93 | "width": parameters["input"]["image"]["width"] * ERROR_IMAGE_SCALE, 94 | "height": parameters["input"]["image"]["height"] * ERROR_IMAGE_SCALE, 95 | } 96 | 97 | parameters["gap"] = { 98 | "right_border": RIGHT_BORDER, 99 | "left_border": LEFT_BORDER, 100 | "bottom_border": BOTTOM_BORDER, 101 | "top_border": TOP_BORDER, 102 | "between_layer": 0, 103 | "between_layer_scale": BETWEEN_LAYER_SCALE, 104 | "between_node": 0, 105 | "between_node_scale": BETWEEN_NODE_SCALE, 106 | "error_gap_scale": ERROR_GAP_SCALE, 107 | } 108 | 109 | return parameters 110 | 111 | 112 | def create_background(p): 113 | fig = plt.figure( 114 | edgecolor=TAN, 115 | facecolor=GREEN, 116 | figsize=(p["figure"]["width"], p["figure"]["height"]), 117 | linewidth=4, 118 | ) 119 | ax_boss = fig.add_axes((0, 0, 1, 1), facecolor="none") 120 | ax_boss.set_xlim(0, 1) 121 | ax_boss.set_ylim(0, 1) 122 | return fig, ax_boss 123 | 124 | 125 | def save_nn_viz(fig, postfix="0"): 126 | """ 127 | Generate a new filename for each step of the process. 
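    For example, a postfix of "08_hi_res" saves the figure as
    nn_viz_08_hi_res.png.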
128 | """ 129 | base_name = "nn_viz_" 130 | filename = base_name + postfix + ".png" 131 | fig.savefig( 132 | filename, 133 | edgecolor=fig.get_edgecolor(), 134 | facecolor=fig.get_facecolor(), 135 | dpi=DPI, 136 | ) 137 | 138 | 139 | if __name__ == "__main__": 140 | main() 141 | -------------------------------------------------------------------------------- /nn_viz_09.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | import matplotlib 5 | matplotlib.use("agg") 6 | import matplotlib.pyplot as plt # noqa: E402 7 | import numpy as np # noqa: E402 8 | 9 | # Choose a color palette 10 | BLUE = "#04253a" 11 | GREEN = "#4c837a" 12 | TAN = "#e1ddbf" 13 | DPI = 300 14 | 15 | # Changing these adjusts the size and layout of the visualization 16 | FIGURE_WIDTH = 16 17 | FIGURE_HEIGHT = 9 18 | RIGHT_BORDER = 0.7 19 | LEFT_BORDER = 0.7 20 | TOP_BORDER = 0.8 21 | BOTTOM_BORDER = 0.6 22 | 23 | N_IMAGE_PIXEL_COLS = 64 24 | N_IMAGE_PIXEL_ROWS = 48 25 | N_NODES_BY_LAYER = [10, 7, 5, 8] 26 | 27 | INPUT_IMAGE_BOTTOM = 5 28 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 29 | ERROR_IMAGE_SCALE = 0.7 30 | ERROR_GAP_SCALE = 0.3 31 | BETWEEN_LAYER_SCALE = 0.8 32 | BETWEEN_NODE_SCALE = 0.4 33 | 34 | 35 | def main(): 36 | """ 37 | Build a visualization of an image autoencoder neural network, 38 | piece by piece. 39 | 40 | A central data structure in this example is the collection of parameters 41 | that define how the image is laid out. It is a set of nested dictionaries. 42 | """ 43 | p = construct_parameters() 44 | fig, ax_boss = create_background(p) 45 | 46 | p = find_node_image_size(p) 47 | 48 | 49 | def construct_parameters(): 50 | """ 51 | Build a dictionary of parameters that describe the size and location 52 | of the elements of the visualization. This is a convenient way to pass 53 | the collection of them around . 54 | """ 55 | # Enforce square pixels. Each pixel will have the same height and width. 
56 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 57 | 58 | parameters = {} 59 | 60 | # The figure as a whole 61 | parameters["figure"] = { 62 | "height": FIGURE_HEIGHT, 63 | "width": FIGURE_WIDTH, 64 | } 65 | 66 | # The input and output images 67 | parameters["input"] = { 68 | "n_cols": N_IMAGE_PIXEL_COLS, 69 | "n_rows": N_IMAGE_PIXEL_ROWS, 70 | "aspect_ratio": aspect_ratio, 71 | "image": { 72 | "bottom": INPUT_IMAGE_BOTTOM, 73 | "height": INPUT_IMAGE_HEIGHT, 74 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 75 | } 76 | } 77 | 78 | # The network as a whole 79 | parameters["network"] = { 80 | "n_nodes": N_NODES_BY_LAYER, 81 | "n_layers": len(N_NODES_BY_LAYER), 82 | "max_nodes": np.max(N_NODES_BY_LAYER), 83 | } 84 | 85 | # Individual node images 86 | parameters["node_image"] = { 87 | "height": 0, 88 | "width": 0, 89 | } 90 | 91 | parameters["error_image"] = { 92 | "left": 0, 93 | "bottom": 0, 94 | "width": parameters["input"]["image"]["width"] * ERROR_IMAGE_SCALE, 95 | "height": parameters["input"]["image"]["height"] * ERROR_IMAGE_SCALE, 96 | } 97 | 98 | parameters["gap"] = { 99 | "right_border": RIGHT_BORDER, 100 | "left_border": LEFT_BORDER, 101 | "bottom_border": BOTTOM_BORDER, 102 | "top_border": TOP_BORDER, 103 | "between_layer": 0, 104 | "between_layer_scale": BETWEEN_LAYER_SCALE, 105 | "between_node": 0, 106 | "between_node_scale": BETWEEN_NODE_SCALE, 107 | "error_gap_scale": ERROR_GAP_SCALE, 108 | } 109 | 110 | return parameters 111 | 112 | 113 | def create_background(p): 114 | fig = plt.figure( 115 | edgecolor=TAN, 116 | facecolor=GREEN, 117 | figsize=(p["figure"]["width"], p["figure"]["height"]), 118 | linewidth=4, 119 | ) 120 | ax_boss = fig.add_axes((0, 0, 1, 1), facecolor="none") 121 | ax_boss.set_xlim(0, 1) 122 | ax_boss.set_ylim(0, 1) 123 | return fig, ax_boss 124 | 125 | 126 | def find_node_image_size(p): 127 | """ 128 | What should the height and width of each node image be? 129 | As big as possible, given the constraints. 130 | There are two possible constraints: 131 | 1. Fill the figure top-to-bottom. 132 | 2. Fill the figure side-to-side. 133 | To determine which of these limits the size of the node images, 134 | we'll calculate the image size assuming each constraint separately, 135 | then respect the one that results in the smaller node image. 136 | """ 137 | # First assume height is the limiting factor. 138 | total_space_to_fill = ( 139 | p["figure"]["height"] 140 | - p["gap"]["bottom_border"] 141 | - p["gap"]["top_border"] 142 | ) 143 | # Use the layer with the largest number of nodes (n_max). 144 | # Pack the images and the gaps as tight as possible. 145 | # In that case, if the image height is h, 146 | # the gaps will each be h * p["gap"]["between_node_scale"]. 147 | # There will be n_max nodes and (n_max - 1) gaps. 148 | # After a wee bit of algebra: 149 | height_constrained_by_height = ( 150 | total_space_to_fill / ( 151 | p["network"]["max_nodes"] 152 | + (p["network"]["max_nodes"] - 1) 153 | * p["gap"]["between_node_scale"] 154 | ) 155 | ) 156 | print("height constrained by height:", height_constrained_by_height) 157 | return p 158 | 159 | 160 | def save_nn_viz(fig, postfix="0"): 161 | """ 162 | Generate a new filename for each step of the process. 
163 | """ 164 | base_name = "nn_viz_" 165 | filename = base_name + postfix + ".png" 166 | fig.savefig( 167 | filename, 168 | edgecolor=fig.get_edgecolor(), 169 | facecolor=fig.get_facecolor(), 170 | dpi=DPI, 171 | ) 172 | 173 | 174 | if __name__ == "__main__": 175 | main() 176 | -------------------------------------------------------------------------------- /nn_viz_10.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | import matplotlib 5 | matplotlib.use("agg") 6 | import matplotlib.pyplot as plt # noqa: E402 7 | import numpy as np # noqa: E402 8 | 9 | # Choose a color palette 10 | BLUE = "#04253a" 11 | GREEN = "#4c837a" 12 | TAN = "#e1ddbf" 13 | DPI = 300 14 | 15 | # Changing these adjusts the size and layout of the visualization 16 | FIGURE_WIDTH = 16 17 | FIGURE_HEIGHT = 9 18 | RIGHT_BORDER = 0.7 19 | LEFT_BORDER = 0.7 20 | TOP_BORDER = 0.8 21 | BOTTOM_BORDER = 0.6 22 | 23 | N_IMAGE_PIXEL_COLS = 64 24 | N_IMAGE_PIXEL_ROWS = 48 25 | N_NODES_BY_LAYER = [10, 7, 5, 8] 26 | 27 | INPUT_IMAGE_BOTTOM = 5 28 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 29 | ERROR_IMAGE_SCALE = 0.7 30 | ERROR_GAP_SCALE = 0.3 31 | BETWEEN_LAYER_SCALE = 0.8 32 | BETWEEN_NODE_SCALE = 0.4 33 | 34 | 35 | def main(): 36 | """ 37 | Build a visualization of an image autoencoder neural network, 38 | piece by piece. 39 | 40 | A central data structure in this example is the collection of parameters 41 | that define how the image is laid out. It is a set of nested dictionaries. 42 | """ 43 | p = construct_parameters() 44 | fig, ax_boss = create_background(p) 45 | 46 | p = find_node_image_size(p) 47 | 48 | 49 | def construct_parameters(): 50 | """ 51 | Build a dictionary of parameters that describe the size and location 52 | of the elements of the visualization. This is a convenient way to pass 53 | the collection of them around . 54 | """ 55 | # Enforce square pixels. Each pixel will have the same height and width. 
56 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 57 | 58 | parameters = {} 59 | 60 | # The figure as a whole 61 | parameters["figure"] = { 62 | "height": FIGURE_HEIGHT, 63 | "width": FIGURE_WIDTH, 64 | } 65 | 66 | # The input and output images 67 | parameters["input"] = { 68 | "n_cols": N_IMAGE_PIXEL_COLS, 69 | "n_rows": N_IMAGE_PIXEL_ROWS, 70 | "aspect_ratio": aspect_ratio, 71 | "image": { 72 | "bottom": INPUT_IMAGE_BOTTOM, 73 | "height": INPUT_IMAGE_HEIGHT, 74 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 75 | } 76 | } 77 | 78 | # The network as a whole 79 | parameters["network"] = { 80 | "n_nodes": N_NODES_BY_LAYER, 81 | "n_layers": len(N_NODES_BY_LAYER), 82 | "max_nodes": np.max(N_NODES_BY_LAYER), 83 | } 84 | 85 | # Individual node images 86 | parameters["node_image"] = { 87 | "height": 0, 88 | "width": 0, 89 | } 90 | 91 | parameters["error_image"] = { 92 | "left": 0, 93 | "bottom": 0, 94 | "width": parameters["input"]["image"]["width"] * ERROR_IMAGE_SCALE, 95 | "height": parameters["input"]["image"]["height"] * ERROR_IMAGE_SCALE, 96 | } 97 | 98 | parameters["gap"] = { 99 | "right_border": RIGHT_BORDER, 100 | "left_border": LEFT_BORDER, 101 | "bottom_border": BOTTOM_BORDER, 102 | "top_border": TOP_BORDER, 103 | "between_layer": 0, 104 | "between_layer_scale": BETWEEN_LAYER_SCALE, 105 | "between_node": 0, 106 | "between_node_scale": BETWEEN_NODE_SCALE, 107 | "error_gap_scale": ERROR_GAP_SCALE, 108 | } 109 | 110 | return parameters 111 | 112 | 113 | def create_background(p): 114 | fig = plt.figure( 115 | edgecolor=TAN, 116 | facecolor=GREEN, 117 | figsize=(p["figure"]["width"], p["figure"]["height"]), 118 | linewidth=4, 119 | ) 120 | ax_boss = fig.add_axes((0, 0, 1, 1), facecolor="none") 121 | ax_boss.set_xlim(0, 1) 122 | ax_boss.set_ylim(0, 1) 123 | return fig, ax_boss 124 | 125 | 126 | def find_node_image_size(p): 127 | """ 128 | What should the height and width of each node image be? 129 | As big as possible, given the constraints. 130 | There are two possible constraints: 131 | 1. Fill the figure top-to-bottom. 132 | 2. Fill the figure side-to-side. 133 | To determine which of these limits the size of the node images, 134 | we'll calculate the image size assuming each constraint separately, 135 | then respect the one that results in the smaller node image. 136 | """ 137 | # First assume height is the limiting factor. 138 | total_space_to_fill = ( 139 | p["figure"]["height"] 140 | - p["gap"]["bottom_border"] 141 | - p["gap"]["top_border"] 142 | ) 143 | # Use the layer with the largest number of nodes (n_max). 144 | # Pack the images and the gaps as tight as possible. 145 | # In that case, if the image height is h, 146 | # the gaps will each be h * p["gap"]["between_node_scale"]. 147 | # There will be n_max nodes and (n_max - 1) gaps. 148 | # After a wee bit of algebra: 149 | height_constrained_by_height = ( 150 | total_space_to_fill / ( 151 | p["network"]["max_nodes"] 152 | + (p["network"]["max_nodes"] - 1) 153 | * p["gap"]["between_node_scale"] 154 | ) 155 | ) 156 | 157 | # Second assume width is the limiting factor. 158 | total_space_to_fill = ( 159 | p["figure"]["width"] 160 | - p["gap"]["left_border"] 161 | - p["gap"]["right_border"] 162 | - 2 * p["input"]["image"]["width"] 163 | ) 164 | # Again, pack the images as tightly as possible side-to-side. 165 | # In this case, if the image width is w, 166 | # the gaps will each be w * p["gap"]["between_layer_scale"]. 167 | # There will be n_layer nodes and (n_layer + 1) gaps. 
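    # Written as an equation, with w for the node image width:
    #   total_space_to_fill = n_layers * w
    #                         + (n_layers + 1) * w * between_layer_scale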
168 | # After another tidbit of algebra: 169 | width_constrained_by_width = ( 170 | total_space_to_fill / ( 171 | p["network"]["n_layers"] 172 | + (p["network"]["n_layers"] + 1) 173 | * p["gap"]["between_layer_scale"] 174 | ) 175 | ) 176 | 177 | # Figure out what the height would be for this width. 178 | height_constrained_by_width = ( 179 | width_constrained_by_width 180 | / p["input"]["aspect_ratio"] 181 | ) 182 | 183 | print("height constrained by width:", height_constrained_by_width) 184 | return p 185 | 186 | 187 | def save_nn_viz(fig, postfix="0"): 188 | """ 189 | Generate a new filename for each step of the process. 190 | """ 191 | base_name = "nn_viz_" 192 | filename = base_name + postfix + ".png" 193 | fig.savefig( 194 | filename, 195 | edgecolor=fig.get_edgecolor(), 196 | facecolor=fig.get_facecolor(), 197 | dpi=DPI, 198 | ) 199 | 200 | 201 | if __name__ == "__main__": 202 | main() 203 | -------------------------------------------------------------------------------- /nn_viz_11.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | import matplotlib 5 | matplotlib.use("agg") 6 | import matplotlib.pyplot as plt # noqa: E402 7 | import numpy as np # noqa: E402 8 | 9 | # Choose a color palette 10 | BLUE = "#04253a" 11 | GREEN = "#4c837a" 12 | TAN = "#e1ddbf" 13 | DPI = 300 14 | 15 | # Changing these adjusts the size and layout of the visualization 16 | FIGURE_WIDTH = 16 17 | FIGURE_HEIGHT = 9 18 | RIGHT_BORDER = 0.7 19 | LEFT_BORDER = 0.7 20 | TOP_BORDER = 0.8 21 | BOTTOM_BORDER = 0.6 22 | 23 | N_IMAGE_PIXEL_COLS = 64 24 | N_IMAGE_PIXEL_ROWS = 48 25 | N_NODES_BY_LAYER = [10, 7, 5, 8] 26 | 27 | INPUT_IMAGE_BOTTOM = 5 28 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 29 | ERROR_IMAGE_SCALE = 0.7 30 | ERROR_GAP_SCALE = 0.3 31 | BETWEEN_LAYER_SCALE = 0.8 32 | BETWEEN_NODE_SCALE = 0.4 33 | 34 | 35 | def main(): 36 | """ 37 | Build a visualization of an image autoencoder neural network, 38 | piece by piece. 39 | 40 | A central data structure in this example is the collection of parameters 41 | that define how the image is laid out. It is a set of nested dictionaries. 42 | """ 43 | p = construct_parameters() 44 | fig, ax_boss = create_background(p) 45 | 46 | p = find_node_image_size(p) 47 | print("node image dimensions", p["node_image"]) 48 | 49 | 50 | def construct_parameters(): 51 | """ 52 | Build a dictionary of parameters that describe the size and location 53 | of the elements of the visualization. This is a convenient way to pass 54 | the collection of them around . 55 | """ 56 | # Enforce square pixels. Each pixel will have the same height and width. 
57 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 58 | 59 | parameters = {} 60 | 61 | # The figure as a whole 62 | parameters["figure"] = { 63 | "height": FIGURE_HEIGHT, 64 | "width": FIGURE_WIDTH, 65 | } 66 | 67 | # The input and output images 68 | parameters["input"] = { 69 | "n_cols": N_IMAGE_PIXEL_COLS, 70 | "n_rows": N_IMAGE_PIXEL_ROWS, 71 | "aspect_ratio": aspect_ratio, 72 | "image": { 73 | "bottom": INPUT_IMAGE_BOTTOM, 74 | "height": INPUT_IMAGE_HEIGHT, 75 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 76 | } 77 | } 78 | 79 | # The network as a whole 80 | parameters["network"] = { 81 | "n_nodes": N_NODES_BY_LAYER, 82 | "n_layers": len(N_NODES_BY_LAYER), 83 | "max_nodes": np.max(N_NODES_BY_LAYER), 84 | } 85 | 86 | # Individual node images 87 | parameters["node_image"] = { 88 | "height": 0, 89 | "width": 0, 90 | } 91 | 92 | parameters["error_image"] = { 93 | "left": 0, 94 | "bottom": 0, 95 | "width": parameters["input"]["image"]["width"] * ERROR_IMAGE_SCALE, 96 | "height": parameters["input"]["image"]["height"] * ERROR_IMAGE_SCALE, 97 | } 98 | 99 | parameters["gap"] = { 100 | "right_border": RIGHT_BORDER, 101 | "left_border": LEFT_BORDER, 102 | "bottom_border": BOTTOM_BORDER, 103 | "top_border": TOP_BORDER, 104 | "between_layer": 0, 105 | "between_layer_scale": BETWEEN_LAYER_SCALE, 106 | "between_node": 0, 107 | "between_node_scale": BETWEEN_NODE_SCALE, 108 | "error_gap_scale": ERROR_GAP_SCALE, 109 | } 110 | 111 | return parameters 112 | 113 | 114 | def create_background(p): 115 | fig = plt.figure( 116 | edgecolor=TAN, 117 | facecolor=GREEN, 118 | figsize=(p["figure"]["width"], p["figure"]["height"]), 119 | linewidth=4, 120 | ) 121 | ax_boss = fig.add_axes((0, 0, 1, 1), facecolor="none") 122 | ax_boss.set_xlim(0, 1) 123 | ax_boss.set_ylim(0, 1) 124 | return fig, ax_boss 125 | 126 | 127 | def find_node_image_size(p): 128 | """ 129 | What should the height and width of each node image be? 130 | As big as possible, given the constraints. 131 | There are two possible constraints: 132 | 1. Fill the figure top-to-bottom. 133 | 2. Fill the figure side-to-side. 134 | To determine which of these limits the size of the node images, 135 | we'll calculate the image size assuming each constraint separately, 136 | then respect the one that results in the smaller node image. 137 | """ 138 | # First assume height is the limiting factor. 139 | total_space_to_fill = ( 140 | p["figure"]["height"] 141 | - p["gap"]["bottom_border"] 142 | - p["gap"]["top_border"] 143 | ) 144 | # Use the layer with the largest number of nodes (n_max). 145 | # Pack the images and the gaps as tight as possible. 146 | # In that case, if the image height is h, 147 | # the gaps will each be h * p["gap"]["between_node_scale"]. 148 | # There will be n_max nodes and (n_max - 1) gaps. 149 | # After a wee bit of algebra: 150 | height_constrained_by_height = ( 151 | total_space_to_fill / ( 152 | p["network"]["max_nodes"] 153 | + (p["network"]["max_nodes"] - 1) 154 | * p["gap"]["between_node_scale"] 155 | ) 156 | ) 157 | 158 | # Second assume width is the limiting factor. 159 | total_space_to_fill = ( 160 | p["figure"]["width"] 161 | - p["gap"]["left_border"] 162 | - p["gap"]["right_border"] 163 | - 2 * p["input"]["image"]["width"] 164 | ) 165 | # Again, pack the images as tightly as possible side-to-side. 166 | # In this case, if the image width is w, 167 | # the gaps will each be w * p["gap"]["between_layer_scale"]. 168 | # There will be n_layer nodes and (n_layer + 1) gaps. 
169 | # After another tidbit of algebra: 170 | width_constrained_by_width = ( 171 | total_space_to_fill / ( 172 | p["network"]["n_layers"] 173 | + (p["network"]["n_layers"] + 1) 174 | * p["gap"]["between_layer_scale"] 175 | ) 176 | ) 177 | 178 | # Figure out what the height would be for this width. 179 | height_constrained_by_width = ( 180 | width_constrained_by_width 181 | / p["input"]["aspect_ratio"] 182 | ) 183 | 184 | # See which constraint is more restrictive, and go with that one. 185 | p["node_image"]["height"] = np.minimum( 186 | height_constrained_by_width, 187 | height_constrained_by_height) 188 | p["node_image"]["width"] = ( 189 | p["node_image"]["height"] 190 | * p["input"]["aspect_ratio"] 191 | ) 192 | return p 193 | 194 | 195 | def save_nn_viz(fig, postfix="0"): 196 | """ 197 | Generate a new filename for each step of the process. 198 | """ 199 | base_name = "nn_viz_" 200 | filename = base_name + postfix + ".png" 201 | fig.savefig( 202 | filename, 203 | edgecolor=fig.get_edgecolor(), 204 | facecolor=fig.get_facecolor(), 205 | dpi=DPI, 206 | ) 207 | 208 | 209 | if __name__ == "__main__": 210 | main() 211 | -------------------------------------------------------------------------------- /nn_viz_12.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | import matplotlib 5 | matplotlib.use("agg") 6 | import matplotlib.pyplot as plt # noqa: E402 7 | import numpy as np # noqa: E402 8 | 9 | # Choose a color palette 10 | BLUE = "#04253a" 11 | GREEN = "#4c837a" 12 | TAN = "#e1ddbf" 13 | DPI = 300 14 | 15 | # Changing these adjusts the size and layout of the visualization 16 | FIGURE_WIDTH = 16 17 | FIGURE_HEIGHT = 9 18 | RIGHT_BORDER = 0.7 19 | LEFT_BORDER = 0.7 20 | TOP_BORDER = 0.8 21 | BOTTOM_BORDER = 0.6 22 | 23 | N_IMAGE_PIXEL_COLS = 64 24 | N_IMAGE_PIXEL_ROWS = 48 25 | N_NODES_BY_LAYER = [10, 7, 5, 8] 26 | 27 | INPUT_IMAGE_BOTTOM = 5 28 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 29 | ERROR_IMAGE_SCALE = 0.7 30 | ERROR_GAP_SCALE = 0.3 31 | BETWEEN_LAYER_SCALE = 0.8 32 | BETWEEN_NODE_SCALE = 0.4 33 | 34 | 35 | def main(): 36 | """ 37 | Build a visualization of an image autoencoder neural network, 38 | piece by piece. 39 | 40 | A central data structure in this example is the collection of parameters 41 | that define how the image is laid out. It is a set of nested dictionaries. 42 | """ 43 | p = construct_parameters() 44 | fig, ax_boss = create_background(p) 45 | 46 | p = find_node_image_size(p) 47 | p = find_between_layer_gap(p) 48 | print("between layer gap:", p["gap"]["between_layer"]) 49 | 50 | 51 | def construct_parameters(): 52 | """ 53 | Build a dictionary of parameters that describe the size and location 54 | of the elements of the visualization. This is a convenient way to pass 55 | the collection of them around . 56 | """ 57 | # Enforce square pixels. Each pixel will have the same height and width. 
58 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 59 | 60 | parameters = {} 61 | 62 | # The figure as a whole 63 | parameters["figure"] = { 64 | "height": FIGURE_HEIGHT, 65 | "width": FIGURE_WIDTH, 66 | } 67 | 68 | # The input and output images 69 | parameters["input"] = { 70 | "n_cols": N_IMAGE_PIXEL_COLS, 71 | "n_rows": N_IMAGE_PIXEL_ROWS, 72 | "aspect_ratio": aspect_ratio, 73 | "image": { 74 | "bottom": INPUT_IMAGE_BOTTOM, 75 | "height": INPUT_IMAGE_HEIGHT, 76 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 77 | } 78 | } 79 | 80 | # The network as a whole 81 | parameters["network"] = { 82 | "n_nodes": N_NODES_BY_LAYER, 83 | "n_layers": len(N_NODES_BY_LAYER), 84 | "max_nodes": np.max(N_NODES_BY_LAYER), 85 | } 86 | 87 | # Individual node images 88 | parameters["node_image"] = { 89 | "height": 0, 90 | "width": 0, 91 | } 92 | 93 | parameters["error_image"] = { 94 | "left": 0, 95 | "bottom": 0, 96 | "width": parameters["input"]["image"]["width"] * ERROR_IMAGE_SCALE, 97 | "height": parameters["input"]["image"]["height"] * ERROR_IMAGE_SCALE, 98 | } 99 | 100 | parameters["gap"] = { 101 | "right_border": RIGHT_BORDER, 102 | "left_border": LEFT_BORDER, 103 | "bottom_border": BOTTOM_BORDER, 104 | "top_border": TOP_BORDER, 105 | "between_layer": 0, 106 | "between_layer_scale": BETWEEN_LAYER_SCALE, 107 | "between_node": 0, 108 | "between_node_scale": BETWEEN_NODE_SCALE, 109 | "error_gap_scale": ERROR_GAP_SCALE, 110 | } 111 | 112 | return parameters 113 | 114 | 115 | def create_background(p): 116 | fig = plt.figure( 117 | edgecolor=TAN, 118 | facecolor=GREEN, 119 | figsize=(p["figure"]["width"], p["figure"]["height"]), 120 | linewidth=4, 121 | ) 122 | ax_boss = fig.add_axes((0, 0, 1, 1), facecolor="none") 123 | ax_boss.set_xlim(0, 1) 124 | ax_boss.set_ylim(0, 1) 125 | return fig, ax_boss 126 | 127 | 128 | def find_node_image_size(p): 129 | """ 130 | What should the height and width of each node image be? 131 | As big as possible, given the constraints. 132 | There are two possible constraints: 133 | 1. Fill the figure top-to-bottom. 134 | 2. Fill the figure side-to-side. 135 | To determine which of these limits the size of the node images, 136 | we'll calculate the image size assuming each constraint separately, 137 | then respect the one that results in the smaller node image. 138 | """ 139 | # First assume height is the limiting factor. 140 | total_space_to_fill = ( 141 | p["figure"]["height"] 142 | - p["gap"]["bottom_border"] 143 | - p["gap"]["top_border"] 144 | ) 145 | # Use the layer with the largest number of nodes (n_max). 146 | # Pack the images and the gaps as tight as possible. 147 | # In that case, if the image height is h, 148 | # the gaps will each be h * p["gap"]["between_node_scale"]. 149 | # There will be n_max nodes and (n_max - 1) gaps. 150 | # After a wee bit of algebra: 151 | height_constrained_by_height = ( 152 | total_space_to_fill / ( 153 | p["network"]["max_nodes"] 154 | + (p["network"]["max_nodes"] - 1) 155 | * p["gap"]["between_node_scale"] 156 | ) 157 | ) 158 | 159 | # Second assume width is the limiting factor. 160 | total_space_to_fill = ( 161 | p["figure"]["width"] 162 | - p["gap"]["left_border"] 163 | - p["gap"]["right_border"] 164 | - 2 * p["input"]["image"]["width"] 165 | ) 166 | # Again, pack the images as tightly as possible side-to-side. 167 | # In this case, if the image width is w, 168 | # the gaps will each be w * p["gap"]["between_layer_scale"]. 169 | # There will be n_layer nodes and (n_layer + 1) gaps. 
170 | # After another tidbit of algebra: 171 | width_constrained_by_width = ( 172 | total_space_to_fill / ( 173 | p["network"]["n_layers"] 174 | + (p["network"]["n_layers"] + 1) 175 | * p["gap"]["between_layer_scale"] 176 | ) 177 | ) 178 | 179 | # Figure out what the height would be for this width. 180 | height_constrained_by_width = ( 181 | width_constrained_by_width 182 | / p["input"]["aspect_ratio"] 183 | ) 184 | 185 | # See which constraint is more restrictive, and go with that one. 186 | p["node_image"]["height"] = np.minimum( 187 | height_constrained_by_width, 188 | height_constrained_by_height) 189 | p["node_image"]["width"] = ( 190 | p["node_image"]["height"] 191 | * p["input"]["aspect_ratio"] 192 | ) 193 | return p 194 | 195 | 196 | def find_between_layer_gap(p): 197 | """ 198 | How big is the horizontal spacing between_layers? 199 | This is also the spacing between the input image and the first layer 200 | and between the last layer and the output image. 201 | """ 202 | horizontal_gap_total = ( 203 | p["figure"]["width"] 204 | - 2 * p["input"]["image"]["width"] 205 | - p["network"]["n_layers"] * p["node_image"]["width"] 206 | - p["gap"]["left_border"] 207 | - p["gap"]["right_border"] 208 | ) 209 | n_horizontal_gaps = p["network"]["n_layers"] + 1 210 | p["gap"]["between_layer"] = horizontal_gap_total / n_horizontal_gaps 211 | return p 212 | 213 | 214 | def save_nn_viz(fig, postfix="0"): 215 | """ 216 | Generate a new filename for each step of the process. 217 | """ 218 | base_name = "nn_viz_" 219 | filename = base_name + postfix + ".png" 220 | fig.savefig( 221 | filename, 222 | edgecolor=fig.get_edgecolor(), 223 | facecolor=fig.get_facecolor(), 224 | dpi=DPI, 225 | ) 226 | 227 | 228 | if __name__ == "__main__": 229 | main() 230 | -------------------------------------------------------------------------------- /nn_viz_13.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | import matplotlib 5 | matplotlib.use("agg") 6 | import matplotlib.pyplot as plt # noqa: E402 7 | import numpy as np # noqa: E402 8 | 9 | # Choose a color palette 10 | BLUE = "#04253a" 11 | GREEN = "#4c837a" 12 | TAN = "#e1ddbf" 13 | DPI = 300 14 | 15 | # Changing these adjusts the size and layout of the visualization 16 | FIGURE_WIDTH = 16 17 | FIGURE_HEIGHT = 9 18 | RIGHT_BORDER = 0.7 19 | LEFT_BORDER = 0.7 20 | TOP_BORDER = 0.8 21 | BOTTOM_BORDER = 0.6 22 | 23 | N_IMAGE_PIXEL_COLS = 64 24 | N_IMAGE_PIXEL_ROWS = 48 25 | N_NODES_BY_LAYER = [10, 7, 5, 8] 26 | 27 | INPUT_IMAGE_BOTTOM = 5 28 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 29 | ERROR_IMAGE_SCALE = 0.7 30 | ERROR_GAP_SCALE = 0.3 31 | BETWEEN_LAYER_SCALE = 0.8 32 | BETWEEN_NODE_SCALE = 0.4 33 | 34 | 35 | def main(): 36 | """ 37 | Build a visualization of an image autoencoder neural network, 38 | piece by piece. 39 | 40 | A central data structure in this example is the collection of parameters 41 | that define how the image is laid out. It is a set of nested dictionaries. 42 | """ 43 | p = construct_parameters() 44 | fig, ax_boss = create_background(p) 45 | 46 | p = find_node_image_size(p) 47 | p = find_between_layer_gap(p) 48 | p = find_between_node_gap(p) 49 | print("between node gap:", p["gap"]["between_node"]) 50 | 51 | 52 | def construct_parameters(): 53 | """ 54 | Build a dictionary of parameters that describe the size and location 55 | of the elements of the visualization. This is a convenient way to pass 56 | the collection of them around . 
57 | """ 58 | # Enforce square pixels. Each pixel will have the same height and width. 59 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 60 | 61 | parameters = {} 62 | 63 | # The figure as a whole 64 | parameters["figure"] = { 65 | "height": FIGURE_HEIGHT, 66 | "width": FIGURE_WIDTH, 67 | } 68 | 69 | # The input and output images 70 | parameters["input"] = { 71 | "n_cols": N_IMAGE_PIXEL_COLS, 72 | "n_rows": N_IMAGE_PIXEL_ROWS, 73 | "aspect_ratio": aspect_ratio, 74 | "image": { 75 | "bottom": INPUT_IMAGE_BOTTOM, 76 | "height": INPUT_IMAGE_HEIGHT, 77 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 78 | } 79 | } 80 | 81 | # The network as a whole 82 | parameters["network"] = { 83 | "n_nodes": N_NODES_BY_LAYER, 84 | "n_layers": len(N_NODES_BY_LAYER), 85 | "max_nodes": np.max(N_NODES_BY_LAYER), 86 | } 87 | 88 | # Individual node images 89 | parameters["node_image"] = { 90 | "height": 0, 91 | "width": 0, 92 | } 93 | 94 | parameters["error_image"] = { 95 | "left": 0, 96 | "bottom": 0, 97 | "width": parameters["input"]["image"]["width"] * ERROR_IMAGE_SCALE, 98 | "height": parameters["input"]["image"]["height"] * ERROR_IMAGE_SCALE, 99 | } 100 | 101 | parameters["gap"] = { 102 | "right_border": RIGHT_BORDER, 103 | "left_border": LEFT_BORDER, 104 | "bottom_border": BOTTOM_BORDER, 105 | "top_border": TOP_BORDER, 106 | "between_layer": 0, 107 | "between_layer_scale": BETWEEN_LAYER_SCALE, 108 | "between_node": 0, 109 | "between_node_scale": BETWEEN_NODE_SCALE, 110 | "error_gap_scale": ERROR_GAP_SCALE, 111 | } 112 | 113 | return parameters 114 | 115 | 116 | def create_background(p): 117 | fig = plt.figure( 118 | edgecolor=TAN, 119 | facecolor=GREEN, 120 | figsize=(p["figure"]["width"], p["figure"]["height"]), 121 | linewidth=4, 122 | ) 123 | ax_boss = fig.add_axes((0, 0, 1, 1), facecolor="none") 124 | ax_boss.set_xlim(0, 1) 125 | ax_boss.set_ylim(0, 1) 126 | return fig, ax_boss 127 | 128 | 129 | def find_node_image_size(p): 130 | """ 131 | What should the height and width of each node image be? 132 | As big as possible, given the constraints. 133 | There are two possible constraints: 134 | 1. Fill the figure top-to-bottom. 135 | 2. Fill the figure side-to-side. 136 | To determine which of these limits the size of the node images, 137 | we'll calculate the image size assuming each constraint separately, 138 | then respect the one that results in the smaller node image. 139 | """ 140 | # First assume height is the limiting factor. 141 | total_space_to_fill = ( 142 | p["figure"]["height"] 143 | - p["gap"]["bottom_border"] 144 | - p["gap"]["top_border"] 145 | ) 146 | # Use the layer with the largest number of nodes (n_max). 147 | # Pack the images and the gaps as tight as possible. 148 | # In that case, if the image height is h, 149 | # the gaps will each be h * p["gap"]["between_node_scale"]. 150 | # There will be n_max nodes and (n_max - 1) gaps. 151 | # After a wee bit of algebra: 152 | height_constrained_by_height = ( 153 | total_space_to_fill / ( 154 | p["network"]["max_nodes"] 155 | + (p["network"]["max_nodes"] - 1) 156 | * p["gap"]["between_node_scale"] 157 | ) 158 | ) 159 | 160 | # Second assume width is the limiting factor. 161 | total_space_to_fill = ( 162 | p["figure"]["width"] 163 | - p["gap"]["left_border"] 164 | - p["gap"]["right_border"] 165 | - 2 * p["input"]["image"]["width"] 166 | ) 167 | # Again, pack the images as tightly as possible side-to-side. 168 | # In this case, if the image width is w, 169 | # the gaps will each be w * p["gap"]["between_layer_scale"]. 
170 | # There will be n_layer nodes and (n_layer + 1) gaps. 171 | # After another tidbit of algebra: 172 | width_constrained_by_width = ( 173 | total_space_to_fill / ( 174 | p["network"]["n_layers"] 175 | + (p["network"]["n_layers"] + 1) 176 | * p["gap"]["between_layer_scale"] 177 | ) 178 | ) 179 | 180 | # Figure out what the height would be for this width. 181 | height_constrained_by_width = ( 182 | width_constrained_by_width 183 | / p["input"]["aspect_ratio"] 184 | ) 185 | 186 | # See which constraint is more restrictive, and go with that one. 187 | p["node_image"]["height"] = np.minimum( 188 | height_constrained_by_width, 189 | height_constrained_by_height) 190 | p["node_image"]["width"] = ( 191 | p["node_image"]["height"] 192 | * p["input"]["aspect_ratio"] 193 | ) 194 | return p 195 | 196 | 197 | def find_between_layer_gap(p): 198 | """ 199 | How big is the horizontal spacing between_layers? 200 | This is also the spacing between the input image and the first layer 201 | and between the last layer and the output image. 202 | """ 203 | horizontal_gap_total = ( 204 | p["figure"]["width"] 205 | - 2 * p["input"]["image"]["width"] 206 | - p["network"]["n_layers"] * p["node_image"]["width"] 207 | - p["gap"]["left_border"] 208 | - p["gap"]["right_border"] 209 | ) 210 | n_horizontal_gaps = p["network"]["n_layers"] + 1 211 | p["gap"]["between_layer"] = horizontal_gap_total / n_horizontal_gaps 212 | return p 213 | 214 | 215 | def find_between_node_gap(p): 216 | """ 217 | How big is the vertical gap between_node images? 218 | """ 219 | vertical_gap_total = ( 220 | p["figure"]["height"] 221 | - p["gap"]["top_border"] 222 | - p["gap"]["bottom_border"] 223 | - p["network"]["max_nodes"] 224 | * p["node_image"]["height"] 225 | ) 226 | n_vertical_gaps = p["network"]["max_nodes"] - 1 227 | p["gap"]["between_node"] = vertical_gap_total / n_vertical_gaps 228 | return p 229 | 230 | 231 | def save_nn_viz(fig, postfix="0"): 232 | """ 233 | Generate a new filename for each step of the process. 234 | """ 235 | base_name = "nn_viz_" 236 | filename = base_name + postfix + ".png" 237 | fig.savefig( 238 | filename, 239 | edgecolor=fig.get_edgecolor(), 240 | facecolor=fig.get_facecolor(), 241 | dpi=DPI, 242 | ) 243 | 244 | 245 | if __name__ == "__main__": 246 | main() 247 | -------------------------------------------------------------------------------- /nn_viz_14.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | import matplotlib 5 | matplotlib.use("agg") 6 | import matplotlib.pyplot as plt # noqa: E402 7 | import numpy as np # noqa: E402 8 | 9 | # Choose a color palette 10 | BLUE = "#04253a" 11 | GREEN = "#4c837a" 12 | TAN = "#e1ddbf" 13 | DPI = 300 14 | 15 | # Changing these adjusts the size and layout of the visualization 16 | FIGURE_WIDTH = 16 17 | FIGURE_HEIGHT = 9 18 | RIGHT_BORDER = 0.7 19 | LEFT_BORDER = 0.7 20 | TOP_BORDER = 0.8 21 | BOTTOM_BORDER = 0.6 22 | 23 | N_IMAGE_PIXEL_COLS = 64 24 | N_IMAGE_PIXEL_ROWS = 48 25 | N_NODES_BY_LAYER = [10, 7, 5, 8] 26 | 27 | INPUT_IMAGE_BOTTOM = 5 28 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 29 | ERROR_IMAGE_SCALE = 0.7 30 | ERROR_GAP_SCALE = 0.3 31 | BETWEEN_LAYER_SCALE = 0.8 32 | BETWEEN_NODE_SCALE = 0.4 33 | 34 | 35 | def main(): 36 | """ 37 | Build a visualization of an image autoencoder neural network, 38 | piece by piece. 39 | 40 | A central data structure in this example is the collection of parameters 41 | that define how the image is laid out. 
It is a set of nested dictionaries. 42 | """ 43 | p = construct_parameters() 44 | fig, ax_boss = create_background(p) 45 | 46 | p = find_node_image_size(p) 47 | p = find_between_layer_gap(p) 48 | p = find_between_node_gap(p) 49 | p = find_error_image_position(p) 50 | print("error image position:", p["error_image"]) 51 | 52 | 53 | def construct_parameters(): 54 | """ 55 | Build a dictionary of parameters that describe the size and location 56 | of the elements of the visualization. This is a convenient way to pass 57 | the collection of them around . 58 | """ 59 | # Enforce square pixels. Each pixel will have the same height and width. 60 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 61 | 62 | parameters = {} 63 | 64 | # The figure as a whole 65 | parameters["figure"] = { 66 | "height": FIGURE_HEIGHT, 67 | "width": FIGURE_WIDTH, 68 | } 69 | 70 | # The input and output images 71 | parameters["input"] = { 72 | "n_cols": N_IMAGE_PIXEL_COLS, 73 | "n_rows": N_IMAGE_PIXEL_ROWS, 74 | "aspect_ratio": aspect_ratio, 75 | "image": { 76 | "bottom": INPUT_IMAGE_BOTTOM, 77 | "height": INPUT_IMAGE_HEIGHT, 78 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 79 | } 80 | } 81 | 82 | # The network as a whole 83 | parameters["network"] = { 84 | "n_nodes": N_NODES_BY_LAYER, 85 | "n_layers": len(N_NODES_BY_LAYER), 86 | "max_nodes": np.max(N_NODES_BY_LAYER), 87 | } 88 | 89 | # Individual node images 90 | parameters["node_image"] = { 91 | "height": 0, 92 | "width": 0, 93 | } 94 | 95 | parameters["error_image"] = { 96 | "left": 0, 97 | "bottom": 0, 98 | "width": parameters["input"]["image"]["width"] * ERROR_IMAGE_SCALE, 99 | "height": parameters["input"]["image"]["height"] * ERROR_IMAGE_SCALE, 100 | } 101 | 102 | parameters["gap"] = { 103 | "right_border": RIGHT_BORDER, 104 | "left_border": LEFT_BORDER, 105 | "bottom_border": BOTTOM_BORDER, 106 | "top_border": TOP_BORDER, 107 | "between_layer": 0, 108 | "between_layer_scale": BETWEEN_LAYER_SCALE, 109 | "between_node": 0, 110 | "between_node_scale": BETWEEN_NODE_SCALE, 111 | "error_gap_scale": ERROR_GAP_SCALE, 112 | } 113 | 114 | return parameters 115 | 116 | 117 | def create_background(p): 118 | fig = plt.figure( 119 | edgecolor=TAN, 120 | facecolor=GREEN, 121 | figsize=(p["figure"]["width"], p["figure"]["height"]), 122 | linewidth=4, 123 | ) 124 | ax_boss = fig.add_axes((0, 0, 1, 1), facecolor="none") 125 | ax_boss.set_xlim(0, 1) 126 | ax_boss.set_ylim(0, 1) 127 | return fig, ax_boss 128 | 129 | 130 | def find_node_image_size(p): 131 | """ 132 | What should the height and width of each node image be? 133 | As big as possible, given the constraints. 134 | There are two possible constraints: 135 | 1. Fill the figure top-to-bottom. 136 | 2. Fill the figure side-to-side. 137 | To determine which of these limits the size of the node images, 138 | we'll calculate the image size assuming each constraint separately, 139 | then respect the one that results in the smaller node image. 140 | """ 141 | # First assume height is the limiting factor. 142 | total_space_to_fill = ( 143 | p["figure"]["height"] 144 | - p["gap"]["bottom_border"] 145 | - p["gap"]["top_border"] 146 | ) 147 | # Use the layer with the largest number of nodes (n_max). 148 | # Pack the images and the gaps as tight as possible. 149 | # In that case, if the image height is h, 150 | # the gaps will each be h * p["gap"]["between_node_scale"]. 151 | # There will be n_max nodes and (n_max - 1) gaps. 
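    # (Added sketch, not in the original file: writing that packing rule out,
    # with h standing for the node image height and s for between_node_scale,
    #     total_space_to_fill = n_max * h + (n_max - 1) * s * h
    # and solving for h gives
    #     h = total_space_to_fill / (n_max + (n_max - 1) * s),
    # which is the expression computed just below.)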
152 | # After a wee bit of algebra: 153 | height_constrained_by_height = ( 154 | total_space_to_fill / ( 155 | p["network"]["max_nodes"] 156 | + (p["network"]["max_nodes"] - 1) 157 | * p["gap"]["between_node_scale"] 158 | ) 159 | ) 160 | 161 | # Second assume width is the limiting factor. 162 | total_space_to_fill = ( 163 | p["figure"]["width"] 164 | - p["gap"]["left_border"] 165 | - p["gap"]["right_border"] 166 | - 2 * p["input"]["image"]["width"] 167 | ) 168 | # Again, pack the images as tightly as possible side-to-side. 169 | # In this case, if the image width is w, 170 | # the gaps will each be w * p["gap"]["between_layer_scale"]. 171 | # There will be n_layer nodes and (n_layer + 1) gaps. 172 | # After another tidbit of algebra: 173 | width_constrained_by_width = ( 174 | total_space_to_fill / ( 175 | p["network"]["n_layers"] 176 | + (p["network"]["n_layers"] + 1) 177 | * p["gap"]["between_layer_scale"] 178 | ) 179 | ) 180 | 181 | # Figure out what the height would be for this width. 182 | height_constrained_by_width = ( 183 | width_constrained_by_width 184 | / p["input"]["aspect_ratio"] 185 | ) 186 | 187 | # See which constraint is more restrictive, and go with that one. 188 | p["node_image"]["height"] = np.minimum( 189 | height_constrained_by_width, 190 | height_constrained_by_height) 191 | p["node_image"]["width"] = ( 192 | p["node_image"]["height"] 193 | * p["input"]["aspect_ratio"] 194 | ) 195 | return p 196 | 197 | 198 | def find_between_layer_gap(p): 199 | """ 200 | How big is the horizontal spacing between_layers? 201 | This is also the spacing between the input image and the first layer 202 | and between the last layer and the output image. 203 | """ 204 | horizontal_gap_total = ( 205 | p["figure"]["width"] 206 | - 2 * p["input"]["image"]["width"] 207 | - p["network"]["n_layers"] * p["node_image"]["width"] 208 | - p["gap"]["left_border"] 209 | - p["gap"]["right_border"] 210 | ) 211 | n_horizontal_gaps = p["network"]["n_layers"] + 1 212 | p["gap"]["between_layer"] = horizontal_gap_total / n_horizontal_gaps 213 | return p 214 | 215 | 216 | def find_between_node_gap(p): 217 | """ 218 | How big is the vertical gap between_node images? 219 | """ 220 | vertical_gap_total = ( 221 | p["figure"]["height"] 222 | - p["gap"]["top_border"] 223 | - p["gap"]["bottom_border"] 224 | - p["network"]["max_nodes"] 225 | * p["node_image"]["height"] 226 | ) 227 | n_vertical_gaps = p["network"]["max_nodes"] - 1 228 | p["gap"]["between_node"] = vertical_gap_total / n_vertical_gaps 229 | return p 230 | 231 | 232 | def find_error_image_position(p): 233 | """ 234 | Where exactly should the error image be positioned? 235 | """ 236 | p["error_image"]["bottom"] = ( 237 | p["input"]["image"]["bottom"] 238 | - p["input"]["image"]["height"] 239 | * p["gap"]["error_gap_scale"] 240 | - p["error_image"]["height"] 241 | ) 242 | error_image_center = ( 243 | p["figure"]["width"] 244 | - p["gap"]["right_border"] 245 | - p["input"]["image"]["width"] / 2 246 | ) 247 | p["error_image"]["left"] = ( 248 | error_image_center 249 | - p["error_image"]["width"] / 2 250 | ) 251 | return p 252 | 253 | 254 | def save_nn_viz(fig, postfix="0"): 255 | """ 256 | Generate a new filename for each step of the process. 
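    (Note added here, not in the original docstring: the figure's own face
    and edge colors are passed through to savefig explicitly so the saved
    PNG keeps the green background and tan border.)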
257 | """ 258 | base_name = "nn_viz_" 259 | filename = base_name + postfix + ".png" 260 | fig.savefig( 261 | filename, 262 | edgecolor=fig.get_edgecolor(), 263 | facecolor=fig.get_facecolor(), 264 | dpi=DPI, 265 | ) 266 | 267 | 268 | if __name__ == "__main__": 269 | main() 270 | -------------------------------------------------------------------------------- /nn_viz_15.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | import matplotlib 5 | matplotlib.use("agg") 6 | import matplotlib.pyplot as plt # noqa: E402 7 | import numpy as np # noqa: E402 8 | 9 | # Choose a color palette 10 | BLUE = "#04253a" 11 | GREEN = "#4c837a" 12 | TAN = "#e1ddbf" 13 | DPI = 300 14 | 15 | # Changing these adjusts the size and layout of the visualization 16 | FIGURE_WIDTH = 16 17 | FIGURE_HEIGHT = 9 18 | RIGHT_BORDER = 0.7 19 | LEFT_BORDER = 0.7 20 | TOP_BORDER = 0.8 21 | BOTTOM_BORDER = 0.6 22 | 23 | N_IMAGE_PIXEL_COLS = 64 24 | N_IMAGE_PIXEL_ROWS = 48 25 | N_NODES_BY_LAYER = [10, 7, 5, 8] 26 | 27 | INPUT_IMAGE_BOTTOM = 5 28 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 29 | ERROR_IMAGE_SCALE = 0.7 30 | ERROR_GAP_SCALE = 0.3 31 | BETWEEN_LAYER_SCALE = 0.8 32 | BETWEEN_NODE_SCALE = 0.4 33 | 34 | 35 | def main(): 36 | """ 37 | Build a visualization of an image autoencoder neural network, 38 | piece by piece. 39 | 40 | A central data structure in this example is the collection of parameters 41 | that define how the image is laid out. It is a set of nested dictionaries. 42 | """ 43 | p = construct_parameters() 44 | fig, ax_boss = create_background(p) 45 | 46 | p = find_node_image_size(p) 47 | p = find_between_layer_gap(p) 48 | p = find_between_node_gap(p) 49 | p = find_error_image_position(p) 50 | 51 | add_input_image(fig, p) 52 | save_nn_viz(fig, postfix="15_input_random") 53 | 54 | 55 | def construct_parameters(): 56 | """ 57 | Build a dictionary of parameters that describe the size and location 58 | of the elements of the visualization. This is a convenient way to pass 59 | the collection of them around . 60 | """ 61 | # Enforce square pixels. Each pixel will have the same height and width. 
62 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 63 | 64 | parameters = {} 65 | 66 | # The figure as a whole 67 | parameters["figure"] = { 68 | "height": FIGURE_HEIGHT, 69 | "width": FIGURE_WIDTH, 70 | } 71 | 72 | # The input and output images 73 | parameters["input"] = { 74 | "n_cols": N_IMAGE_PIXEL_COLS, 75 | "n_rows": N_IMAGE_PIXEL_ROWS, 76 | "aspect_ratio": aspect_ratio, 77 | "image": { 78 | "bottom": INPUT_IMAGE_BOTTOM, 79 | "height": INPUT_IMAGE_HEIGHT, 80 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 81 | } 82 | } 83 | 84 | # The network as a whole 85 | parameters["network"] = { 86 | "n_nodes": N_NODES_BY_LAYER, 87 | "n_layers": len(N_NODES_BY_LAYER), 88 | "max_nodes": np.max(N_NODES_BY_LAYER), 89 | } 90 | 91 | # Individual node images 92 | parameters["node_image"] = { 93 | "height": 0, 94 | "width": 0, 95 | } 96 | 97 | parameters["error_image"] = { 98 | "left": 0, 99 | "bottom": 0, 100 | "width": parameters["input"]["image"]["width"] * ERROR_IMAGE_SCALE, 101 | "height": parameters["input"]["image"]["height"] * ERROR_IMAGE_SCALE, 102 | } 103 | 104 | parameters["gap"] = { 105 | "right_border": RIGHT_BORDER, 106 | "left_border": LEFT_BORDER, 107 | "bottom_border": BOTTOM_BORDER, 108 | "top_border": TOP_BORDER, 109 | "between_layer": 0, 110 | "between_layer_scale": BETWEEN_LAYER_SCALE, 111 | "between_node": 0, 112 | "between_node_scale": BETWEEN_NODE_SCALE, 113 | "error_gap_scale": ERROR_GAP_SCALE, 114 | } 115 | 116 | return parameters 117 | 118 | 119 | def create_background(p): 120 | fig = plt.figure( 121 | edgecolor=TAN, 122 | facecolor=GREEN, 123 | figsize=(p["figure"]["width"], p["figure"]["height"]), 124 | linewidth=4, 125 | ) 126 | ax_boss = fig.add_axes((0, 0, 1, 1), facecolor="none") 127 | ax_boss.set_xlim(0, 1) 128 | ax_boss.set_ylim(0, 1) 129 | return fig, ax_boss 130 | 131 | 132 | def find_node_image_size(p): 133 | """ 134 | What should the height and width of each node image be? 135 | As big as possible, given the constraints. 136 | There are two possible constraints: 137 | 1. Fill the figure top-to-bottom. 138 | 2. Fill the figure side-to-side. 139 | To determine which of these limits the size of the node images, 140 | we'll calculate the image size assuming each constraint separately, 141 | then respect the one that results in the smaller node image. 142 | """ 143 | # First assume height is the limiting factor. 144 | total_space_to_fill = ( 145 | p["figure"]["height"] 146 | - p["gap"]["bottom_border"] 147 | - p["gap"]["top_border"] 148 | ) 149 | # Use the layer with the largest number of nodes (n_max). 150 | # Pack the images and the gaps as tight as possible. 151 | # In that case, if the image height is h, 152 | # the gaps will each be h * p["gap"]["between_node_scale"]. 153 | # There will be n_max nodes and (n_max - 1) gaps. 154 | # After a wee bit of algebra: 155 | height_constrained_by_height = ( 156 | total_space_to_fill / ( 157 | p["network"]["max_nodes"] 158 | + (p["network"]["max_nodes"] - 1) 159 | * p["gap"]["between_node_scale"] 160 | ) 161 | ) 162 | 163 | # Second assume width is the limiting factor. 164 | total_space_to_fill = ( 165 | p["figure"]["width"] 166 | - p["gap"]["left_border"] 167 | - p["gap"]["right_border"] 168 | - 2 * p["input"]["image"]["width"] 169 | ) 170 | # Again, pack the images as tightly as possible side-to-side. 171 | # In this case, if the image width is w, 172 | # the gaps will each be w * p["gap"]["between_layer_scale"]. 173 | # There will be n_layer nodes and (n_layer + 1) gaps. 
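    # (Added sketch, not in the original file: with w standing for the node
    # image width and s for between_layer_scale, the packing rule reads
    #     total_space_to_fill = n_layers * w + (n_layers + 1) * s * w,
    # so
    #     w = total_space_to_fill / (n_layers + (n_layers + 1) * s),
    # which matches the expression computed just below.)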
174 | # After another tidbit of algebra: 175 | width_constrained_by_width = ( 176 | total_space_to_fill / ( 177 | p["network"]["n_layers"] 178 | + (p["network"]["n_layers"] + 1) 179 | * p["gap"]["between_layer_scale"] 180 | ) 181 | ) 182 | 183 | # Figure out what the height would be for this width. 184 | height_constrained_by_width = ( 185 | width_constrained_by_width 186 | / p["input"]["aspect_ratio"] 187 | ) 188 | 189 | # See which constraint is more restrictive, and go with that one. 190 | p["node_image"]["height"] = np.minimum( 191 | height_constrained_by_width, 192 | height_constrained_by_height) 193 | p["node_image"]["width"] = ( 194 | p["node_image"]["height"] 195 | * p["input"]["aspect_ratio"] 196 | ) 197 | return p 198 | 199 | 200 | def find_between_layer_gap(p): 201 | """ 202 | How big is the horizontal spacing between_layers? 203 | This is also the spacing between the input image and the first layer 204 | and between the last layer and the output image. 205 | """ 206 | horizontal_gap_total = ( 207 | p["figure"]["width"] 208 | - 2 * p["input"]["image"]["width"] 209 | - p["network"]["n_layers"] * p["node_image"]["width"] 210 | - p["gap"]["left_border"] 211 | - p["gap"]["right_border"] 212 | ) 213 | n_horizontal_gaps = p["network"]["n_layers"] + 1 214 | p["gap"]["between_layer"] = horizontal_gap_total / n_horizontal_gaps 215 | return p 216 | 217 | 218 | def find_between_node_gap(p): 219 | """ 220 | How big is the vertical gap between_node images? 221 | """ 222 | vertical_gap_total = ( 223 | p["figure"]["height"] 224 | - p["gap"]["top_border"] 225 | - p["gap"]["bottom_border"] 226 | - p["network"]["max_nodes"] 227 | * p["node_image"]["height"] 228 | ) 229 | n_vertical_gaps = p["network"]["max_nodes"] - 1 230 | p["gap"]["between_node"] = vertical_gap_total / n_vertical_gaps 231 | return p 232 | 233 | 234 | def find_error_image_position(p): 235 | """ 236 | Where exactly should the error image be positioned? 237 | """ 238 | p["error_image"]["bottom"] = ( 239 | p["input"]["image"]["bottom"] 240 | - p["input"]["image"]["height"] 241 | * p["gap"]["error_gap_scale"] 242 | - p["error_image"]["height"] 243 | ) 244 | error_image_center = ( 245 | p["figure"]["width"] 246 | - p["gap"]["right_border"] 247 | - p["input"]["image"]["width"] / 2 248 | ) 249 | p["error_image"]["left"] = ( 250 | error_image_center 251 | - p["error_image"]["width"] / 2 252 | ) 253 | return p 254 | 255 | 256 | def add_input_image(fig, p): 257 | """ 258 | All Axes to be added use the rectangle specification 259 | (left, bottom, width, height) 260 | """ 261 | absolute_pos = ( 262 | p["gap"]["left_border"], 263 | p["input"]["image"]["bottom"], 264 | p["input"]["image"]["width"], 265 | p["input"]["image"]["height"]) 266 | scaled_pos = ( 267 | absolute_pos[0] / p["figure"]["width"], 268 | absolute_pos[1] / p["figure"]["height"], 269 | absolute_pos[2] / p["figure"]["width"], 270 | absolute_pos[3] / p["figure"]["height"]) 271 | ax_input = fig.add_axes(scaled_pos) 272 | fill_patch = np.random.sample(size=( 273 | p["input"]["n_rows"], 274 | p["input"]["n_cols"], 275 | )) 276 | ax_input.imshow(fill_patch, cmap="inferno") 277 | 278 | 279 | def save_nn_viz(fig, postfix="0"): 280 | """ 281 | Generate a new filename for each step of the process. 
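    For example, the call in main() above, save_nn_viz(fig,
    postfix="15_input_random"), writes the file nn_viz_15_input_random.png.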
282 | """ 283 | base_name = "nn_viz_" 284 | filename = base_name + postfix + ".png" 285 | fig.savefig( 286 | filename, 287 | edgecolor=fig.get_edgecolor(), 288 | facecolor=fig.get_facecolor(), 289 | dpi=DPI, 290 | ) 291 | 292 | 293 | if __name__ == "__main__": 294 | main() 295 | -------------------------------------------------------------------------------- /nn_viz_16.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | import matplotlib 5 | matplotlib.use("agg") 6 | import matplotlib.pyplot as plt # noqa: E402 7 | import numpy as np # noqa: E402 8 | 9 | # Choose a color palette 10 | BLUE = "#04253a" 11 | GREEN = "#4c837a" 12 | TAN = "#e1ddbf" 13 | DPI = 300 14 | 15 | # Changing these adjusts the size and layout of the visualization 16 | FIGURE_WIDTH = 16 17 | FIGURE_HEIGHT = 9 18 | RIGHT_BORDER = 0.7 19 | LEFT_BORDER = 0.7 20 | TOP_BORDER = 0.8 21 | BOTTOM_BORDER = 0.6 22 | 23 | N_IMAGE_PIXEL_COLS = 64 24 | N_IMAGE_PIXEL_ROWS = 48 25 | N_NODES_BY_LAYER = [10, 7, 5, 8] 26 | 27 | INPUT_IMAGE_BOTTOM = 5 28 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 29 | ERROR_IMAGE_SCALE = 0.7 30 | ERROR_GAP_SCALE = 0.3 31 | BETWEEN_LAYER_SCALE = 0.8 32 | BETWEEN_NODE_SCALE = 0.4 33 | 34 | 35 | def main(): 36 | """ 37 | Build a visualization of an image autoencoder neural network, 38 | piece by piece. 39 | 40 | A central data structure in this example is the collection of parameters 41 | that define how the image is laid out. It is a set of nested dictionaries. 42 | """ 43 | p = construct_parameters() 44 | fig, ax_boss = create_background(p) 45 | 46 | p = find_node_image_size(p) 47 | p = find_between_layer_gap(p) 48 | p = find_between_node_gap(p) 49 | p = find_error_image_position(p) 50 | 51 | add_input_image(fig, p) 52 | save_nn_viz(fig, postfix="16_input_random_no_ticks") 53 | 54 | 55 | def construct_parameters(): 56 | """ 57 | Build a dictionary of parameters that describe the size and location 58 | of the elements of the visualization. This is a convenient way to pass 59 | the collection of them around . 60 | """ 61 | # Enforce square pixels. Each pixel will have the same height and width. 
62 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 63 | 64 | parameters = {} 65 | 66 | # The figure as a whole 67 | parameters["figure"] = { 68 | "height": FIGURE_HEIGHT, 69 | "width": FIGURE_WIDTH, 70 | } 71 | 72 | # The input and output images 73 | parameters["input"] = { 74 | "n_cols": N_IMAGE_PIXEL_COLS, 75 | "n_rows": N_IMAGE_PIXEL_ROWS, 76 | "aspect_ratio": aspect_ratio, 77 | "image": { 78 | "bottom": INPUT_IMAGE_BOTTOM, 79 | "height": INPUT_IMAGE_HEIGHT, 80 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 81 | } 82 | } 83 | 84 | # The network as a whole 85 | parameters["network"] = { 86 | "n_nodes": N_NODES_BY_LAYER, 87 | "n_layers": len(N_NODES_BY_LAYER), 88 | "max_nodes": np.max(N_NODES_BY_LAYER), 89 | } 90 | 91 | # Individual node images 92 | parameters["node_image"] = { 93 | "height": 0, 94 | "width": 0, 95 | } 96 | 97 | parameters["error_image"] = { 98 | "left": 0, 99 | "bottom": 0, 100 | "width": parameters["input"]["image"]["width"] * ERROR_IMAGE_SCALE, 101 | "height": parameters["input"]["image"]["height"] * ERROR_IMAGE_SCALE, 102 | } 103 | 104 | parameters["gap"] = { 105 | "right_border": RIGHT_BORDER, 106 | "left_border": LEFT_BORDER, 107 | "bottom_border": BOTTOM_BORDER, 108 | "top_border": TOP_BORDER, 109 | "between_layer": 0, 110 | "between_layer_scale": BETWEEN_LAYER_SCALE, 111 | "between_node": 0, 112 | "between_node_scale": BETWEEN_NODE_SCALE, 113 | "error_gap_scale": ERROR_GAP_SCALE, 114 | } 115 | 116 | return parameters 117 | 118 | 119 | def create_background(p): 120 | fig = plt.figure( 121 | edgecolor=TAN, 122 | facecolor=GREEN, 123 | figsize=(p["figure"]["width"], p["figure"]["height"]), 124 | linewidth=4, 125 | ) 126 | ax_boss = fig.add_axes((0, 0, 1, 1), facecolor="none") 127 | ax_boss.set_xlim(0, 1) 128 | ax_boss.set_ylim(0, 1) 129 | return fig, ax_boss 130 | 131 | 132 | def find_node_image_size(p): 133 | """ 134 | What should the height and width of each node image be? 135 | As big as possible, given the constraints. 136 | There are two possible constraints: 137 | 1. Fill the figure top-to-bottom. 138 | 2. Fill the figure side-to-side. 139 | To determine which of these limits the size of the node images, 140 | we'll calculate the image size assuming each constraint separately, 141 | then respect the one that results in the smaller node image. 142 | """ 143 | # First assume height is the limiting factor. 144 | total_space_to_fill = ( 145 | p["figure"]["height"] 146 | - p["gap"]["bottom_border"] 147 | - p["gap"]["top_border"] 148 | ) 149 | # Use the layer with the largest number of nodes (n_max). 150 | # Pack the images and the gaps as tight as possible. 151 | # In that case, if the image height is h, 152 | # the gaps will each be h * p["gap"]["between_node_scale"]. 153 | # There will be n_max nodes and (n_max - 1) gaps. 154 | # After a wee bit of algebra: 155 | height_constrained_by_height = ( 156 | total_space_to_fill / ( 157 | p["network"]["max_nodes"] 158 | + (p["network"]["max_nodes"] - 1) 159 | * p["gap"]["between_node_scale"] 160 | ) 161 | ) 162 | 163 | # Second assume width is the limiting factor. 164 | total_space_to_fill = ( 165 | p["figure"]["width"] 166 | - p["gap"]["left_border"] 167 | - p["gap"]["right_border"] 168 | - 2 * p["input"]["image"]["width"] 169 | ) 170 | # Again, pack the images as tightly as possible side-to-side. 171 | # In this case, if the image width is w, 172 | # the gaps will each be w * p["gap"]["between_layer_scale"]. 173 | # There will be n_layer nodes and (n_layer + 1) gaps. 
174 | # After another tidbit of algebra: 175 | width_constrained_by_width = ( 176 | total_space_to_fill / ( 177 | p["network"]["n_layers"] 178 | + (p["network"]["n_layers"] + 1) 179 | * p["gap"]["between_layer_scale"] 180 | ) 181 | ) 182 | 183 | # Figure out what the height would be for this width. 184 | height_constrained_by_width = ( 185 | width_constrained_by_width 186 | / p["input"]["aspect_ratio"] 187 | ) 188 | 189 | # See which constraint is more restrictive, and go with that one. 190 | p["node_image"]["height"] = np.minimum( 191 | height_constrained_by_width, 192 | height_constrained_by_height) 193 | p["node_image"]["width"] = ( 194 | p["node_image"]["height"] 195 | * p["input"]["aspect_ratio"] 196 | ) 197 | return p 198 | 199 | 200 | def find_between_layer_gap(p): 201 | """ 202 | How big is the horizontal spacing between_layers? 203 | This is also the spacing between the input image and the first layer 204 | and between the last layer and the output image. 205 | """ 206 | horizontal_gap_total = ( 207 | p["figure"]["width"] 208 | - 2 * p["input"]["image"]["width"] 209 | - p["network"]["n_layers"] * p["node_image"]["width"] 210 | - p["gap"]["left_border"] 211 | - p["gap"]["right_border"] 212 | ) 213 | n_horizontal_gaps = p["network"]["n_layers"] + 1 214 | p["gap"]["between_layer"] = horizontal_gap_total / n_horizontal_gaps 215 | return p 216 | 217 | 218 | def find_between_node_gap(p): 219 | """ 220 | How big is the vertical gap between_node images? 221 | """ 222 | vertical_gap_total = ( 223 | p["figure"]["height"] 224 | - p["gap"]["top_border"] 225 | - p["gap"]["bottom_border"] 226 | - p["network"]["max_nodes"] 227 | * p["node_image"]["height"] 228 | ) 229 | n_vertical_gaps = p["network"]["max_nodes"] - 1 230 | p["gap"]["between_node"] = vertical_gap_total / n_vertical_gaps 231 | return p 232 | 233 | 234 | def find_error_image_position(p): 235 | """ 236 | Where exactly should the error image be positioned? 237 | """ 238 | p["error_image"]["bottom"] = ( 239 | p["input"]["image"]["bottom"] 240 | - p["input"]["image"]["height"] 241 | * p["gap"]["error_gap_scale"] 242 | - p["error_image"]["height"] 243 | ) 244 | error_image_center = ( 245 | p["figure"]["width"] 246 | - p["gap"]["right_border"] 247 | - p["input"]["image"]["width"] / 2 248 | ) 249 | p["error_image"]["left"] = ( 250 | error_image_center 251 | - p["error_image"]["width"] / 2 252 | ) 253 | return p 254 | 255 | 256 | def add_input_image(fig, p): 257 | """ 258 | All Axes to be added use the rectangle specification 259 | (left, bottom, width, height) 260 | """ 261 | absolute_pos = ( 262 | p["gap"]["left_border"], 263 | p["input"]["image"]["bottom"], 264 | p["input"]["image"]["width"], 265 | p["input"]["image"]["height"]) 266 | scaled_pos = ( 267 | absolute_pos[0] / p["figure"]["width"], 268 | absolute_pos[1] / p["figure"]["height"], 269 | absolute_pos[2] / p["figure"]["width"], 270 | absolute_pos[3] / p["figure"]["height"]) 271 | ax_input = fig.add_axes(scaled_pos) 272 | fill_patch = np.random.sample(size=( 273 | p["input"]["n_rows"], 274 | p["input"]["n_cols"], 275 | )) 276 | ax_input.imshow(fill_patch, cmap="inferno") 277 | ax_input.tick_params(bottom=False, top=False, left=False, right=False) 278 | ax_input.tick_params( 279 | labelbottom=False, labeltop=False, labelleft=False, labelright=False) 280 | 281 | 282 | def save_nn_viz(fig, postfix="0"): 283 | """ 284 | Generate a new filename for each step of the process. 
285 | """ 286 | base_name = "nn_viz_" 287 | filename = base_name + postfix + ".png" 288 | fig.savefig( 289 | filename, 290 | edgecolor=fig.get_edgecolor(), 291 | facecolor=fig.get_facecolor(), 292 | dpi=DPI, 293 | ) 294 | 295 | 296 | if __name__ == "__main__": 297 | main() 298 | -------------------------------------------------------------------------------- /nn_viz_17.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | import matplotlib 5 | matplotlib.use("agg") 6 | import matplotlib.pyplot as plt # noqa: E402 7 | import numpy as np # noqa: E402 8 | 9 | # Choose a color palette 10 | BLUE = "#04253a" 11 | GREEN = "#4c837a" 12 | TAN = "#e1ddbf" 13 | DPI = 300 14 | 15 | # Changing these adjusts the size and layout of the visualization 16 | FIGURE_WIDTH = 16 17 | FIGURE_HEIGHT = 9 18 | RIGHT_BORDER = 0.7 19 | LEFT_BORDER = 0.7 20 | TOP_BORDER = 0.8 21 | BOTTOM_BORDER = 0.6 22 | 23 | N_IMAGE_PIXEL_COLS = 64 24 | N_IMAGE_PIXEL_ROWS = 48 25 | N_NODES_BY_LAYER = [10, 7, 5, 8] 26 | 27 | INPUT_IMAGE_BOTTOM = 5 28 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 29 | ERROR_IMAGE_SCALE = 0.7 30 | ERROR_GAP_SCALE = 0.3 31 | BETWEEN_LAYER_SCALE = 0.8 32 | BETWEEN_NODE_SCALE = 0.4 33 | 34 | 35 | def main(): 36 | """ 37 | Build a visualization of an image autoencoder neural network, 38 | piece by piece. 39 | 40 | A central data structure in this example is the collection of parameters 41 | that define how the image is laid out. It is a set of nested dictionaries. 42 | """ 43 | p = construct_parameters() 44 | fig, ax_boss = create_background(p) 45 | 46 | p = find_node_image_size(p) 47 | p = find_between_layer_gap(p) 48 | p = find_between_node_gap(p) 49 | p = find_error_image_position(p) 50 | 51 | add_input_image(fig, p) 52 | save_nn_viz(fig, postfix="17_input_random_tan_border") 53 | 54 | 55 | def construct_parameters(): 56 | """ 57 | Build a dictionary of parameters that describe the size and location 58 | of the elements of the visualization. This is a convenient way to pass 59 | the collection of them around . 60 | """ 61 | # Enforce square pixels. Each pixel will have the same height and width. 
62 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 63 | 64 | parameters = {} 65 | 66 | # The figure as a whole 67 | parameters["figure"] = { 68 | "height": FIGURE_HEIGHT, 69 | "width": FIGURE_WIDTH, 70 | } 71 | 72 | # The input and output images 73 | parameters["input"] = { 74 | "n_cols": N_IMAGE_PIXEL_COLS, 75 | "n_rows": N_IMAGE_PIXEL_ROWS, 76 | "aspect_ratio": aspect_ratio, 77 | "image": { 78 | "bottom": INPUT_IMAGE_BOTTOM, 79 | "height": INPUT_IMAGE_HEIGHT, 80 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 81 | } 82 | } 83 | 84 | # The network as a whole 85 | parameters["network"] = { 86 | "n_nodes": N_NODES_BY_LAYER, 87 | "n_layers": len(N_NODES_BY_LAYER), 88 | "max_nodes": np.max(N_NODES_BY_LAYER), 89 | } 90 | 91 | # Individual node images 92 | parameters["node_image"] = { 93 | "height": 0, 94 | "width": 0, 95 | } 96 | 97 | parameters["error_image"] = { 98 | "left": 0, 99 | "bottom": 0, 100 | "width": parameters["input"]["image"]["width"] * ERROR_IMAGE_SCALE, 101 | "height": parameters["input"]["image"]["height"] * ERROR_IMAGE_SCALE, 102 | } 103 | 104 | parameters["gap"] = { 105 | "right_border": RIGHT_BORDER, 106 | "left_border": LEFT_BORDER, 107 | "bottom_border": BOTTOM_BORDER, 108 | "top_border": TOP_BORDER, 109 | "between_layer": 0, 110 | "between_layer_scale": BETWEEN_LAYER_SCALE, 111 | "between_node": 0, 112 | "between_node_scale": BETWEEN_NODE_SCALE, 113 | "error_gap_scale": ERROR_GAP_SCALE, 114 | } 115 | 116 | return parameters 117 | 118 | 119 | def create_background(p): 120 | fig = plt.figure( 121 | edgecolor=TAN, 122 | facecolor=GREEN, 123 | figsize=(p["figure"]["width"], p["figure"]["height"]), 124 | linewidth=4, 125 | ) 126 | ax_boss = fig.add_axes((0, 0, 1, 1), facecolor="none") 127 | ax_boss.set_xlim(0, 1) 128 | ax_boss.set_ylim(0, 1) 129 | return fig, ax_boss 130 | 131 | 132 | def find_node_image_size(p): 133 | """ 134 | What should the height and width of each node image be? 135 | As big as possible, given the constraints. 136 | There are two possible constraints: 137 | 1. Fill the figure top-to-bottom. 138 | 2. Fill the figure side-to-side. 139 | To determine which of these limits the size of the node images, 140 | we'll calculate the image size assuming each constraint separately, 141 | then respect the one that results in the smaller node image. 142 | """ 143 | # First assume height is the limiting factor. 144 | total_space_to_fill = ( 145 | p["figure"]["height"] 146 | - p["gap"]["bottom_border"] 147 | - p["gap"]["top_border"] 148 | ) 149 | # Use the layer with the largest number of nodes (n_max). 150 | # Pack the images and the gaps as tight as possible. 151 | # In that case, if the image height is h, 152 | # the gaps will each be h * p["gap"]["between_node_scale"]. 153 | # There will be n_max nodes and (n_max - 1) gaps. 154 | # After a wee bit of algebra: 155 | height_constrained_by_height = ( 156 | total_space_to_fill / ( 157 | p["network"]["max_nodes"] 158 | + (p["network"]["max_nodes"] - 1) 159 | * p["gap"]["between_node_scale"] 160 | ) 161 | ) 162 | 163 | # Second assume width is the limiting factor. 164 | total_space_to_fill = ( 165 | p["figure"]["width"] 166 | - p["gap"]["left_border"] 167 | - p["gap"]["right_border"] 168 | - 2 * p["input"]["image"]["width"] 169 | ) 170 | # Again, pack the images as tightly as possible side-to-side. 171 | # In this case, if the image width is w, 172 | # the gaps will each be w * p["gap"]["between_layer_scale"]. 173 | # There will be n_layer nodes and (n_layer + 1) gaps. 
174 | # After another tidbit of algebra: 175 | width_constrained_by_width = ( 176 | total_space_to_fill / ( 177 | p["network"]["n_layers"] 178 | + (p["network"]["n_layers"] + 1) 179 | * p["gap"]["between_layer_scale"] 180 | ) 181 | ) 182 | 183 | # Figure out what the height would be for this width. 184 | height_constrained_by_width = ( 185 | width_constrained_by_width 186 | / p["input"]["aspect_ratio"] 187 | ) 188 | 189 | # See which constraint is more restrictive, and go with that one. 190 | p["node_image"]["height"] = np.minimum( 191 | height_constrained_by_width, 192 | height_constrained_by_height) 193 | p["node_image"]["width"] = ( 194 | p["node_image"]["height"] 195 | * p["input"]["aspect_ratio"] 196 | ) 197 | return p 198 | 199 | 200 | def find_between_layer_gap(p): 201 | """ 202 | How big is the horizontal spacing between_layers? 203 | This is also the spacing between the input image and the first layer 204 | and between the last layer and the output image. 205 | """ 206 | horizontal_gap_total = ( 207 | p["figure"]["width"] 208 | - 2 * p["input"]["image"]["width"] 209 | - p["network"]["n_layers"] * p["node_image"]["width"] 210 | - p["gap"]["left_border"] 211 | - p["gap"]["right_border"] 212 | ) 213 | n_horizontal_gaps = p["network"]["n_layers"] + 1 214 | p["gap"]["between_layer"] = horizontal_gap_total / n_horizontal_gaps 215 | return p 216 | 217 | 218 | def find_between_node_gap(p): 219 | """ 220 | How big is the vertical gap between_node images? 221 | """ 222 | vertical_gap_total = ( 223 | p["figure"]["height"] 224 | - p["gap"]["top_border"] 225 | - p["gap"]["bottom_border"] 226 | - p["network"]["max_nodes"] 227 | * p["node_image"]["height"] 228 | ) 229 | n_vertical_gaps = p["network"]["max_nodes"] - 1 230 | p["gap"]["between_node"] = vertical_gap_total / n_vertical_gaps 231 | return p 232 | 233 | 234 | def find_error_image_position(p): 235 | """ 236 | Where exactly should the error image be positioned? 
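    (Descriptive note added here: per the code below, the error image hangs
    below the image row, its top offset from the input image's bottom by
    error_gap_scale times the input image height, and it is centered
    horizontally under the spot reserved for the output image on the right.)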
237 | """ 238 | p["error_image"]["bottom"] = ( 239 | p["input"]["image"]["bottom"] 240 | - p["input"]["image"]["height"] 241 | * p["gap"]["error_gap_scale"] 242 | - p["error_image"]["height"] 243 | ) 244 | error_image_center = ( 245 | p["figure"]["width"] 246 | - p["gap"]["right_border"] 247 | - p["input"]["image"]["width"] / 2 248 | ) 249 | p["error_image"]["left"] = ( 250 | error_image_center 251 | - p["error_image"]["width"] / 2 252 | ) 253 | return p 254 | 255 | 256 | def add_input_image(fig, p): 257 | """ 258 | All Axes to be added use the rectangle specification 259 | (left, bottom, width, height) 260 | """ 261 | absolute_pos = ( 262 | p["gap"]["left_border"], 263 | p["input"]["image"]["bottom"], 264 | p["input"]["image"]["width"], 265 | p["input"]["image"]["height"]) 266 | scaled_pos = ( 267 | absolute_pos[0] / p["figure"]["width"], 268 | absolute_pos[1] / p["figure"]["height"], 269 | absolute_pos[2] / p["figure"]["width"], 270 | absolute_pos[3] / p["figure"]["height"]) 271 | ax_input = fig.add_axes(scaled_pos) 272 | fill_patch = np.random.sample(size=( 273 | p["input"]["n_rows"], 274 | p["input"]["n_cols"], 275 | )) 276 | ax_input.imshow(fill_patch, cmap="inferno") 277 | ax_input.tick_params(bottom=False, top=False, left=False, right=False) 278 | ax_input.tick_params( 279 | labelbottom=False, labeltop=False, labelleft=False, labelright=False) 280 | ax_input.spines["top"].set_color(TAN) 281 | ax_input.spines["bottom"].set_color(TAN) 282 | ax_input.spines["left"].set_color(TAN) 283 | ax_input.spines["right"].set_color(TAN) 284 | 285 | 286 | def save_nn_viz(fig, postfix="0"): 287 | """ 288 | Generate a new filename for each step of the process. 289 | """ 290 | base_name = "nn_viz_" 291 | filename = base_name + postfix + ".png" 292 | fig.savefig( 293 | filename, 294 | edgecolor=fig.get_edgecolor(), 295 | facecolor=fig.get_facecolor(), 296 | dpi=DPI, 297 | ) 298 | 299 | 300 | if __name__ == "__main__": 301 | main() 302 | -------------------------------------------------------------------------------- /nn_viz_18.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | import matplotlib 5 | matplotlib.use("agg") 6 | import matplotlib.pyplot as plt # noqa: E402 7 | import numpy as np # noqa: E402 8 | 9 | # Choose a color palette 10 | BLUE = "#04253a" 11 | GREEN = "#4c837a" 12 | TAN = "#e1ddbf" 13 | DPI = 300 14 | 15 | # Changing these adjusts the size and layout of the visualization 16 | FIGURE_WIDTH = 16 17 | FIGURE_HEIGHT = 9 18 | RIGHT_BORDER = 0.7 19 | LEFT_BORDER = 0.7 20 | TOP_BORDER = 0.8 21 | BOTTOM_BORDER = 0.6 22 | 23 | N_IMAGE_PIXEL_COLS = 64 24 | N_IMAGE_PIXEL_ROWS = 48 25 | N_NODES_BY_LAYER = [10, 7, 5, 8] 26 | 27 | INPUT_IMAGE_BOTTOM = 5 28 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 29 | ERROR_IMAGE_SCALE = 0.7 30 | ERROR_GAP_SCALE = 0.3 31 | BETWEEN_LAYER_SCALE = 0.8 32 | BETWEEN_NODE_SCALE = 0.4 33 | 34 | 35 | def main(): 36 | """ 37 | Build a visualization of an image autoencoder neural network, 38 | piece by piece. 39 | 40 | A central data structure in this example is the collection of parameters 41 | that define how the image is laid out. It is a set of nested dictionaries. 
42 | """ 43 | p = construct_parameters() 44 | fig, ax_boss = create_background(p) 45 | 46 | p = find_node_image_size(p) 47 | p = find_between_layer_gap(p) 48 | p = find_between_node_gap(p) 49 | p = find_error_image_position(p) 50 | 51 | add_input_image(fig, p) 52 | save_nn_viz(fig, postfix="18_input_random_refactored") 53 | 54 | 55 | def construct_parameters(): 56 | """ 57 | Build a dictionary of parameters that describe the size and location 58 | of the elements of the visualization. This is a convenient way to pass 59 | the collection of them around . 60 | """ 61 | # Enforce square pixels. Each pixel will have the same height and width. 62 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 63 | 64 | parameters = {} 65 | 66 | # The figure as a whole 67 | parameters["figure"] = { 68 | "height": FIGURE_HEIGHT, 69 | "width": FIGURE_WIDTH, 70 | } 71 | 72 | # The input and output images 73 | parameters["input"] = { 74 | "n_cols": N_IMAGE_PIXEL_COLS, 75 | "n_rows": N_IMAGE_PIXEL_ROWS, 76 | "aspect_ratio": aspect_ratio, 77 | "image": { 78 | "bottom": INPUT_IMAGE_BOTTOM, 79 | "height": INPUT_IMAGE_HEIGHT, 80 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 81 | } 82 | } 83 | 84 | # The network as a whole 85 | parameters["network"] = { 86 | "n_nodes": N_NODES_BY_LAYER, 87 | "n_layers": len(N_NODES_BY_LAYER), 88 | "max_nodes": np.max(N_NODES_BY_LAYER), 89 | } 90 | 91 | # Individual node images 92 | parameters["node_image"] = { 93 | "height": 0, 94 | "width": 0, 95 | } 96 | 97 | parameters["error_image"] = { 98 | "left": 0, 99 | "bottom": 0, 100 | "width": parameters["input"]["image"]["width"] * ERROR_IMAGE_SCALE, 101 | "height": parameters["input"]["image"]["height"] * ERROR_IMAGE_SCALE, 102 | } 103 | 104 | parameters["gap"] = { 105 | "right_border": RIGHT_BORDER, 106 | "left_border": LEFT_BORDER, 107 | "bottom_border": BOTTOM_BORDER, 108 | "top_border": TOP_BORDER, 109 | "between_layer": 0, 110 | "between_layer_scale": BETWEEN_LAYER_SCALE, 111 | "between_node": 0, 112 | "between_node_scale": BETWEEN_NODE_SCALE, 113 | "error_gap_scale": ERROR_GAP_SCALE, 114 | } 115 | 116 | return parameters 117 | 118 | 119 | def create_background(p): 120 | fig = plt.figure( 121 | edgecolor=TAN, 122 | facecolor=GREEN, 123 | figsize=(p["figure"]["width"], p["figure"]["height"]), 124 | linewidth=4, 125 | ) 126 | ax_boss = fig.add_axes((0, 0, 1, 1), facecolor="none") 127 | ax_boss.set_xlim(0, 1) 128 | ax_boss.set_ylim(0, 1) 129 | return fig, ax_boss 130 | 131 | 132 | def find_node_image_size(p): 133 | """ 134 | What should the height and width of each node image be? 135 | As big as possible, given the constraints. 136 | There are two possible constraints: 137 | 1. Fill the figure top-to-bottom. 138 | 2. Fill the figure side-to-side. 139 | To determine which of these limits the size of the node images, 140 | we'll calculate the image size assuming each constraint separately, 141 | then respect the one that results in the smaller node image. 142 | """ 143 | # First assume height is the limiting factor. 144 | total_space_to_fill = ( 145 | p["figure"]["height"] 146 | - p["gap"]["bottom_border"] 147 | - p["gap"]["top_border"] 148 | ) 149 | # Use the layer with the largest number of nodes (n_max). 150 | # Pack the images and the gaps as tight as possible. 151 | # In that case, if the image height is h, 152 | # the gaps will each be h * p["gap"]["between_node_scale"]. 153 | # There will be n_max nodes and (n_max - 1) gaps. 
154 | # After a wee bit of algebra: 155 | height_constrained_by_height = ( 156 | total_space_to_fill / ( 157 | p["network"]["max_nodes"] 158 | + (p["network"]["max_nodes"] - 1) 159 | * p["gap"]["between_node_scale"] 160 | ) 161 | ) 162 | 163 | # Second assume width is the limiting factor. 164 | total_space_to_fill = ( 165 | p["figure"]["width"] 166 | - p["gap"]["left_border"] 167 | - p["gap"]["right_border"] 168 | - 2 * p["input"]["image"]["width"] 169 | ) 170 | # Again, pack the images as tightly as possible side-to-side. 171 | # In this case, if the image width is w, 172 | # the gaps will each be w * p["gap"]["between_layer_scale"]. 173 | # There will be n_layer nodes and (n_layer + 1) gaps. 174 | # After another tidbit of algebra: 175 | width_constrained_by_width = ( 176 | total_space_to_fill / ( 177 | p["network"]["n_layers"] 178 | + (p["network"]["n_layers"] + 1) 179 | * p["gap"]["between_layer_scale"] 180 | ) 181 | ) 182 | 183 | # Figure out what the height would be for this width. 184 | height_constrained_by_width = ( 185 | width_constrained_by_width 186 | / p["input"]["aspect_ratio"] 187 | ) 188 | 189 | # See which constraint is more restrictive, and go with that one. 190 | p["node_image"]["height"] = np.minimum( 191 | height_constrained_by_width, 192 | height_constrained_by_height) 193 | p["node_image"]["width"] = ( 194 | p["node_image"]["height"] 195 | * p["input"]["aspect_ratio"] 196 | ) 197 | return p 198 | 199 | 200 | def find_between_layer_gap(p): 201 | """ 202 | How big is the horizontal spacing between_layers? 203 | This is also the spacing between the input image and the first layer 204 | and between the last layer and the output image. 205 | """ 206 | horizontal_gap_total = ( 207 | p["figure"]["width"] 208 | - 2 * p["input"]["image"]["width"] 209 | - p["network"]["n_layers"] * p["node_image"]["width"] 210 | - p["gap"]["left_border"] 211 | - p["gap"]["right_border"] 212 | ) 213 | n_horizontal_gaps = p["network"]["n_layers"] + 1 214 | p["gap"]["between_layer"] = horizontal_gap_total / n_horizontal_gaps 215 | return p 216 | 217 | 218 | def find_between_node_gap(p): 219 | """ 220 | How big is the vertical gap between_node images? 221 | """ 222 | vertical_gap_total = ( 223 | p["figure"]["height"] 224 | - p["gap"]["top_border"] 225 | - p["gap"]["bottom_border"] 226 | - p["network"]["max_nodes"] 227 | * p["node_image"]["height"] 228 | ) 229 | n_vertical_gaps = p["network"]["max_nodes"] - 1 230 | p["gap"]["between_node"] = vertical_gap_total / n_vertical_gaps 231 | return p 232 | 233 | 234 | def find_error_image_position(p): 235 | """ 236 | Where exactly should the error image be positioned? 
237 | """ 238 | p["error_image"]["bottom"] = ( 239 | p["input"]["image"]["bottom"] 240 | - p["input"]["image"]["height"] 241 | * p["gap"]["error_gap_scale"] 242 | - p["error_image"]["height"] 243 | ) 244 | error_image_center = ( 245 | p["figure"]["width"] 246 | - p["gap"]["right_border"] 247 | - p["input"]["image"]["width"] / 2 248 | ) 249 | p["error_image"]["left"] = ( 250 | error_image_center 251 | - p["error_image"]["width"] / 2 252 | ) 253 | return p 254 | 255 | 256 | def add_input_image(fig, p): 257 | """ 258 | All Axes to be added use the rectangle specification 259 | (left, bottom, width, height) 260 | """ 261 | absolute_pos = ( 262 | p["gap"]["left_border"], 263 | p["input"]["image"]["bottom"], 264 | p["input"]["image"]["width"], 265 | p["input"]["image"]["height"]) 266 | ax_input = add_image_axes(fig, p, absolute_pos) 267 | fill_patch = np.random.sample(size=( 268 | p["input"]["n_rows"], 269 | p["input"]["n_cols"], 270 | )) 271 | ax_input.imshow(fill_patch, cmap="inferno") 272 | 273 | 274 | def add_image_axes(fig, p, absolute_pos): 275 | """ 276 | Locate the Axes for the image corresponding to this node within the Figure. 277 | 278 | absolute_pos: Tuple of 279 | (left_position, bottom_position, width, height) 280 | in inches on the Figure. 281 | """ 282 | scaled_pos = ( 283 | absolute_pos[0] / p["figure"]["width"], 284 | absolute_pos[1] / p["figure"]["height"], 285 | absolute_pos[2] / p["figure"]["width"], 286 | absolute_pos[3] / p["figure"]["height"]) 287 | ax = fig.add_axes(scaled_pos) 288 | ax.tick_params(bottom=False, top=False, left=False, right=False) 289 | ax.tick_params( 290 | labelbottom=False, labeltop=False, labelleft=False, labelright=False) 291 | ax.spines["top"].set_color(TAN) 292 | ax.spines["bottom"].set_color(TAN) 293 | ax.spines["left"].set_color(TAN) 294 | ax.spines["right"].set_color(TAN) 295 | return ax 296 | 297 | 298 | def save_nn_viz(fig, postfix="0"): 299 | """ 300 | Generate a new filename for each step of the process. 301 | """ 302 | base_name = "nn_viz_" 303 | filename = base_name + postfix + ".png" 304 | fig.savefig( 305 | filename, 306 | edgecolor=fig.get_edgecolor(), 307 | facecolor=fig.get_facecolor(), 308 | dpi=DPI, 309 | ) 310 | 311 | 312 | if __name__ == "__main__": 313 | main() 314 | -------------------------------------------------------------------------------- /nn_viz_19.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | import matplotlib 5 | matplotlib.use("agg") 6 | import matplotlib.pyplot as plt # noqa: E402 7 | import numpy as np # noqa: E402 8 | 9 | # Choose a color palette 10 | BLUE = "#04253a" 11 | GREEN = "#4c837a" 12 | TAN = "#e1ddbf" 13 | DPI = 300 14 | 15 | # Changing these adjusts the size and layout of the visualization 16 | FIGURE_WIDTH = 16 17 | FIGURE_HEIGHT = 9 18 | RIGHT_BORDER = 0.7 19 | LEFT_BORDER = 0.7 20 | TOP_BORDER = 0.8 21 | BOTTOM_BORDER = 0.6 22 | 23 | N_IMAGE_PIXEL_COLS = 64 24 | N_IMAGE_PIXEL_ROWS = 48 25 | N_NODES_BY_LAYER = [10, 7, 5, 8] 26 | 27 | INPUT_IMAGE_BOTTOM = 5 28 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 29 | ERROR_IMAGE_SCALE = 0.7 30 | ERROR_GAP_SCALE = 0.3 31 | BETWEEN_LAYER_SCALE = 0.8 32 | BETWEEN_NODE_SCALE = 0.4 33 | 34 | 35 | def main(): 36 | """ 37 | Build a visualization of an image autoencoder neural network, 38 | piece by piece. 39 | 40 | A central data structure in this example is the collection of parameters 41 | that define how the image is laid out. 
It is a set of nested dictionaries. 42 | """ 43 | p = construct_parameters() 44 | fig, ax_boss = create_background(p) 45 | 46 | p = find_node_image_size(p) 47 | p = find_between_layer_gap(p) 48 | p = find_between_node_gap(p) 49 | p = find_error_image_position(p) 50 | 51 | add_input_image(fig, p) 52 | save_nn_viz(fig, postfix="19_input_random_refactored") 53 | 54 | 55 | def construct_parameters(): 56 | """ 57 | Build a dictionary of parameters that describe the size and location 58 | of the elements of the visualization. This is a convenient way to pass 59 | the collection of them around . 60 | """ 61 | # Enforce square pixels. Each pixel will have the same height and width. 62 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 63 | 64 | parameters = {} 65 | 66 | # The figure as a whole 67 | parameters["figure"] = { 68 | "height": FIGURE_HEIGHT, 69 | "width": FIGURE_WIDTH, 70 | } 71 | 72 | # The input and output images 73 | parameters["input"] = { 74 | "n_cols": N_IMAGE_PIXEL_COLS, 75 | "n_rows": N_IMAGE_PIXEL_ROWS, 76 | "aspect_ratio": aspect_ratio, 77 | "image": { 78 | "bottom": INPUT_IMAGE_BOTTOM, 79 | "height": INPUT_IMAGE_HEIGHT, 80 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 81 | } 82 | } 83 | 84 | # The network as a whole 85 | parameters["network"] = { 86 | "n_nodes": N_NODES_BY_LAYER, 87 | "n_layers": len(N_NODES_BY_LAYER), 88 | "max_nodes": np.max(N_NODES_BY_LAYER), 89 | } 90 | 91 | # Individual node images 92 | parameters["node_image"] = { 93 | "height": 0, 94 | "width": 0, 95 | } 96 | 97 | parameters["error_image"] = { 98 | "left": 0, 99 | "bottom": 0, 100 | "width": parameters["input"]["image"]["width"] * ERROR_IMAGE_SCALE, 101 | "height": parameters["input"]["image"]["height"] * ERROR_IMAGE_SCALE, 102 | } 103 | 104 | parameters["gap"] = { 105 | "right_border": RIGHT_BORDER, 106 | "left_border": LEFT_BORDER, 107 | "bottom_border": BOTTOM_BORDER, 108 | "top_border": TOP_BORDER, 109 | "between_layer": 0, 110 | "between_layer_scale": BETWEEN_LAYER_SCALE, 111 | "between_node": 0, 112 | "between_node_scale": BETWEEN_NODE_SCALE, 113 | "error_gap_scale": ERROR_GAP_SCALE, 114 | } 115 | 116 | return parameters 117 | 118 | 119 | def create_background(p): 120 | fig = plt.figure( 121 | edgecolor=TAN, 122 | facecolor=GREEN, 123 | figsize=(p["figure"]["width"], p["figure"]["height"]), 124 | linewidth=4, 125 | ) 126 | ax_boss = fig.add_axes((0, 0, 1, 1), facecolor="none") 127 | ax_boss.set_xlim(0, 1) 128 | ax_boss.set_ylim(0, 1) 129 | return fig, ax_boss 130 | 131 | 132 | def find_node_image_size(p): 133 | """ 134 | What should the height and width of each node image be? 135 | As big as possible, given the constraints. 136 | There are two possible constraints: 137 | 1. Fill the figure top-to-bottom. 138 | 2. Fill the figure side-to-side. 139 | To determine which of these limits the size of the node images, 140 | we'll calculate the image size assuming each constraint separately, 141 | then respect the one that results in the smaller node image. 142 | """ 143 | # First assume height is the limiting factor. 144 | total_space_to_fill = ( 145 | p["figure"]["height"] 146 | - p["gap"]["bottom_border"] 147 | - p["gap"]["top_border"] 148 | ) 149 | # Use the layer with the largest number of nodes (n_max). 150 | # Pack the images and the gaps as tight as possible. 151 | # In that case, if the image height is h, 152 | # the gaps will each be h * p["gap"]["between_node_scale"]. 153 | # There will be n_max nodes and (n_max - 1) gaps. 
154 | # After a wee bit of algebra: 155 | height_constrained_by_height = ( 156 | total_space_to_fill / ( 157 | p["network"]["max_nodes"] 158 | + (p["network"]["max_nodes"] - 1) 159 | * p["gap"]["between_node_scale"] 160 | ) 161 | ) 162 | 163 | # Second assume width is the limiting factor. 164 | total_space_to_fill = ( 165 | p["figure"]["width"] 166 | - p["gap"]["left_border"] 167 | - p["gap"]["right_border"] 168 | - 2 * p["input"]["image"]["width"] 169 | ) 170 | # Again, pack the images as tightly as possible side-to-side. 171 | # In this case, if the image width is w, 172 | # the gaps will each be w * p["gap"]["between_layer_scale"]. 173 | # There will be n_layer nodes and (n_layer + 1) gaps. 174 | # After another tidbit of algebra: 175 | width_constrained_by_width = ( 176 | total_space_to_fill / ( 177 | p["network"]["n_layers"] 178 | + (p["network"]["n_layers"] + 1) 179 | * p["gap"]["between_layer_scale"] 180 | ) 181 | ) 182 | 183 | # Figure out what the height would be for this width. 184 | height_constrained_by_width = ( 185 | width_constrained_by_width 186 | / p["input"]["aspect_ratio"] 187 | ) 188 | 189 | # See which constraint is more restrictive, and go with that one. 190 | p["node_image"]["height"] = np.minimum( 191 | height_constrained_by_width, 192 | height_constrained_by_height) 193 | p["node_image"]["width"] = ( 194 | p["node_image"]["height"] 195 | * p["input"]["aspect_ratio"] 196 | ) 197 | return p 198 | 199 | 200 | def find_between_layer_gap(p): 201 | """ 202 | How big is the horizontal spacing between_layers? 203 | This is also the spacing between the input image and the first layer 204 | and between the last layer and the output image. 205 | """ 206 | horizontal_gap_total = ( 207 | p["figure"]["width"] 208 | - 2 * p["input"]["image"]["width"] 209 | - p["network"]["n_layers"] * p["node_image"]["width"] 210 | - p["gap"]["left_border"] 211 | - p["gap"]["right_border"] 212 | ) 213 | n_horizontal_gaps = p["network"]["n_layers"] + 1 214 | p["gap"]["between_layer"] = horizontal_gap_total / n_horizontal_gaps 215 | return p 216 | 217 | 218 | def find_between_node_gap(p): 219 | """ 220 | How big is the vertical gap between_node images? 221 | """ 222 | vertical_gap_total = ( 223 | p["figure"]["height"] 224 | - p["gap"]["top_border"] 225 | - p["gap"]["bottom_border"] 226 | - p["network"]["max_nodes"] 227 | * p["node_image"]["height"] 228 | ) 229 | n_vertical_gaps = p["network"]["max_nodes"] - 1 230 | p["gap"]["between_node"] = vertical_gap_total / n_vertical_gaps 231 | return p 232 | 233 | 234 | def find_error_image_position(p): 235 | """ 236 | Where exactly should the error image be positioned? 
237 | """ 238 | p["error_image"]["bottom"] = ( 239 | p["input"]["image"]["bottom"] 240 | - p["input"]["image"]["height"] 241 | * p["gap"]["error_gap_scale"] 242 | - p["error_image"]["height"] 243 | ) 244 | error_image_center = ( 245 | p["figure"]["width"] 246 | - p["gap"]["right_border"] 247 | - p["input"]["image"]["width"] / 2 248 | ) 249 | p["error_image"]["left"] = ( 250 | error_image_center 251 | - p["error_image"]["width"] / 2 252 | ) 253 | return p 254 | 255 | 256 | def add_input_image(fig, p): 257 | """ 258 | All Axes to be added use the rectangle specification 259 | (left, bottom, width, height) 260 | """ 261 | absolute_pos = ( 262 | p["gap"]["left_border"], 263 | p["input"]["image"]["bottom"], 264 | p["input"]["image"]["width"], 265 | p["input"]["image"]["height"]) 266 | ax_input = add_image_axes(fig, p, absolute_pos) 267 | add_filler_image( 268 | ax_input, 269 | p["input"]["n_rows"], 270 | p["input"]["n_cols"], 271 | ) 272 | 273 | 274 | def add_image_axes(fig, p, absolute_pos): 275 | """ 276 | Locate the Axes for the image corresponding to this node within the Figure. 277 | 278 | absolute_pos: Tuple of 279 | (left_position, bottom_position, width, height) 280 | in inches on the Figure. 281 | """ 282 | scaled_pos = ( 283 | absolute_pos[0] / p["figure"]["width"], 284 | absolute_pos[1] / p["figure"]["height"], 285 | absolute_pos[2] / p["figure"]["width"], 286 | absolute_pos[3] / p["figure"]["height"]) 287 | ax = fig.add_axes(scaled_pos) 288 | ax.tick_params(bottom=False, top=False, left=False, right=False) 289 | ax.tick_params( 290 | labelbottom=False, labeltop=False, labelleft=False, labelright=False) 291 | ax.spines["top"].set_color(TAN) 292 | ax.spines["bottom"].set_color(TAN) 293 | ax.spines["left"].set_color(TAN) 294 | ax.spines["right"].set_color(TAN) 295 | return ax 296 | 297 | 298 | def add_filler_image(ax, n_im_rows, n_im_cols): 299 | """ 300 | Add a chunk of image as a placeholder. 301 | """ 302 | fill_patch = np.random.sample(size=(n_im_rows, n_im_cols)) 303 | ax.imshow(fill_patch, cmap="inferno") 304 | 305 | 306 | def save_nn_viz(fig, postfix="0"): 307 | """ 308 | Generate a new filename for each step of the process. 
309 | """ 310 | base_name = "nn_viz_" 311 | filename = base_name + postfix + ".png" 312 | fig.savefig( 313 | filename, 314 | edgecolor=fig.get_edgecolor(), 315 | facecolor=fig.get_facecolor(), 316 | dpi=DPI, 317 | ) 318 | 319 | 320 | if __name__ == "__main__": 321 | main() 322 | -------------------------------------------------------------------------------- /nn_viz_20.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | import matplotlib 5 | matplotlib.use("agg") 6 | import matplotlib.pyplot as plt # noqa: E402 7 | import numpy as np # noqa: E402 8 | 9 | # Choose a color palette 10 | BLUE = "#04253a" 11 | GREEN = "#4c837a" 12 | TAN = "#e1ddbf" 13 | DPI = 300 14 | 15 | # Changing these adjusts the size and layout of the visualization 16 | FIGURE_WIDTH = 16 17 | FIGURE_HEIGHT = 9 18 | RIGHT_BORDER = 0.7 19 | LEFT_BORDER = 0.7 20 | TOP_BORDER = 0.8 21 | BOTTOM_BORDER = 0.6 22 | 23 | N_IMAGE_PIXEL_COLS = 64 24 | N_IMAGE_PIXEL_ROWS = 48 25 | N_NODES_BY_LAYER = [10, 7, 5, 8] 26 | 27 | INPUT_IMAGE_BOTTOM = 5 28 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 29 | ERROR_IMAGE_SCALE = 0.7 30 | ERROR_GAP_SCALE = 0.3 31 | BETWEEN_LAYER_SCALE = 0.8 32 | BETWEEN_NODE_SCALE = 0.4 33 | 34 | 35 | def main(): 36 | """ 37 | Build a visualization of an image autoencoder neural network, 38 | piece by piece. 39 | 40 | A central data structure in this example is the collection of parameters 41 | that define how the image is laid out. It is a set of nested dictionaries. 42 | """ 43 | p = construct_parameters() 44 | fig, ax_boss = create_background(p) 45 | 46 | p = find_node_image_size(p) 47 | p = find_between_layer_gap(p) 48 | p = find_between_node_gap(p) 49 | p = find_error_image_position(p) 50 | 51 | add_input_image(fig, p) 52 | add_node_images(fig, p) 53 | save_nn_viz(fig, postfix="20_layer_0_node_0") 54 | 55 | 56 | def construct_parameters(): 57 | """ 58 | Build a dictionary of parameters that describe the size and location 59 | of the elements of the visualization. This is a convenient way to pass 60 | the collection of them around . 61 | """ 62 | # Enforce square pixels. Each pixel will have the same height and width. 
63 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 64 | 65 | parameters = {} 66 | 67 | # The figure as a whole 68 | parameters["figure"] = { 69 | "height": FIGURE_HEIGHT, 70 | "width": FIGURE_WIDTH, 71 | } 72 | 73 | # The input and output images 74 | parameters["input"] = { 75 | "n_cols": N_IMAGE_PIXEL_COLS, 76 | "n_rows": N_IMAGE_PIXEL_ROWS, 77 | "aspect_ratio": aspect_ratio, 78 | "image": { 79 | "bottom": INPUT_IMAGE_BOTTOM, 80 | "height": INPUT_IMAGE_HEIGHT, 81 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 82 | } 83 | } 84 | 85 | # The network as a whole 86 | parameters["network"] = { 87 | "n_nodes": N_NODES_BY_LAYER, 88 | "n_layers": len(N_NODES_BY_LAYER), 89 | "max_nodes": np.max(N_NODES_BY_LAYER), 90 | } 91 | 92 | # Individual node images 93 | parameters["node_image"] = { 94 | "height": 0, 95 | "width": 0, 96 | } 97 | 98 | parameters["error_image"] = { 99 | "left": 0, 100 | "bottom": 0, 101 | "width": parameters["input"]["image"]["width"] * ERROR_IMAGE_SCALE, 102 | "height": parameters["input"]["image"]["height"] * ERROR_IMAGE_SCALE, 103 | } 104 | 105 | parameters["gap"] = { 106 | "right_border": RIGHT_BORDER, 107 | "left_border": LEFT_BORDER, 108 | "bottom_border": BOTTOM_BORDER, 109 | "top_border": TOP_BORDER, 110 | "between_layer": 0, 111 | "between_layer_scale": BETWEEN_LAYER_SCALE, 112 | "between_node": 0, 113 | "between_node_scale": BETWEEN_NODE_SCALE, 114 | "error_gap_scale": ERROR_GAP_SCALE, 115 | } 116 | 117 | return parameters 118 | 119 | 120 | def create_background(p): 121 | fig = plt.figure( 122 | edgecolor=TAN, 123 | facecolor=GREEN, 124 | figsize=(p["figure"]["width"], p["figure"]["height"]), 125 | linewidth=4, 126 | ) 127 | ax_boss = fig.add_axes((0, 0, 1, 1), facecolor="none") 128 | ax_boss.set_xlim(0, 1) 129 | ax_boss.set_ylim(0, 1) 130 | return fig, ax_boss 131 | 132 | 133 | def find_node_image_size(p): 134 | """ 135 | What should the height and width of each node image be? 136 | As big as possible, given the constraints. 137 | There are two possible constraints: 138 | 1. Fill the figure top-to-bottom. 139 | 2. Fill the figure side-to-side. 140 | To determine which of these limits the size of the node images, 141 | we'll calculate the image size assuming each constraint separately, 142 | then respect the one that results in the smaller node image. 143 | """ 144 | # First assume height is the limiting factor. 145 | total_space_to_fill = ( 146 | p["figure"]["height"] 147 | - p["gap"]["bottom_border"] 148 | - p["gap"]["top_border"] 149 | ) 150 | # Use the layer with the largest number of nodes (n_max). 151 | # Pack the images and the gaps as tight as possible. 152 | # In that case, if the image height is h, 153 | # the gaps will each be h * p["gap"]["between_node_scale"]. 154 | # There will be n_max nodes and (n_max - 1) gaps. 155 | # After a wee bit of algebra: 156 | height_constrained_by_height = ( 157 | total_space_to_fill / ( 158 | p["network"]["max_nodes"] 159 | + (p["network"]["max_nodes"] - 1) 160 | * p["gap"]["between_node_scale"] 161 | ) 162 | ) 163 | 164 | # Second assume width is the limiting factor. 165 | total_space_to_fill = ( 166 | p["figure"]["width"] 167 | - p["gap"]["left_border"] 168 | - p["gap"]["right_border"] 169 | - 2 * p["input"]["image"]["width"] 170 | ) 171 | # Again, pack the images as tightly as possible side-to-side. 172 | # In this case, if the image width is w, 173 | # the gaps will each be w * p["gap"]["between_layer_scale"]. 174 | # There will be n_layer nodes and (n_layer + 1) gaps. 
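    # (Worked numbers, added for illustration: with the defaults at the top of
    # this file, each input/output image is 0.25 * 9 * (64 / 48) = 3.0 inches
    # wide, so total_space_to_fill = 16 - 0.7 - 0.7 - 2 * 3.0 = 8.6 inches.
    # With n_layers = 4 and between_layer_scale = 0.8 the denominator is
    # 4 + 5 * 0.8 = 8.0, giving a width-limited node image width of
    # 8.6 / 8.0 = 1.075 inches, i.e. a height of about 0.81 inches. That is
    # larger than the roughly 0.56 inches the vertical packing allows, so the
    # height constraint ends up being the binding one here.)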
175 | # After another tidbit of algebra: 176 | width_constrained_by_width = ( 177 | total_space_to_fill / ( 178 | p["network"]["n_layers"] 179 | + (p["network"]["n_layers"] + 1) 180 | * p["gap"]["between_layer_scale"] 181 | ) 182 | ) 183 | 184 | # Figure out what the height would be for this width. 185 | height_constrained_by_width = ( 186 | width_constrained_by_width 187 | / p["input"]["aspect_ratio"] 188 | ) 189 | 190 | # See which constraint is more restrictive, and go with that one. 191 | p["node_image"]["height"] = np.minimum( 192 | height_constrained_by_width, 193 | height_constrained_by_height) 194 | p["node_image"]["width"] = ( 195 | p["node_image"]["height"] 196 | * p["input"]["aspect_ratio"] 197 | ) 198 | return p 199 | 200 | 201 | def find_between_layer_gap(p): 202 | """ 203 | How big is the horizontal spacing between_layers? 204 | This is also the spacing between the input image and the first layer 205 | and between the last layer and the output image. 206 | """ 207 | horizontal_gap_total = ( 208 | p["figure"]["width"] 209 | - 2 * p["input"]["image"]["width"] 210 | - p["network"]["n_layers"] * p["node_image"]["width"] 211 | - p["gap"]["left_border"] 212 | - p["gap"]["right_border"] 213 | ) 214 | n_horizontal_gaps = p["network"]["n_layers"] + 1 215 | p["gap"]["between_layer"] = horizontal_gap_total / n_horizontal_gaps 216 | return p 217 | 218 | 219 | def find_between_node_gap(p): 220 | """ 221 | How big is the vertical gap between_node images? 222 | """ 223 | vertical_gap_total = ( 224 | p["figure"]["height"] 225 | - p["gap"]["top_border"] 226 | - p["gap"]["bottom_border"] 227 | - p["network"]["max_nodes"] 228 | * p["node_image"]["height"] 229 | ) 230 | n_vertical_gaps = p["network"]["max_nodes"] - 1 231 | p["gap"]["between_node"] = vertical_gap_total / n_vertical_gaps 232 | return p 233 | 234 | 235 | def find_error_image_position(p): 236 | """ 237 | Where exactly should the error image be positioned? 
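It hangs just below the output image: its top edge sits a gap of
error_gap_scale * (input image height) beneath the output image's bottom
edge, and it is centered horizontally on the output image.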
238 | """ 239 | p["error_image"]["bottom"] = ( 240 | p["input"]["image"]["bottom"] 241 | - p["input"]["image"]["height"] 242 | * p["gap"]["error_gap_scale"] 243 | - p["error_image"]["height"] 244 | ) 245 | error_image_center = ( 246 | p["figure"]["width"] 247 | - p["gap"]["right_border"] 248 | - p["input"]["image"]["width"] / 2 249 | ) 250 | p["error_image"]["left"] = ( 251 | error_image_center 252 | - p["error_image"]["width"] / 2 253 | ) 254 | return p 255 | 256 | 257 | def add_input_image(fig, p): 258 | """ 259 | All Axes to be added use the rectangle specification 260 | (left, bottom, width, height) 261 | """ 262 | absolute_pos = ( 263 | p["gap"]["left_border"], 264 | p["input"]["image"]["bottom"], 265 | p["input"]["image"]["width"], 266 | p["input"]["image"]["height"]) 267 | ax_input = add_image_axes(fig, p, absolute_pos) 268 | add_filler_image( 269 | ax_input, 270 | p["input"]["n_rows"], 271 | p["input"]["n_cols"], 272 | ) 273 | 274 | 275 | def add_node_images(fig, p): 276 | node_image_left = ( 277 | p["gap"]["left_border"] 278 | + p["input"]["image"]["width"] 279 | + p["gap"]["between_layer"] 280 | ) 281 | n_nodes = p["network"]["n_nodes"][0] 282 | total_layer_height = ( 283 | n_nodes * p["node_image"]["height"] 284 | + (n_nodes - 1) * p["gap"]["between_node"] 285 | ) 286 | node_image_bottom = (p["figure"]["height"] - total_layer_height) / 2 287 | 288 | absolute_pos = ( 289 | node_image_left, 290 | node_image_bottom, 291 | p["node_image"]["width"], 292 | p["node_image"]["height"]) 293 | ax = add_image_axes(fig, p, absolute_pos) 294 | add_filler_image( 295 | ax, 296 | p["input"]["n_rows"], 297 | p["input"]["n_cols"], 298 | ) 299 | 300 | 301 | def add_image_axes(fig, p, absolute_pos): 302 | """ 303 | Locate the Axes for the image corresponding to this node within the Figure. 304 | 305 | absolute_pos: Tuple of 306 | (left_position, bottom_position, width, height) 307 | in inches on the Figure. 308 | """ 309 | scaled_pos = ( 310 | absolute_pos[0] / p["figure"]["width"], 311 | absolute_pos[1] / p["figure"]["height"], 312 | absolute_pos[2] / p["figure"]["width"], 313 | absolute_pos[3] / p["figure"]["height"]) 314 | ax = fig.add_axes(scaled_pos) 315 | ax.tick_params(bottom=False, top=False, left=False, right=False) 316 | ax.tick_params( 317 | labelbottom=False, labeltop=False, labelleft=False, labelright=False) 318 | ax.spines["top"].set_color(TAN) 319 | ax.spines["bottom"].set_color(TAN) 320 | ax.spines["left"].set_color(TAN) 321 | ax.spines["right"].set_color(TAN) 322 | return ax 323 | 324 | 325 | def add_filler_image(ax, n_im_rows, n_im_cols): 326 | """ 327 | Add a chunk of image as a placeholder. 328 | """ 329 | fill_patch = np.random.sample(size=(n_im_rows, n_im_cols)) 330 | ax.imshow(fill_patch, cmap="inferno") 331 | 332 | 333 | def save_nn_viz(fig, postfix="0"): 334 | """ 335 | Generate a new filename for each step of the process. 
336 | """ 337 | base_name = "nn_viz_" 338 | filename = base_name + postfix + ".png" 339 | fig.savefig( 340 | filename, 341 | edgecolor=fig.get_edgecolor(), 342 | facecolor=fig.get_facecolor(), 343 | dpi=DPI, 344 | ) 345 | 346 | 347 | if __name__ == "__main__": 348 | main() 349 | -------------------------------------------------------------------------------- /nn_viz_21.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | import matplotlib 5 | matplotlib.use("agg") 6 | import matplotlib.pyplot as plt # noqa: E402 7 | import numpy as np # noqa: E402 8 | 9 | # Choose a color palette 10 | BLUE = "#04253a" 11 | GREEN = "#4c837a" 12 | TAN = "#e1ddbf" 13 | DPI = 300 14 | 15 | # Changing these adjusts the size and layout of the visualization 16 | FIGURE_WIDTH = 16 17 | FIGURE_HEIGHT = 9 18 | RIGHT_BORDER = 0.7 19 | LEFT_BORDER = 0.7 20 | TOP_BORDER = 0.8 21 | BOTTOM_BORDER = 0.6 22 | 23 | N_IMAGE_PIXEL_COLS = 64 24 | N_IMAGE_PIXEL_ROWS = 48 25 | N_NODES_BY_LAYER = [10, 7, 5, 8] 26 | 27 | INPUT_IMAGE_BOTTOM = 5 28 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 29 | ERROR_IMAGE_SCALE = 0.7 30 | ERROR_GAP_SCALE = 0.3 31 | BETWEEN_LAYER_SCALE = 0.8 32 | BETWEEN_NODE_SCALE = 0.4 33 | 34 | 35 | def main(): 36 | """ 37 | Build a visualization of an image autoencoder neural network, 38 | piece by piece. 39 | 40 | A central data structure in this example is the collection of parameters 41 | that define how the image is laid out. It is a set of nested dictionaries. 42 | """ 43 | p = construct_parameters() 44 | fig, ax_boss = create_background(p) 45 | 46 | p = find_node_image_size(p) 47 | p = find_between_layer_gap(p) 48 | p = find_between_node_gap(p) 49 | p = find_error_image_position(p) 50 | 51 | add_input_image(fig, p) 52 | add_node_images(fig, p) 53 | save_nn_viz(fig, postfix="21_layer_0") 54 | 55 | 56 | def construct_parameters(): 57 | """ 58 | Build a dictionary of parameters that describe the size and location 59 | of the elements of the visualization. This is a convenient way to pass 60 | the collection of them around . 61 | """ 62 | # Enforce square pixels. Each pixel will have the same height and width. 
63 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 64 | 65 | parameters = {} 66 | 67 | # The figure as a whole 68 | parameters["figure"] = { 69 | "height": FIGURE_HEIGHT, 70 | "width": FIGURE_WIDTH, 71 | } 72 | 73 | # The input and output images 74 | parameters["input"] = { 75 | "n_cols": N_IMAGE_PIXEL_COLS, 76 | "n_rows": N_IMAGE_PIXEL_ROWS, 77 | "aspect_ratio": aspect_ratio, 78 | "image": { 79 | "bottom": INPUT_IMAGE_BOTTOM, 80 | "height": INPUT_IMAGE_HEIGHT, 81 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 82 | } 83 | } 84 | 85 | # The network as a whole 86 | parameters["network"] = { 87 | "n_nodes": N_NODES_BY_LAYER, 88 | "n_layers": len(N_NODES_BY_LAYER), 89 | "max_nodes": np.max(N_NODES_BY_LAYER), 90 | } 91 | 92 | # Individual node images 93 | parameters["node_image"] = { 94 | "height": 0, 95 | "width": 0, 96 | } 97 | 98 | parameters["error_image"] = { 99 | "left": 0, 100 | "bottom": 0, 101 | "width": parameters["input"]["image"]["width"] * ERROR_IMAGE_SCALE, 102 | "height": parameters["input"]["image"]["height"] * ERROR_IMAGE_SCALE, 103 | } 104 | 105 | parameters["gap"] = { 106 | "right_border": RIGHT_BORDER, 107 | "left_border": LEFT_BORDER, 108 | "bottom_border": BOTTOM_BORDER, 109 | "top_border": TOP_BORDER, 110 | "between_layer": 0, 111 | "between_layer_scale": BETWEEN_LAYER_SCALE, 112 | "between_node": 0, 113 | "between_node_scale": BETWEEN_NODE_SCALE, 114 | "error_gap_scale": ERROR_GAP_SCALE, 115 | } 116 | 117 | return parameters 118 | 119 | 120 | def create_background(p): 121 | fig = plt.figure( 122 | edgecolor=TAN, 123 | facecolor=GREEN, 124 | figsize=(p["figure"]["width"], p["figure"]["height"]), 125 | linewidth=4, 126 | ) 127 | ax_boss = fig.add_axes((0, 0, 1, 1), facecolor="none") 128 | ax_boss.set_xlim(0, 1) 129 | ax_boss.set_ylim(0, 1) 130 | return fig, ax_boss 131 | 132 | 133 | def find_node_image_size(p): 134 | """ 135 | What should the height and width of each node image be? 136 | As big as possible, given the constraints. 137 | There are two possible constraints: 138 | 1. Fill the figure top-to-bottom. 139 | 2. Fill the figure side-to-side. 140 | To determine which of these limits the size of the node images, 141 | we'll calculate the image size assuming each constraint separately, 142 | then respect the one that results in the smaller node image. 143 | """ 144 | # First assume height is the limiting factor. 145 | total_space_to_fill = ( 146 | p["figure"]["height"] 147 | - p["gap"]["bottom_border"] 148 | - p["gap"]["top_border"] 149 | ) 150 | # Use the layer with the largest number of nodes (n_max). 151 | # Pack the images and the gaps as tight as possible. 152 | # In that case, if the image height is h, 153 | # the gaps will each be h * p["gap"]["between_node_scale"]. 154 | # There will be n_max nodes and (n_max - 1) gaps. 155 | # After a wee bit of algebra: 156 | height_constrained_by_height = ( 157 | total_space_to_fill / ( 158 | p["network"]["max_nodes"] 159 | + (p["network"]["max_nodes"] - 1) 160 | * p["gap"]["between_node_scale"] 161 | ) 162 | ) 163 | 164 | # Second assume width is the limiting factor. 165 | total_space_to_fill = ( 166 | p["figure"]["width"] 167 | - p["gap"]["left_border"] 168 | - p["gap"]["right_border"] 169 | - 2 * p["input"]["image"]["width"] 170 | ) 171 | # Again, pack the images as tightly as possible side-to-side. 172 | # In this case, if the image width is w, 173 | # the gaps will each be w * p["gap"]["between_layer_scale"]. 174 | # There will be n_layer nodes and (n_layer + 1) gaps. 
175 | # After another tidbit of algebra: 176 | width_constrained_by_width = ( 177 | total_space_to_fill / ( 178 | p["network"]["n_layers"] 179 | + (p["network"]["n_layers"] + 1) 180 | * p["gap"]["between_layer_scale"] 181 | ) 182 | ) 183 | 184 | # Figure out what the height would be for this width. 185 | height_constrained_by_width = ( 186 | width_constrained_by_width 187 | / p["input"]["aspect_ratio"] 188 | ) 189 | 190 | # See which constraint is more restrictive, and go with that one. 191 | p["node_image"]["height"] = np.minimum( 192 | height_constrained_by_width, 193 | height_constrained_by_height) 194 | p["node_image"]["width"] = ( 195 | p["node_image"]["height"] 196 | * p["input"]["aspect_ratio"] 197 | ) 198 | return p 199 | 200 | 201 | def find_between_layer_gap(p): 202 | """ 203 | How big is the horizontal spacing between_layers? 204 | This is also the spacing between the input image and the first layer 205 | and between the last layer and the output image. 206 | """ 207 | horizontal_gap_total = ( 208 | p["figure"]["width"] 209 | - 2 * p["input"]["image"]["width"] 210 | - p["network"]["n_layers"] * p["node_image"]["width"] 211 | - p["gap"]["left_border"] 212 | - p["gap"]["right_border"] 213 | ) 214 | n_horizontal_gaps = p["network"]["n_layers"] + 1 215 | p["gap"]["between_layer"] = horizontal_gap_total / n_horizontal_gaps 216 | return p 217 | 218 | 219 | def find_between_node_gap(p): 220 | """ 221 | How big is the vertical gap between_node images? 222 | """ 223 | vertical_gap_total = ( 224 | p["figure"]["height"] 225 | - p["gap"]["top_border"] 226 | - p["gap"]["bottom_border"] 227 | - p["network"]["max_nodes"] 228 | * p["node_image"]["height"] 229 | ) 230 | n_vertical_gaps = p["network"]["max_nodes"] - 1 231 | p["gap"]["between_node"] = vertical_gap_total / n_vertical_gaps 232 | return p 233 | 234 | 235 | def find_error_image_position(p): 236 | """ 237 | Where exactly should the error image be positioned? 
238 | """ 239 | p["error_image"]["bottom"] = ( 240 | p["input"]["image"]["bottom"] 241 | - p["input"]["image"]["height"] 242 | * p["gap"]["error_gap_scale"] 243 | - p["error_image"]["height"] 244 | ) 245 | error_image_center = ( 246 | p["figure"]["width"] 247 | - p["gap"]["right_border"] 248 | - p["input"]["image"]["width"] / 2 249 | ) 250 | p["error_image"]["left"] = ( 251 | error_image_center 252 | - p["error_image"]["width"] / 2 253 | ) 254 | return p 255 | 256 | 257 | def add_input_image(fig, p): 258 | """ 259 | All Axes to be added use the rectangle specification 260 | (left, bottom, width, height) 261 | """ 262 | absolute_pos = ( 263 | p["gap"]["left_border"], 264 | p["input"]["image"]["bottom"], 265 | p["input"]["image"]["width"], 266 | p["input"]["image"]["height"]) 267 | ax_input = add_image_axes(fig, p, absolute_pos) 268 | add_filler_image( 269 | ax_input, 270 | p["input"]["n_rows"], 271 | p["input"]["n_cols"], 272 | ) 273 | 274 | 275 | def add_node_images(fig, p): 276 | node_image_left = ( 277 | p["gap"]["left_border"] 278 | + p["input"]["image"]["width"] 279 | + p["gap"]["between_layer"] 280 | ) 281 | n_nodes = p["network"]["n_nodes"][0] 282 | total_layer_height = ( 283 | n_nodes * p["node_image"]["height"] 284 | + (n_nodes - 1) * p["gap"]["between_node"] 285 | ) 286 | layer_bottom = (p["figure"]["height"] - total_layer_height) / 2 287 | 288 | for i_node in range(n_nodes): 289 | node_image_bottom = ( 290 | layer_bottom + i_node * ( 291 | p["node_image"]["height"] + p["gap"]["between_node"])) 292 | 293 | absolute_pos = ( 294 | node_image_left, 295 | node_image_bottom, 296 | p["node_image"]["width"], 297 | p["node_image"]["height"]) 298 | ax = add_image_axes(fig, p, absolute_pos) 299 | add_filler_image( 300 | ax, 301 | p["input"]["n_rows"], 302 | p["input"]["n_cols"], 303 | ) 304 | 305 | 306 | def add_image_axes(fig, p, absolute_pos): 307 | """ 308 | Locate the Axes for the image corresponding to this node within the Figure. 309 | 310 | absolute_pos: Tuple of 311 | (left_position, bottom_position, width, height) 312 | in inches on the Figure. 313 | """ 314 | scaled_pos = ( 315 | absolute_pos[0] / p["figure"]["width"], 316 | absolute_pos[1] / p["figure"]["height"], 317 | absolute_pos[2] / p["figure"]["width"], 318 | absolute_pos[3] / p["figure"]["height"]) 319 | ax = fig.add_axes(scaled_pos) 320 | ax.tick_params(bottom=False, top=False, left=False, right=False) 321 | ax.tick_params( 322 | labelbottom=False, labeltop=False, labelleft=False, labelright=False) 323 | ax.spines["top"].set_color(TAN) 324 | ax.spines["bottom"].set_color(TAN) 325 | ax.spines["left"].set_color(TAN) 326 | ax.spines["right"].set_color(TAN) 327 | return ax 328 | 329 | 330 | def add_filler_image(ax, n_im_rows, n_im_cols): 331 | """ 332 | Add a chunk of image as a placeholder. 333 | """ 334 | fill_patch = np.random.sample(size=(n_im_rows, n_im_cols)) 335 | ax.imshow(fill_patch, cmap="inferno") 336 | 337 | 338 | def save_nn_viz(fig, postfix="0"): 339 | """ 340 | Generate a new filename for each step of the process. 
341 | """ 342 | base_name = "nn_viz_" 343 | filename = base_name + postfix + ".png" 344 | fig.savefig( 345 | filename, 346 | edgecolor=fig.get_edgecolor(), 347 | facecolor=fig.get_facecolor(), 348 | dpi=DPI, 349 | ) 350 | 351 | 352 | if __name__ == "__main__": 353 | main() 354 | -------------------------------------------------------------------------------- /nn_viz_22.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | import matplotlib 5 | matplotlib.use("agg") 6 | import matplotlib.pyplot as plt # noqa: E402 7 | import numpy as np # noqa: E402 8 | 9 | # Choose a color palette 10 | BLUE = "#04253a" 11 | GREEN = "#4c837a" 12 | TAN = "#e1ddbf" 13 | DPI = 300 14 | 15 | # Changing these adjusts the size and layout of the visualization 16 | FIGURE_WIDTH = 16 17 | FIGURE_HEIGHT = 9 18 | RIGHT_BORDER = 0.7 19 | LEFT_BORDER = 0.7 20 | TOP_BORDER = 0.8 21 | BOTTOM_BORDER = 0.6 22 | 23 | N_IMAGE_PIXEL_COLS = 64 24 | N_IMAGE_PIXEL_ROWS = 48 25 | N_NODES_BY_LAYER = [10, 7, 5, 8] 26 | 27 | INPUT_IMAGE_BOTTOM = 5 28 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 29 | ERROR_IMAGE_SCALE = 0.7 30 | ERROR_GAP_SCALE = 0.3 31 | BETWEEN_LAYER_SCALE = 0.8 32 | BETWEEN_NODE_SCALE = 0.4 33 | 34 | 35 | def main(): 36 | """ 37 | Build a visualization of an image autoencoder neural network, 38 | piece by piece. 39 | 40 | A central data structure in this example is the collection of parameters 41 | that define how the image is laid out. It is a set of nested dictionaries. 42 | """ 43 | p = construct_parameters() 44 | fig, ax_boss = create_background(p) 45 | 46 | p = find_node_image_size(p) 47 | p = find_between_layer_gap(p) 48 | p = find_between_node_gap(p) 49 | p = find_error_image_position(p) 50 | 51 | add_input_image(fig, p) 52 | for i_layer in range(p["network"]["n_layers"]): 53 | add_node_images(fig, i_layer, p) 54 | save_nn_viz(fig, postfix="22_all_layers") 55 | 56 | 57 | def construct_parameters(): 58 | """ 59 | Build a dictionary of parameters that describe the size and location 60 | of the elements of the visualization. This is a convenient way to pass 61 | the collection of them around . 62 | """ 63 | # Enforce square pixels. Each pixel will have the same height and width. 
64 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 65 | 66 | parameters = {} 67 | 68 | # The figure as a whole 69 | parameters["figure"] = { 70 | "height": FIGURE_HEIGHT, 71 | "width": FIGURE_WIDTH, 72 | } 73 | 74 | # The input and output images 75 | parameters["input"] = { 76 | "n_cols": N_IMAGE_PIXEL_COLS, 77 | "n_rows": N_IMAGE_PIXEL_ROWS, 78 | "aspect_ratio": aspect_ratio, 79 | "image": { 80 | "bottom": INPUT_IMAGE_BOTTOM, 81 | "height": INPUT_IMAGE_HEIGHT, 82 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 83 | } 84 | } 85 | 86 | # The network as a whole 87 | parameters["network"] = { 88 | "n_nodes": N_NODES_BY_LAYER, 89 | "n_layers": len(N_NODES_BY_LAYER), 90 | "max_nodes": np.max(N_NODES_BY_LAYER), 91 | } 92 | 93 | # Individual node images 94 | parameters["node_image"] = { 95 | "height": 0, 96 | "width": 0, 97 | } 98 | 99 | parameters["error_image"] = { 100 | "left": 0, 101 | "bottom": 0, 102 | "width": parameters["input"]["image"]["width"] * ERROR_IMAGE_SCALE, 103 | "height": parameters["input"]["image"]["height"] * ERROR_IMAGE_SCALE, 104 | } 105 | 106 | parameters["gap"] = { 107 | "right_border": RIGHT_BORDER, 108 | "left_border": LEFT_BORDER, 109 | "bottom_border": BOTTOM_BORDER, 110 | "top_border": TOP_BORDER, 111 | "between_layer": 0, 112 | "between_layer_scale": BETWEEN_LAYER_SCALE, 113 | "between_node": 0, 114 | "between_node_scale": BETWEEN_NODE_SCALE, 115 | "error_gap_scale": ERROR_GAP_SCALE, 116 | } 117 | 118 | return parameters 119 | 120 | 121 | def create_background(p): 122 | fig = plt.figure( 123 | edgecolor=TAN, 124 | facecolor=GREEN, 125 | figsize=(p["figure"]["width"], p["figure"]["height"]), 126 | linewidth=4, 127 | ) 128 | ax_boss = fig.add_axes((0, 0, 1, 1), facecolor="none") 129 | ax_boss.set_xlim(0, 1) 130 | ax_boss.set_ylim(0, 1) 131 | return fig, ax_boss 132 | 133 | 134 | def find_node_image_size(p): 135 | """ 136 | What should the height and width of each node image be? 137 | As big as possible, given the constraints. 138 | There are two possible constraints: 139 | 1. Fill the figure top-to-bottom. 140 | 2. Fill the figure side-to-side. 141 | To determine which of these limits the size of the node images, 142 | we'll calculate the image size assuming each constraint separately, 143 | then respect the one that results in the smaller node image. 144 | """ 145 | # First assume height is the limiting factor. 146 | total_space_to_fill = ( 147 | p["figure"]["height"] 148 | - p["gap"]["bottom_border"] 149 | - p["gap"]["top_border"] 150 | ) 151 | # Use the layer with the largest number of nodes (n_max). 152 | # Pack the images and the gaps as tight as possible. 153 | # In that case, if the image height is h, 154 | # the gaps will each be h * p["gap"]["between_node_scale"]. 155 | # There will be n_max nodes and (n_max - 1) gaps. 156 | # After a wee bit of algebra: 157 | height_constrained_by_height = ( 158 | total_space_to_fill / ( 159 | p["network"]["max_nodes"] 160 | + (p["network"]["max_nodes"] - 1) 161 | * p["gap"]["between_node_scale"] 162 | ) 163 | ) 164 | 165 | # Second assume width is the limiting factor. 166 | total_space_to_fill = ( 167 | p["figure"]["width"] 168 | - p["gap"]["left_border"] 169 | - p["gap"]["right_border"] 170 | - 2 * p["input"]["image"]["width"] 171 | ) 172 | # Again, pack the images as tightly as possible side-to-side. 173 | # In this case, if the image width is w, 174 | # the gaps will each be w * p["gap"]["between_layer_scale"]. 175 | # There will be n_layer nodes and (n_layer + 1) gaps. 
176 | # After another tidbit of algebra: 177 | width_constrained_by_width = ( 178 | total_space_to_fill / ( 179 | p["network"]["n_layers"] 180 | + (p["network"]["n_layers"] + 1) 181 | * p["gap"]["between_layer_scale"] 182 | ) 183 | ) 184 | 185 | # Figure out what the height would be for this width. 186 | height_constrained_by_width = ( 187 | width_constrained_by_width 188 | / p["input"]["aspect_ratio"] 189 | ) 190 | 191 | # See which constraint is more restrictive, and go with that one. 192 | p["node_image"]["height"] = np.minimum( 193 | height_constrained_by_width, 194 | height_constrained_by_height) 195 | p["node_image"]["width"] = ( 196 | p["node_image"]["height"] 197 | * p["input"]["aspect_ratio"] 198 | ) 199 | return p 200 | 201 | 202 | def find_between_layer_gap(p): 203 | """ 204 | How big is the horizontal spacing between_layers? 205 | This is also the spacing between the input image and the first layer 206 | and between the last layer and the output image. 207 | """ 208 | horizontal_gap_total = ( 209 | p["figure"]["width"] 210 | - 2 * p["input"]["image"]["width"] 211 | - p["network"]["n_layers"] * p["node_image"]["width"] 212 | - p["gap"]["left_border"] 213 | - p["gap"]["right_border"] 214 | ) 215 | n_horizontal_gaps = p["network"]["n_layers"] + 1 216 | p["gap"]["between_layer"] = horizontal_gap_total / n_horizontal_gaps 217 | return p 218 | 219 | 220 | def find_between_node_gap(p): 221 | """ 222 | How big is the vertical gap between_node images? 223 | """ 224 | vertical_gap_total = ( 225 | p["figure"]["height"] 226 | - p["gap"]["top_border"] 227 | - p["gap"]["bottom_border"] 228 | - p["network"]["max_nodes"] 229 | * p["node_image"]["height"] 230 | ) 231 | n_vertical_gaps = p["network"]["max_nodes"] - 1 232 | p["gap"]["between_node"] = vertical_gap_total / n_vertical_gaps 233 | return p 234 | 235 | 236 | def find_error_image_position(p): 237 | """ 238 | Where exactly should the error image be positioned? 
239 | """ 240 | p["error_image"]["bottom"] = ( 241 | p["input"]["image"]["bottom"] 242 | - p["input"]["image"]["height"] 243 | * p["gap"]["error_gap_scale"] 244 | - p["error_image"]["height"] 245 | ) 246 | error_image_center = ( 247 | p["figure"]["width"] 248 | - p["gap"]["right_border"] 249 | - p["input"]["image"]["width"] / 2 250 | ) 251 | p["error_image"]["left"] = ( 252 | error_image_center 253 | - p["error_image"]["width"] / 2 254 | ) 255 | return p 256 | 257 | 258 | def add_input_image(fig, p): 259 | """ 260 | All Axes to be added use the rectangle specification 261 | (left, bottom, width, height) 262 | """ 263 | absolute_pos = ( 264 | p["gap"]["left_border"], 265 | p["input"]["image"]["bottom"], 266 | p["input"]["image"]["width"], 267 | p["input"]["image"]["height"]) 268 | ax_input = add_image_axes(fig, p, absolute_pos) 269 | add_filler_image( 270 | ax_input, 271 | p["input"]["n_rows"], 272 | p["input"]["n_cols"], 273 | ) 274 | 275 | 276 | def add_node_images(fig, i_layer, p): 277 | """ 278 | Add in all the node images for a single layer 279 | """ 280 | node_image_left = ( 281 | p["gap"]["left_border"] 282 | + p["input"]["image"]["width"] 283 | + i_layer * p["node_image"]["width"] 284 | + (i_layer + 1) * p["gap"]["between_layer"] 285 | ) 286 | n_nodes = p["network"]["n_nodes"][i_layer] 287 | total_layer_height = ( 288 | n_nodes * p["node_image"]["height"] 289 | + (n_nodes - 1) * p["gap"]["between_node"] 290 | ) 291 | layer_bottom = (p["figure"]["height"] - total_layer_height) / 2 292 | for i_node in range(n_nodes): 293 | node_image_bottom = ( 294 | layer_bottom + i_node * ( 295 | p["node_image"]["height"] + p["gap"]["between_node"])) 296 | 297 | absolute_pos = ( 298 | node_image_left, 299 | node_image_bottom, 300 | p["node_image"]["width"], 301 | p["node_image"]["height"]) 302 | ax = add_image_axes(fig, p, absolute_pos) 303 | add_filler_image( 304 | ax, 305 | p["input"]["n_rows"], 306 | p["input"]["n_cols"], 307 | ) 308 | 309 | 310 | def add_image_axes(fig, p, absolute_pos): 311 | """ 312 | Locate the Axes for the image corresponding to this node within the Figure. 313 | 314 | absolute_pos: Tuple of 315 | (left_position, bottom_position, width, height) 316 | in inches on the Figure. 317 | """ 318 | scaled_pos = ( 319 | absolute_pos[0] / p["figure"]["width"], 320 | absolute_pos[1] / p["figure"]["height"], 321 | absolute_pos[2] / p["figure"]["width"], 322 | absolute_pos[3] / p["figure"]["height"]) 323 | ax = fig.add_axes(scaled_pos) 324 | ax.tick_params(bottom=False, top=False, left=False, right=False) 325 | ax.tick_params( 326 | labelbottom=False, labeltop=False, labelleft=False, labelright=False) 327 | ax.spines["top"].set_color(TAN) 328 | ax.spines["bottom"].set_color(TAN) 329 | ax.spines["left"].set_color(TAN) 330 | ax.spines["right"].set_color(TAN) 331 | return ax 332 | 333 | 334 | def add_filler_image(ax, n_im_rows, n_im_cols): 335 | """ 336 | Add a chunk of image as a placeholder. 337 | """ 338 | fill_patch = np.random.sample(size=(n_im_rows, n_im_cols)) 339 | ax.imshow(fill_patch, cmap="inferno") 340 | 341 | 342 | def save_nn_viz(fig, postfix="0"): 343 | """ 344 | Generate a new filename for each step of the process. 
345 | """ 346 | base_name = "nn_viz_" 347 | filename = base_name + postfix + ".png" 348 | fig.savefig( 349 | filename, 350 | edgecolor=fig.get_edgecolor(), 351 | facecolor=fig.get_facecolor(), 352 | dpi=DPI, 353 | ) 354 | 355 | 356 | if __name__ == "__main__": 357 | main() 358 | -------------------------------------------------------------------------------- /nn_viz_23.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | import matplotlib 5 | matplotlib.use("agg") 6 | import matplotlib.pyplot as plt # noqa: E402 7 | import numpy as np # noqa: E402 8 | 9 | # Choose a color palette 10 | BLUE = "#04253a" 11 | GREEN = "#4c837a" 12 | TAN = "#e1ddbf" 13 | DPI = 300 14 | 15 | # Changing these adjusts the size and layout of the visualization 16 | FIGURE_WIDTH = 16 17 | FIGURE_HEIGHT = 9 18 | RIGHT_BORDER = 0.7 19 | LEFT_BORDER = 0.7 20 | TOP_BORDER = 0.8 21 | BOTTOM_BORDER = 0.6 22 | 23 | N_IMAGE_PIXEL_COLS = 64 24 | N_IMAGE_PIXEL_ROWS = 48 25 | N_NODES_BY_LAYER = [10, 7, 5, 8] 26 | 27 | INPUT_IMAGE_BOTTOM = 5 28 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 29 | ERROR_IMAGE_SCALE = 0.7 30 | ERROR_GAP_SCALE = 0.3 31 | BETWEEN_LAYER_SCALE = 0.8 32 | BETWEEN_NODE_SCALE = 0.4 33 | 34 | 35 | def main(): 36 | """ 37 | Build a visualization of an image autoencoder neural network, 38 | piece by piece. 39 | 40 | A central data structure in this example is the collection of parameters 41 | that define how the image is laid out. It is a set of nested dictionaries. 42 | """ 43 | p = construct_parameters() 44 | fig, ax_boss = create_background(p) 45 | 46 | p = find_node_image_size(p) 47 | p = find_between_layer_gap(p) 48 | p = find_between_node_gap(p) 49 | p = find_error_image_position(p) 50 | 51 | add_input_image(fig, p) 52 | for i_layer in range(p["network"]["n_layers"]): 53 | add_node_images(fig, i_layer, p) 54 | add_output_image(fig, p) 55 | save_nn_viz(fig, postfix="23_output_image") 56 | 57 | 58 | def construct_parameters(): 59 | """ 60 | Build a dictionary of parameters that describe the size and location 61 | of the elements of the visualization. This is a convenient way to pass 62 | the collection of them around . 63 | """ 64 | # Enforce square pixels. Each pixel will have the same height and width. 
65 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 66 | 67 | parameters = {} 68 | 69 | # The figure as a whole 70 | parameters["figure"] = { 71 | "height": FIGURE_HEIGHT, 72 | "width": FIGURE_WIDTH, 73 | } 74 | 75 | # The input and output images 76 | parameters["input"] = { 77 | "n_cols": N_IMAGE_PIXEL_COLS, 78 | "n_rows": N_IMAGE_PIXEL_ROWS, 79 | "aspect_ratio": aspect_ratio, 80 | "image": { 81 | "bottom": INPUT_IMAGE_BOTTOM, 82 | "height": INPUT_IMAGE_HEIGHT, 83 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 84 | } 85 | } 86 | 87 | # The network as a whole 88 | parameters["network"] = { 89 | "n_nodes": N_NODES_BY_LAYER, 90 | "n_layers": len(N_NODES_BY_LAYER), 91 | "max_nodes": np.max(N_NODES_BY_LAYER), 92 | } 93 | 94 | # Individual node images 95 | parameters["node_image"] = { 96 | "height": 0, 97 | "width": 0, 98 | } 99 | 100 | parameters["error_image"] = { 101 | "left": 0, 102 | "bottom": 0, 103 | "width": parameters["input"]["image"]["width"] * ERROR_IMAGE_SCALE, 104 | "height": parameters["input"]["image"]["height"] * ERROR_IMAGE_SCALE, 105 | } 106 | 107 | parameters["gap"] = { 108 | "right_border": RIGHT_BORDER, 109 | "left_border": LEFT_BORDER, 110 | "bottom_border": BOTTOM_BORDER, 111 | "top_border": TOP_BORDER, 112 | "between_layer": 0, 113 | "between_layer_scale": BETWEEN_LAYER_SCALE, 114 | "between_node": 0, 115 | "between_node_scale": BETWEEN_NODE_SCALE, 116 | "error_gap_scale": ERROR_GAP_SCALE, 117 | } 118 | 119 | return parameters 120 | 121 | 122 | def create_background(p): 123 | fig = plt.figure( 124 | edgecolor=TAN, 125 | facecolor=GREEN, 126 | figsize=(p["figure"]["width"], p["figure"]["height"]), 127 | linewidth=4, 128 | ) 129 | ax_boss = fig.add_axes((0, 0, 1, 1), facecolor="none") 130 | ax_boss.set_xlim(0, 1) 131 | ax_boss.set_ylim(0, 1) 132 | return fig, ax_boss 133 | 134 | 135 | def find_node_image_size(p): 136 | """ 137 | What should the height and width of each node image be? 138 | As big as possible, given the constraints. 139 | There are two possible constraints: 140 | 1. Fill the figure top-to-bottom. 141 | 2. Fill the figure side-to-side. 142 | To determine which of these limits the size of the node images, 143 | we'll calculate the image size assuming each constraint separately, 144 | then respect the one that results in the smaller node image. 145 | """ 146 | # First assume height is the limiting factor. 147 | total_space_to_fill = ( 148 | p["figure"]["height"] 149 | - p["gap"]["bottom_border"] 150 | - p["gap"]["top_border"] 151 | ) 152 | # Use the layer with the largest number of nodes (n_max). 153 | # Pack the images and the gaps as tight as possible. 154 | # In that case, if the image height is h, 155 | # the gaps will each be h * p["gap"]["between_node_scale"]. 156 | # There will be n_max nodes and (n_max - 1) gaps. 157 | # After a wee bit of algebra: 158 | height_constrained_by_height = ( 159 | total_space_to_fill / ( 160 | p["network"]["max_nodes"] 161 | + (p["network"]["max_nodes"] - 1) 162 | * p["gap"]["between_node_scale"] 163 | ) 164 | ) 165 | 166 | # Second assume width is the limiting factor. 167 | total_space_to_fill = ( 168 | p["figure"]["width"] 169 | - p["gap"]["left_border"] 170 | - p["gap"]["right_border"] 171 | - 2 * p["input"]["image"]["width"] 172 | ) 173 | # Again, pack the images as tightly as possible side-to-side. 174 | # In this case, if the image width is w, 175 | # the gaps will each be w * p["gap"]["between_layer_scale"]. 176 | # There will be n_layer nodes and (n_layer + 1) gaps. 
177 | # After another tidbit of algebra: 178 | width_constrained_by_width = ( 179 | total_space_to_fill / ( 180 | p["network"]["n_layers"] 181 | + (p["network"]["n_layers"] + 1) 182 | * p["gap"]["between_layer_scale"] 183 | ) 184 | ) 185 | 186 | # Figure out what the height would be for this width. 187 | height_constrained_by_width = ( 188 | width_constrained_by_width 189 | / p["input"]["aspect_ratio"] 190 | ) 191 | 192 | # See which constraint is more restrictive, and go with that one. 193 | p["node_image"]["height"] = np.minimum( 194 | height_constrained_by_width, 195 | height_constrained_by_height) 196 | p["node_image"]["width"] = ( 197 | p["node_image"]["height"] 198 | * p["input"]["aspect_ratio"] 199 | ) 200 | return p 201 | 202 | 203 | def find_between_layer_gap(p): 204 | """ 205 | How big is the horizontal spacing between_layers? 206 | This is also the spacing between the input image and the first layer 207 | and between the last layer and the output image. 208 | """ 209 | horizontal_gap_total = ( 210 | p["figure"]["width"] 211 | - 2 * p["input"]["image"]["width"] 212 | - p["network"]["n_layers"] * p["node_image"]["width"] 213 | - p["gap"]["left_border"] 214 | - p["gap"]["right_border"] 215 | ) 216 | n_horizontal_gaps = p["network"]["n_layers"] + 1 217 | p["gap"]["between_layer"] = horizontal_gap_total / n_horizontal_gaps 218 | return p 219 | 220 | 221 | def find_between_node_gap(p): 222 | """ 223 | How big is the vertical gap between_node images? 224 | """ 225 | vertical_gap_total = ( 226 | p["figure"]["height"] 227 | - p["gap"]["top_border"] 228 | - p["gap"]["bottom_border"] 229 | - p["network"]["max_nodes"] 230 | * p["node_image"]["height"] 231 | ) 232 | n_vertical_gaps = p["network"]["max_nodes"] - 1 233 | p["gap"]["between_node"] = vertical_gap_total / n_vertical_gaps 234 | return p 235 | 236 | 237 | def find_error_image_position(p): 238 | """ 239 | Where exactly should the error image be positioned? 
240 | """ 241 | p["error_image"]["bottom"] = ( 242 | p["input"]["image"]["bottom"] 243 | - p["input"]["image"]["height"] 244 | * p["gap"]["error_gap_scale"] 245 | - p["error_image"]["height"] 246 | ) 247 | error_image_center = ( 248 | p["figure"]["width"] 249 | - p["gap"]["right_border"] 250 | - p["input"]["image"]["width"] / 2 251 | ) 252 | p["error_image"]["left"] = ( 253 | error_image_center 254 | - p["error_image"]["width"] / 2 255 | ) 256 | return p 257 | 258 | 259 | def add_input_image(fig, p): 260 | """ 261 | All Axes to be added use the rectangle specification 262 | (left, bottom, width, height) 263 | """ 264 | absolute_pos = ( 265 | p["gap"]["left_border"], 266 | p["input"]["image"]["bottom"], 267 | p["input"]["image"]["width"], 268 | p["input"]["image"]["height"]) 269 | ax_input = add_image_axes(fig, p, absolute_pos) 270 | add_filler_image( 271 | ax_input, 272 | p["input"]["n_rows"], 273 | p["input"]["n_cols"], 274 | ) 275 | 276 | 277 | def add_node_images(fig, i_layer, p): 278 | """ 279 | Add in all the node images for a single layer 280 | """ 281 | node_image_left = ( 282 | p["gap"]["left_border"] 283 | + p["input"]["image"]["width"] 284 | + i_layer * p["node_image"]["width"] 285 | + (i_layer + 1) * p["gap"]["between_layer"] 286 | ) 287 | n_nodes = p["network"]["n_nodes"][i_layer] 288 | total_layer_height = ( 289 | n_nodes * p["node_image"]["height"] 290 | + (n_nodes - 1) * p["gap"]["between_node"] 291 | ) 292 | layer_bottom = (p["figure"]["height"] - total_layer_height) / 2 293 | for i_node in range(n_nodes): 294 | node_image_bottom = ( 295 | layer_bottom + i_node * ( 296 | p["node_image"]["height"] + p["gap"]["between_node"])) 297 | 298 | absolute_pos = ( 299 | node_image_left, 300 | node_image_bottom, 301 | p["node_image"]["width"], 302 | p["node_image"]["height"]) 303 | ax = add_image_axes(fig, p, absolute_pos) 304 | add_filler_image( 305 | ax, 306 | p["input"]["n_rows"], 307 | p["input"]["n_cols"], 308 | ) 309 | 310 | 311 | def add_output_image(fig, p): 312 | output_image_left = ( 313 | p["figure"]["width"] 314 | - p["input"]["image"]["width"] 315 | - p["gap"]["right_border"] 316 | ) 317 | absolute_pos = ( 318 | output_image_left, 319 | p["input"]["image"]["bottom"], 320 | p["input"]["image"]["width"], 321 | p["input"]["image"]["height"]) 322 | ax_output = add_image_axes(fig, p, absolute_pos) 323 | add_filler_image( 324 | ax_output, 325 | p["input"]["n_rows"], 326 | p["input"]["n_cols"], 327 | ) 328 | 329 | 330 | def add_image_axes(fig, p, absolute_pos): 331 | """ 332 | Locate the Axes for the image corresponding to this node within the Figure. 333 | 334 | absolute_pos: Tuple of 335 | (left_position, bottom_position, width, height) 336 | in inches on the Figure. 337 | """ 338 | scaled_pos = ( 339 | absolute_pos[0] / p["figure"]["width"], 340 | absolute_pos[1] / p["figure"]["height"], 341 | absolute_pos[2] / p["figure"]["width"], 342 | absolute_pos[3] / p["figure"]["height"]) 343 | ax = fig.add_axes(scaled_pos) 344 | ax.tick_params(bottom=False, top=False, left=False, right=False) 345 | ax.tick_params( 346 | labelbottom=False, labeltop=False, labelleft=False, labelright=False) 347 | ax.spines["top"].set_color(TAN) 348 | ax.spines["bottom"].set_color(TAN) 349 | ax.spines["left"].set_color(TAN) 350 | ax.spines["right"].set_color(TAN) 351 | return ax 352 | 353 | 354 | def add_filler_image(ax, n_im_rows, n_im_cols): 355 | """ 356 | Add a chunk of image as a placeholder. 
357 | """ 358 | fill_patch = np.random.sample(size=(n_im_rows, n_im_cols)) 359 | ax.imshow(fill_patch, cmap="inferno") 360 | 361 | 362 | def save_nn_viz(fig, postfix="0"): 363 | """ 364 | Generate a new filename for each step of the process. 365 | """ 366 | base_name = "nn_viz_" 367 | filename = base_name + postfix + ".png" 368 | fig.savefig( 369 | filename, 370 | edgecolor=fig.get_edgecolor(), 371 | facecolor=fig.get_facecolor(), 372 | dpi=DPI, 373 | ) 374 | 375 | 376 | if __name__ == "__main__": 377 | main() 378 | -------------------------------------------------------------------------------- /nn_viz_24.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | import matplotlib 5 | matplotlib.use("agg") 6 | import matplotlib.pyplot as plt # noqa: E402 7 | import numpy as np # noqa: E402 8 | 9 | # Choose a color palette 10 | BLUE = "#04253a" 11 | GREEN = "#4c837a" 12 | TAN = "#e1ddbf" 13 | DPI = 300 14 | 15 | # Changing these adjusts the size and layout of the visualization 16 | FIGURE_WIDTH = 16 17 | FIGURE_HEIGHT = 9 18 | RIGHT_BORDER = 0.7 19 | LEFT_BORDER = 0.7 20 | TOP_BORDER = 0.8 21 | BOTTOM_BORDER = 0.6 22 | 23 | N_IMAGE_PIXEL_COLS = 64 24 | N_IMAGE_PIXEL_ROWS = 48 25 | N_NODES_BY_LAYER = [10, 7, 5, 8] 26 | 27 | INPUT_IMAGE_BOTTOM = 5 28 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 29 | ERROR_IMAGE_SCALE = 0.7 30 | ERROR_GAP_SCALE = 0.3 31 | BETWEEN_LAYER_SCALE = 0.8 32 | BETWEEN_NODE_SCALE = 0.4 33 | 34 | 35 | def main(): 36 | """ 37 | Build a visualization of an image autoencoder neural network, 38 | piece by piece. 39 | 40 | A central data structure in this example is the collection of parameters 41 | that define how the image is laid out. It is a set of nested dictionaries. 42 | """ 43 | p = construct_parameters() 44 | fig, ax_boss = create_background(p) 45 | 46 | p = find_node_image_size(p) 47 | p = find_between_layer_gap(p) 48 | p = find_between_node_gap(p) 49 | p = find_error_image_position(p) 50 | 51 | add_input_image(fig, p) 52 | for i_layer in range(p["network"]["n_layers"]): 53 | add_node_images(fig, i_layer, p) 54 | add_output_image(fig, p) 55 | add_error_image(fig, p) 56 | save_nn_viz(fig, postfix="24_error_image") 57 | 58 | 59 | def construct_parameters(): 60 | """ 61 | Build a dictionary of parameters that describe the size and location 62 | of the elements of the visualization. This is a convenient way to pass 63 | the collection of them around . 64 | """ 65 | # Enforce square pixels. Each pixel will have the same height and width. 
66 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 67 | 68 | parameters = {} 69 | 70 | # The figure as a whole 71 | parameters["figure"] = { 72 | "height": FIGURE_HEIGHT, 73 | "width": FIGURE_WIDTH, 74 | } 75 | 76 | # The input and output images 77 | parameters["input"] = { 78 | "n_cols": N_IMAGE_PIXEL_COLS, 79 | "n_rows": N_IMAGE_PIXEL_ROWS, 80 | "aspect_ratio": aspect_ratio, 81 | "image": { 82 | "bottom": INPUT_IMAGE_BOTTOM, 83 | "height": INPUT_IMAGE_HEIGHT, 84 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 85 | } 86 | } 87 | 88 | # The network as a whole 89 | parameters["network"] = { 90 | "n_nodes": N_NODES_BY_LAYER, 91 | "n_layers": len(N_NODES_BY_LAYER), 92 | "max_nodes": np.max(N_NODES_BY_LAYER), 93 | } 94 | 95 | # Individual node images 96 | parameters["node_image"] = { 97 | "height": 0, 98 | "width": 0, 99 | } 100 | 101 | parameters["error_image"] = { 102 | "left": 0, 103 | "bottom": 0, 104 | "width": parameters["input"]["image"]["width"] * ERROR_IMAGE_SCALE, 105 | "height": parameters["input"]["image"]["height"] * ERROR_IMAGE_SCALE, 106 | } 107 | 108 | parameters["gap"] = { 109 | "right_border": RIGHT_BORDER, 110 | "left_border": LEFT_BORDER, 111 | "bottom_border": BOTTOM_BORDER, 112 | "top_border": TOP_BORDER, 113 | "between_layer": 0, 114 | "between_layer_scale": BETWEEN_LAYER_SCALE, 115 | "between_node": 0, 116 | "between_node_scale": BETWEEN_NODE_SCALE, 117 | "error_gap_scale": ERROR_GAP_SCALE, 118 | } 119 | 120 | return parameters 121 | 122 | 123 | def create_background(p): 124 | fig = plt.figure( 125 | edgecolor=TAN, 126 | facecolor=GREEN, 127 | figsize=(p["figure"]["width"], p["figure"]["height"]), 128 | linewidth=4, 129 | ) 130 | ax_boss = fig.add_axes((0, 0, 1, 1), facecolor="none") 131 | ax_boss.set_xlim(0, 1) 132 | ax_boss.set_ylim(0, 1) 133 | return fig, ax_boss 134 | 135 | 136 | def find_node_image_size(p): 137 | """ 138 | What should the height and width of each node image be? 139 | As big as possible, given the constraints. 140 | There are two possible constraints: 141 | 1. Fill the figure top-to-bottom. 142 | 2. Fill the figure side-to-side. 143 | To determine which of these limits the size of the node images, 144 | we'll calculate the image size assuming each constraint separately, 145 | then respect the one that results in the smaller node image. 146 | """ 147 | # First assume height is the limiting factor. 148 | total_space_to_fill = ( 149 | p["figure"]["height"] 150 | - p["gap"]["bottom_border"] 151 | - p["gap"]["top_border"] 152 | ) 153 | # Use the layer with the largest number of nodes (n_max). 154 | # Pack the images and the gaps as tight as possible. 155 | # In that case, if the image height is h, 156 | # the gaps will each be h * p["gap"]["between_node_scale"]. 157 | # There will be n_max nodes and (n_max - 1) gaps. 158 | # After a wee bit of algebra: 159 | height_constrained_by_height = ( 160 | total_space_to_fill / ( 161 | p["network"]["max_nodes"] 162 | + (p["network"]["max_nodes"] - 1) 163 | * p["gap"]["between_node_scale"] 164 | ) 165 | ) 166 | 167 | # Second assume width is the limiting factor. 168 | total_space_to_fill = ( 169 | p["figure"]["width"] 170 | - p["gap"]["left_border"] 171 | - p["gap"]["right_border"] 172 | - 2 * p["input"]["image"]["width"] 173 | ) 174 | # Again, pack the images as tightly as possible side-to-side. 175 | # In this case, if the image width is w, 176 | # the gaps will each be w * p["gap"]["between_layer_scale"]. 177 | # There will be n_layer nodes and (n_layer + 1) gaps. 
178 | # After another tidbit of algebra: 179 | width_constrained_by_width = ( 180 | total_space_to_fill / ( 181 | p["network"]["n_layers"] 182 | + (p["network"]["n_layers"] + 1) 183 | * p["gap"]["between_layer_scale"] 184 | ) 185 | ) 186 | 187 | # Figure out what the height would be for this width. 188 | height_constrained_by_width = ( 189 | width_constrained_by_width 190 | / p["input"]["aspect_ratio"] 191 | ) 192 | 193 | # See which constraint is more restrictive, and go with that one. 194 | p["node_image"]["height"] = np.minimum( 195 | height_constrained_by_width, 196 | height_constrained_by_height) 197 | p["node_image"]["width"] = ( 198 | p["node_image"]["height"] 199 | * p["input"]["aspect_ratio"] 200 | ) 201 | return p 202 | 203 | 204 | def find_between_layer_gap(p): 205 | """ 206 | How big is the horizontal spacing between_layers? 207 | This is also the spacing between the input image and the first layer 208 | and between the last layer and the output image. 209 | """ 210 | horizontal_gap_total = ( 211 | p["figure"]["width"] 212 | - 2 * p["input"]["image"]["width"] 213 | - p["network"]["n_layers"] * p["node_image"]["width"] 214 | - p["gap"]["left_border"] 215 | - p["gap"]["right_border"] 216 | ) 217 | n_horizontal_gaps = p["network"]["n_layers"] + 1 218 | p["gap"]["between_layer"] = horizontal_gap_total / n_horizontal_gaps 219 | return p 220 | 221 | 222 | def find_between_node_gap(p): 223 | """ 224 | How big is the vertical gap between_node images? 225 | """ 226 | vertical_gap_total = ( 227 | p["figure"]["height"] 228 | - p["gap"]["top_border"] 229 | - p["gap"]["bottom_border"] 230 | - p["network"]["max_nodes"] 231 | * p["node_image"]["height"] 232 | ) 233 | n_vertical_gaps = p["network"]["max_nodes"] - 1 234 | p["gap"]["between_node"] = vertical_gap_total / n_vertical_gaps 235 | return p 236 | 237 | 238 | def find_error_image_position(p): 239 | """ 240 | Where exactly should the error image be positioned? 
241 | """ 242 | p["error_image"]["bottom"] = ( 243 | p["input"]["image"]["bottom"] 244 | - p["input"]["image"]["height"] 245 | * p["gap"]["error_gap_scale"] 246 | - p["error_image"]["height"] 247 | ) 248 | error_image_center = ( 249 | p["figure"]["width"] 250 | - p["gap"]["right_border"] 251 | - p["input"]["image"]["width"] / 2 252 | ) 253 | p["error_image"]["left"] = ( 254 | error_image_center 255 | - p["error_image"]["width"] / 2 256 | ) 257 | return p 258 | 259 | 260 | def add_input_image(fig, p): 261 | """ 262 | All Axes to be added use the rectangle specification 263 | (left, bottom, width, height) 264 | """ 265 | absolute_pos = ( 266 | p["gap"]["left_border"], 267 | p["input"]["image"]["bottom"], 268 | p["input"]["image"]["width"], 269 | p["input"]["image"]["height"]) 270 | ax_input = add_image_axes(fig, p, absolute_pos) 271 | add_filler_image( 272 | ax_input, 273 | p["input"]["n_rows"], 274 | p["input"]["n_cols"], 275 | ) 276 | 277 | 278 | def add_node_images(fig, i_layer, p): 279 | """ 280 | Add in all the node images for a single layer 281 | """ 282 | node_image_left = ( 283 | p["gap"]["left_border"] 284 | + p["input"]["image"]["width"] 285 | + i_layer * p["node_image"]["width"] 286 | + (i_layer + 1) * p["gap"]["between_layer"] 287 | ) 288 | n_nodes = p["network"]["n_nodes"][i_layer] 289 | total_layer_height = ( 290 | n_nodes * p["node_image"]["height"] 291 | + (n_nodes - 1) * p["gap"]["between_node"] 292 | ) 293 | layer_bottom = (p["figure"]["height"] - total_layer_height) / 2 294 | for i_node in range(n_nodes): 295 | node_image_bottom = ( 296 | layer_bottom + i_node * ( 297 | p["node_image"]["height"] + p["gap"]["between_node"])) 298 | 299 | absolute_pos = ( 300 | node_image_left, 301 | node_image_bottom, 302 | p["node_image"]["width"], 303 | p["node_image"]["height"]) 304 | ax = add_image_axes(fig, p, absolute_pos) 305 | add_filler_image( 306 | ax, 307 | p["input"]["n_rows"], 308 | p["input"]["n_cols"], 309 | ) 310 | 311 | 312 | def add_output_image(fig, p): 313 | output_image_left = ( 314 | p["figure"]["width"] 315 | - p["input"]["image"]["width"] 316 | - p["gap"]["right_border"] 317 | ) 318 | absolute_pos = ( 319 | output_image_left, 320 | p["input"]["image"]["bottom"], 321 | p["input"]["image"]["width"], 322 | p["input"]["image"]["height"]) 323 | ax_output = add_image_axes(fig, p, absolute_pos) 324 | add_filler_image( 325 | ax_output, 326 | p["input"]["n_rows"], 327 | p["input"]["n_cols"], 328 | ) 329 | 330 | 331 | def add_error_image(fig, p): 332 | absolute_pos = ( 333 | p["error_image"]["left"], 334 | p["error_image"]["bottom"], 335 | p["error_image"]["width"], 336 | p["error_image"]["height"]) 337 | ax_error = add_image_axes(fig, p, absolute_pos) 338 | add_filler_image( 339 | ax_error, 340 | p["input"]["n_rows"], 341 | p["input"]["n_cols"], 342 | ) 343 | 344 | 345 | def add_image_axes(fig, p, absolute_pos): 346 | """ 347 | Locate the Axes for the image corresponding to this node within the Figure. 348 | 349 | absolute_pos: Tuple of 350 | (left_position, bottom_position, width, height) 351 | in inches on the Figure. 
352 | """ 353 | scaled_pos = ( 354 | absolute_pos[0] / p["figure"]["width"], 355 | absolute_pos[1] / p["figure"]["height"], 356 | absolute_pos[2] / p["figure"]["width"], 357 | absolute_pos[3] / p["figure"]["height"]) 358 | ax = fig.add_axes(scaled_pos) 359 | ax.tick_params(bottom=False, top=False, left=False, right=False) 360 | ax.tick_params( 361 | labelbottom=False, labeltop=False, labelleft=False, labelright=False) 362 | ax.spines["top"].set_color(TAN) 363 | ax.spines["bottom"].set_color(TAN) 364 | ax.spines["left"].set_color(TAN) 365 | ax.spines["right"].set_color(TAN) 366 | return ax 367 | 368 | 369 | def add_filler_image(ax, n_im_rows, n_im_cols): 370 | """ 371 | Add a chunk of image as a placeholder. 372 | """ 373 | fill_patch = np.random.sample(size=(n_im_rows, n_im_cols)) 374 | ax.imshow(fill_patch, cmap="inferno") 375 | 376 | 377 | def save_nn_viz(fig, postfix="0"): 378 | """ 379 | Generate a new filename for each step of the process. 380 | """ 381 | base_name = "nn_viz_" 382 | filename = base_name + postfix + ".png" 383 | fig.savefig( 384 | filename, 385 | edgecolor=fig.get_edgecolor(), 386 | facecolor=fig.get_facecolor(), 387 | dpi=DPI, 388 | ) 389 | 390 | 391 | if __name__ == "__main__": 392 | main() 393 | -------------------------------------------------------------------------------- /nn_viz_25.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | import matplotlib 5 | matplotlib.use("agg") 6 | import matplotlib.pyplot as plt # noqa: E402 7 | import numpy as np # noqa: E402 8 | from PIL import Image # noqa: E402 9 | 10 | # Choose a color palette 11 | BLUE = "#04253a" 12 | GREEN = "#4c837a" 13 | TAN = "#e1ddbf" 14 | DPI = 300 15 | 16 | # This is an image that will be used to fill in the visualization blocks 17 | # for each node in the neural network. If we were to build an actual neural 18 | # network and attach it, we could use visualizations of the nodes instead. 19 | FILLER_IMAGE_FILENAME = "512px-Cajal_cortex_drawings.png" 20 | # https://commons.wikimedia.org/wiki/File:Cajal_cortex_drawings.png 21 | # https://upload.wikimedia.org/wikipedia/commons/5/5b/Cajal_cortex_drawings.png 22 | # User:Looie496 created file, 23 | # Santiago Ramon y Cajal created artwork [Public domain] 24 | 25 | # Changing these adjusts the size and layout of the visualization 26 | FIGURE_WIDTH = 16 27 | FIGURE_HEIGHT = 9 28 | RIGHT_BORDER = 0.7 29 | LEFT_BORDER = 0.7 30 | TOP_BORDER = 0.8 31 | BOTTOM_BORDER = 0.6 32 | 33 | N_IMAGE_PIXEL_COLS = 64 34 | N_IMAGE_PIXEL_ROWS = 48 35 | N_NODES_BY_LAYER = [10, 7, 5, 8] 36 | 37 | INPUT_IMAGE_BOTTOM = 5 38 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 39 | ERROR_IMAGE_SCALE = 0.7 40 | ERROR_GAP_SCALE = 0.3 41 | BETWEEN_LAYER_SCALE = 0.8 42 | BETWEEN_NODE_SCALE = 0.4 43 | 44 | 45 | def main(): 46 | """ 47 | Build a visualization of an image autoencoder neural network, 48 | piece by piece. 49 | 50 | A central data structure in this example is the collection of parameters 51 | that define how the image is laid out. It is a set of nested dictionaries. 
52 | """ 53 | p = construct_parameters() 54 | fig, ax_boss = create_background(p) 55 | 56 | p = find_node_image_size(p) 57 | p = find_between_layer_gap(p) 58 | p = find_between_node_gap(p) 59 | p = find_error_image_position(p) 60 | 61 | filler_image = load_filler_image() 62 | add_input_image(fig, p, filler_image) 63 | for i_layer in range(p["network"]["n_layers"]): 64 | add_node_images(fig, i_layer, p, filler_image) 65 | add_output_image(fig, p, filler_image) 66 | add_error_image(fig, p, filler_image) 67 | save_nn_viz(fig, postfix="25_filler_image") 68 | 69 | 70 | def construct_parameters(): 71 | """ 72 | Build a dictionary of parameters that describe the size and location 73 | of the elements of the visualization. This is a convenient way to pass 74 | the collection of them around . 75 | """ 76 | # Enforce square pixels. Each pixel will have the same height and width. 77 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 78 | 79 | parameters = {} 80 | 81 | # The figure as a whole 82 | parameters["figure"] = { 83 | "height": FIGURE_HEIGHT, 84 | "width": FIGURE_WIDTH, 85 | } 86 | 87 | # The input and output images 88 | parameters["input"] = { 89 | "n_cols": N_IMAGE_PIXEL_COLS, 90 | "n_rows": N_IMAGE_PIXEL_ROWS, 91 | "aspect_ratio": aspect_ratio, 92 | "image": { 93 | "bottom": INPUT_IMAGE_BOTTOM, 94 | "height": INPUT_IMAGE_HEIGHT, 95 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 96 | } 97 | } 98 | 99 | # The network as a whole 100 | parameters["network"] = { 101 | "n_nodes": N_NODES_BY_LAYER, 102 | "n_layers": len(N_NODES_BY_LAYER), 103 | "max_nodes": np.max(N_NODES_BY_LAYER), 104 | } 105 | 106 | # Individual node images 107 | parameters["node_image"] = { 108 | "height": 0, 109 | "width": 0, 110 | } 111 | 112 | parameters["error_image"] = { 113 | "left": 0, 114 | "bottom": 0, 115 | "width": parameters["input"]["image"]["width"] * ERROR_IMAGE_SCALE, 116 | "height": parameters["input"]["image"]["height"] * ERROR_IMAGE_SCALE, 117 | } 118 | 119 | parameters["gap"] = { 120 | "right_border": RIGHT_BORDER, 121 | "left_border": LEFT_BORDER, 122 | "bottom_border": BOTTOM_BORDER, 123 | "top_border": TOP_BORDER, 124 | "between_layer": 0, 125 | "between_layer_scale": BETWEEN_LAYER_SCALE, 126 | "between_node": 0, 127 | "between_node_scale": BETWEEN_NODE_SCALE, 128 | "error_gap_scale": ERROR_GAP_SCALE, 129 | } 130 | 131 | return parameters 132 | 133 | 134 | def create_background(p): 135 | fig = plt.figure( 136 | edgecolor=TAN, 137 | facecolor=GREEN, 138 | figsize=(p["figure"]["width"], p["figure"]["height"]), 139 | linewidth=4, 140 | ) 141 | ax_boss = fig.add_axes((0, 0, 1, 1), facecolor="none") 142 | ax_boss.set_xlim(0, 1) 143 | ax_boss.set_ylim(0, 1) 144 | return fig, ax_boss 145 | 146 | 147 | def find_node_image_size(p): 148 | """ 149 | What should the height and width of each node image be? 150 | As big as possible, given the constraints. 151 | There are two possible constraints: 152 | 1. Fill the figure top-to-bottom. 153 | 2. Fill the figure side-to-side. 154 | To determine which of these limits the size of the node images, 155 | we'll calculate the image size assuming each constraint separately, 156 | then respect the one that results in the smaller node image. 157 | """ 158 | # First assume height is the limiting factor. 159 | total_space_to_fill = ( 160 | p["figure"]["height"] 161 | - p["gap"]["bottom_border"] 162 | - p["gap"]["top_border"] 163 | ) 164 | # Use the layer with the largest number of nodes (n_max). 165 | # Pack the images and the gaps as tight as possible. 
166 | # In that case, if the image height is h, 167 | # the gaps will each be h * p["gap"]["between_node_scale"]. 168 | # There will be n_max nodes and (n_max - 1) gaps. 169 | # After a wee bit of algebra: 170 | height_constrained_by_height = ( 171 | total_space_to_fill / ( 172 | p["network"]["max_nodes"] 173 | + (p["network"]["max_nodes"] - 1) 174 | * p["gap"]["between_node_scale"] 175 | ) 176 | ) 177 | 178 | # Second assume width is the limiting factor. 179 | total_space_to_fill = ( 180 | p["figure"]["width"] 181 | - p["gap"]["left_border"] 182 | - p["gap"]["right_border"] 183 | - 2 * p["input"]["image"]["width"] 184 | ) 185 | # Again, pack the images as tightly as possible side-to-side. 186 | # In this case, if the image width is w, 187 | # the gaps will each be w * p["gap"]["between_layer_scale"]. 188 | # There will be n_layer nodes and (n_layer + 1) gaps. 189 | # After another tidbit of algebra: 190 | width_constrained_by_width = ( 191 | total_space_to_fill / ( 192 | p["network"]["n_layers"] 193 | + (p["network"]["n_layers"] + 1) 194 | * p["gap"]["between_layer_scale"] 195 | ) 196 | ) 197 | 198 | # Figure out what the height would be for this width. 199 | height_constrained_by_width = ( 200 | width_constrained_by_width 201 | / p["input"]["aspect_ratio"] 202 | ) 203 | 204 | # See which constraint is more restrictive, and go with that one. 205 | p["node_image"]["height"] = np.minimum( 206 | height_constrained_by_width, 207 | height_constrained_by_height) 208 | p["node_image"]["width"] = ( 209 | p["node_image"]["height"] 210 | * p["input"]["aspect_ratio"] 211 | ) 212 | return p 213 | 214 | 215 | def find_between_layer_gap(p): 216 | """ 217 | How big is the horizontal spacing between_layers? 218 | This is also the spacing between the input image and the first layer 219 | and between the last layer and the output image. 220 | """ 221 | horizontal_gap_total = ( 222 | p["figure"]["width"] 223 | - 2 * p["input"]["image"]["width"] 224 | - p["network"]["n_layers"] * p["node_image"]["width"] 225 | - p["gap"]["left_border"] 226 | - p["gap"]["right_border"] 227 | ) 228 | n_horizontal_gaps = p["network"]["n_layers"] + 1 229 | p["gap"]["between_layer"] = horizontal_gap_total / n_horizontal_gaps 230 | return p 231 | 232 | 233 | def find_between_node_gap(p): 234 | """ 235 | How big is the vertical gap between_node images? 236 | """ 237 | vertical_gap_total = ( 238 | p["figure"]["height"] 239 | - p["gap"]["top_border"] 240 | - p["gap"]["bottom_border"] 241 | - p["network"]["max_nodes"] 242 | * p["node_image"]["height"] 243 | ) 244 | n_vertical_gaps = p["network"]["max_nodes"] - 1 245 | p["gap"]["between_node"] = vertical_gap_total / n_vertical_gaps 246 | return p 247 | 248 | 249 | def find_error_image_position(p): 250 | """ 251 | Where exactly should the error image be positioned? 
252 | """ 253 | p["error_image"]["bottom"] = ( 254 | p["input"]["image"]["bottom"] 255 | - p["input"]["image"]["height"] 256 | * p["gap"]["error_gap_scale"] 257 | - p["error_image"]["height"] 258 | ) 259 | error_image_center = ( 260 | p["figure"]["width"] 261 | - p["gap"]["right_border"] 262 | - p["input"]["image"]["width"] / 2 263 | ) 264 | p["error_image"]["left"] = ( 265 | error_image_center 266 | - p["error_image"]["width"] / 2 267 | ) 268 | return p 269 | 270 | 271 | def add_input_image(fig, p, filler_image): 272 | """ 273 | All Axes to be added use the rectangle specification 274 | (left, bottom, width, height) 275 | """ 276 | absolute_pos = ( 277 | p["gap"]["left_border"], 278 | p["input"]["image"]["bottom"], 279 | p["input"]["image"]["width"], 280 | p["input"]["image"]["height"]) 281 | ax_input = add_image_axes(fig, p, absolute_pos) 282 | add_filler_image( 283 | ax_input, 284 | p["input"]["n_rows"], 285 | p["input"]["n_cols"], 286 | filler_image 287 | ) 288 | 289 | 290 | def add_node_images(fig, i_layer, p, filler_image): 291 | """ 292 | Add in all the node images for a single layer 293 | """ 294 | node_image_left = ( 295 | p["gap"]["left_border"] 296 | + p["input"]["image"]["width"] 297 | + i_layer * p["node_image"]["width"] 298 | + (i_layer + 1) * p["gap"]["between_layer"] 299 | ) 300 | n_nodes = p["network"]["n_nodes"][i_layer] 301 | total_layer_height = ( 302 | n_nodes * p["node_image"]["height"] 303 | + (n_nodes - 1) * p["gap"]["between_node"] 304 | ) 305 | layer_bottom = (p["figure"]["height"] - total_layer_height) / 2 306 | for i_node in range(n_nodes): 307 | node_image_bottom = ( 308 | layer_bottom + i_node * ( 309 | p["node_image"]["height"] + p["gap"]["between_node"])) 310 | 311 | absolute_pos = ( 312 | node_image_left, 313 | node_image_bottom, 314 | p["node_image"]["width"], 315 | p["node_image"]["height"]) 316 | ax = add_image_axes(fig, p, absolute_pos) 317 | add_filler_image( 318 | ax, 319 | p["input"]["n_rows"], 320 | p["input"]["n_cols"], 321 | filler_image 322 | ) 323 | 324 | 325 | def add_output_image(fig, p, filler_image): 326 | output_image_left = ( 327 | p["figure"]["width"] 328 | - p["input"]["image"]["width"] 329 | - p["gap"]["right_border"] 330 | ) 331 | absolute_pos = ( 332 | output_image_left, 333 | p["input"]["image"]["bottom"], 334 | p["input"]["image"]["width"], 335 | p["input"]["image"]["height"]) 336 | ax_output = add_image_axes(fig, p, absolute_pos) 337 | add_filler_image( 338 | ax_output, 339 | p["input"]["n_rows"], 340 | p["input"]["n_cols"], 341 | filler_image 342 | ) 343 | 344 | 345 | def add_error_image(fig, p, filler_image): 346 | absolute_pos = ( 347 | p["error_image"]["left"], 348 | p["error_image"]["bottom"], 349 | p["error_image"]["width"], 350 | p["error_image"]["height"]) 351 | ax_error = add_image_axes(fig, p, absolute_pos) 352 | add_filler_image( 353 | ax_error, 354 | p["input"]["n_rows"], 355 | p["input"]["n_cols"], 356 | filler_image 357 | ) 358 | 359 | 360 | def add_image_axes(fig, p, absolute_pos): 361 | """ 362 | Locate the Axes for the image corresponding to this node within the Figure. 363 | 364 | absolute_pos: Tuple of 365 | (left_position, bottom_position, width, height) 366 | in inches on the Figure. 
367 | """ 368 | scaled_pos = ( 369 | absolute_pos[0] / p["figure"]["width"], 370 | absolute_pos[1] / p["figure"]["height"], 371 | absolute_pos[2] / p["figure"]["width"], 372 | absolute_pos[3] / p["figure"]["height"]) 373 | ax = fig.add_axes(scaled_pos) 374 | ax.tick_params(bottom=False, top=False, left=False, right=False) 375 | ax.tick_params( 376 | labelbottom=False, labeltop=False, labelleft=False, labelright=False) 377 | ax.spines["top"].set_color(TAN) 378 | ax.spines["bottom"].set_color(TAN) 379 | ax.spines["left"].set_color(TAN) 380 | ax.spines["right"].set_color(TAN) 381 | return ax 382 | 383 | 384 | def load_filler_image(): 385 | """ 386 | Get an image to fill in the node Axes for decoration. 387 | """ 388 | img = Image.open(FILLER_IMAGE_FILENAME) 389 | img.load() 390 | color_img = np.asarray(img, dtype="int32") 391 | # Average the three color channels together to create 392 | # a monochrome image. 393 | bw_img = np.mean(color_img, axis=2, dtype="int32") 394 | return bw_img 395 | 396 | 397 | def add_filler_image(ax, n_im_rows, n_im_cols, filler_image): 398 | """ 399 | Add a chunk of image as a placeholder. 400 | """ 401 | # Note that row 0 is at the top of the image, as is 402 | # conventional in converting arrays to images. 403 | top = np.random.randint(filler_image.shape[0] - n_im_rows) 404 | left = np.random.randint(filler_image.shape[1] - n_im_cols) 405 | bottom = top + n_im_rows 406 | right = left + n_im_cols 407 | fill_patch = filler_image[top: bottom, left: right] 408 | ax.imshow(fill_patch, cmap="inferno") 409 | 410 | 411 | def save_nn_viz(fig, postfix="0"): 412 | """ 413 | Generate a new filename for each step of the process. 414 | """ 415 | base_name = "nn_viz_" 416 | filename = base_name + postfix + ".png" 417 | fig.savefig( 418 | filename, 419 | edgecolor=fig.get_edgecolor(), 420 | facecolor=fig.get_facecolor(), 421 | dpi=DPI, 422 | ) 423 | 424 | 425 | if __name__ == "__main__": 426 | main() 427 | -------------------------------------------------------------------------------- /nn_viz_26.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | import matplotlib 5 | matplotlib.use("agg") 6 | import matplotlib.pyplot as plt # noqa: E402 7 | import numpy as np # noqa: E402 8 | from PIL import Image # noqa: E402 9 | 10 | # Choose a color palette 11 | BLUE = "#04253a" 12 | GREEN = "#4c837a" 13 | TAN = "#e1ddbf" 14 | DPI = 300 15 | 16 | # This is an image that will be used to fill in the visualization blocks 17 | # for each node in the neural network. If we were to build an actual neural 18 | # network and attach it, we could use visualizations of the nodes instead. 
19 | FILLER_IMAGE_FILENAME = "512px-Cajal_cortex_drawings.png" 20 | # https://commons.wikimedia.org/wiki/File:Cajal_cortex_drawings.png 21 | # https://upload.wikimedia.org/wikipedia/commons/5/5b/Cajal_cortex_drawings.png 22 | # User:Looie496 created file, 23 | # Santiago Ramon y Cajal created artwork [Public domain] 24 | 25 | # Changing these adjusts the size and layout of the visualization 26 | FIGURE_WIDTH = 16 27 | FIGURE_HEIGHT = 9 28 | RIGHT_BORDER = 0.7 29 | LEFT_BORDER = 0.7 30 | TOP_BORDER = 0.8 31 | BOTTOM_BORDER = 0.6 32 | 33 | N_IMAGE_PIXEL_COLS = 64 34 | N_IMAGE_PIXEL_ROWS = 48 35 | N_NODES_BY_LAYER = [10, 7, 5, 8] 36 | 37 | INPUT_IMAGE_BOTTOM = 5 38 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 39 | ERROR_IMAGE_SCALE = 0.7 40 | ERROR_GAP_SCALE = 0.3 41 | BETWEEN_LAYER_SCALE = 0.8 42 | BETWEEN_NODE_SCALE = 0.4 43 | 44 | 45 | def main(): 46 | """ 47 | Build a visualization of an image autoencoder neural network, 48 | piece by piece. 49 | 50 | A central data structure in this example is the collection of parameters 51 | that define how the image is laid out. It is a set of nested dictionaries. 52 | """ 53 | p = construct_parameters() 54 | fig, ax_boss = create_background(p) 55 | 56 | p = find_node_image_size(p) 57 | p = find_between_layer_gap(p) 58 | p = find_between_node_gap(p) 59 | p = find_error_image_position(p) 60 | 61 | filler_image = load_filler_image() 62 | image_axes = [] 63 | add_input_image(fig, image_axes, p, filler_image) 64 | for i_layer in range(p["network"]["n_layers"]): 65 | add_node_images(fig, i_layer, image_axes, p, filler_image) 66 | add_output_image(fig, image_axes, p, filler_image) 67 | add_error_image(fig, image_axes, p, filler_image) 68 | for i_layer, layer in enumerate(image_axes): 69 | print(f"layer {i_layer - 1}: {len(layer)} Axes") 70 | 71 | 72 | def construct_parameters(): 73 | """ 74 | Build a dictionary of parameters that describe the size and location 75 | of the elements of the visualization. This is a convenient way to pass 76 | the collection of them around . 77 | """ 78 | # Enforce square pixels. Each pixel will have the same height and width. 
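# With the default 64-column by 48-row image blocks this gives an aspect
# ratio of 4/3, so every image block in the figure is drawn a third wider
# than it is tall.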
79 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 80 | 81 | parameters = {} 82 | 83 | # The figure as a whole 84 | parameters["figure"] = { 85 | "height": FIGURE_HEIGHT, 86 | "width": FIGURE_WIDTH, 87 | } 88 | 89 | # The input and output images 90 | parameters["input"] = { 91 | "n_cols": N_IMAGE_PIXEL_COLS, 92 | "n_rows": N_IMAGE_PIXEL_ROWS, 93 | "aspect_ratio": aspect_ratio, 94 | "image": { 95 | "bottom": INPUT_IMAGE_BOTTOM, 96 | "height": INPUT_IMAGE_HEIGHT, 97 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 98 | } 99 | } 100 | 101 | # The network as a whole 102 | parameters["network"] = { 103 | "n_nodes": N_NODES_BY_LAYER, 104 | "n_layers": len(N_NODES_BY_LAYER), 105 | "max_nodes": np.max(N_NODES_BY_LAYER), 106 | } 107 | 108 | # Individual node images 109 | parameters["node_image"] = { 110 | "height": 0, 111 | "width": 0, 112 | } 113 | 114 | parameters["error_image"] = { 115 | "left": 0, 116 | "bottom": 0, 117 | "width": parameters["input"]["image"]["width"] * ERROR_IMAGE_SCALE, 118 | "height": parameters["input"]["image"]["height"] * ERROR_IMAGE_SCALE, 119 | } 120 | 121 | parameters["gap"] = { 122 | "right_border": RIGHT_BORDER, 123 | "left_border": LEFT_BORDER, 124 | "bottom_border": BOTTOM_BORDER, 125 | "top_border": TOP_BORDER, 126 | "between_layer": 0, 127 | "between_layer_scale": BETWEEN_LAYER_SCALE, 128 | "between_node": 0, 129 | "between_node_scale": BETWEEN_NODE_SCALE, 130 | "error_gap_scale": ERROR_GAP_SCALE, 131 | } 132 | 133 | return parameters 134 | 135 | 136 | def create_background(p): 137 | fig = plt.figure( 138 | edgecolor=TAN, 139 | facecolor=GREEN, 140 | figsize=(p["figure"]["width"], p["figure"]["height"]), 141 | linewidth=4, 142 | ) 143 | ax_boss = fig.add_axes((0, 0, 1, 1), facecolor="none") 144 | ax_boss.set_xlim(0, 1) 145 | ax_boss.set_ylim(0, 1) 146 | return fig, ax_boss 147 | 148 | 149 | def find_node_image_size(p): 150 | """ 151 | What should the height and width of each node image be? 152 | As big as possible, given the constraints. 153 | There are two possible constraints: 154 | 1. Fill the figure top-to-bottom. 155 | 2. Fill the figure side-to-side. 156 | To determine which of these limits the size of the node images, 157 | we'll calculate the image size assuming each constraint separately, 158 | then respect the one that results in the smaller node image. 159 | """ 160 | # First assume height is the limiting factor. 161 | total_space_to_fill = ( 162 | p["figure"]["height"] 163 | - p["gap"]["bottom_border"] 164 | - p["gap"]["top_border"] 165 | ) 166 | # Use the layer with the largest number of nodes (n_max). 167 | # Pack the images and the gaps as tight as possible. 168 | # In that case, if the image height is h, 169 | # the gaps will each be h * p["gap"]["between_node_scale"]. 170 | # There will be n_max nodes and (n_max - 1) gaps. 171 | # After a wee bit of algebra: 172 | height_constrained_by_height = ( 173 | total_space_to_fill / ( 174 | p["network"]["max_nodes"] 175 | + (p["network"]["max_nodes"] - 1) 176 | * p["gap"]["between_node_scale"] 177 | ) 178 | ) 179 | 180 | # Second assume width is the limiting factor. 181 | total_space_to_fill = ( 182 | p["figure"]["width"] 183 | - p["gap"]["left_border"] 184 | - p["gap"]["right_border"] 185 | - 2 * p["input"]["image"]["width"] 186 | ) 187 | # Again, pack the images as tightly as possible side-to-side. 188 | # In this case, if the image width is w, 189 | # the gaps will each be w * p["gap"]["between_layer_scale"]. 190 | # There will be n_layer nodes and (n_layer + 1) gaps. 
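# That is, total_space_to_fill =
#     n_layers * w + (n_layers + 1) * w * between_layer_scale.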
191 | # After another tidbit of algebra: 192 | width_constrained_by_width = ( 193 | total_space_to_fill / ( 194 | p["network"]["n_layers"] 195 | + (p["network"]["n_layers"] + 1) 196 | * p["gap"]["between_layer_scale"] 197 | ) 198 | ) 199 | 200 | # Figure out what the height would be for this width. 201 | height_constrained_by_width = ( 202 | width_constrained_by_width 203 | / p["input"]["aspect_ratio"] 204 | ) 205 | 206 | # See which constraint is more restrictive, and go with that one. 207 | p["node_image"]["height"] = np.minimum( 208 | height_constrained_by_width, 209 | height_constrained_by_height) 210 | p["node_image"]["width"] = ( 211 | p["node_image"]["height"] 212 | * p["input"]["aspect_ratio"] 213 | ) 214 | return p 215 | 216 | 217 | def find_between_layer_gap(p): 218 | """ 219 | How big is the horizontal spacing between_layers? 220 | This is also the spacing between the input image and the first layer 221 | and between the last layer and the output image. 222 | """ 223 | horizontal_gap_total = ( 224 | p["figure"]["width"] 225 | - 2 * p["input"]["image"]["width"] 226 | - p["network"]["n_layers"] * p["node_image"]["width"] 227 | - p["gap"]["left_border"] 228 | - p["gap"]["right_border"] 229 | ) 230 | n_horizontal_gaps = p["network"]["n_layers"] + 1 231 | p["gap"]["between_layer"] = horizontal_gap_total / n_horizontal_gaps 232 | return p 233 | 234 | 235 | def find_between_node_gap(p): 236 | """ 237 | How big is the vertical gap between_node images? 238 | """ 239 | vertical_gap_total = ( 240 | p["figure"]["height"] 241 | - p["gap"]["top_border"] 242 | - p["gap"]["bottom_border"] 243 | - p["network"]["max_nodes"] 244 | * p["node_image"]["height"] 245 | ) 246 | n_vertical_gaps = p["network"]["max_nodes"] - 1 247 | p["gap"]["between_node"] = vertical_gap_total / n_vertical_gaps 248 | return p 249 | 250 | 251 | def find_error_image_position(p): 252 | """ 253 | Where exactly should the error image be positioned? 
254 | """ 255 | p["error_image"]["bottom"] = ( 256 | p["input"]["image"]["bottom"] 257 | - p["input"]["image"]["height"] 258 | * p["gap"]["error_gap_scale"] 259 | - p["error_image"]["height"] 260 | ) 261 | error_image_center = ( 262 | p["figure"]["width"] 263 | - p["gap"]["right_border"] 264 | - p["input"]["image"]["width"] / 2 265 | ) 266 | p["error_image"]["left"] = ( 267 | error_image_center 268 | - p["error_image"]["width"] / 2 269 | ) 270 | return p 271 | 272 | 273 | def add_input_image(fig, image_axes, p, filler_image): 274 | """ 275 | All Axes to be added use the rectangle specification 276 | (left, bottom, width, height) 277 | """ 278 | absolute_pos = ( 279 | p["gap"]["left_border"], 280 | p["input"]["image"]["bottom"], 281 | p["input"]["image"]["width"], 282 | p["input"]["image"]["height"]) 283 | ax_input = add_image_axes(fig, image_axes, p, absolute_pos) 284 | add_filler_image( 285 | ax_input, 286 | p["input"]["n_rows"], 287 | p["input"]["n_cols"], 288 | filler_image 289 | ) 290 | image_axes.append([ax_input]) 291 | 292 | 293 | def add_node_images(fig, i_layer, image_axes, p, filler_image): 294 | """ 295 | Add in all the node images for a single layer 296 | """ 297 | node_image_left = ( 298 | p["gap"]["left_border"] 299 | + p["input"]["image"]["width"] 300 | + i_layer * p["node_image"]["width"] 301 | + (i_layer + 1) * p["gap"]["between_layer"] 302 | ) 303 | n_nodes = p["network"]["n_nodes"][i_layer] 304 | total_layer_height = ( 305 | n_nodes * p["node_image"]["height"] 306 | + (n_nodes - 1) * p["gap"]["between_node"] 307 | ) 308 | layer_bottom = (p["figure"]["height"] - total_layer_height) / 2 309 | layer_axes = [] 310 | for i_node in range(n_nodes): 311 | node_image_bottom = ( 312 | layer_bottom + i_node * ( 313 | p["node_image"]["height"] + p["gap"]["between_node"])) 314 | 315 | absolute_pos = ( 316 | node_image_left, 317 | node_image_bottom, 318 | p["node_image"]["width"], 319 | p["node_image"]["height"]) 320 | ax = add_image_axes(fig, image_axes, p, absolute_pos) 321 | add_filler_image( 322 | ax, 323 | p["input"]["n_rows"], 324 | p["input"]["n_cols"], 325 | filler_image 326 | ) 327 | layer_axes.append(ax) 328 | image_axes.append(layer_axes) 329 | 330 | 331 | def add_output_image(fig, image_axes, p, filler_image): 332 | output_image_left = ( 333 | p["figure"]["width"] 334 | - p["input"]["image"]["width"] 335 | - p["gap"]["right_border"] 336 | ) 337 | absolute_pos = ( 338 | output_image_left, 339 | p["input"]["image"]["bottom"], 340 | p["input"]["image"]["width"], 341 | p["input"]["image"]["height"]) 342 | ax_output = add_image_axes(fig, image_axes, p, absolute_pos) 343 | add_filler_image( 344 | ax_output, 345 | p["input"]["n_rows"], 346 | p["input"]["n_cols"], 347 | filler_image 348 | ) 349 | image_axes.append([ax_output]) 350 | 351 | 352 | def add_error_image(fig, image_axes, p, filler_image): 353 | absolute_pos = ( 354 | p["error_image"]["left"], 355 | p["error_image"]["bottom"], 356 | p["error_image"]["width"], 357 | p["error_image"]["height"]) 358 | ax_error = add_image_axes(fig, image_axes, p, absolute_pos) 359 | add_filler_image( 360 | ax_error, 361 | p["input"]["n_rows"], 362 | p["input"]["n_cols"], 363 | filler_image 364 | ) 365 | 366 | 367 | def add_image_axes(fig, image_axes, p, absolute_pos): 368 | """ 369 | Locate the Axes for the image corresponding to this node within the Figure. 370 | 371 | absolute_pos: Tuple of 372 | (left_position, bottom_position, width, height) 373 | in inches on the Figure. 
374 | """ 375 | scaled_pos = ( 376 | absolute_pos[0] / p["figure"]["width"], 377 | absolute_pos[1] / p["figure"]["height"], 378 | absolute_pos[2] / p["figure"]["width"], 379 | absolute_pos[3] / p["figure"]["height"]) 380 | ax = fig.add_axes(scaled_pos) 381 | ax.tick_params(bottom=False, top=False, left=False, right=False) 382 | ax.tick_params( 383 | labelbottom=False, labeltop=False, labelleft=False, labelright=False) 384 | ax.spines["top"].set_color(TAN) 385 | ax.spines["bottom"].set_color(TAN) 386 | ax.spines["left"].set_color(TAN) 387 | ax.spines["right"].set_color(TAN) 388 | return ax 389 | 390 | 391 | def load_filler_image(): 392 | """ 393 | Get an image to fill in the node Axes for decoration. 394 | """ 395 | img = Image.open(FILLER_IMAGE_FILENAME) 396 | img.load() 397 | color_img = np.asarray(img, dtype="int32") 398 | # Average the three color channels together to create 399 | # a monochrome image. 400 | bw_img = np.mean(color_img, axis=2, dtype="int32") 401 | return bw_img 402 | 403 | 404 | def add_filler_image(ax, n_im_rows, n_im_cols, filler_image): 405 | """ 406 | Add a chunk of image as a placeholder. 407 | """ 408 | # Note that row 0 is at the top of the image, as is 409 | # conventional in converting arrays to images. 410 | top = np.random.randint(filler_image.shape[0] - n_im_rows) 411 | left = np.random.randint(filler_image.shape[1] - n_im_cols) 412 | bottom = top + n_im_rows 413 | right = left + n_im_cols 414 | fill_patch = filler_image[top: bottom, left: right] 415 | ax.imshow(fill_patch, cmap="inferno") 416 | 417 | 418 | def save_nn_viz(fig, postfix="0"): 419 | """ 420 | Generate a new filename for each step of the process. 421 | """ 422 | base_name = "nn_viz_" 423 | filename = base_name + postfix + ".png" 424 | fig.savefig( 425 | filename, 426 | edgecolor=fig.get_edgecolor(), 427 | facecolor=fig.get_facecolor(), 428 | dpi=DPI, 429 | ) 430 | 431 | 432 | if __name__ == "__main__": 433 | main() 434 | -------------------------------------------------------------------------------- /nn_viz_27.py: -------------------------------------------------------------------------------- 1 | """ 2 | Generate an autoencoder neural network visualization 3 | """ 4 | import matplotlib 5 | matplotlib.use("agg") 6 | import matplotlib.pyplot as plt # noqa: E402 7 | import numpy as np # noqa: E402 8 | from PIL import Image # noqa: E402 9 | 10 | # Choose a color palette 11 | BLUE = "#04253a" 12 | GREEN = "#4c837a" 13 | TAN = "#e1ddbf" 14 | DPI = 300 15 | 16 | # This is an image that will be used to fill in the visualization blocks 17 | # for each node in the neural network. If we were to build an actual neural 18 | # network and attach it, we could use visualizations of the nodes instead. 
19 | FILLER_IMAGE_FILENAME = "512px-Cajal_cortex_drawings.png" 20 | # https://commons.wikimedia.org/wiki/File:Cajal_cortex_drawings.png 21 | # https://upload.wikimedia.org/wikipedia/commons/5/5b/Cajal_cortex_drawings.png 22 | # User:Looie496 created file, 23 | # Santiago Ramon y Cajal created artwork [Public domain] 24 | 25 | # Changing these adjusts the size and layout of the visualization 26 | FIGURE_WIDTH = 16 27 | FIGURE_HEIGHT = 9 28 | RIGHT_BORDER = 0.7 29 | LEFT_BORDER = 0.7 30 | TOP_BORDER = 0.8 31 | BOTTOM_BORDER = 0.6 32 | 33 | N_IMAGE_PIXEL_COLS = 64 34 | N_IMAGE_PIXEL_ROWS = 48 35 | N_NODES_BY_LAYER = [10, 7, 5, 8] 36 | 37 | INPUT_IMAGE_BOTTOM = 5 38 | INPUT_IMAGE_HEIGHT = 0.25 * FIGURE_HEIGHT 39 | ERROR_IMAGE_SCALE = 0.7 40 | ERROR_GAP_SCALE = 0.3 41 | BETWEEN_LAYER_SCALE = 0.8 42 | BETWEEN_NODE_SCALE = 0.4 43 | 44 | 45 | def main(): 46 | """ 47 | Build a visualization of an image autoencoder neural network, 48 | piece by piece. 49 | 50 | A central data structure in this example is the collection of parameters 51 | that define how the image is laid out. It is a set of nested dictionaries. 52 | """ 53 | p = construct_parameters() 54 | fig, ax_boss = create_background(p) 55 | 56 | p = find_node_image_size(p) 57 | p = find_between_layer_gap(p) 58 | p = find_between_node_gap(p) 59 | p = find_error_image_position(p) 60 | 61 | filler_image = load_filler_image() 62 | image_axes = [] 63 | add_input_image(fig, image_axes, p, filler_image) 64 | for i_layer in range(p["network"]["n_layers"]): 65 | add_node_images(fig, i_layer, image_axes, p, filler_image) 66 | add_output_image(fig, image_axes, p, filler_image) 67 | add_error_image(fig, image_axes, p, filler_image) 68 | add_layer_connections(ax_boss, image_axes) 69 | save_nn_viz(fig, "27_single_connection") 70 | 71 | 72 | def construct_parameters(): 73 | """ 74 | Build a dictionary of parameters that describe the size and location 75 | of the elements of the visualization. This is a convenient way to pass 76 | the collection of them around . 77 | """ 78 | # Enforce square pixels. Each pixel will have the same height and width. 
79 | aspect_ratio = N_IMAGE_PIXEL_COLS / N_IMAGE_PIXEL_ROWS 80 | 81 | parameters = {} 82 | 83 | # The figure as a whole 84 | parameters["figure"] = { 85 | "height": FIGURE_HEIGHT, 86 | "width": FIGURE_WIDTH, 87 | } 88 | 89 | # The input and output images 90 | parameters["input"] = { 91 | "n_cols": N_IMAGE_PIXEL_COLS, 92 | "n_rows": N_IMAGE_PIXEL_ROWS, 93 | "aspect_ratio": aspect_ratio, 94 | "image": { 95 | "bottom": INPUT_IMAGE_BOTTOM, 96 | "height": INPUT_IMAGE_HEIGHT, 97 | "width": INPUT_IMAGE_HEIGHT * aspect_ratio, 98 | } 99 | } 100 | 101 | # The network as a whole 102 | parameters["network"] = { 103 | "n_nodes": N_NODES_BY_LAYER, 104 | "n_layers": len(N_NODES_BY_LAYER), 105 | "max_nodes": np.max(N_NODES_BY_LAYER), 106 | } 107 | 108 | # Individual node images 109 | parameters["node_image"] = { 110 | "height": 0, 111 | "width": 0, 112 | } 113 | 114 | parameters["error_image"] = { 115 | "left": 0, 116 | "bottom": 0, 117 | "width": parameters["input"]["image"]["width"] * ERROR_IMAGE_SCALE, 118 | "height": parameters["input"]["image"]["height"] * ERROR_IMAGE_SCALE, 119 | } 120 | 121 | parameters["gap"] = { 122 | "right_border": RIGHT_BORDER, 123 | "left_border": LEFT_BORDER, 124 | "bottom_border": BOTTOM_BORDER, 125 | "top_border": TOP_BORDER, 126 | "between_layer": 0, 127 | "between_layer_scale": BETWEEN_LAYER_SCALE, 128 | "between_node": 0, 129 | "between_node_scale": BETWEEN_NODE_SCALE, 130 | "error_gap_scale": ERROR_GAP_SCALE, 131 | } 132 | 133 | return parameters 134 | 135 | 136 | def create_background(p): 137 | fig = plt.figure( 138 | edgecolor=TAN, 139 | facecolor=GREEN, 140 | figsize=(p["figure"]["width"], p["figure"]["height"]), 141 | linewidth=4, 142 | ) 143 | ax_boss = fig.add_axes((0, 0, 1, 1), facecolor="none") 144 | ax_boss.set_xlim(0, 1) 145 | ax_boss.set_ylim(0, 1) 146 | return fig, ax_boss 147 | 148 | 149 | def find_node_image_size(p): 150 | """ 151 | What should the height and width of each node image be? 152 | As big as possible, given the constraints. 153 | There are two possible constraints: 154 | 1. Fill the figure top-to-bottom. 155 | 2. Fill the figure side-to-side. 156 | To determine which of these limits the size of the node images, 157 | we'll calculate the image size assuming each constraint separately, 158 | then respect the one that results in the smaller node image. 159 | """ 160 | # First assume height is the limiting factor. 161 | total_space_to_fill = ( 162 | p["figure"]["height"] 163 | - p["gap"]["bottom_border"] 164 | - p["gap"]["top_border"] 165 | ) 166 | # Use the layer with the largest number of nodes (n_max). 167 | # Pack the images and the gaps as tight as possible. 168 | # In that case, if the image height is h, 169 | # the gaps will each be h * p["gap"]["between_node_scale"]. 170 | # There will be n_max nodes and (n_max - 1) gaps. 171 | # After a wee bit of algebra: 172 | height_constrained_by_height = ( 173 | total_space_to_fill / ( 174 | p["network"]["max_nodes"] 175 | + (p["network"]["max_nodes"] - 1) 176 | * p["gap"]["between_node_scale"] 177 | ) 178 | ) 179 | 180 | # Second assume width is the limiting factor. 181 | total_space_to_fill = ( 182 | p["figure"]["width"] 183 | - p["gap"]["left_border"] 184 | - p["gap"]["right_border"] 185 | - 2 * p["input"]["image"]["width"] 186 | ) 187 | # Again, pack the images as tightly as possible side-to-side. 188 | # In this case, if the image width is w, 189 | # the gaps will each be w * p["gap"]["between_layer_scale"]. 190 | # There will be n_layer nodes and (n_layer + 1) gaps. 
191 | # After another tidbit of algebra: 192 | width_constrained_by_width = ( 193 | total_space_to_fill / ( 194 | p["network"]["n_layers"] 195 | + (p["network"]["n_layers"] + 1) 196 | * p["gap"]["between_layer_scale"] 197 | ) 198 | ) 199 | 200 | # Figure out what the height would be for this width. 201 | height_constrained_by_width = ( 202 | width_constrained_by_width 203 | / p["input"]["aspect_ratio"] 204 | ) 205 | 206 | # See which constraint is more restrictive, and go with that one. 207 | p["node_image"]["height"] = np.minimum( 208 | height_constrained_by_width, 209 | height_constrained_by_height) 210 | p["node_image"]["width"] = ( 211 | p["node_image"]["height"] 212 | * p["input"]["aspect_ratio"] 213 | ) 214 | return p 215 | 216 | 217 | def find_between_layer_gap(p): 218 | """ 219 | How big is the horizontal spacing between_layers? 220 | This is also the spacing between the input image and the first layer 221 | and between the last layer and the output image. 222 | """ 223 | horizontal_gap_total = ( 224 | p["figure"]["width"] 225 | - 2 * p["input"]["image"]["width"] 226 | - p["network"]["n_layers"] * p["node_image"]["width"] 227 | - p["gap"]["left_border"] 228 | - p["gap"]["right_border"] 229 | ) 230 | n_horizontal_gaps = p["network"]["n_layers"] + 1 231 | p["gap"]["between_layer"] = horizontal_gap_total / n_horizontal_gaps 232 | return p 233 | 234 | 235 | def find_between_node_gap(p): 236 | """ 237 | How big is the vertical gap between_node images? 238 | """ 239 | vertical_gap_total = ( 240 | p["figure"]["height"] 241 | - p["gap"]["top_border"] 242 | - p["gap"]["bottom_border"] 243 | - p["network"]["max_nodes"] 244 | * p["node_image"]["height"] 245 | ) 246 | n_vertical_gaps = p["network"]["max_nodes"] - 1 247 | p["gap"]["between_node"] = vertical_gap_total / n_vertical_gaps 248 | return p 249 | 250 | 251 | def find_error_image_position(p): 252 | """ 253 | Where exactly should the error image be positioned? 
254 | """ 255 | p["error_image"]["bottom"] = ( 256 | p["input"]["image"]["bottom"] 257 | - p["input"]["image"]["height"] 258 | * p["gap"]["error_gap_scale"] 259 | - p["error_image"]["height"] 260 | ) 261 | error_image_center = ( 262 | p["figure"]["width"] 263 | - p["gap"]["right_border"] 264 | - p["input"]["image"]["width"] / 2 265 | ) 266 | p["error_image"]["left"] = ( 267 | error_image_center 268 | - p["error_image"]["width"] / 2 269 | ) 270 | return p 271 | 272 | 273 | def add_input_image(fig, image_axes, p, filler_image): 274 | """ 275 | All Axes to be added use the rectangle specification 276 | (left, bottom, width, height) 277 | """ 278 | absolute_pos = ( 279 | p["gap"]["left_border"], 280 | p["input"]["image"]["bottom"], 281 | p["input"]["image"]["width"], 282 | p["input"]["image"]["height"]) 283 | ax_input = add_image_axes(fig, image_axes, p, absolute_pos) 284 | add_filler_image( 285 | ax_input, 286 | p["input"]["n_rows"], 287 | p["input"]["n_cols"], 288 | filler_image 289 | ) 290 | image_axes.append([ax_input]) 291 | 292 | 293 | def add_node_images(fig, i_layer, image_axes, p, filler_image): 294 | """ 295 | Add in all the node images for a single layer 296 | """ 297 | node_image_left = ( 298 | p["gap"]["left_border"] 299 | + p["input"]["image"]["width"] 300 | + i_layer * p["node_image"]["width"] 301 | + (i_layer + 1) * p["gap"]["between_layer"] 302 | ) 303 | n_nodes = p["network"]["n_nodes"][i_layer] 304 | total_layer_height = ( 305 | n_nodes * p["node_image"]["height"] 306 | + (n_nodes - 1) * p["gap"]["between_node"] 307 | ) 308 | layer_bottom = (p["figure"]["height"] - total_layer_height) / 2 309 | layer_axes = [] 310 | for i_node in range(n_nodes): 311 | node_image_bottom = ( 312 | layer_bottom + i_node * ( 313 | p["node_image"]["height"] + p["gap"]["between_node"])) 314 | 315 | absolute_pos = ( 316 | node_image_left, 317 | node_image_bottom, 318 | p["node_image"]["width"], 319 | p["node_image"]["height"]) 320 | ax = add_image_axes(fig, image_axes, p, absolute_pos) 321 | add_filler_image( 322 | ax, 323 | p["input"]["n_rows"], 324 | p["input"]["n_cols"], 325 | filler_image 326 | ) 327 | layer_axes.append(ax) 328 | image_axes.append(layer_axes) 329 | 330 | 331 | def add_output_image(fig, image_axes, p, filler_image): 332 | output_image_left = ( 333 | p["figure"]["width"] 334 | - p["input"]["image"]["width"] 335 | - p["gap"]["right_border"] 336 | ) 337 | absolute_pos = ( 338 | output_image_left, 339 | p["input"]["image"]["bottom"], 340 | p["input"]["image"]["width"], 341 | p["input"]["image"]["height"]) 342 | ax_output = add_image_axes(fig, image_axes, p, absolute_pos) 343 | add_filler_image( 344 | ax_output, 345 | p["input"]["n_rows"], 346 | p["input"]["n_cols"], 347 | filler_image 348 | ) 349 | image_axes.append([ax_output]) 350 | 351 | 352 | def add_error_image(fig, image_axes, p, filler_image): 353 | absolute_pos = ( 354 | p["error_image"]["left"], 355 | p["error_image"]["bottom"], 356 | p["error_image"]["width"], 357 | p["error_image"]["height"]) 358 | ax_error = add_image_axes(fig, image_axes, p, absolute_pos) 359 | add_filler_image( 360 | ax_error, 361 | p["input"]["n_rows"], 362 | p["input"]["n_cols"], 363 | filler_image 364 | ) 365 | 366 | 367 | def add_image_axes(fig, image_axes, p, absolute_pos): 368 | """ 369 | Locate the Axes for the image corresponding to this node within the Figure. 370 | 371 | absolute_pos: Tuple of 372 | (left_position, bottom_position, width, height) 373 | in inches on the Figure. 
374 | """ 375 | scaled_pos = ( 376 | absolute_pos[0] / p["figure"]["width"], 377 | absolute_pos[1] / p["figure"]["height"], 378 | absolute_pos[2] / p["figure"]["width"], 379 | absolute_pos[3] / p["figure"]["height"]) 380 | ax = fig.add_axes(scaled_pos) 381 | ax.tick_params(bottom=False, top=False, left=False, right=False) 382 | ax.tick_params( 383 | labelbottom=False, labeltop=False, labelleft=False, labelright=False) 384 | ax.spines["top"].set_color(TAN) 385 | ax.spines["bottom"].set_color(TAN) 386 | ax.spines["left"].set_color(TAN) 387 | ax.spines["right"].set_color(TAN) 388 | return ax 389 | 390 | 391 | def load_filler_image(): 392 | """ 393 | Get an image to fill in the node Axes for decoration. 394 | """ 395 | img = Image.open(FILLER_IMAGE_FILENAME) 396 | img.load() 397 | color_img = np.asarray(img, dtype="int32") 398 | # Average the three color channels together to create 399 | # a monochrome image. 400 | bw_img = np.mean(color_img, axis=2, dtype="int32") 401 | return bw_img 402 | 403 | 404 | def add_filler_image(ax, n_im_rows, n_im_cols, filler_image): 405 | """ 406 | Add a chunk of image as a placeholder. 407 | """ 408 | # Note that row 0 is at the top of the image, as is 409 | # conventional in converting arrays to images. 410 | top = np.random.randint(filler_image.shape[0] - n_im_rows) 411 | left = np.random.randint(filler_image.shape[1] - n_im_cols) 412 | bottom = top + n_im_rows 413 | right = left + n_im_cols 414 | fill_patch = filler_image[top: bottom, left: right] 415 | ax.imshow(fill_patch, cmap="inferno") 416 | 417 | 418 | def add_layer_connections(ax_boss, image_axes): 419 | """ 420 | Add in the connectors between all the layers 421 | Treat the input image as the first layer and the output layer as the last. 422 | """ 423 | x_start = image_axes[0][0].get_position().x1 424 | x_end = image_axes[1][0].get_position().x0 425 | 426 | ax_start = image_axes[0][0] 427 | ax_start_pos = ax_start.get_position() 428 | y_start_min = ax_start_pos.y0 429 | y_start_max = ax_start_pos.y1 430 | y_start = (y_start_min + y_start_max) / 2 431 | 432 | ax_end = image_axes[1][0] 433 | ax_end_pos = ax_end.get_position() 434 | y_end_min = ax_end_pos.y0 435 | y_end_max = ax_end_pos.y1 436 | y_end = (y_end_min + y_end_max) / 2 437 | 438 | x = [x_start, x_end] 439 | y = [y_start, y_end] 440 | ax_boss.plot(x, y, color=TAN) 441 | 442 | 443 | def save_nn_viz(fig, postfix="0"): 444 | """ 445 | Generate a new filename for each step of the process. 446 | """ 447 | base_name = "nn_viz_" 448 | filename = base_name + postfix + ".png" 449 | fig.savefig( 450 | filename, 451 | edgecolor=fig.get_edgecolor(), 452 | facecolor=fig.get_facecolor(), 453 | dpi=DPI, 454 | ) 455 | 456 | 457 | if __name__ == "__main__": 458 | main() 459 | --------------------------------------------------------------------------------