├── demos
│   ├── glider_stages.gif
│   └── demos.ipynb
├── resources
│   ├── checkpoint
│   ├── test_model.ckpt.meta
│   ├── test_model.ckpt.index
│   ├── test_model.ckpt.data-00000-of-00001
│   └── tf_v1_version
│       ├── utils.py
│       ├── train_ca.py
│       ├── ca_funcs.py
│       └── train CA demo.ipynb
├── tests
│   └── test_ca.py
├── setup.py
├── .gitignore
├── README.md
├── utils.py
├── train_ca.py
└── ca_funcs.py
--------------------------------------------------------------------------------
/demos/glider_stages.gif:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/williamgilpin/convoca/HEAD/demos/glider_stages.gif
--------------------------------------------------------------------------------
/resources/checkpoint:
--------------------------------------------------------------------------------
model_checkpoint_path: "test_model.ckpt"
all_model_checkpoint_paths: "test_model.ckpt"
--------------------------------------------------------------------------------
/resources/test_model.ckpt.meta:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/williamgilpin/convoca/HEAD/resources/test_model.ckpt.meta
--------------------------------------------------------------------------------
/resources/test_model.ckpt.index:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/williamgilpin/convoca/HEAD/resources/test_model.ckpt.index
--------------------------------------------------------------------------------
/resources/test_model.ckpt.data-00000-of-00001:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/williamgilpin/convoca/HEAD/resources/test_model.ckpt.data-00000-of-00001
--------------------------------------------------------------------------------
/tests/test_ca.py:
--------------------------------------------------------------------------------
import unittest
from convoca import ca_funcs
# https://github.com/google/pybadges/blob/master/tests/test_precalculated_text_measurer.py
# https://docs.python.org/3/library/unittest.html


class TestConvNetObject(unittest.TestCase):
    # Placeholder test class; cases for the trained CA models go here
    pass


if __name__ == '__main__':
    unittest.main()
--------------------------------------------------------------------------------
/setup.py:
--------------------------------------------------------------------------------
from distutils.core import setup

setup(
    name='convoca',
    version='0.1',
    description='Train convolutional neural networks to represent cellular automata',
    author='William Gilpin',
    author_email='firstnamelastname(as one word)@googleemailservice',
    requires=['numpy', 'matplotlib', 'tensorflow'],
    py_modules=['ca_funcs', 'train_ca', 'utils'],
)
--------------------------------------------------------------------------------
/.gitignore:
--------------------------------------------------------------------------------

# Created by https://www.gitignore.io/api/text,python

### Python ###
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# C extensions
*.so

# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
.hypothesis/
.pytest_cache/

# Translations
*.mo
*.pot

# Django stuff:
*.log
local_settings.py
db.sqlite3

# Flask stuff:
instance/
.webassets-cache

# Scrapy stuff:
.scrapy

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# Jupyter Notebook
.ipynb_checkpoints

# pyenv
.python-version

# celery beat schedule file
celerybeat-schedule

# SageMath parsed files
*.sage.py

# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Spyder project settings
.spyderproject
.spyproject

# Rope project settings
.ropeproject

# mkdocs documentation
/site

# mypy
.mypy_cache/

### Python Patch ###
.venv/

### Text ###
*.doc
*.docx
*.msg
*.pages
*.rtf
*.txt
*.wpd
*.wps

.DS_Store

# End of https://www.gitignore.io/api/text,python
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# convoca

Demonstrate and learn cellular automata using convolutional neural networks in TensorFlow

![Game of Life training stages](demos/glider_stages.gif)

The video above shows different stages of training a network to learn Conway's Game of Life.
This code both implements and analyzes known CA rulesets using TensorFlow, and it can also learn the rules of an unknown CA given an image sequence as training data.
If you find this code useful, please consider citing the accompanying publication:

> Gilpin, William. "Cellular automata as convolutional neural networks." Physical Review E 100.3 (2019): 032402. [arXiv](https://arxiv.org/abs/1809.02942)

## Demos and features

The notebook `demos.ipynb` illustrates a minimal example of training a CNN on the Game of Life, including example outputs.
Models are instantiated using the `initialize_model(...)` function, which builds a network whose first layer is a trainable convolutional filter that extracts information about the neighborhood of each cell. Repeated 1x1 convolutions in subsequent layers implement the CA rules, and a final softmax layer assigns an output state to each cell.
For cases in which the CA ruleset is radially symmetric, as is often the case for natural systems with "totalistic" rules, the optional `SymmetricConvolution` layer imposes radially-symmetric structure on the learned convolutional kernels. An optional `Wraparound2D` layer also allows periodic boundary conditions to be implemented in the convolutions.
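
A minimal usage sketch follows; the grid size, layer widths, and random test states are illustrative assumptions, and `demos.ipynb` contains the complete training workflow:

    import tensorflow as tf
    from train_ca import initialize_model, logit_to_pred
    from ca_funcs import make_game_of_life

    # Build a per-cell classifier for 10x10 grids with two hidden layers of width 100
    model = initialize_model((10, 10), [100, 100], num_classes=2)

    # Generate input/output pairs from the built-in Game of Life implementation
    gol = make_game_of_life()
    x = tf.cast(tf.random.uniform((5, 10, 10)) < 0.5, tf.float32)
    y = gol(x)

    # The network returns per-cell class scores, which map back to image form
    logits = model(x[..., None])
    pred = logit_to_pred(logits, shape=(-1, 10, 10))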

## Installation and Requirements

Install directly from GitHub using

    pip install git+git://github.com/williamgilpin/convoca

A typical installation uses Miniconda. This code has been tested on macOS and Ubuntu, and it requires

+ Python >3.4
+ TensorFlow >2.0
+ numpy
+ matplotlib
+ Jupyter notebooks (for demos)

## Structure

The package contains the following libraries

`train_ca` : requires TensorFlow

`ca_funcs` : requires TensorFlow

`utils` : minor functions that support the main methods. Requires numpy only.

`demos.ipynb` : demonstration of the code for learning the Game of Life


## Updates

As of 2.26.2020, the code has been significantly refactored to use TensorFlow 2.0 and Keras. The previous implementation has been placed in the "resources" directory for reference.

## Planned future work

+ Add methods for simulating totalistic CA
+ Add methods for Moore neighborhood CA
+ Add demos recreating classic experiments, such as the results in Langton, Physica D, 1990.
+ Add statistical physics calculations, such as an efficient calculation of "activity" for a CA
+ CA on graphs using an adjacency matrix --> grid convolutional operator
--------------------------------------------------------------------------------
/utils.py:
--------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import collections

from itertools import product

def fixed_aspect_ratio(ratio):
    '''
    Set a fixed aspect ratio on matplotlib plots
    regardless of axis units
    '''
    xvals, yvals = (plt.gca().axes.get_xlim(),
                    plt.gca().axes.get_ylim())

    xrange = xvals[1]-xvals[0]
    yrange = yvals[1]-yvals[0]
    plt.gca().set_aspect(ratio*(xrange/yrange), adjustable='box')

def better_savefig(name, dpi=72, pad=0.0, remove_border=True):
    '''
    Save an image without a bounding box and at the proper resolution.
    The tiff files produced are huge because compression is not supported by matplotlib

    name : str
        The string containing the name of the desired save file and its resolution

    dpi : int
        The desired dots per linear inch

    pad : float
        Add a tiny amount of whitespace if necessary

    remove_border : bool
        Whether to remove axes and padding (for example, for images)

    '''
    if remove_border:
        plt.gca().set_axis_off()
        plt.subplots_adjust(top=1+pad, bottom=0+pad, right=1+pad, left=0+pad,
                            hspace=0, wspace=0)
        plt.margins(0, 0)
        plt.gca().xaxis.set_major_locator(plt.NullLocator())
        plt.gca().yaxis.set_major_locator(plt.NullLocator())

    plt.savefig(name, bbox_inches='tight', pad_inches=0, dpi=dpi)

def cmap1D(all_col, N):
    '''Generate a continuous colormap by linearly interpolating
    among a list of colors

    Parameters
    ----------

    all_col : list of 3-tuples
        The colors to linearly interpolate

    N : int
        The number of values to interpolate

    Returns
    -------

    col_list : list of tuples
        An ordered list of colors for the colormap

    '''

    n_col = len(all_col)
    all_col = [np.array([item/255. for item in col]) for col in all_col]

    all_vr = list()
    runlens = [len(thing) for thing in np.array_split(range(N), n_col-1)]
    for col1, col2, runlen in zip(all_col[:-1], all_col[1:], runlens):
        vr = list()
        for ii in range(3):
            vr.append(np.linspace(col1[ii], col2[ii], runlen))
        vr = np.array(vr).T
        all_vr.extend(vr)
    return [tuple(thing) for thing in all_vr]
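
# Illustrative usage (the colors and N below are arbitrary assumptions):
# interpolate from black to red in five steps
#     >>> cols = cmap1D([(0, 0, 0), (255, 0, 0)], 5)
#     >>> len(cols)
#     5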

def tup2str(tup, delim=''):
    '''Convert a tuple to an ordered string'''
    return delim.join([str(item) for item in tup])


def get_slope(vec):
    '''Fit a line to a vector; return the slope and intercept'''
    m, b = np.polyfit(np.arange(0, len(vec)), vec, 1)
    return (m, b)


def bin2int(arr, axis=0):
    """
    Convert a binary array to an integer along the
    specified axis

    Dev: this overflows when the size of the numbers is greater
    than 64 bits
    """
    pow2 = 2**np.arange(arr.shape[axis], dtype=np.uint64)
    return np.sum(arr*pow2, axis=axis).astype(int)

def all_combinations(m, d=9):
    '''
    Make an array of all d-dimensional inputs consisting of m
    possible values. d must be a perfect square; the outputs are
    reshaped into sqrt(d) x sqrt(d) neighborhoods
    '''

    sq = int(np.sqrt(d))

    indices = np.tile(np.array([np.arange(m)]).T, d)

    all_combos = list(product(*list(indices.T)))
    out = np.reshape(np.array(all_combos), (-1, sq, sq))

    return out


def relu(arr0):
    '''Rectified linear unit applied to a copy of an array'''
    arr = np.copy(arr0)
    arr[arr<=0] = 0
    return arr

def normalize_hist(hist_dict0):
    '''
    Given a histogram in dictionary form consisting
    of 'key' : count, generate a new histogram normalized
    by the count totals
    '''

    hist_dict = hist_dict0.copy()

    all_vals = list(hist_dict.values())
    sum_vals = np.sum(all_vals)

    # modify in place
    hist_dict.update((k, v/sum_vals) for k, v in hist_dict.items())

    return hist_dict


def shannon_entropy(pi_set0):
    '''
    Given a set of probabilities, compute the Shannon
    entropy, dropping any zeros
    '''
    pi_set = np.array(pi_set0)
    pi_set_nonzero = np.copy(pi_set[pi_set>0])

    hi = pi_set_nonzero.dot(np.log2(pi_set_nonzero))

    out = -np.sum(hi)

    return out
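
# Illustrative check (values assumed): a fair coin carries one bit of entropy
#     >>> shannon_entropy([0.5, 0.5])
#     1.0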

def layer_entropy(arr):
    '''
    Find the entropy of an array, assuming that the last
    axis indexes binary features and the earlier axes
    index samples

    Interpretation: finds the average firing rate of
    each neuron across all training examples.
    Assumes on/off firing rates

    '''

    num_feats = arr.shape[-1]
    arr_flat = np.reshape(arr, (-1, num_feats))
    pf = np.mean(arr_flat, axis=0)

    ent_vals = [shannon_entropy([pf_val, 1-pf_val]) for pf_val in pf]

    return np.array(ent_vals)


def find_dead(arr, axis=-1):
    '''
    Given an array, find the feature axes along which
    all samples evaluated to the same value (dead neurons)

    Inputs:
        arr : np.array
            an array of shape (n_samples, n_features)

    Returns:
        where_dead : list
            The axes of the dead neurons
    '''
    where_dead = list()
    for ax_ind in range(arr.shape[axis]):
        vals = arr[..., ax_ind]
        val_med = np.median(vals)
        if np.allclose(vals, val_med):
            where_dead.append(ax_ind)

    return where_dead
--------------------------------------------------------------------------------
/resources/tf_v1_version /utils.py:
--------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import collections

from itertools import product

def fixed_aspect_ratio(ratio):
    '''
    Set a fixed aspect ratio on matplotlib plots
    regardless of axis units
    '''
    xvals, yvals = (plt.gca().axes.get_xlim(),
                    plt.gca().axes.get_ylim())

    xrange = xvals[1]-xvals[0]
    yrange = yvals[1]-yvals[0]
    plt.gca().set_aspect(ratio*(xrange/yrange), adjustable='box')

def better_savefig(name, dpi=72, pad=0.0, remove_border=True):
    '''
    Save an image without a bounding box and at the proper resolution.
    The tiff files produced are huge because compression is not supported by matplotlib

    name : str
        The string containing the name of the desired save file and its resolution

    dpi : int
        The desired dots per linear inch

    pad : float
        Add a tiny amount of whitespace if necessary

    remove_border : bool
        Whether to remove axes and padding (for example, for images)

    '''
    if remove_border:
        plt.gca().set_axis_off()
        plt.subplots_adjust(top=1+pad, bottom=0+pad, right=1+pad, left=0+pad,
                            hspace=0, wspace=0)
        plt.margins(0, 0)
        plt.gca().xaxis.set_major_locator(plt.NullLocator())
        plt.gca().yaxis.set_major_locator(plt.NullLocator())

    plt.savefig(name, bbox_inches='tight', pad_inches=0, dpi=dpi)

def cmap1D(all_col, N):
    '''Generate a continuous colormap by linearly interpolating
    among a list of colors

    Parameters
    ----------

    all_col : list of 3-tuples
        The colors to linearly interpolate

    N : int
        The number of values to interpolate

    Returns
    -------

    col_list : list of tuples
        An ordered list of colors for the colormap

    '''

    n_col = len(all_col)
    all_col = [np.array([item/255. for item in col]) for col in all_col]
    all_vr = list()
    runlens = [len(thing) for thing in np.array_split(range(N), n_col-1)]
    for col1, col2, runlen in zip(all_col[:-1], all_col[1:], runlens):
        vr = list()
        for ii in range(3):
            vr.append(np.linspace(col1[ii], col2[ii], runlen))
        vr = np.array(vr).T
        all_vr.extend(vr)
    return [tuple(thing) for thing in all_vr]

def tup2str(tup, delim=''):
    '''Convert a tuple to an ordered string'''
    return delim.join([str(item) for item in tup])


def get_slope(vec):
    '''Fit a line to a vector; return the slope and intercept'''
    m, b = np.polyfit(np.arange(0, len(vec)), vec, 1)
    return (m, b)


def bin2int(arr, axis=0):
    """
    Convert a binary array to an integer along the
    specified axis

    Dev: this overflows when the size of the numbers is greater
    than 64 bits
    """
    pow2 = 2**np.arange(arr.shape[axis], dtype=np.uint64)
    return np.sum(arr*pow2, axis=axis).astype(int)


def all_combinations(m, d=9):
    '''
    Make an array of all d-dimensional inputs consisting of m
    possible values. d must be a perfect square; the outputs are
    reshaped into sqrt(d) x sqrt(d) neighborhoods
    '''

    sq = int(np.sqrt(d))

    indices = np.tile(np.array([np.arange(m)]).T, d)

    all_combos = list(product(*list(indices.T)))
    out = np.reshape(np.array(all_combos), (-1, sq, sq))

    return out


def relu(arr0):
    '''Rectified linear unit applied to a copy of an array'''
    arr = np.copy(arr0)
    arr[arr<=0] = 0
    return arr

def normalize_hist(hist_dict0):
    '''
    Given a histogram in dictionary form consisting
    of 'key' : count, generate a new histogram normalized
    by the count totals
    '''

    hist_dict = hist_dict0.copy()

    all_vals = list(hist_dict.values())
    sum_vals = np.sum(all_vals)

    # modify in place
    hist_dict.update((k, v/sum_vals) for k, v in hist_dict.items())

    return hist_dict


def shannon_entropy(pi_set0):
    '''
    Given a set of probabilities, compute the Shannon
    entropy, dropping any zeros
    '''
    pi_set = np.array(pi_set0)
    pi_set_nonzero = np.copy(pi_set[pi_set>0])

    hi = pi_set_nonzero.dot(np.log2(pi_set_nonzero))

    out = -np.sum(hi)

    return out

def layer_entropy(arr):
    '''
    Find the entropy of an array, assuming that the last
    axis indexes binary features and the earlier axes
    index samples

    Interpretation: finds the average firing rate of
    each neuron across all training examples.
    Assumes on/off firing rates

    '''

    num_feats = arr.shape[-1]
    arr_flat = np.reshape(arr, (-1, num_feats))
    pf = np.mean(arr_flat, axis=0)

    ent_vals = [shannon_entropy([pf_val, 1-pf_val]) for pf_val in pf]

    return np.array(ent_vals)


def find_dead(arr, axis=-1):
    '''
    Given an array, find the feature axes along which
    all samples evaluated to the same value (dead neurons)

    Inputs:
        arr : np.array
            an array of shape (n_samples, n_features)

    Returns:
        where_dead : list
            The axes of the dead neurons
    '''
    where_dead = list()
    for ax_ind in range(arr.shape[axis]):
        vals = arr[..., ax_ind]
        val_med = np.median(vals)
        if np.allclose(vals, val_med):
            where_dead.append(ax_ind)

    return where_dead
--------------------------------------------------------------------------------
/train_ca.py:
--------------------------------------------------------------------------------
import tensorflow as tf
import numpy as np
import warnings

import collections

def periodic_padding(imbatch, padding=1):
    '''
    Create a periodic padding (wrap) around an image batch, to emulate
    periodic boundary conditions. Padding occurs along the middle two axes
    '''
    pad_u = imbatch[:, -padding:, :]
    pad_b = imbatch[:, :padding, :]

    partial_image = tf.concat([pad_u, imbatch, pad_b], axis=1)

    pad_l = partial_image[..., -padding:, :]
    pad_r = partial_image[..., :padding, :]

    padded_imbatch = tf.concat([pad_l, partial_image, pad_r], axis=2)

    return padded_imbatch

class Wraparound2D(tf.keras.layers.Layer):
    """
    Apply periodic boundary conditions on an image by padding
    along the axes
    padding : int or tuple, the amount to wrap around
    """

    def __init__(self, padding=2, **kwargs):
        super(Wraparound2D, self).__init__(**kwargs)
        self.padding = padding

    def get_config(self):
        config = super().get_config().copy()
        config.update({
            'padding': self.padding,
        })
        return config

    def call(self, inputs):
        return periodic_padding(inputs, self.padding)
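
# Illustrative check (shapes assumed): wrapping a batch of 4x4 single-channel
# images by one pixel on each side
#     >>> x = tf.random.uniform((1, 4, 4, 1))
#     >>> periodic_padding(x, padding=1).shape
#     TensorShape([1, 6, 6, 1])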
def initialize_model(shape, layer_dims, nhood=1, num_classes=2, totalistic=False,
                     nhood_type="moore", bc="periodic"):
    """
    Given a domain size and layer specification, initialize a model that assigns
    each pixel a class
    shape : the horizontal and vertical dimensions of the CA image
    layer_dims : list of number of hidden units per layer
    num_classes : int, the number of output classes for the automaton
    totalistic : bool, whether to assume that the CA is radially symmetric, making
        it outer totalistic
    nhood_type : string, default "moore". The type of neighborhood to use for the
        CA. Currently, the only other option, "neumann", only works when "totalistic"
        is set to True
    bc : string, whether to use "periodic" or "constant" (zero padded) boundary conditions
    """
    wspan, hspan = shape
    diameter = 2*nhood+1
    model = tf.keras.Sequential()
    model.add(tf.keras.layers.InputLayer((wspan, hspan, 1)))

    if bc == "periodic":
        model.add(Wraparound2D(padding=nhood))
        conv_pad = 'valid'
    else:
        conv_pad = 'same'

    if totalistic:
        model.add(SymmetricConvolution(nhood, nhood_type=nhood_type, bc=bc))
        model.add(tf.keras.layers.Reshape(target_shape=(-1, nhood+1)))
    else:
        model.add(tf.keras.layers.Conv2D(layer_dims[0], kernel_size=[diameter, diameter], padding=conv_pad,
                                         activation='relu', kernel_initializer=tf.keras.initializers.he_normal(),
                                         bias_initializer=tf.keras.initializers.he_normal()))
        model.add(tf.keras.layers.Reshape(target_shape=(-1, layer_dims[0])))

    for i in range(1, len(layer_dims)):
        model.add(tf.keras.layers.Dense(layer_dims[i], activation='relu',
                                        kernel_initializer=tf.keras.initializers.he_normal(),
                                        bias_initializer=tf.keras.initializers.he_normal()))
    model.add(tf.keras.layers.Dense(num_classes, activation='relu',
                                    kernel_initializer=tf.keras.initializers.he_normal(),
                                    bias_initializer=tf.keras.initializers.he_normal()))
    #model.add(tf.keras.layers.Reshape(target_shape=(-1, wspan, hspan)))
    return model


def logit_to_pred(logits, shape=None):
    """
    Given logits in the form of a network output, convert them to
    images
    """
    labels = tf.argmax(tf.nn.softmax(logits), axis=-1)
    if shape is not None:
        labels = tf.reshape(labels, shape)
    return labels

def augment_data(x, y, n=None):
    """
    Generate an augmented training dataset with random reflections
    and 90 degree rotations
    x, y : Image sets of shape (Samples, Width, Height, Channels)
        training images and next images
    n : number of training examples
    """
    n_data = x.shape[0]

    if not n:
        n = n_data
    x_out, y_out = list(), list()

    for i in range(n):
        r = tf.random.uniform((1,), minval=0, maxval=n_data, dtype=tf.int32)[0]
        x_r, y_r = x[r], y[r]

        if tf.random.uniform((1,))[0] < 0.5:
            x_r = tf.image.flip_left_right(x_r)
            y_r = tf.image.flip_left_right(y_r)
        if tf.random.uniform((1,))[0] < 0.5:
            x_r = tf.image.flip_up_down(x_r)
            y_r = tf.image.flip_up_down(y_r)

        num_rots = tf.random.uniform((1,), minval=0, maxval=4, dtype=tf.int32)[0]
        x_r = tf.image.rot90(x_r, k=num_rots)
        y_r = tf.image.rot90(y_r, k=num_rots)

        x_out.append(x_r), y_out.append(y_r)
    return tf.stack(x_out), tf.stack(y_out)


def make_square_filters(rad):
    """
    rad : the pixel radius for the filters
    """
    m = 2*rad + 1
    square_filters = tf.stack([tf.pad(tf.ones([i, i]), [[int((m-i)/2), int((m-i)/2)],
                                                        [int((m-i)/2), int((m-i)/2)]])
                               for i in range(1, m+1, 2)])
    square_filters = [square_filters[0]] + [item for item in square_filters[1:] - square_filters[:-1]]
    square_filters = tf.stack(square_filters)[..., tf.newaxis]

    return square_filters
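
# Illustrative check: for rad=1, the filters are the center pixel and the
# surrounding ring of the Moore neighborhood, stacked with a channel axis
#     >>> f = make_square_filters(1)
#     >>> f.shape
#     TensorShape([2, 3, 3, 1])
#     >>> tf.squeeze(f[1]).numpy()
#     array([[1., 1., 1.],
#            [1., 0., 1.],
#            [1., 1., 1.]], dtype=float32)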

def make_circular_filters(rad):
    """
    rad : the pixel radius for the filters
    """
    m = 2*rad + 1

    qq = tf.range(m) - int((m-1)/2)
    pp = tf.sqrt(tf.cast(qq[..., None]**2 + qq[None, ...]**2, tf.float32))

    val_range = tf.cast(tf.range((m+1)/2), tf.float32)
    circ_filters = make_square_filters(rad)*val_range[..., None, None, None]
    rr = circ_filters*(1/pp)[None, ..., None]
    rr = tf.where(tf.math.is_nan(rr), tf.zeros_like(rr), rr)
    return tf.stack([make_square_filters(rad)[0]] + [item for item in rr][1:])

class SymmetricConvolution(tf.keras.layers.Layer):
    """
    A non-trainable convolutional layer that extracts the
    summed values in the neighborhood of each pixel. No activation
    is applied because this feature extractor does not change during training;
    it is parametrized by the radius
    r : int, the max neighborhood size
    nhood_type : "moore" (default) uses the Moore neighborhood, while "neumann"
        uses the generalized von Neumann neighborhood, which is similar
        to a circle at large neighborhood radii
    bc : "periodic" or "constant"
    TODO : implement the "hard" von Neumann neighborhood
    """

    def __init__(self, r, nhood_type="moore", bc="periodic", **kwargs):
        super(SymmetricConvolution, self).__init__(**kwargs)

        self.r = r
        self.nhood_type = nhood_type
        self.bc = bc

        if nhood_type == "moore":
            filters = make_square_filters(r)
        elif nhood_type == "neumann":
            filters = make_circular_filters(r)
        else:
            filters = make_square_filters(r)
            warnings.warn("Neighborhood specification not recognized.")
        self.filters = tf.squeeze(tf.transpose(filters))[..., None, :]

        if bc == "periodic":
            self.pad_type = "VALID"
        else:
            self.pad_type = "SAME"

    def get_config(self):
        config = super().get_config().copy()
        config.update({
            'r': self.r,
            'nhood_type': self.nhood_type,
            'bc': self.bc,
        })
        return config

    def call(self, inputs):
        return tf.nn.convolution(inputs, self.filters, padding=self.pad_type)
--------------------------------------------------------------------------------
/resources/tf_v1_version /train_ca.py:
--------------------------------------------------------------------------------
# import numpy as np
import tensorflow as tf
import numpy as np

import collections

from ca_funcs import periodic_padding, conv_cast, kaiming_normal

class ConvNet(object):
    """
    A convolutional neural network with one convolutional layer,
    arbitrary hidden layers, and a summation for the final layer:

    conv - relu - Nx[affine - relu] - sum

    There are two ways to initialize this object: either by passing
    architecture parameters to an explicit constructor function, or by
    passing a string to a TensorFlow .ckpt file

    The lack of pooling or a final score prediction operation makes
    this approach similar to a "soft committee machine"

    The structure of this class is meant to mirror the standard implementation
    of the ConvNet used by CS231n, among others: http://cs231n.stanford.edu/index.html


    Dev notes: Would be wonderful to make this class Eager compatible
    once the training process for Eager is a little smoother. For now, just use
    an InteractiveSession() to reduce some boilerplate

    Dev notes: add doctests

    """

    def __init__(self, sess, ckpt_path='', input_dim=(10, 10), layer_dims=[10, 10, 10, 10, 10],
                 weight_scale=1e-3, filter_size=3, pad_size=1, num_classes=2, strides=[1, 1, 1, 1],
                 reg=0.0, fix_layers=False):
        """
        Initialize a new network and define internal parameters.

        Inputs:

        sess : tf.Session(), tf.InteractiveSession()
        ckpt_path : str pointing to a TensorFlow checkpoint,
            if you want to initialize from a trained model
        input_dim: Tuple (H, W) giving size of the input array
        layer_dims: List of dimensions for each layer, with the first dimension denoting
            the number of convolutional filters
        filter_size: Size of convolutional filters to use in the convolutional layer
        pad_size: the amount of padding to use for the boundary conditions
        num_classes: Number of scores to produce from the final affine layer.
        weight_scale: Weight amplitude for Kaiming normalization
        reg: Scalar for weight of L2 regularization during training
        fix_layers: bool for dealing with a bug where tensorflow sometimes saves the network
            twice in one file

        Dev:
        """

        self.sess = sess
        self.test = tf.constant('init successful', name='test')  # for debugging scope

        ## General properties
        self.wspan, self.hspan = input_dim
        self.weight_scale = weight_scale
        self.pad_size = pad_size
        self.all_strides = strides
        self.filter_size = filter_size

        self.X = tf.placeholder(tf.float32, shape=input_dim)
        self.y = tf.placeholder(tf.float32, shape=input_dim)

        if ckpt_path:
            self.ckpt_loaded = True
            self.ckpt_path = ckpt_path
            meta_path = ckpt_path+'.meta'
            saver = tf.train.import_meta_graph(meta_path)

            all_vars = tf.trainable_variables()
            if fix_layers:
                all_vars = all_vars[:int(len(all_vars)/2)]  # I do not understand why import_meta_graph does this
            all_var_shapes = [np.array(var.get_shape().as_list()) for var in all_vars]

            print(str(len(all_vars)//2) + ' layers detected')

            (self.filter_size, _, _, self.num_filters) = all_var_shapes[0]

            self.layer_dims = np.squeeze(np.array(all_var_shapes[1::2]))
            self.num_filters = self.layer_dims[0]
            self.num_layers = len(self.layer_dims)
            self.num_hidden = self.num_layers - 1

            assert self.load_from_ckpt()

        else:
            self.ckpt_loaded = False
            self.layer_dims = layer_dims
            self.num_filters = self.layer_dims[0]
            self.num_layers = len(self.layer_dims)
            self.num_hidden = self.num_layers - 1

            assert self.init_new_model()

    def init_new_model(self):
        """
        Initialize a new model when a .ckpt model has not been specified in the
        constructor
        """
        weight_scale = self.weight_scale
        filter_size = self.filter_size
        layer_dims = self.layer_dims

        self.conv_filters_params = {
            'Wfilt': tf.Variable(weight_scale*kaiming_normal([filter_size, filter_size, 1, self.num_filters]),
                                 name='Wfilt'),
            'bfilt': tf.Variable(weight_scale*kaiming_normal([self.num_filters, ]), name='bfilt'),
        }

        self.hidden_params = {}
        for ii in range(1, self.num_layers):
            wh_name = 'Wh'+str(ii)
            bh_name = 'bh'+str(ii)
            self.hidden_params[wh_name] = tf.Variable(weight_scale*kaiming_normal([layer_dims[ii-1],
                                                                                   layer_dims[ii]]), name=wh_name)
            self.hidden_params[bh_name] = tf.Variable(weight_scale*kaiming_normal([layer_dims[ii], ]), name=bh_name)

        self.sess.run(tf.global_variables_initializer())

        return True

    def load_from_ckpt(self, no_names=False):
        """Load a TensorFlow checkpoint file

        no_names : bool
            If the ckpt file doesn't have correct Tensor names,
            automatically determine the appropriate names based on the currently
            defined variables. This will fail cryptically if any variables in the
            initialized graph have a different shape than the initialized model.
            This usually should not be necessary unless the .meta file is missing
        """

        all_vars = tf.trainable_variables()
        self.sess.run(tf.global_variables_initializer())
        self.conv_filters_params = {
            'Wfilt': all_vars[0],
            'bfilt': all_vars[1],
        }

        self.hidden_params = {}
        for ii in range(1, self.num_layers):
            wh_name = 'Wh'+str(ii)
            bh_name = 'bh'+str(ii)
            self.hidden_params[wh_name] = all_vars[2*ii]
            self.hidden_params[bh_name] = all_vars[2*ii+1]

        if no_names:
            all_vars = tf.trainable_variables()
            #var_names = [v.name for v in tf.trainable_variables()]

            blank_names = ['Variable_'+str(ind) for ind in range(len(all_vars))]
            blank_names[0] = 'Variable'

            name_dict = dict(zip(blank_names, all_vars))

            saver = tf.train.Saver(max_to_keep=None, var_list=name_dict)

        else:
            saver = tf.train.Saver(max_to_keep=None)

        saver.restore(self.sess, self.ckpt_path)

        return True

    def tester(self):
        """
        For testing whether various methods were loaded correctly
        """
        print(self.sess.run(tf.report_uninitialized_variables()))
        print(self.sess.run(self.test))

    def ca_cnn(self, ic_tf):
        """
        Create the CNN using TensorFlow
        """
        num_hidden = self.num_hidden
        wspan, hspan = self.wspan, self.hspan
        pad_size = self.pad_size

        # Expand and reshape initial conditions
        state_pad = periodic_padding(ic_tf, pad_size)
        current_state = tf.cast(tf.reshape(state_pad, [1, hspan+2*pad_size, wspan+2*pad_size, 1]), tf.float32)

        # First convolutional layer
        conv1 = tf.nn.conv2d(current_state, self.conv_filters_params['Wfilt'],
                             strides=self.all_strides, padding='VALID')
        conv1_b = tf.nn.bias_add(conv1, self.conv_filters_params['bfilt'])
        conv1_activated = tf.nn.relu(conv1_b)
        conv1_flat = tf.reshape(conv1_activated, [wspan*hspan, self.num_filters])

        # Cycle through the hidden layers
        curr = conv1_flat
        for ii in range(1, self.num_layers):
            neural_state = tf.nn.bias_add(tf.matmul(curr, self.hidden_params['Wh'+str(ii)]),
                                          self.hidden_params['bh'+str(ii)])
            neural_state_activated = tf.nn.relu(neural_state)
            curr = neural_state_activated

        final_state = tf.reduce_sum(curr, axis=1)
        # output layer is just a sum (soft committee) over final states
        out_layer = tf.reshape(final_state, (wspan, hspan))

        return out_layer

    def loss(self):
        """
        Compute the L2 loss against a placeholder for the target states.
        Assumes self.ca_model has been set by load_ca()
        """
        y = tf.placeholder(tf.float32, shape=(self.wspan, self.hspan))
        loss = tf.reduce_sum(tf.nn.l2_loss(self.ca_model - y))
        return loss

    def ca_map(self, ic_tf0):
        """
        Show where the model above was activated
        Dev: this
        function needs to be parallelized
        """

        num_hidden = self.num_hidden
        wspan, hspan = self.wspan, self.hspan
        pad_size = self.pad_size

        ic_tf = conv_cast(ic_tf0)

        all_where_bools = list()

        # Expand and reshape initial conditions
        state_pad = periodic_padding(ic_tf, pad_size)
        current_state = tf.cast(tf.reshape(state_pad, [1, hspan+2*pad_size, wspan+2*pad_size, 1]), tf.float32)

        # First convolutional layer
        conv1 = tf.nn.conv2d(current_state, self.conv_filters_params['Wfilt'],
                             strides=self.all_strides, padding='VALID')
        conv1_b = tf.nn.bias_add(conv1, self.conv_filters_params['bfilt'])
        conv1_activated = tf.nn.relu(conv1_b)
        conv1_flat = tf.reshape(conv1_activated, [wspan*hspan, self.num_filters])

        where_bool1 = tf.greater(conv1_flat, 0)
        where_sum1 = tf.reduce_sum(tf.cast(where_bool1, tf.float32), axis=[0])
        all_where_bools.append(where_bool1)

        # Cycle through the hidden layers
        curr = conv1_flat
        for ii in range(1, 1+num_hidden):
            neural_state = tf.nn.bias_add(tf.matmul(curr, self.hidden_params['Wh'+str(ii)]),
                                          self.hidden_params['bh'+str(ii)])
            neural_state_activated = tf.nn.relu(neural_state)
            curr = neural_state_activated

            where_bool_curr = tf.greater(neural_state_activated, 0)
            where_sum_curr = tf.reduce_sum(tf.cast(where_bool_curr, tf.float32), axis=[0])
            all_where_bools.append(where_bool_curr)

        return all_where_bools

    def get_features(self, data):
        """
        Feed a batch of training data to the model and record the resulting
        activation patterns. Assumes that the session has a trained model
        available

        Arguments
        ---------
        data : input data of dimension (batch_size, dim1, dim2)

        Returns
        -------
        all_out : list of PxMxN arrays, where the list
            indexes the layer/depth, P indexes the batch,
            M indexes the (flattened) dimensionality of the input data,
            and N indexes individual neurons in the layer

        """

        X_train = np.copy(data)

        wspan, hspan = X_train.shape[-2:]

        all_out = list()
        for ind in range(self.num_layers):
            all_out.append(list())

        X = tf.placeholder(tf.float32, shape=(self.wspan, self.hspan))
        ca_map_inner = self.ca_map(X)

        for ind in range(self.num_layers):
            for X_train_item in X_train:
                out = self.sess.run(ca_map_inner, feed_dict={X: X_train_item})
                out = [np.squeeze(item) for item in out]

                all_out[ind].append(out[ind])

        all_out = [np.array(item).astype(float) for item in all_out]

        return all_out

    def load_ca(self, model_path):
        """
        Given a path to a checkpoint file, load the trained model and fill out the
        appropriate parameters. Must be done within an open TensorFlow session
        """
        X = tf.placeholder(tf.float32, shape=(self.wspan, self.hspan))
        init = tf.global_variables_initializer()

        self.sess.run(init)
        saver = tf.train.Saver(max_to_keep=None)

        # initialize a blank model from the placeholder initial conditions
        self.ca_model = self.ca_cnn(X)

        saver.restore(self.sess, model_path)

        return self.ca_model
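
# Illustrative usage (TF1-style session workflow; the layer sizes and initial
# condition below are assumptions for demonstration):
#     from ca_funcs import make_glider
#     sess = tf.InteractiveSession()
#     net = ConvNet(sess, input_dim=(10, 10), layer_dims=[10, 10, 10])
#     next_state = net.ca_cnn(conv_cast(make_glider((10, 10))))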
--------------------------------------------------------------------------------
/ca_funcs.py:
--------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import collections

from itertools import product

from utils import *


def make_table_walk(nbins, known_rule=''):
    '''
    Walk across a table of CA rules, changing one
    index at a time. When a specific rule is given, incorporate it into the walk

    nbins : int, the number of entries in the rule table (and the number of rules
        in the walk)

    known_rule : np.array, a known rule to include (consisting of ones and zeros)


    Dev:
    - Work with a list of rules rather than just one or zero specified rules
    - Check the ordering of the rules; right now this only takes the outputs of the
      truth table and assumes the ordering generated by "all_combinations"
    - A better algorithm would traverse the rule list in one loop and draw
      indices to hit from different, non-overlapping sets based on the current index
      value. Probably not much of a performance boost though, but at least conceptually
      simpler
    '''

    selection_order = np.random.choice(range(nbins), nbins, replace=False)

    all_rules = np.zeros((nbins, nbins))

    if len(known_rule)==0:
        for ind in range(len(all_rules)):
            all_rules[ind:, selection_order[ind]] = 1
    else:

        num_on = int(np.sum(known_rule))
        num_off = int(nbins - num_on)

        where_on = np.where(known_rule==1)[0]
        where_off = np.where(~(known_rule==1))[0]

        assert num_on==len(where_on)
        assert num_off==len(where_off)

        selection_order_indices = np.random.choice(range(num_on), num_on, replace=False)
        selection_order = where_on[selection_order_indices]
        for ind in range(len(selection_order)):
            all_rules[ind:, selection_order[ind]] = 1

        selection_order_indices = np.random.choice(range(num_off), num_off, replace=False)
        selection_order = where_off[selection_order_indices]
        for ind in range(len(selection_order)):
            all_rules[num_on+ind:, selection_order[ind]] = 1

    return all_rules
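
# Illustrative check (nbins assumed): each row of the walk turns on one more
# rule-table output than the previous row
#     >>> walk = make_table_walk(4)
#     >>> np.sum(walk, axis=1)
#     array([1., 2., 3., 4.])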

def get_network_entropies(feature_map):
    '''
    Given a list of layer firing patterns from a fully-trained model, find the
    entropy of single-neuron firings, layer firings, and layer group firings
    in order to assess independence

    feature_map : list of lists
        A list of firing patterns, indexed by layer

    DEV: collections.Counter is actually faster than using the np.unique
    function; could try setting a global flag for Counter when the script is
    first loaded, and then use it if it is available?

    '''

    neuron_ent = [layer_entropy(thing) for thing in feature_map]

    all_layer_ents = list()
    all_patterns = list()

    for layer in feature_map:
        flat_out = (np.reshape(layer, (-1, layer.shape[-1]))).astype(int)
        all_patterns.append(flat_out)
        vals, counts = np.unique(flat_out, axis=0, return_counts=True)
        counts = counts/np.sum(counts)
        all_layer_ents.append(shannon_entropy(counts))

    layer_ent = all_layer_ents

    whole_pattern = np.hstack(all_patterns)
    vals, counts = np.unique(whole_pattern, axis=0, return_counts=True)
    counts = counts/np.sum(counts)
    whole_ent = shannon_entropy(counts)

    out = (whole_ent, layer_ent, neuron_ent)
    return out

def periodic_padding(image, padding=1):
    '''
    Create a periodic padding (wrap) around an image stack, to emulate periodic boundary conditions
    Adapted from https://github.com/tensorflow/tensorflow/issues/956

    If the image is 3-dimensional (like an image batch), padding occurs along the last two axes

    '''
    if len(image.shape)==2:
        upper_pad = image[-padding:, :]
        lower_pad = image[:padding, :]

        partial_image = tf.concat([upper_pad, image, lower_pad], axis=0)

        left_pad = partial_image[:, -padding:]
        right_pad = partial_image[:, :padding]

        padded_image = tf.concat([left_pad, partial_image, right_pad], axis=1)

    elif len(image.shape)==3:
        upper_pad = image[:, -padding:, :]
        lower_pad = image[:, :padding, :]

        partial_image = tf.concat([upper_pad, image, lower_pad], axis=1)

        left_pad = partial_image[:, :, -padding:]
        right_pad = partial_image[:, :, :padding]

        padded_image = tf.concat([left_pad, partial_image, right_pad], axis=2)

    else:
        raise ValueError("Input data shape not understood.")

    return padded_image


def conv_cast(arr, cast_type=tf.float32):
    return tf.cast(tf.convert_to_tensor(arr), cast_type)

def arr2tf(arr, var_type='None'):
    '''Given an np.array, convert it to a float32 tensor

    var_type: 'var' or 'const'
        Whether the created tensor is a constant or a variable

    '''

    arr_tf = tf.convert_to_tensor(arr)

    if var_type=='const':
        arr_tf = tf.constant(arr_tf)
    elif var_type=='var':
        arr_tf = tf.Variable(arr_tf)
    else:
        pass

    out = tf.cast(arr_tf, tf.float32)

    return out

def categorize_images(image_stack, neighborhood="von neumann"):
    '''
    Given an MxNxN stack of numpy images, performs periodic convolution with an SxSxT
    stack of kernels to produce an MxNxN output representing which of the T classes
    each pixel belongs to. Each class represents a distinct neighborhood arrangement
    around that point

    This function may be used to find the prior distribution of inputs in an image

    Returns
    -------

    indices : tf.Tensor. Corresponds to the T labels for each pixel in the original
        image stack

    '''

    if neighborhood=="von neumann":
        pad_size = 1
        all_filters = np.transpose(all_combinations(2, d=9), (2, 1, 0))
        all_biases = 1-np.sum(all_filters, axis=(0, 1))
        all_filters[all_filters==0] -= np.prod(all_filters.shape[:2])
    else:
        raise NotImplementedError("Specified neighborhood type not implemented")

    state = conv_cast(image_stack)
    kernel = conv_cast(all_filters)[:, :, tf.newaxis, :]
    biases = conv_cast(all_biases)

    input_padded = periodic_padding(state, pad_size)[..., tf.newaxis]

    conv_image = tf.nn.conv2d(input_padded, kernel, strides=[1, 1, 1, 1], padding='VALID')

    # last axis is a one-hot representation telling us which of the D^M states we are in
    activation_image = tf.nn.relu(conv_image + biases)

    indices = tf.argmax(activation_image, axis=-1)

    return indices


def image_entropy(im_stack, neighborhood="von neumann"):
    '''
    Given a stack of images, compute the entropy of the symbol distribution for
    each image. Currently, this function assumes a von Neumann neighborhood
    around each pixel

    im_stack : MxNxN np.array, where M indexes the image batch
        and NxN are the image dimensions

    Development
    -----------

    It would be nice if this whole process were pure TensorFlow, for speed

    '''

    categ_im = categorize_images(im_stack)

    if tf.executing_eagerly():
        categ_im_arr = categ_im.numpy()
    else:
        categ_im_arr = categ_im.eval()

    flat_categs = np.reshape(categ_im_arr, (categ_im_arr.shape[0], np.prod(categ_im_arr.shape[-2:])))

    all_ents = np.zeros(flat_categs.shape[0])

    for ind, flat_thing in enumerate(flat_categs):
        unique_keys, counts = np.unique(flat_thing, return_counts=True)
        counts = counts.astype(float)
        # dict(zip(unique_keys, counts)) # make histogram dict

        counts /= np.sum(counts)  # normalize
        ent = shannon_entropy(counts)

        all_ents[ind] = ent

    return all_ents
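
# Illustrative check (random binary images assumed): one entropy value per image
#     >>> ims = np.random.choice([0., 1.], size=(5, 10, 10))
#     >>> image_entropy(ims).shape
#     (5,)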

def make_ca(words, symbols, neighborhood="von neumann"):
    '''
    Build an arbitrary cellular automaton in TensorFlow.
    The CA will take images of the form MxNxN as input,
    where M is the batch size and NxN are the image dimensions

    CA states are formulated as individual "rules" based
    on pattern matching 2^D = 2^9 single inputs

    Inputs
    ------

    words : iterable of M x (...) input states corresponding to the
        rule table for the CA

    symbols : M-vector of assignments (next states) for each of the
        words, in the same order as the words vector

    Returns
    -------

    my_ca : func. A TensorFlow function implementing one update step of the CA

    Development
    -----------

    Test to ensure that the generated function performs in both
    eager and traditional tensorflow environments

    '''

    # this may not be true for a non-binary CA; generalize this later
    all_filters = words
    state_assignments = symbols

    if neighborhood=="von neumann":
        pad_size = 1
        # the filters are regenerated in the canonical all_combinations ordering;
        # `words` is assumed to follow that same ordering
        all_filters = np.transpose(all_combinations(2, d=9), (2, 1, 0))
        all_biases = 1-np.sum(all_filters, axis=(0, 1))
        all_filters[all_filters==0] -= np.prod(all_filters.shape[:2])
    else:
        raise NotImplementedError("Specified neighborhood type not implemented")

    kernel = conv_cast(all_filters)[:, :, tf.newaxis, :]
    biases = conv_cast(all_biases)
    state_assignments = conv_cast(state_assignments)

    def my_ca(image_stack):
        '''
        Automatically generated function created by make_ca().
        Input array must already be a tensor when fed to the function
        '''
        input_padded = periodic_padding(image_stack, pad_size)[..., tf.newaxis]

        conv_image = tf.nn.conv2d(input_padded, kernel, strides=[1, 1, 1, 1], padding='VALID')

        # last axis is a one-hot representation telling us which of the D^M states we are in
        activation_image = tf.nn.relu(conv_image + biases)

        #next_states = tf.matmul(activation_image, tf.expand_dims(state_assignments,1))
        next_states = tf.reduce_sum(tf.multiply(activation_image, state_assignments[tf.newaxis, :]), axis=-1)

        return next_states

    return my_ca
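
# Illustrative usage (the random rule table below is an assumption for
# demonstration; `words` must follow the ordering produced by all_combinations):
#     >>> words = all_combinations(2, d=9)
#     >>> symbols = np.random.choice([0., 1.], size=len(words))
#     >>> ca = make_ca(words, symbols)
#     >>> frames = ca(conv_cast(np.random.choice([0., 1.], size=(1, 10, 10))))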

def make_game_of_life():
    '''
    Returns a simplified TensorFlow implementation of Conway's Game of Life
    '''

    neighborhood_radius = 3  # diameter of the 3x3 Moore neighborhood
    pad_size = 1

    neighbor_filt = np.ones((neighborhood_radius, neighborhood_radius))
    neighbor_filt[1, 1] = 0
    middle_filt = np.zeros((neighborhood_radius, neighborhood_radius))
    middle_filt[1, 1] = 1
    all_filters = np.dstack((middle_filt, neighbor_filt, neighbor_filt, neighbor_filt, neighbor_filt))
    all_biases = np.array([0, -1, -2, -3, -4])
    total_filters = len(all_biases)
    kernel = conv_cast(all_filters)[:, :, tf.newaxis, :]
    biases = conv_cast(all_biases)

    wh1_arr = np.array([
        [0, 0, 4/3, -8/3, -1/3],
        [3/2, 5/4, -5, -1/4, -1/4]
    ]).T
    bh1_arr = np.array([-1/3, -7/4]).T
    wh1 = conv_cast(wh1_arr)
    bh1 = conv_cast(bh1_arr)

    def my_ca(image_stack):
        '''
        Automatically generated function created by make_game_of_life().
        Input array must already be a tensor when fed to the function
        '''
        input_padded = periodic_padding(image_stack, pad_size)[..., tf.newaxis]

        conv_image = tf.nn.conv2d(input_padded, kernel, strides=[1, 1, 1, 1], padding='VALID')

        activation_image = tf.nn.relu(conv_image + biases)

        activated_flat = tf.reshape(activation_image, [-1, total_filters])

        h1 = tf.nn.relu(tf.matmul(activated_flat, wh1) + bh1)

        scores = tf.reduce_sum(h1, axis=-1)
        next_states = tf.reshape(scores, [*activation_image.shape[:3], 1])

        return tf.squeeze(next_states)

    return my_ca
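
# Illustrative usage: advance a glider (built with make_glider, defined below)
# by one Game of Life step
#     >>> gol = make_game_of_life()
#     >>> ic = conv_cast(make_glider(10)[None, ...])
#     >>> next_state = gol(ic)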

def make_glider(dims0):
    """
    Produce glider initial conditions for Conway's Game of Life

    dims0 : int, float, or length-2 iterable

    """

    dims = np.ravel(np.array([dims0]))

    if len(dims)==1:
        dims = np.squeeze([dims, dims])
    dims = np.array(dims)

    # Check that the provided dimensions are large enough
    for item in dims:
        assert item >= 3

    glider_center = np.array([[0, 1, 0],
                              [0, 0, 1],
                              [1, 1, 1]])

    ins_inds = np.floor(dims/2).astype(int)

    out_arr = np.zeros(dims)
    out_arr[ins_inds[0]-1:ins_inds[0]+2, ins_inds[1]-1:ins_inds[1]+2] = glider_center

    return out_arr
--------------------------------------------------------------------------------
/resources/tf_v1_version /ca_funcs.py:
--------------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
import collections

from itertools import product

from utils import *


def make_table_walk(nbins, known_rule=''):
    '''
    Walk across a table of CA rules, changing one
    index at a time. When a specific rule is given, incorporate it into the walk

    nbins : int, the number of entries in the rule table (and the number of rules
        in the walk)

    known_rule : np.array, a known rule to include (consisting of ones and zeros)


    Dev:
    - Work with a list of rules rather than just one or zero specified rules
    - Check the ordering of the rules; right now this only takes the outputs of the
      truth table and assumes the ordering generated by "all_combinations"
    - A better algorithm would traverse the rule list in one loop and draw
      indices to hit from different, non-overlapping sets based on the current index
      value. Probably not much of a performance boost though, but at least conceptually
      simpler
    '''

    selection_order = np.random.choice(range(nbins), nbins, replace=False)

    all_rules = np.zeros((nbins, nbins))

    if len(known_rule)==0:
        for ind in range(len(all_rules)):
            all_rules[ind:, selection_order[ind]] = 1
    else:

        num_on = int(np.sum(known_rule))
        num_off = int(nbins - num_on)

        where_on = np.where(known_rule==1)[0]
        where_off = np.where(~(known_rule==1))[0]

        assert num_on==len(where_on)
        assert num_off==len(where_off)

        selection_order_indices = np.random.choice(range(num_on), num_on, replace=False)
        selection_order = where_on[selection_order_indices]
        for ind in range(len(selection_order)):
            all_rules[ind:, selection_order[ind]] = 1

        selection_order_indices = np.random.choice(range(num_off), num_off, replace=False)
        selection_order = where_off[selection_order_indices]
        for ind in range(len(selection_order)):
            all_rules[num_on+ind:, selection_order[ind]] = 1

    return all_rules
65 | def get_network_entropies(feature_map): 66 | ''' 67 | Given a list of feature maps from a fully-trained model, find the 68 | entropy of single-neuron firings, layer firings, and layer group firings 69 | in order to assess independence 70 | 71 | feature_map : list of lists 72 | A list of firing patterns 73 | (layer index, ) 74 | 75 | DEV: collections.Counter is actually faster than using the np.unique 76 | function; could try setting a global flag for Counter when the script is 77 | first loaded, and then use it if it is available 78 | 79 | ''' 80 | 81 | neuron_ent = [layer_entropy(thing) for thing in feature_map] 82 | 83 | all_layer_ents = list() 84 | all_patterns = list() 85 | 86 | 87 | for layer in feature_map: 88 | flat_out = (np.reshape(layer, (-1,layer.shape[-1]))).astype(int) 89 | all_patterns.append(flat_out) 90 | vals, counts = np.unique(flat_out, axis=0, return_counts=True) 91 | counts = counts/np.sum(counts) 92 | all_layer_ents.append(shannon_entropy(counts)) 93 | 94 | layer_ent = all_layer_ents 95 | 96 | whole_pattern = np.hstack(all_patterns) 97 | vals, counts = np.unique(whole_pattern, axis=0, return_counts=True) 98 | counts = counts/np.sum(counts) 99 | whole_ent = shannon_entropy(counts) 100 | 101 | out = (whole_ent, layer_ent, neuron_ent) 102 | return out 103 | 104 | 105 | 106 | def kaiming_normal(shape): 107 | ''' 108 | An implementation of the initialization described 109 | by He et al. https://arxiv.org/abs/1502.01852 110 | ''' 111 | fan_in, fan_out = np.prod(shape[:-1]), shape[-1] 112 | return tf.random_normal(shape)*np.sqrt(2.0/fan_in) 113 | 114 | 115 | 116 | def periodic_padding(image, padding=1): 117 | ''' 118 | Create a periodic padding (wrap) around an image stack, to emulate periodic boundary conditions 119 | Adapted from https://github.com/tensorflow/tensorflow/issues/956 120 | 121 | If the image is 3-dimensional (like an image batch), padding occurs along the last two axes 122 | 123 | 124 | ''' 125 | if len(image.shape)==2: 126 | upper_pad = image[-padding:,:] 127 | lower_pad = image[:padding,:] 128 | 129 | partial_image = tf.concat([upper_pad, image, lower_pad], axis=0) 130 | 131 | left_pad = partial_image[:,-padding:] 132 | right_pad = partial_image[:,:padding] 133 | 134 | padded_image = tf.concat([left_pad, partial_image, right_pad], axis=1) 135 | 136 | elif len(image.shape)==3: 137 | upper_pad = image[:,-padding:,:] 138 | lower_pad = image[:,:padding,:] 139 | 140 | partial_image = tf.concat([upper_pad, image, lower_pad], axis=1) 141 | 142 | left_pad = partial_image[:,:,-padding:] 143 | right_pad = partial_image[:,:,:padding] 144 | 145 | padded_image = tf.concat([left_pad, partial_image, right_pad], axis=2) 146 | 147 | 148 | else: 149 | assert False, "Input data shape not understood." 150 | 151 | return padded_image 152 | 153 | 154 | def conv_cast(arr, cast_type=tf.float32): 155 | return tf.cast(tf.convert_to_tensor(arr), cast_type) 156 | 157 | def arr2tf(arr, var_type='None'): 158 | '''Given np.array, convert to a float32 tensor 159 | 160 | var_type: 'var' or 'const' 161 | Whether the created tensor is a constant or a variable 162 | 163 | ''' 164 | 165 | arr_tf = tf.convert_to_tensor(arr) 166 | 167 | if var_type=='const': 168 | arr_tf = tf.constant(arr_tf) 169 | elif var_type=='var': 170 | arr_tf = tf.Variable(arr_tf) 171 | else: 172 | pass 173 | 174 | out = tf.cast(arr_tf, tf.float32) 175 | 176 | return out 177 | 
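def _demo_periodic_padding():
    # Illustrative, untested sketch: periodic_padding (above) wraps each edge
    # of a tensor with the opposite edge, so a 2x2 input becomes 4x4 and the
    # first padded row comes from the last input row.
    im = tf.constant([[1., 2.], [3., 4.]])
    padded = periodic_padding(im, padding=1)
    # padded[1:3, 1:3] is the original image; padded[0, 1:3] equals [3., 4.]
    return padded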
178 | def categorize_images(image_stack, neighborhood="von neumann"): 179 | ''' 180 | Given an MxNxN stack of numpy images, performs periodic convolution with an SxSxT 181 | stack of kernels to produce an MxNxN output representing which of the T classes 182 | each pixel belongs to. Each class represents a distinct neighborhood arrangement 183 | around that point 184 | 185 | This function may be used to find the prior distribution of inputs in an image 186 | 187 | Returns 188 | ------- 189 | 190 | indices : tf.Tensor. Corresponds to the T labels for each pixel in the original 191 | image stack 192 | 193 | ''' 194 | 195 | if neighborhood=="von neumann": 196 | pad_size = 1 197 | all_filters = np.transpose(all_combinations(2,d=9), (2,1,0)) 198 | all_biases = 1-np.sum(all_filters,axis=(0,1)) 199 | all_filters[all_filters==0] -= np.prod(all_filters.shape[:2]) 200 | else: 201 | assert False, "Specified neighborhood type not implemented" 202 | 203 | state = conv_cast(image_stack) 204 | kernel = conv_cast(all_filters)[:,:,tf.newaxis,:] 205 | biases = conv_cast(all_biases) 206 | 207 | input_padded = periodic_padding(state, pad_size)[...,tf.newaxis] 208 | 209 | conv_image = tf.nn.conv2d(input_padded, kernel, strides=[1,1,1,1], padding='VALID') 210 | 211 | # last axis is one-hot representation telling us which of the D^M states we are in 212 | activation_image = tf.nn.relu(conv_image + biases) 213 | 214 | indices = tf.argmax(activation_image, axis=-1) 215 | 216 | return indices 217 | 218 | 219 | def image_entropy(im_stack, neighborhood="von neumann"): 220 | ''' 221 | Given a stack of images, compute the entropy of the symbol distribution for 222 | each image. Currently, this function assumes a von Neumann neighborhood 223 | around each pixel 224 | 225 | im_stack : MxNxN np.array, where M indexes the image batch 226 | and NxN are the image dimensions 227 | 228 | Development 229 | ----------- 230 | 231 | It would be nice if this whole process were pure Tensorflow, for speed 232 | 233 | ''' 234 | 235 | categ_im = categorize_images(im_stack) 236 | 237 | if tf.executing_eagerly(): 238 | categ_im_arr = categ_im.numpy() 239 | else: 240 | categ_im_arr = categ_im.eval() 241 | 242 | flat_categs = np.reshape(categ_im_arr,(categ_im_arr.shape[0], np.prod(categ_im_arr.shape[-2:]))) 243 | 244 | all_ents = np.zeros(flat_categs.shape[0]) 245 | 246 | for ind, flat_thing in enumerate(flat_categs): 247 | unique_keys, counts = np.unique(flat_thing, return_counts=True) 248 | counts = counts.astype(float) 249 | # dict(zip(unique_keys, counts)) # make histogram dict 250 | 251 | counts /= np.sum(counts) # normalize 252 | ent = shannon_entropy(counts) 253 | 254 | all_ents[ind] = ent 255 | 256 | return all_ents 257 | 258 | 259 | 
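# Note on the construction used by categorize_images above and make_ca below
# (added annotation): each of the 2^9 filters produced by
# all_combinations(2, d=9) is the 0/1 mask of one 3x3 binary neighborhood.
# Zero entries are then replaced by a large negative weight (-9, the filter
# area), and each bias is set to 1 minus the number of ones in its pattern,
# so a filter responds with exactly +1 on its own pattern and <= 0 on every
# other pattern. After the ReLU, the channel axis is therefore a one-hot code
# over neighborhood patterns; an argmax (categorize_images) or a dot product
# with the symbol vector (make_ca) then reads off the class or next state.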
260 | def make_ca(words, symbols, neighborhood="von neumann"): 261 | ''' 262 | Build an arbitrary cellular automaton in tensorflow 263 | The CA will take images of the form MxNxN as input, 264 | where M is the batch size and NxN is the image dimensions 265 | 266 | CA states are formulated as individual "rules" based on 267 | pattern matching each of the 2^D = 2^9 possible binary neighborhoods 268 | 269 | Inputs 270 | ------ 271 | 272 | words: iterable of M x (...) input states corresponding to the 273 | rule table for the CA 274 | 275 | symbols : M-vector of assignments (next states) for each of the 276 | words, in the same order as the words vector 277 | 278 | Returns 279 | ------- 280 | 281 | my_ca : func. A function in Tensorflow 282 | 283 | Development 284 | ----------- 285 | 286 | Test to ensure that the generated function performs in both 287 | eager and traditional tensorflow environments 288 | 289 | ''' 290 | 291 | # this may not be true for a non-binary CA; generalize this later 292 | all_filters = words 293 | state_assignments = symbols 294 | 295 | 296 | if neighborhood=="von neumann": 297 | pad_size = 1 298 | all_filters = np.transpose(all_combinations(2,d=9), (2,1,0)) 299 | all_biases = 1-np.sum(all_filters,axis=(0,1)) 300 | all_filters[all_filters==0] -= np.prod(all_filters.shape[:2]) 301 | else: 302 | assert False, "Specified neighborhood type not implemented" 303 | 304 | kernel = conv_cast(all_filters)[:,:,tf.newaxis,:] 305 | biases = conv_cast(all_biases) 306 | state_assignments = conv_cast(state_assignments) 307 | def my_ca(image_stack): 308 | ''' 309 | Automatically generated function created by make_ca() 310 | Input array must already be a tensor when fed to the function 311 | ''' 312 | input_padded = periodic_padding(image_stack, pad_size)[...,tf.newaxis] 313 | 314 | conv_image = tf.nn.conv2d(input_padded, kernel, strides=[1,1,1,1], padding='VALID') 315 | 316 | # last axis is one-hot representation telling us which of the D^M states we are in 317 | activation_image = tf.nn.relu(conv_image + biases) 318 | 319 | #next_states = tf.matmul(activation_image, tf.expand_dims(state_assignments,1)) 320 | next_states = tf.reduce_sum(tf.multiply(activation_image, state_assignments[tf.newaxis,:]), axis=-1) 321 | 322 | return next_states 323 | 324 | return my_ca 325 | 326 | 327 | def make_game_of_life(): 328 | ''' 329 | Returns a simplified Tensorflow implementation of Conway's Game of Life 330 | ''' 331 | 332 | neighborhood_radius = 3 333 | pad_size = 1 334 | 335 | neighbor_filt = np.ones((neighborhood_radius,neighborhood_radius)) 336 | neighbor_filt[1,1] = 0 337 | middle_filt = np.zeros((neighborhood_radius,neighborhood_radius)) 338 | middle_filt[1,1] = 1 339 | all_filters = np.dstack((middle_filt, neighbor_filt, neighbor_filt, neighbor_filt, neighbor_filt)) 340 | all_biases = np.array([0, -1, -2, -3, -4]) 341 | total_filters = len(all_biases) 342 | kernel = conv_cast(all_filters)[:,:,tf.newaxis,:] 343 | biases = conv_cast(all_biases) 344 | 345 | wh1_arr = np.array([ 346 | [0, 0, 4/3, -8/3, -1/3], 347 | [3/2, 5/4, -5, -1/4, -1/4] 348 | ]).T 349 | bh1_arr = np.array([-1/3,-7/4]).T 350 | wh1 = conv_cast(wh1_arr) 351 | bh1 = conv_cast(bh1_arr) 352 | 353 | def my_ca(image_stack): 354 | ''' 355 | Automatically generated function created by make_ca() 356 | Input array must already be a tensor when fed to the function 357 | ''' 358 | input_padded = periodic_padding(image_stack, pad_size)[...,tf.newaxis] 359 | 360 | conv_image = tf.nn.conv2d(input_padded, kernel, strides=[1,1,1,1], padding='VALID') 361 | 362 | activation_image = tf.nn.relu(conv_image + biases) 363 | 364 | activated_flat = tf.reshape(activation_image, [-1, total_filters]) 365 | 366 | h1 = tf.nn.relu(tf.matmul(activated_flat, wh1) + bh1) 367 | 368 | scores = tf.reduce_sum(h1, axis=-1) 369 | next_states = tf.reshape(scores, [*activation_image.shape[:3],1]) 370 | 371 | return tf.squeeze(next_states) 372 | 373 | return my_ca 374 | 375 | 376 | 377 | def make_glider(dims0): 378 | """ 379 | Produce Glider initial conditions for Conway's Game of Life 380 | 381 | dims0 : int, float, or length 2 iterable 382 | 383 | """ 384 | 385 | dims = np.ravel(np.array([dims0])) 386 | 387 | if len(dims)==1: 388 | dims = np.squeeze([dims, dims]) 389 | 
dims = np.array(dims) 390 | 391 | # Check that provided dimensions are large enough 392 | for item in dims: 393 | assert item >= 3 394 | 395 | glider_center = np.array([[0,1,0], 396 | [0,0,1], 397 | [1,1,1]]) 398 | 399 | ins_inds = np.floor(dims/2).astype(int) 400 | 401 | out_arr = np.zeros(dims) 402 | out_arr[ins_inds[0]-1:ins_inds[0]+2, ins_inds[1]-1:ins_inds[1]+2] = glider_center 403 | 404 | return out_arr 405 | -------------------------------------------------------------------------------- /resources/tf_v1_version /train CA demo.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## Import requirements" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "import numpy as np\n", 17 | "import matplotlib.pyplot as plt\n", 18 | "\n", 19 | "import tensorflow as tf\n", 20 | "\n", 21 | "%matplotlib inline\n", 22 | "plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots\n", 23 | "plt.rcParams['image.interpolation'] = 'nearest'\n", 24 | "plt.rcParams['image.cmap'] = 'gray'\n", 25 | "\n", 26 | "import os\n", 27 | "import glob\n", 28 | "import pickle\n", 29 | "\n", 30 | "import sys\n", 31 | "sys.path.insert(0, '..')\n", 32 | "from ca_funcs import *\n", 33 | "from utils import *\n", 34 | "\n", 35 | "\n", 36 | "%load_ext autoreload\n", 37 | "%autoreload 2" 38 | ] 39 | }, 40 | { 41 | "cell_type": "markdown", 42 | "metadata": {}, 43 | "source": [ 44 | "# Create a new model" 45 | ] 46 | }, 47 | { 48 | "cell_type": "code", 49 | "execution_count": 2, 50 | "metadata": { 51 | "scrolled": false 52 | }, 53 | "outputs": [], 54 | "source": [ 55 | "from train_ca import *\n", 56 | "\n", 57 | "try:\n", 58 | " tf.reset_default_graph()\n", 59 | " sess.close()\n", 60 | "except:\n", 61 | " pass\n", 62 | "sess = tf.InteractiveSession()\n", 63 | "\n", 64 | "wspan, hspan = 10, 10\n", 65 | "layer_dims = [100,100,100,100,100]\n", 66 | "\n", 67 | "test_model = ConvNet(sess, input_dim=(wspan,hspan), layer_dims=layer_dims, weight_scale=1e0)" 68 | ] 69 | }, 70 | { 71 | "cell_type": "code", 72 | "execution_count": 7, 73 | "metadata": {}, 74 | "outputs": [ 75 | { 76 | "data": { 77 | "text/plain": [ 78 | "" 79 | ] 80 | }, 81 | "execution_count": 7, 82 | "metadata": {}, 83 | "output_type": "execute_result" 84 | }, 85 | { 86 | "data": { 87 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAPgAAAD8CAYAAABaQGkdAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAACkZJREFUeJzt3V+IlXUex/HPZ+dMONpGwu5FjeOOQeRKsBiHqIQusgvbogi8MChYb7zZyokgbO+6jYgiIhisIJS6MKGI/i1kFwshTRqUTYHorE4audD2D0UPfffinAVrdc4zO8+v55wv7xcEznT89UF895xz5mHGESEAOf2m6QEAyiFwIDECBxIjcCAxAgcSI3AgMQIHEiNwIDECBxJrFTm01YrR0dESRwOQdO7cOXU6Hfd7XJHAR0dHNTk5WeJoAJLm5uYqPY6n6EBiBA4kRuBAYgQOJEbgQGIEDiRWKXDbm2x/Yfuw7R2lRwGoR9/AbY9IelbSbZLWSbrH9rrSwwAsXZUr+PWSDkfEkYg4K+kVSXeVnQWgDlUCH5d0/LyP53uf+xnb22zP2J7pdDp17QOwBFUCv9D9rv/zrVgjYjoi2hHRbrWK3AELYJGqBD4vaeK8j1dJOlFmDoA6VQn8Q0lX215j+xJJWyS9XnYWgDr0fS4dER3b90t6R9KIpBci4lDxZQCWrNKL5Yh4U9KbhbcAqBl3sgGJETiQGIEDiRE4kBiBA4kROJAYgQOJETiQGIEDiRE4kBiBA4kROJAYgQOJETiQGIEDiRE4kBiBA4kROJAYgQOJETiQGIEDiRE4kBiBA4kROJAYgQOJETiQGIEDiRE4kBiBA4kROJAYgQOJETiQGIEDiRE4kFjfwG1P2N5ne9b2Idvbf41hAJauVeExHUkPR8QB27+V9JHtv0fEZ4W3AViivlfwiDgZEQd6v/5e0qyk8dLDACzdol6D256UtF7S/hJjANSrylN0SZLtSyW9KmkqIr67wL/fJmmbJLValY8FUFClK7jtUXXj3h0Rey/0mIiYjoh2RLQJHBgMVd5Ft6TnJc1GxJPlJwGoS5Ur+AZJ90m6xfbHvX/+XHgXgBr0fS4dEf+Q5F9hC4CacScbkBiBA4kROJAYgQOJETiQGHekDJmpqaki5z722GNFzt20aVORcz/44IMi52bDFRxIjMCBxAgcSIzAgcQIHEiMwIHECBxIjMCBxAgcSIzAgcQIHEiMwIHECBxIjMCBxAgcSIzAgcQIHEiMwIHECBxIjMCBxAgcSIzvqjpkVq9eXeTcZ555psi5mzdvLnLu1q1baz8z43dq5QoOJEbgQGIEDiRG4EBiBA4kRuBAYgQOJFY5cNsjtg/afqPkIAD1WcwVfLuk2VJDANSvUuC2V0m6XdLOsnMA1KnqFfwpSY9I+uliD7C9zfaM7ZlOp1PLOABL0zdw23dI+joiPlrocRExHRHtiGi3WtziDgyCKlfwDZLutD0n6RVJt9jeVXQVgFr0DTwiHo2IVRExKWmLpPci4t7iywAsGV8HBxJb1IvliHhf0vtFlgCoHVdwIDECBxIjcCAxAgcSI3AgMW45K2RqaqrIuY8//niRc1977bUi537zzTdFzl27dm3tZ65cubL2M5vGFRxIjMCBxAgcSIzAgcQIHEiMwIHECBxIjMCBxAgcSIzAgcQIHEiMwIHECBxIjMCBxAgcSIzAgcQIHEiMwIHECBxIjMCBxAgcSIzvqlrI6tWri5y7b9++Iufu2lXmJ0K/9NJLRc6dmJio/cwffvih9jObxhUcSIzAgcQIHEiMwIHECBxIjMCBxCoFbvty23tsf2571vaNpYcBWLqqXwd/WtLbEbHZ9iWSlhfcBKAmfQO3fZmkmyX9RZIi4qyks2VnAahDlafoV0k6JelF2wdt77S9ovAuADWoEnhL0nWSnouI9ZJ+lLTjlw+yvc32jO2ZTqdT80wA/48qgc9Lmo+I/b2P96gb/M9ExHREtCOi3WpxizswCPoGHhFfSTpu+5repzZK+qzoKgC1qHqpfUDS7t476EckbS03CUBdKgUeER9LahfeAqBm3MkGJEbgQGIEDiRG4EBiBA4kRuBAYo6I2g8dGxuLycnJ2s8dJmfOnCly7t13313k3PHx8SLnXnHFFUXOLfFdYI8ePVr7maXMzc3p9OnT7vc4ruBAYgQOJEbgQGIEDiRG4EBiBA4kRuBAYgQOJEbgQGIEDiRG4EBiBA4kRuBAYgQOJEbgQGIEDiRG4EBiBA4kRuBAYgQOJMbP+S1k2bJlRc596623ipyLnLiCA4kROJAYgQOJETiQGIEDiRE4kBiBA4lVCtz2Q7YP2f7U9su2y3yRF0Ct+gZue1zSg5LaEXGtpBFJW0oPA7B0VZ+itySN2W5JWi7pRLlJAOrSN/CI+FLSE5KOSTop6duIePeXj7O9zfaM7ZlOp1P/UgCLVuUp+kpJd0laI+lKSSts3/vLx0XEdES0I6LdanGLOzAIqjxFv1XS0Yg4FRHnJO2VdFPZWQDqUCXwY5JusL3ctiVtlDRbdhaAOlR5Db5f0h5JByR90vs904V3AaiBI6L2Q8fGxmJycrL2cwF0zc3N6fTp0+73OO5kAxIjcCAxAgcSI3AgMQIHEiNwIDECBxIjcCAxAgcSI3AgMQIHEiNwIDECBxIjcCAxAgcSI3AgMQIHEiNwIDECBxIjcCAxAgcSI3AgMQIHEiNwIDECBxIjcCAxAgcSK/KzyWyfkvTPCg/9naR/1T6gnGHaO0xbpeHaOwhb/xARv+/3oCKBV2V7JiLajQ1YpGHaO0xbpeHaO0xbeYoOJEbgQGJNBz7d8H9/sYZp7zBtlYZr79BsbfQ1OICymr6CAyioscBtb7L9he3Dtnc0taMf2xO299metX3I9vamN1Vhe8T2QdtvNL1lIbYvt73H9ue9P+Mbm960ENsP9f4efGr7ZdvLmt60kEYCtz0i6VlJt0laJ+ke2+ua2FJBR9LDEfFHSTdI+usAbz3fdkmzTY+o4GlJb0fEWkl/0gBvtj0u6UFJ7Yi4VtKIpC3NrlpYU1fw6yUdjogjEXFW0iuS7mpoy4Ii4mREHOj9+nt1/wKON7tqYbZXSbpd0s6mtyzE9mWSbpb0vCRFxNmI+Hezq/pqSRqz3ZK0XNKJhvcsqKnAxyUdP+/jeQ14NJJke1LSekn7m13S11OSHpH0U9ND+rhK0ilJL/ZeTuy0vaLpURcTEV9KekLSMUknJX0bEe82u2phTQXuC3xuoN/Ot32ppFclTUXEd03vuRjbd0j6OiI+anpLBS1J10l6LiLWS/pR0iC/H7NS3WeaayRdKWmF7XubXbWwpgKflzRx3serNMBPdWyPqhv37ojY2/SePjZIutP2nLovfW6xvavZSRc1L2k+Iv77jGiPusEPqlslHY2IUxFxTtJeSTc1vGlBTQX+oaSrba+xfYm6b1S83tCWBdm2uq8RZyPiyab39BMRj0bEqoiYVPfP9b2IGMirTER8Jem47Wt6n9oo6bMGJ/VzTNINtpf3/l5s1AC/KS
h1nyL96iKiY/t+Se+o+07kCxFxqIktFWyQdJ+kT2x/3Pvc3yLizQY3ZfKApN29/9EfkbS14T0XFRH7be+RdEDdr64c1IDf1cadbEBi3MkGJEbgQGIEDiRG4EBiBA4kRuBAYgQOJEbgQGL/ARCAO1PKhmnwAAAAAElFTkSuQmCC\n", 88 | "text/plain": [ 89 | "
" 90 | ] 91 | }, 92 | "metadata": {}, 93 | "output_type": "display_data" 94 | } 95 | ], 96 | "source": [ 97 | "# Run untrained model on test data\n", 98 | "X = test_model.X\n", 99 | "ca_model = test_model.ca_cnn(X)\n", 100 | "out = sess.run(ca_model, feed_dict={X: make_glider((10,10))})\n", 101 | "\n", 102 | "plt.imshow(out)" 103 | ] 104 | }, 105 | { 106 | "cell_type": "markdown", 107 | "metadata": {}, 108 | "source": [ 109 | "# Train a new model" 110 | ] 111 | }, 112 | { 113 | "cell_type": "code", 114 | "execution_count": 2, 115 | "metadata": {}, 116 | "outputs": [], 117 | "source": [ 118 | "from train_ca import *\n", 119 | "\n", 120 | "try:\n", 121 | " tf.reset_default_graph()\n", 122 | " sess.close()\n", 123 | "except:\n", 124 | " pass\n", 125 | "sess = tf.InteractiveSession()" 126 | ] 127 | }, 128 | { 129 | "cell_type": "code", 130 | "execution_count": 3, 131 | "metadata": {}, 132 | "outputs": [ 133 | { 134 | "name": "stdout", 135 | "output_type": "stream", 136 | "text": [ 137 | "Epoch: 0001 cost=727148.993750000\n", 138 | "Epoch: 0151 cost=8.035618973\n", 139 | "Epoch: 0301 cost=6.478339553\n", 140 | "Epoch: 0451 cost=5.045743346\n", 141 | "Epoch: 0601 cost=3.301548845\n", 142 | "Epoch: 0751 cost=2.276217663\n", 143 | "Epoch: 0901 cost=0.748564085\n", 144 | "Epoch: 1051 cost=0.197094048\n", 145 | "Epoch: 1201 cost=0.045383141\n", 146 | "Epoch: 1351 cost=0.020902108\n" 147 | ] 148 | } 149 | ], 150 | "source": [ 151 | "## Make training data\n", 152 | "batch_size, wspan, hspan = (100,10,10)\n", 153 | "\n", 154 | "X_train = np.random.choice([0,1], (batch_size, wspan, hspan), p=[.5,.5])\n", 155 | "gol = make_game_of_life()\n", 156 | "y_train_tf = gol(conv_cast(X_train))\n", 157 | "y_train = y_train_tf.eval()\n", 158 | "\n", 159 | "\n", 160 | "\n", 161 | "## Define model parameters\n", 162 | "layer_dims = [100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100]\n", 163 | "learning_rate = 0.0001\n", 164 | "weight_scale = 1e0\n", 165 | "num_batches = 1\n", 166 | "training_epochs = 100\n", 167 | "batch_size = 10\n", 168 | "num_batches = 1\n", 169 | "training_epochs = 1500\n", 170 | "display_step = int(training_epochs/10)\n", 171 | "\n", 172 | "## Define model\n", 173 | "test_model = ConvNet(sess, input_dim=(wspan, hspan), layer_dims=layer_dims, weight_scale=weight_scale)\n", 174 | "X, y = test_model.X, test_model.y\n", 175 | "ca_model = test_model.ca_cnn(X)\n", 176 | "\n", 177 | "## Define loss and optimizer\n", 178 | "loss = tf.reduce_sum(tf.nn.l2_loss(ca_model - y))\n", 179 | "optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)\n", 180 | "train_op = optimizer.minimize(loss)\n", 181 | "\n", 182 | "\n", 183 | "## Re-initialize everything, since the optimizer needs to be initialized\n", 184 | "init = tf.global_variables_initializer()\n", 185 | "sess.run(init)\n", 186 | "\n", 187 | "## Now perform training\n", 188 | "all_losses = list()\n", 189 | "for epoch in range(training_epochs):\n", 190 | "\n", 191 | " avg_cost = 0.0\n", 192 | " avg_cost_rounded = 0.0\n", 193 | " \n", 194 | " for i in range(num_batches):\n", 195 | " batch_indices = np.random.choice(range(X_train.shape[0]), batch_size)\n", 196 | "\n", 197 | " for batch_ind in batch_indices:\n", 198 | " X_batch, y_batch = X_train[batch_ind], y_train[batch_ind]\n", 199 | " _, cost = sess.run([train_op, loss], feed_dict={X: X_batch, y: y_batch})\n", 200 | "\n", 201 | " avg_cost += cost/(num_batches*batch_size)\n", 202 | " all_losses.append(avg_cost) \n", 203 | " if epoch % display_step == 0:\n", 204 | " print(\"Epoch:\", '%04d' 
% (epoch+1), \"cost={:.9f}\".format(avg_cost))" 205 | ] 206 | }, 207 | { 208 | "cell_type": "code", 209 | "execution_count": 4, 210 | "metadata": {}, 211 | "outputs": [ 212 | { 213 | "data": { 214 | "text/plain": [ 215 | "Text(0.5,1,'Observed Output')" 216 | ] 217 | }, 218 | "execution_count": 4, 219 | "metadata": {}, 220 | "output_type": "execute_result" 221 | }, 222 | { 223 | "data": { 224 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAArwAAAD0CAYAAAB5LvVrAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAADkhJREFUeJzt3Xus5GV9x/HPF2G5tqUUrIIIaS1eSCpJtdqLlxTrpdpgwHqJWi9ob6KxNWqD1aKpbTFG66Vqqta2IA1e8NJqaVGrtUYkkUjqH1CrBVkU6wrIKrCgPP3j9xudc+ICwuyePV9fr2Rzzp7fzHN+szvPmfc888xujTECAABd7bXRJwAAALuS4AUAoDXBCwBAa4IXAIDWBC8AAK0JXgAAWhO8AHuYqnp6Vf3nRp8HbISqOq2qztzo8/hhmLN7PsG7CVTVpVX1sF38PTbdDxhYmOfI9VX1raVfb9zA8/l4VT1rF46/b1X9RVV9eb7dX6iqF1ZV3cbrH11Vo6r2XtH5rHQ8epvj8L+q6rqqurKq3lxVB2/0ee1K5uzG+5G5oUB7vznG+MhGn8Ru8u4kd0nyG0kuTnK/JGckOTLJ8zbwvOAWVdULkrwoydOSfDTJEUnelOS8qvqVMcaNu+k89h5jfGd3fK+ZObvBrPBuIouXTKrq1VV1dVX9b1U9aun4x+dnkBdU1Ter6gNVdch87KFVtXXdeJdW1cOq6pFJTk3yhHll7KLde8tg15lXj96z9PvTq+qjNXloVW2tqlOrats8J568dNl95/n25ar6WlW9par2Xzp+QlV9rqquraovVtUjq+qVSR6U5I3LK81Vda+qOq+qrqqqS6rq8Uvj/FRVfXAe54IkP3sLt+f4JA9PctIY4/NjjO+MMc5P8pQkz6mqe8yXW/PK0LpXcf5j/njNfI6/NP98+VRVvWH++XHx/L1ye8a7DX81/Iipqh9P8vIkzx1jnDvGuGmMcWmSxyc5KtN9eGG/qjq7qrZX1YVVdd+lcV5cVVfMxy5Z3E+raq+q+uN5Ln6jqt619Bi4WNE8uaq+nORjVXVuVZ2y7hwvqqoT58/N2UYE7+bzgCSXJDk0yauSvL1qzUsiv53kmUkOT/KdJK+/tQHHGOcm+fMkZ48xDhpj3PfWrgObyAuS/Pz84PCgJCcnedr4/v+rfpdM8+mITKtOf1NV95yPnZ7kmCTHJbnHfJmXJUlV/WKSf0jywiQHJ3lwkkvHGC9J8skkp8zz6ZSqOjDJeUnOSnLnJE9K8qaqOnb+Pn+d5IYkd800f595C7fn15N8Zoxx+fIXxxifSbI1yfE/8FprPXj+ePB8jp+ef/+AJF+a/zz+NMk5i2C4nePBsl9Osl+Sc5a/OMb4VpJ/yXTfXjgh06roIZnmzfurap95bp6S5P5jjB9L8ogkl87XeV6SxyZ5SKbHwKszza1lD0ly7/l6Z2Wai0mSqrpPpvD+kDnbj+DdfC4bY7x1jPHdJH+fabL99NLxM+ZnkN9O8tIkj6+qO23EicJu9v6qumbp17OTZIxxXaaVlNckOTPT6tLWddd96RhjxxjjE0k+lGneVJJnJ/nDMcZVY4ztmZ4YPnG+zslJ/naMcd4Y4+YxxhVjjIt3cm6PyRTD75hXdy5M8t4kj5vn50lJXjbG+PYY4/OZ5vbOHJrkqzs59tX5+O31f0n+al55OzvTk+tH34HxYNmhSbbtZCvB+vvuZ8cY7xlj3JRp7u6X5IFJvptk3yT3qap9xhiXjjG+OF/nd5O8ZIyxdYyxI8lpmebY8vbN0+Z5dn2S9yU5rqqOmo89Ock583XN2Wbs4d18rlx8Msa4bl7cPWjp+PIzyMuS7JM7Nplgs3jszvbwjjEuqKovZVqpede6w1fPTxAXLsu0OnRYkgOSfHbpRZRKsngCeWSSD9/GczsqyQOq6pqlr+2daQ/fYfPn6+fuzmxL8nM7OXbX+fjtdcXSyvfiPA6/A+PBsm1JDq0fvH92/X33e/NhjHFzTVvyDh9jfLKqnp8pZo+tqn9N8kdjjK9kmmfvq6qbl8b5btYuCi2Pu72qPpTpSezp88ffmQ+bs81Y4e3nyKXP757kpkyT6duZHryTJPMz1MOWLrs8YaCVqnpOplWhr2R6w8yyn5xfvly4+3y5bUmuT3LsGOPg+ddPjDEWTzAvz8737a2fT5cn+cTSOIuXEX8/ydczbT9aP3d35iOZHoiXL7/YYnFkko/NX1oz5zNt3djZ+S0csW6L1OLP4vaOB8s+nWRHkhOXvzjPv0dlehPbwpFLx/dKcrfM98UxxlljjF/NFKUjU6wm0zx71Lp5tt8Y44qlcdffV/8xyZPmPaz7J/n3pbHM2UYEbz9Pqar7VNUBSV6R5D3z9of/zvQmgEdX1T5J/iRTACx8LcnR8w8WaKOqjknyZ5m2NTw1yYuq6rh1F3t5VW2Z9/g+Jsm7xxg3J3lrktdW1Z3nsY6oqkfM13l7kmdU1fHzm2WOqKp7zce+luRnlsb/5yTHVNVT532I+1TV/avq3vP8PCfJaVV1wLyP8Gk7uz3zKvZHk7y3qo6tqjtV1QOTvDPJm8cYX5gv+rkkT5y/1/2SPG5pmK8nuXndOSbTCvjz5uv8Vqa9jotV7NszHnzPGOObmd609oaa3uC5T1UdnWmv7tZMq6cLv1BVJ87bEZ6fKZTPr6p7VtWvVdW+mfbQXp9pFTdJ3pLklYstClV1WFWdcCun9eFM4fyKTO9jWawOm7PNiJt+zkjyd5m2PuyX+Z87mX/Q/EGStyW5ItMzv+V9jO+eP36jqi7cXScLK/RPtfbf4X3f/GB5ZpLTxxgXzQ8spyY5Y37ATKa5cnWmVZF3Jvm9pb24L07yP5keaK/NtFJzz2TaJpHkGUlem+SbST6R6YEzSV6Xaa/f1VX1+nn/78MzvWT6lfl7np7vP+k8JdPWpCszzd933MptPSnTStS5Sb4138a3J3nu0mVemmkF+upMkXHW4sC8r/mVST4173d+4HzoM5leet02H3/cGOMbd2A8WGOM8apMc/DVSa7NdJ+7PMnx897ZhQ8keUKm+9tTk5w47+fdN8lfZrqPXpkp+E6dr/O6JB9M8m9VtT3J+Zne1HVL57MjU7w+LGvv0+Zs
M7V26webWVV9PMmZY4y3bfS5wGZQVQ/NNGfuttHnstGq6ulJnjW/VAzs4czZH44VXgAAWhO8AAC0ZksDAACtWeEFAKA1wQsAQGu75H9aqyr7JGDJGKNu/VIbw3yFtcxX2Dxu63y1wgsAQGuCFwCA1gQvAACtCV4AAFoTvAAAtCZ4AQBoTfACANCa4AUAoDXBCwBAa4IXAIDWBC8AAK0JXgAAWhO8AAC0JngBAGhN8AIA0JrgBQCgNcELAEBrghcAgNYELwAArQleAABaE7wAALQmeAEAaE3wAgDQmuAFAKA1wQsAQGuCFwCA1gQvAACtCV4AAFoTvAAAtCZ4AQBoTfACANCa4AUAoDXBCwBAa4IXAIDWBC8AAK0JXgAAWhO8AAC0JngBAGhN8AIA0JrgBQCgNcELAEBrghcAgNYELwAArQleAABaE7wAALQmeAEAaE3wAgDQmuAFAKA1wQsAQGuCFwCA1gQvAACtCV4AAFoTvAAAtCZ4AQBoTfACANCa4AUAoDXBCwBAa4IXAIDWBC8AAK0JXgAAWhO8AAC0JngBAGhN8AIA0JrgBQCgNcELAEBrghcAgNYELwAArQleAABaE7wAALQmeAEAaE3wAgDQmuAFAKA1wQsAQGuCFwCA1gQvAACtCV4AAFoTvAAAtCZ4AQBobe+NPgF2nTHGSserqpWOBwAkW7ZsWel4N95440rH68AKLwAArQleAABaE7wAALQmeAEAaE3wAgDQmuAFAKA1wQsAQGuCFwCA1gQvAACtCV4AAFoTvAAAtCZ4AQBoTfACANCa4AUAoDXBCwBAa4IXAIDWBC8AAK0JXgAAWtt7o0+AXaeqVjreGGOl4636/GB3W+WcMF9h17rqqqtWNtYhhxyysrGSZMeOHSsdb//991/peDfccMNKx9sIVngBAGhN8AIA0JrgBQCgNcELAEBrghcAgNYELwAArQleAABaE7wAALQmeAEAaE3wAgDQmuAFAKA1wQsAQGuCFwCA1gQvAACtCV4AAFoTvAAAtCZ4AQBoTfACANCa4AUAoLUaY6x+0KrVD/ojYlf8feypqmqjT2G3GWPssTfWfN0z7Olz33zdM5ive4bt27evdLyDDjpopePttddq1zP35J9Pt3W+WuEFAKA1wQsAQGuCFwCA1gQvAACtCV4AAFoTvAAAtCZ4AQBoTfACANCa4AUAoDXBCwBAa4IXAIDWBC8AAK0JXgAAWhO8AAC0JngBAGhN8AIA0JrgBQCgNcELAEBrghcAgNb23ugTYK2q2uhTABrwswTW2rJly8rGOvDAA1c2VrLac0uSMcZKx+vACi8AAK0JXgAAWhO8AAC0JngBAGhN8AIA0JrgBQCgNcELAEBrghcAgNYELwAArQleAABaE7wAALQmeAEAaE3wAgDQmuAFAKA1wQsAQGuCFwCA1gQvAACtCV4AAFoTvAAAtFZjjNUPWrX6QWETG2PURp/DzpivsJb5CpvHbZ2vVngBAGhN8AIA0JrgBQCgNcELAEBrghcAgNYELwAArQleAABaE7wAALQmeAEAaE3wAgDQmuAFAKA1wQsAQGuCFwCA1gQvAACtCV4AAFoTvAAAtCZ4AQBoTfACANCa4AUAoDXBCwBAa4IXAIDWBC8AAK0JXgAAWhO8AAC0JngBAGhN8AIA0JrgBQCgNcELAEBrghcAgNYELwAArQleAABaE7wAALQmeAEAaE3wAgDQmuAFAKA1wQsAQGuCFwCA1gQvAACt1Rhjo88BAAB2GSu8AAC0JngBAGhN8AIA0JrgBQCgNcELAEBrghcAgNYELwAArQleAABaE7wAALQmeAEAaE3wAgDQmuAFAKA1wQsAQGuCFwCA1gQvAACtCV4AAFoTvAAAtCZ4AQBoTfACANCa4AUAoDXBCwBAa4IXAIDWBC8AAK39P0H0CdiwKezhAAAAAElFTkSuQmCC\n", 225 | "text/plain": [ 226 | "
" 227 | ] 228 | }, 229 | "metadata": {}, 230 | "output_type": "display_data" 231 | } 232 | ], 233 | "source": [ 234 | "X_test = make_glider(10)\n", 235 | "\n", 236 | "gol = make_game_of_life()\n", 237 | "y_test_tf = gol(conv_cast(X_test[np.newaxis,...]))\n", 238 | "y_test = np.squeeze(y_test_tf.eval())\n", 239 | "\n", 240 | "y_pred_test = sess.run(ca_model, feed_dict={X: X_test})\n", 241 | "\n", 242 | "plt.figure(figsize=(12,4))\n", 243 | "plt.subplot(1,3,1)\n", 244 | "plt.imshow(X_test)\n", 245 | "plt.axis('off')\n", 246 | "plt.title(\"Input\")\n", 247 | "\n", 248 | "\n", 249 | "plt.subplot(1,3,2)\n", 250 | "plt.imshow(y_test)\n", 251 | "plt.axis('off')\n", 252 | "plt.title(\"Expected Output\")\n", 253 | "\n", 254 | "plt.subplot(1,3,3)\n", 255 | "plt.imshow(y_pred_test)\n", 256 | "plt.axis('off')\n", 257 | "plt.title(\"Observed Output\")\n" 258 | ] 259 | }, 260 | { 261 | "cell_type": "code", 262 | "execution_count": 8, 263 | "metadata": {}, 264 | "outputs": [ 265 | { 266 | "name": "stdout", 267 | "output_type": "stream", 268 | "text": [ 269 | "INFO:tensorflow:convca/resources/test_model.ckpt is not in all_model_checkpoint_paths. Manually adding it.\n", 270 | "convca/resources/test_model.ckpt\n" 271 | ] 272 | } 273 | ], 274 | "source": [ 275 | "## Save the model\n", 276 | "saver = tf.train.Saver(max_to_keep=None)\n", 277 | "save_str = 'convca/resources/test_model.ckpt'\n", 278 | "saver.save(sess, save_str)\n", 279 | "print(save_str)" 280 | ] 281 | }, 282 | { 283 | "cell_type": "markdown", 284 | "metadata": {}, 285 | "source": [ 286 | "# Loading a saved model" 287 | ] 288 | }, 289 | { 290 | "cell_type": "code", 291 | "execution_count": 4, 292 | "metadata": {}, 293 | "outputs": [], 294 | "source": [ 295 | "from train_ca import *\n", 296 | "\n", 297 | "try:\n", 298 | " tf.reset_default_graph()\n", 299 | " sess.close()\n", 300 | "except:\n", 301 | " pass\n", 302 | "sess = tf.InteractiveSession()" 303 | ] 304 | }, 305 | { 306 | "cell_type": "code", 307 | "execution_count": 5, 308 | "metadata": {}, 309 | "outputs": [ 310 | { 311 | "name": "stdout", 312 | "output_type": "stream", 313 | "text": [ 314 | "12.0 layers detected\n", 315 | "INFO:tensorflow:Restoring parameters from ../resources/test_model.ckpt\n" 316 | ] 317 | } 318 | ], 319 | "source": [ 320 | "wspan, hspan = 10, 10\n", 321 | "model_str = '../resources/test_model.ckpt'\n", 322 | "\n", 323 | "\n", 324 | "## Define model\n", 325 | "test_model = ConvNet(sess, ckpt_path=model_str, input_dim=(wspan,hspan))\n", 326 | "X, y = test_model.X, test_model.y\n", 327 | "ca_model = test_model.ca_cnn(X)" 328 | ] 329 | }, 330 | { 331 | "cell_type": "code", 332 | "execution_count": 6, 333 | "metadata": {}, 334 | "outputs": [ 335 | { 336 | "data": { 337 | "text/plain": [ 338 | "Text(0.5,1,'Observed Output')" 339 | ] 340 | }, 341 | "execution_count": 6, 342 | "metadata": {}, 343 | "output_type": "execute_result" 344 | }, 345 | { 346 | "data": { 347 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAArwAAAD0CAYAAAB5LvVrAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4yLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvhp/UCwAADkhJREFUeJzt3Xus5GV9x/HPF2G5tqUUrIIIaS1eSCpJtdqLlxTrpdpgwHqJWi9ob6KxNWqD1aKpbTFG66Vqqta2IA1e8NJqaVGrtUYkkUjqH1CrBVkU6wrIKrCgPP3j9xudc+ICwuyePV9fr2Rzzp7fzHN+szvPmfc888xujTECAABd7bXRJwAAALuS4AUAoDXBCwBAa4IXAIDWBC8AAK0JXgAAWhO8AHuYqnp6Vf3nRp8HbISqOq2qztzo8/hhmLN7PsG7CVTVpVX1sF38PTbdDxhYmOfI9VX1raVfb9zA8/l4VT1rF46/b1X9RVV9eb7dX6iqF1ZV3cbrH11Vo6r2XtH5rHQ8epvj8L+q6rqqurKq3lxVB2/0ee1K5uzG+5G5oUB7vznG+MhGn8Ru8u4kd0nyG0kuTnK/JGckOTLJ8zbwvOAWVdULkrwoydOSfDTJEUnelOS8qvqVMcaNu+k89h5jfGd3fK+ZObvBrPBuIouXTKrq1VV1dVX9b1U9aun4x+dnkBdU1Ter6gNVdch87KFVtXXdeJdW1cOq6pFJTk3yhHll7KLde8tg15lXj96z9PvTq+qjNXloVW2tqlOrats8J568dNl95/n25ar6WlW9par2Xzp+QlV9rqquraovVtUjq+qVSR6U5I3LK81Vda+qOq+qrqqqS6rq8Uvj/FRVfXAe54IkP3sLt+f4JA9PctIY4/NjjO+MMc5P8pQkz6mqe8yXW/PK0LpXcf5j/njNfI6/NP98+VRVvWH++XHx/L1ye8a7DX81/Iipqh9P8vIkzx1jnDvGuGmMcWmSxyc5KtN9eGG/qjq7qrZX1YVVdd+lcV5cVVfMxy5Z3E+raq+q+uN5Ln6jqt619Bi4WNE8uaq+nORjVXVuVZ2y7hwvqqoT58/N2UYE7+bzgCSXJDk0yauSvL1qzUsiv53kmUkOT/KdJK+/tQHHGOcm+fMkZ48xDhpj3PfWrgObyAuS/Pz84PCgJCcnedr4/v+rfpdM8+mITKtOf1NV95yPnZ7kmCTHJbnHfJmXJUlV/WKSf0jywiQHJ3lwkkvHGC9J8skkp8zz6ZSqOjDJeUnOSnLnJE9K8qaqOnb+Pn+d5IYkd800f595C7fn15N8Zoxx+fIXxxifSbI1yfE/8FprPXj+ePB8jp+ef/+AJF+a/zz+NMk5i2C4nePBsl9Osl+Sc5a/OMb4VpJ/yXTfXjgh06roIZnmzfurap95bp6S5P5jjB9L8ogkl87XeV6SxyZ5SKbHwKszza1lD0ly7/l6Z2Wai0mSqrpPpvD+kDnbj+DdfC4bY7x1jPHdJH+fabL99NLxM+ZnkN9O8tIkj6+qO23EicJu9v6qumbp17OTZIxxXaaVlNckOTPT6tLWddd96RhjxxjjE0k+lGneVJJnJ/nDMcZVY4ztmZ4YPnG+zslJ/naMcd4Y4+YxxhVjjIt3cm6PyRTD75hXdy5M8t4kj5vn50lJXjbG+PYY4/OZ5vbOHJrkqzs59tX5+O31f0n+al55OzvTk+tH34HxYNmhSbbtZCvB+vvuZ8cY7xlj3JRp7u6X5IFJvptk3yT3qap9xhiXjjG+OF/nd5O8ZIyxdYyxI8lpmebY8vbN0+Z5dn2S9yU5rqqOmo89Ock583XN2Wbs4d18rlx8Msa4bl7cPWjp+PIzyMuS7JM7Nplgs3jszvbwjjEuqKovZVqpede6w1fPTxAXLsu0OnRYkgOSfHbpRZRKsngCeWSSD9/GczsqyQOq6pqlr+2daQ/fYfPn6+fuzmxL8nM7OXbX+fjtdcXSyvfiPA6/A+PBsm1JDq0fvH92/X33e/NhjHFzTVvyDh9jfLKqnp8pZo+tqn9N8kdjjK9kmmfvq6qbl8b5btYuCi2Pu72qPpTpSezp88ffmQ+bs81Y4e3nyKXP757kpkyT6duZHryTJPMz1MOWLrs8YaCVqnpOplWhr2R6w8yyn5xfvly4+3y5bUmuT3LsGOPg+ddPjDEWTzAvz8737a2fT5cn+cTSOIuXEX8/ydczbT9aP3d35iOZHoiXL7/YYnFkko/NX1oz5zNt3djZ+S0csW6L1OLP4vaOB8s+nWRHkhOXvzjPv0dlehPbwpFLx/dKcrfM98UxxlljjF/NFKUjU6wm0zx71Lp5tt8Y44qlcdffV/8xyZPmPaz7J/n3pbHM2UYEbz9Pqar7VNUBSV6R5D3z9of/zvQmgEdX1T5J/iRTACx8LcnR8w8WaKOqjknyZ5m2NTw1yYuq6rh1F3t5VW2Z9/g+Jsm7xxg3J3lrktdW1Z3nsY6oqkfM13l7kmdU1fHzm2WOqKp7zce+luRnlsb/5yTHVNVT532I+1TV/avq3vP8PCfJaVV1wLyP8Gk7uz3zKvZHk7y3qo6tqjtV1QOTvDPJm8cYX5gv+rkkT5y/1/2SPG5pmK8nuXndOSbTCvjz5uv8Vqa9jotV7NszHnzPGOObmd609oaa3uC5T1UdnWmv7tZMq6cLv1BVJ87bEZ6fKZTPr6p7VtWvVdW+mfbQXp9pFTdJ3pLklYstClV1WFWdcCun9eFM4fyKTO9jWawOm7PNiJt+zkjyd5m2PuyX+Z87mX/Q/EGStyW5ItMzv+V9jO+eP36jqi7cXScLK/RPtfbf4X3f/GB5ZpLTxxgXzQ8spyY5Y37ATKa5cnWmVZF3Jvm9pb24L07yP5keaK/NtFJzz2TaJpHkGUlem+SbST6R6YEzSV6Xaa/f1VX1+nn/78MzvWT6lfl7np7vP+k8JdPWpCszzd933MptPSnTStS5Sb4138a3J3nu0mVemmkF+upMkXHW4sC8r/mVST4173d+4HzoM5leet02H3/cGOMbd2A8WGOM8apMc/DVSa7NdJ+7PMnx897ZhQ8keUKm+9tTk5w47+fdN8lfZrqPXpkp+E6dr/O6JB9M8m9VtT3J+Zne1HVL57MjU7w+LGvv0+ZsM7V26webWVV9PMmZY4y3bfS5wGZQVQ/NNGfuttHnstGq6ulJnjW/VAzs4czZH44VXgAAWhO8AAC0ZksDAACtWeEFAKA1wQsAQGu75H9aqyr7JGDJGKNu/VIbw3yFtcxX2Dxu63y1wgsAQGuCFwCA1gQvAACtCV4AAFoTvAAAtCZ4AQBoTfACANCa4AUAoDXBCwBAa4IXAIDWBC8AAK0JXgAAWhO8AAC0JngBAGhN8AIA0JrgBQCgNcELAEBrghcAgNYELwAArQleAABaE7wAALQmeAEAaE3wAgDQmuAFAKA1wQsAQGuCFwCA1gQvAACtCV4AAFoTvAAAtCZ4AQBoTfACANCa4AUAoDXBCwBAa4IXAIDWBC8AAK0JXgAAWhO8AAC0JngBAGhN8AIA0JrgBQCgNc
ELAEBrghcAgNYELwAArQleAABaE7wAALQmeAEAaE3wAgDQmuAFAKA1wQsAQGuCFwCA1gQvAACtCV4AAFoTvAAAtCZ4AQBoTfACANCa4AUAoDXBCwBAa4IXAIDWBC8AAK0JXgAAWhO8AAC0JngBAGhN8AIA0JrgBQCgNcELAEBrghcAgNYELwAArQleAABaE7wAALQmeAEAaE3wAgDQmuAFAKA1wQsAQGuCFwCA1gQvAACtCV4AAFoTvAAAtCZ4AQBobe+NPgF2nTHGSserqpWOBwAkW7ZsWel4N95440rH68AKLwAArQleAABaE7wAALQmeAEAaE3wAgDQmuAFAKA1wQsAQGuCFwCA1gQvAACtCV4AAFoTvAAAtCZ4AQBoTfACANCa4AUAoDXBCwBAa4IXAIDWBC8AAK0JXgAAWtt7o0+AXaeqVjreGGOl4636/GB3W+WcMF9h17rqqqtWNtYhhxyysrGSZMeOHSsdb//991/peDfccMNKx9sIVngBAGhN8AIA0JrgBQCgNcELAEBrghcAgNYELwAArQleAABaE7wAALQmeAEAaE3wAgDQmuAFAKA1wQsAQGuCFwCA1gQvAACtCV4AAFoTvAAAtCZ4AQBoTfACANCa4AUAoLUaY6x+0KrVD/ojYlf8feypqmqjT2G3GWPssTfWfN0z7Olz33zdM5ive4bt27evdLyDDjpopePttddq1zP35J9Pt3W+WuEFAKA1wQsAQGuCFwCA1gQvAACtCV4AAFoTvAAAtCZ4AQBoTfACANCa4AUAoDXBCwBAa4IXAIDWBC8AAK0JXgAAWhO8AAC0JngBAGhN8AIA0JrgBQCgNcELAEBrghcAgNb23ugTYK2q2uhTABrwswTW2rJly8rGOvDAA1c2VrLac0uSMcZKx+vACi8AAK0JXgAAWhO8AAC0JngBAGhN8AIA0JrgBQCgNcELAEBrghcAgNYELwAArQleAABaE7wAALQmeAEAaE3wAgDQmuAFAKA1wQsAQGuCFwCA1gQvAACtCV4AAFoTvAAAtFZjjNUPWrX6QWETG2PURp/DzpivsJb5CpvHbZ2vVngBAGhN8AIA0JrgBQCgNcELAEBrghcAgNYELwAArQleAABaE7wAALQmeAEAaE3wAgDQmuAFAKA1wQsAQGuCFwCA1gQvAACtCV4AAFoTvAAAtCZ4AQBoTfACANCa4AUAoDXBCwBAa4IXAIDWBC8AAK0JXgAAWhO8AAC0JngBAGhN8AIA0JrgBQCgNcELAEBrghcAgNYELwAArQleAABaE7wAALQmeAEAaE3wAgDQmuAFAKA1wQsAQGuCFwCA1gQvAACt1Rhjo88BAAB2GSu8AAC0JngBAGhN8AIA0JrgBQCgNcELAEBrghcAgNYELwAArQleAABaE7wAALQmeAEAaE3wAgDQmuAFAKA1wQsAQGuCFwCA1gQvAACtCV4AAFoTvAAAtCZ4AQBoTfACANCa4AUAoDXBCwBAa4IXAIDWBC8AAK39P0H0CdiwKezhAAAAAElFTkSuQmCC\n", 348 | "text/plain": [ 349 | "
" 350 | ] 351 | }, 352 | "metadata": {}, 353 | "output_type": "display_data" 354 | } 355 | ], 356 | "source": [ 357 | "X_test = make_glider(10)\n", 358 | "\n", 359 | "gol = make_game_of_life()\n", 360 | "y_test_tf = gol(conv_cast(X_test[np.newaxis,...]))\n", 361 | "y_test = np.squeeze(y_test_tf.eval())\n", 362 | "\n", 363 | "y_pred_test = sess.run(ca_model, feed_dict={X: X_test})\n", 364 | "\n", 365 | "plt.figure(figsize=(12,4))\n", 366 | "plt.subplot(1,3,1)\n", 367 | "plt.imshow(X_test)\n", 368 | "plt.axis('off')\n", 369 | "plt.title(\"Input\")\n", 370 | "\n", 371 | "\n", 372 | "plt.subplot(1,3,2)\n", 373 | "plt.imshow(y_test)\n", 374 | "plt.axis('off')\n", 375 | "plt.title(\"Expected Output\")\n", 376 | "\n", 377 | "plt.subplot(1,3,3)\n", 378 | "plt.imshow(y_pred_test)\n", 379 | "plt.axis('off')\n", 380 | "plt.title(\"Observed Output\")\n" 381 | ] 382 | }, 383 | { 384 | "cell_type": "code", 385 | "execution_count": null, 386 | "metadata": {}, 387 | "outputs": [], 388 | "source": [] 389 | }, 390 | { 391 | "cell_type": "code", 392 | "execution_count": null, 393 | "metadata": {}, 394 | "outputs": [], 395 | "source": [] 396 | }, 397 | { 398 | "cell_type": "code", 399 | "execution_count": null, 400 | "metadata": {}, 401 | "outputs": [], 402 | "source": [] 403 | }, 404 | { 405 | "cell_type": "code", 406 | "execution_count": null, 407 | "metadata": {}, 408 | "outputs": [], 409 | "source": [] 410 | }, 411 | { 412 | "cell_type": "code", 413 | "execution_count": null, 414 | "metadata": {}, 415 | "outputs": [], 416 | "source": [] 417 | }, 418 | { 419 | "cell_type": "code", 420 | "execution_count": null, 421 | "metadata": {}, 422 | "outputs": [], 423 | "source": [] 424 | }, 425 | { 426 | "cell_type": "code", 427 | "execution_count": null, 428 | "metadata": {}, 429 | "outputs": [], 430 | "source": [] 431 | } 432 | ], 433 | "metadata": { 434 | "kernelspec": { 435 | "display_name": "Python 3", 436 | "language": "python", 437 | "name": "python3" 438 | }, 439 | "language_info": { 440 | "codemirror_mode": { 441 | "name": "ipython", 442 | "version": 3 443 | }, 444 | "file_extension": ".py", 445 | "mimetype": "text/x-python", 446 | "name": "python", 447 | "nbconvert_exporter": "python", 448 | "pygments_lexer": "ipython3", 449 | "version": "3.6.5" 450 | } 451 | }, 452 | "nbformat": 4, 453 | "nbformat_minor": 2 454 | } 455 | -------------------------------------------------------------------------------- /demos/demos.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "markdown", 5 | "metadata": {}, 6 | "source": [ 7 | "## Import requirements" 8 | ] 9 | }, 10 | { 11 | "cell_type": "code", 12 | "execution_count": 1, 13 | "metadata": {}, 14 | "outputs": [], 15 | "source": [ 16 | "import numpy as np\n", 17 | "import matplotlib.pyplot as plt\n", 18 | "\n", 19 | "import tensorflow as tf\n", 20 | "\n", 21 | "%matplotlib inline\n", 22 | "plt.rcParams['figure.figsize'] = (10.0, 8.0) # set default size of plots\n", 23 | "plt.rcParams['image.interpolation'] = 'nearest'\n", 24 | "plt.rcParams['image.cmap'] = 'gray'\n", 25 | "\n", 26 | "import os\n", 27 | "import glob\n", 28 | "import pickle\n", 29 | "\n", 30 | "import sys\n", 31 | "sys.path.insert(0, '..')\n", 32 | "from ca_funcs import make_glider, make_game_of_life\n", 33 | "from utils import *\n", 34 | "from train_ca import *\n", 35 | "\n", 36 | "\n", 37 | "%load_ext autoreload\n", 38 | "%autoreload 2" 39 | ] 40 | }, 41 | { 42 | "cell_type": "markdown", 43 | "metadata": {}, 44 | "source": 
[ 45 | "# Build and train a model" 46 | ] 47 | }, 48 | { 49 | "cell_type": "code", 50 | "execution_count": 2, 51 | "metadata": {}, 52 | "outputs": [], 53 | "source": [ 54 | "#### Make training data\n", 55 | "tf.random.set_seed(0)\n", 56 | "\n", 57 | "## Make training data\n", 58 | "train_size, wspan, hspan = (100, 10, 10)\n", 59 | "X_train = tf.convert_to_tensor(np.random.choice([0,1], (train_size, wspan, hspan), p=[.5,.5]), tf.float32)\n", 60 | "gol = make_game_of_life()\n", 61 | "Y_train = gol(tf.convert_to_tensor(X_train, tf.float32))\n", 62 | "\n", 63 | "X_train = X_train[..., tf.newaxis]\n", 64 | "Y_train = Y_train[..., tf.newaxis]\n" 65 | ] 66 | }, 67 | { 68 | "cell_type": "code", 69 | "execution_count": 29, 70 | "metadata": {}, 71 | "outputs": [ 72 | { 73 | "name": "stdout", 74 | "output_type": "stream", 75 | "text": [ 76 | "Model: \"sequential_12\"\n", 77 | "_________________________________________________________________\n", 78 | "Layer (type) Output Shape Param # \n", 79 | "=================================================================\n", 80 | "wraparound2d_12 (Wraparound2 (None, 12, 12, 1) 0 \n", 81 | "_________________________________________________________________\n", 82 | "conv2d_4 (Conv2D) (None, 10, 10, 10) 100 \n", 83 | "_________________________________________________________________\n", 84 | "reshape_12 (Reshape) (None, None, 10) 0 \n", 85 | "_________________________________________________________________\n", 86 | "dense_42 (Dense) (None, None, 10) 110 \n", 87 | "_________________________________________________________________\n", 88 | "dense_43 (Dense) (None, None, 10) 110 \n", 89 | "_________________________________________________________________\n", 90 | "dense_44 (Dense) (None, None, 2) 22 \n", 91 | "=================================================================\n", 92 | "Total params: 342\n", 93 | "Trainable params: 342\n", 94 | "Non-trainable params: 0\n", 95 | "_________________________________________________________________\n" 96 | ] 97 | } 98 | ], 99 | "source": [ 100 | "#### Define and build model\n", 101 | "tf.random.set_seed(0)\n", 102 | "layer_dims = [10, 10, 10]\n", 103 | "num_classes = 2\n", 104 | "\n", 105 | "try:\n", 106 | " del model\n", 107 | "except:\n", 108 | " pass\n", 109 | "\n", 110 | "loss = lambda x, y : tf.keras.losses.categorical_crossentropy(tf.reshape(x, shape=(-1, num_classes)), \n", 111 | " tf.reshape(y, shape=(-1, num_classes)), \n", 112 | " from_logits=True)\n", 113 | "model = initialize_model((wspan, hspan), layer_dims, num_classes=num_classes)\n", 114 | "# model = initialize_model((wspan, hspan), [10, 10, 10, 10], num_classes=num_classes, totalistic=True, bc=\"periodic\")\n", 115 | "model.compile(optimizer=tf.keras.optimizers.Adam(lr=1e-2), loss=loss)\n", 116 | "\n", 117 | "model.summary()" 118 | ] 119 | }, 120 | { 121 | "cell_type": "code", 122 | "execution_count": 26, 123 | "metadata": {}, 124 | "outputs": [ 125 | { 126 | "data": { 127 | "text/plain": [ 128 | "[]" 129 | ] 130 | }, 131 | "execution_count": 26, 132 | "metadata": {}, 133 | "output_type": "execute_result" 134 | }, 135 | { 136 | "data": { 137 | "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAlMAAAHSCAYAAADIRU4IAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAAgAElEQVR4nO3de3Cd9Z3f8c9XF9uSHWGwZa11joShcQBLkSysEFuQbXYJMXQT2NxmITVNMtlhdrpstt3ttCRlkpROhybN7DaZMpmQDW2WZMOSdLd1Nk6BLpuBLE5i+QK2sQ22uVgWlmU7yMa20O3bP3TkCiFZRzqX33N5v2bOoPOcx+d84cyBN8/znJ/M3QUAAID5qQg9AAAAQJwRUwAAAAUgpgAAAApATAEAABSAmAIAACgAMQUAAFCAqlAvvHz5cl+1alWolwcAAMjb9u3bT7h7/XSPBYupVatWqbu7O9TLAwAA5M3MXpnpMU7zAQAAFICYAgAAKAAxBQAAUABiCgAAoADEFAAAQAGIKQAAgAIQUwAAAAUgpgAAAApATAEAABSAmAIAACgAMQUAAFAAYgoAAKAAxBQAAEABiCkAAIACEFMAAAAFIKYAAAAKkNiYcnedOHFCIyMjoUcBAAAJltiY+uEPf6j6+nrt378/9CgAACDBEhtTmUxGktTT0xN4EgAAkGSJj6mjR48GngQAACRZYmOqsbFREjEFAABKK6+YMrObzeyAmR00s3umefzTZtZvZrtyt98v/qhzs2DBAq1YsYLTfAAAoKSqZtvBzColPSDpJkk9kraZ2WZ3f37Krn/t7neXYMZ5y2QyHJkCAAAllc+RqeskHXT3w+4+JOkRSbeVdqziIKYAAECp5RNTGUlHJt3vyW2b6mNm9pyZ/cjMmooyXYGy2Syn+QAAQEnlE1M2zTafcv/Hkla5e5uk/yvpu9M+kdldZtZtZt39/f1zm3QeMpmMTp48qcHBwZK/FgAASKd8YqpH0uQjTVlJvZN3cPeT7v5m7u63Ja2b7onc/UF373T3zvr6+vnMOycTyyP09vbOsicAAMD85BNT2yStNrMrzGyBpNslbZ68g5mtnHT3Vkn7ijfi/GWzWUks3AkAAEpn1m/zufuImd0t6TFJlZIecve9ZnafpG533yzpc2Z2q6QRSackfbqEM+eNhTsBAECpzRpTkuTuWyRtmbLti5N+/rykzxd3tMIRUwAAoNQSuwK6JNXV1WnJkiWc5gMAACWT6JgyM9aaAgAAJZXomJJYuBMAAJRW4mOKhTsBAEApJT6mMpmMXnvtNY2NjYUeBQAAJFAqYmpkZETHjx8PPQoAAEigxMcUC3cCAIBSSnxMsdYUAAAoJWIKAACgAImPqRUrVqiqqorTfAAAoCQSH1OVlZVauXIlR6YAAEBJJD6mJBbuBAAApZOKmGLhTgAAUCqpiCmOTAEAgFJJTUy98cYbOn36dOhRAABAwqQipli4EwAAlEoqYoq1pgAAQKkQUwAAAAVIVUxxmg8AABRbKmJq0aJFWrZsGUemAABA0aUipiSWRwAAAKWRmphi4U4AAFAKqYkpjkwBAIBSSFVMHT9+XENDQ6FHAQAACZKqmJKk3t7ewJMAAIAkSU1MTayCzqk+AABQTKmJKRbuBAAApZC6mOIbfQAAoJhSE1OXXnqpampqODIFAACKKjUxZWYsjwAAAIouNTEljZ/q4zQfAAAoplTFVDab5cgUAAAoqlTFVCaTUW9vr8bGxkKPAgAAEiJ1MTU0NKQTJ06EHgUAACREqmKKhTsBAECxpTKmXnnllcCTAACApEhVTLW2tqqqqkq/+MUvQo8CAAASIlUxVVtbq3Xr1unpp58OPQoAAEiIVMWUJL3vfe/Ttm3bdP78+dCjAACABEhlTA0PD2vbtm2hRwEAAAmQupi6/vrrJYlTfQAAoChSF1PLli1TS0sLMQUAAIoidTEljZ/qe+aZZzQ6Ohp6FAAAEHOpjakzZ87o2WefDT0KAACIuVTG1A033CBJ+vnPfx54EgAAEHepjKnm5mY1Nzdz3RQAAChYKmNKGj/V9/TTT8vdQ48CAABiLNUx1dfXp4MHD4YeBQAAxFiqY0pivSkAAFCY1MbUNddco2XLlhFTAACgIKmNKTPTDTfcwDf6AABAQVIbU9L4EgkHDx7UsWPHQo8CAABiKtUxxXVTAACgUKmOqWuvvVa1tbXEFAAAmLdUx1R1dbXWr19PTAEAgHlLdUxJ46f6nn32WQ0MDIQeBQAAxBAx9b73yd21devW0KMAAIAYSn1MrV+/XtXV1XryySdDjwIAAGIo9TG1ePFiXX/99XrsscdCjwIAAGIo9TElSRs3btRzzz2n1157LfQoAAAgZogpjceUJD3xxBOBJwEAAHFDTElqb2/XihUrONUHAADmjJiSVFFRoZtuukmPP/64xsbGQo8DAABihJjK2bhxo06cOKGdO3eGHgUAAMQIMZXzwQ9+UJI41QcAAOaEmMppaGjQ2rVriSkAADAnxNQkGzdu1DPPPKPTp0+HHgUAAMQEMTXJxo0bNTIyop/97GehRwEAADFBTE3S1dWl2tpaTvUBAIC85RVTZnazmR0ws4Nmds9F9vu4mbmZdRZvxPJZuHChfuu3fouYAgAAeZs1psysUtIDkm6RtEbSHWa2Zpr93iHpc5J+Wewhy2njxo06dOiQDh06FHoUAAAQA/kcmbpO0kF3P+zuQ5IekXTbNPv9R0lflTRYxPnKbuJXy3B0CgAA5COfmMpIOjLpfk9u2wVm1iGpyd3/roizBbF69WqtWrWKmAIAAHnJJ6Zsmm1+4UGzCkl/LulPZ30is7vMrNvMuvv7+/OfsozMTBs3btSTTz6poaGh0OMAAICIyyemeiQ1TbqfldQ76f47JLVK+pmZvSxpvaTN012E7u4Pununu3fW19fPf+oS++AHP6g33nhDW7duDT0KAACIuHxiapuk1WZ2hZktkHS7pM0TD7r7gLsvd/dV7r5K0i8k3eru3SWZuAxuvPFGVVZW6oknngg9CgAAiLhZY8rdRyTdLekxSfskPerue83sPjO7tdQDhnDJJZeotbVV3d2x7UEAAFAmVfns5O5bJG2Zsu2LM+z7/sLHCq+jo0M/+clP5O4ym+6yMQAAAFZAn1FHR4f6+/vV29s7+84AACC1iKkZdHR0SJJ27twZeBIAABBlxNQM2tvbJRFTAADg4oipGdTV1Wn16tXEFAAAuChi6iI6OjqIKQAAcFHE1EV0dHTo5Zdf1qlTp0KPAgAAIoqYuoiJi9B37doVeBIAABBVxNRF8I0+AAAwG2LqIlasWKFMJkNMAQCAGRFTs+AidAAAcDHE1Cw6Ojq0f/9+nTt3LvQoAAAggoipWXR0dGhsbEzPPfdc6FEAAEAEEVOz4CJ0AABwMcTULC6//HJdeumlxBQAAJgWMTULM+MidAAAMCNiKg8dHR3avXu3hoeHQ48CAAAihpjKQ0dHh958803t27cv9CgAACBiiKk8cBE6AACYCTGVh6uuuko1NTXEFAAAeB
tiKg+VlZVqb28npgAAwNsQU3nq6OjQrl27NDY2FnoUAAAQIcRUnjo6OnT69Gm99NJLoUcBAAARQkzlaeIi9B07dgSeBAAARAkxlafW1lZVVVVx3RQAAHgLYipPixYt0po1a4gpAADwFsTUHLS1tWn37t2hxwAAABFCTM1Be3u7jh49qpMnT4YeBQAARAQxNQdtbW2SpOeeey7wJAAAICqIqTkgpgAAwFTE1Bw0NDSovr6emAIAABcQU3NgZmprayOmAADABcTUHLW3t2vPnj0aHR0NPQoAAIgAYmqO2traNDg4qIMHD4YeBQAARAAxNUcTF6E/++yzgScBAABRQEzN0TXXXKPKykqumwIAAJKIqTlbtGiRrr76amIKAABIIqbmhW/0AQCACcTUPLS1temVV17RwMBA6FEAAEBgxNQ8sBI6AACYQEzNAzEFAAAmEFPzkMlkdNlllxFTAACAmJoPfq0MAACYQEzNU1tbm3bv3q2xsbHQowAAgICIqXlqa2vT2bNn9dJLL4UeBQAABERMzRO/VgYAAEjE1Ly1tLSooqKC66YAAEg5YmqeamtrtXr1amIKAICUI6YKwDf6AAAAMVWAtrY2HTp0SG+88UboUQAAQCDEVAEmLkLfs2dP4EkAAEAoxFQB2tvbJfGNPgAA0oyYKkBzc7Pq6uq4bgoAgBQjpgow8WtlODIFAEB6EVMF6uzs1Pbt2zU8PBx6FAAAEAAxVaCuri4NDg5q165doUcBAAABEFMF2rBhgyRp69atgScBAAAhEFMFymazam5u1jPPPBN6FAAAEAAxVQQbNmwgpgAASCliqgi6urp05MgR9fT0hB4FAACUGTFVBF1dXZK4bgoAgDQipoqgvb1dNTU1nOoDACCFiKkiqK6u1nve8x5iCgCAFCKmiqSrq0s7duzQ+fPnQ48CAADKiJgqkq6uLo2MjGj79u2hRwEAAGVETBXJxOKdnOoDACBdiKkiWb58uVavXk1MAQCQMsRUEXV1dWnr1q1y99CjAACAMiGmiqirq0vHjx/X4cOHQ48CAADKhJgqoonFOznVBwBAeuQVU2Z2s5kdMLODZnbPNI//gZntNrNdZvZzM1tT/FGjb82aNaqrqyOmAABIkVljyswqJT0g6RZJayTdMU0s/ZW7v9vd10r6qqQ/K/qkMVBRUaH169fza2UAAEiRfI5MXSfpoLsfdvchSY9Ium3yDu5+etLdxZJSewV2V1eXdu/erdOnT8++MwAAiL18Yioj6cik+z25bW9hZn9oZoc0fmTqc9M9kZndZWbdZtbd398/n3kjr6urS2NjY/rVr34VehQAAFAG+cSUTbPtbUee3P0Bd/8nkv6dpHuneyJ3f9DdO929s76+fm6TxsR73/temRnXTQEAkBL5xFSPpKZJ97OSei+y/yOSfreQoeKsrq5Ora2tXDcFAEBK5BNT2yStNrMrzGyBpNslbZ68g5mtnnT3dyS9WLwR42fDhg0s3gkAQErMGlPuPiLpbkmPSdon6VF332tm95nZrbnd7jazvWa2S9KfSPpUySaOgeuuu04DAwM6dOhQ6FEAAECJVeWzk7tvkbRlyrYvTvr5j4s8V6ytW7dOktTd3a13vvOdgacBAAClxAroJdDS0qKFCxequ7s79CgAAKDEiKkSqK6u1tq1a7V9+/bQowAAgBIjpkpk3bp12r59u8bGxkKPAgAASoiYKpHOzk6dOXNGL76Y6i82AgCQeMRUiXR2dkoS100BAJBwxFSJXHPNNaqpqeG6KQAAEo6YKpGqqiqtXbuWI1MAACQcMVVCnZ2d2rFjh0ZHR0OPAgAASoSYKqF169bp7NmzeuGFF0KPAgAASoSYKiEuQgcAIPmIqRK6+uqrVVtbS0wBAJBgxFQJVVZWqqOjg5gCACDBiKkS6+zs1K5duzQyMhJ6FAAAUALEVIl1dnbq3Llz2r9/f+hRAABACRBTJbZu3TpJXIQOAEBSEVMl9q53vUtLliwhpgAASChiqsQqKyt17bXX8mtlAABIKGKqDNatW6ddu3ZpeHg49CgAAKDIiKky6Ozs1ODgoJ5//vnQowAAgCIjpsqAldABAEguYqoM3vnOd6quro7rpgAASCBiqgwqKip07bXXcmQKAIAEIqbKpLOzU88++ywXoQMAkDDEVJlcffXVGhoa0tGjR0OPAgAAioiYKpNMJiNJxBQAAAlDTJVJNpuVJPX09ASeBAAAFBMxVSYcmQIAIJmIqTJZunSpamtrOTIFAEDCEFNlYmbKZDIcmQIAIGGIqTLKZrMcmQIAIGGIqTLiyBQAAMlDTJVRNpvV0aNHNTY2FnoUAABQJMRUGWUyGY2MjKi/vz/0KAAAoEiIqTJirSkAAJKHmCoj1poCACB5iKky4sgUAADJQ0yV0YoVK1RZWcmRKQAAEoSYKqPKyko1NjZyZAoAgAQhpsqMtaYAAEgWYqrMWAUdAIBkIabKLJPJqKenR+4eehQAAFAExFSZZbNZnT17VqdPnw49CgAAKAJiqsxYawoAgGQhpsqMtaYAAEgWYqrMODIFAECyEFNl1tjYKIkjUwAAJAUxVWaLFi3S8uXLOTIFAEBCEFMBsNYUAADJQUwFwCroAAAkBzEVAEemAABIDmIqgGw2qxMnTmhwcDD0KAAAoEDEVAATyyP09vYGngQAABSKmApgYuFOrpsCACD+iKkAJo5Mcd0UAADxR0wFwJEpAACSg5gKoK6uTkuWLOHIFAAACUBMBZLNZjkyBQBAAhBTgWQyGY5MAQCQAMRUIByZAgAgGYipQDKZjHp7ezU6Ohp6FAAAUABiKpBsNqvR0VH19fWFHgUAABSAmApkYq0pTvUBABBvxFQgE2tNcRE6AADxRkwFwpEpAACSgZgKpL6+XtXV1RyZAgAg5oipQCoqKtTY2MiRKQAAYo6YCiibzXJkCgCAmCOmAspkMhyZAgAg5vKKKTO72cwOmNlBM7tnmsf/xMyeN7PnzOzvzezy4o+aPBNHptw99CgAAGCeZo0pM6uU9ICkWyStkXSHma2ZsttOSZ3u3ibpR5K+WuxBkyiTyej8+fN6/fXXQ48CAADmKZ8jU9dJOujuh919SNIjkm6bvIO7/4O7n8vd/YWkbHHHTCbWmgIAIP7yiamMpCOT7vfkts3ks5J+WshQadHY2ChJ6u3tDTwJAACYr6o89rFptk17kY+ZbZLUKemfzvD4XZLukqTm5uY8R0wuFu4EACD+8jky1SOpadL9rKS3HUoxsw9I+veSbnX3N6d7Ind/0N073b2zvr5+PvMmysSRKWIKAID4yiemtklabWZXmNkCSbdL2jx5BzPrkPQtjYfU8eKPmUwLFy7UsmXLOM0HAECMzRpT7j4i6W5Jj0naJ+lRd99rZveZ2a253f6LpCWSfmhmu8xs8wxPhylYawoAgHjL55opufsWSVumbPvipJ8/UOS5UoOYAgAg3lgBPbDGxkZO8wEAEGPEVGCZTEZ9fX0aHh4OPQoAAJgHYiqwTCYjd9exY8dCjwIAAOaBmAqMhTsBAIg3YiowFu4EACDeiKnAiCkAA
OKNmAps+fLlqq6u5jQfAAAxRUwFVlFRoZUrV3JkCgCAmCKmIoCFOwEAiC9iKgJYuBMAgPgipiKAI1MAAMQXMRUBmUxGZ86c0ZkzZ0KPAgAA5oiYigAW7gQAIL6IqQhgrSkAAOKLmIoAYgoAgPgipiKA03wAAMQXMRUBS5YsUV1dHUemAACIIWIqIlgeAQCAeCKmIoKFOwEAiCdiKiI4MgUAQDwRUxGRyWT02muvaWxsLPQoAABgDoipiGhsbNTIyIj6+/tDjwIAAOaAmIoI1poCACCeiKmIIKYAAIgnYioiWLgTAIB4IqYi4jd+4zdUUVHBkSkAAGKGmIqIqqoqNTQ0EFMAAMQMMRUhLNwJAED8EFMRwsKdAADEDzEVIcQUAADxQ0xFSGNjo06dOqXBwcHQowAAgDwRUxEysdYU100BABAfxFSEsNYUAADxQ0xFCKugAwAQP8RUhBBTAADEDzEVIUuXLtWiRYs4zQcAQIwQUxFiZiyPAABAzBBTEUNMAQAQL8RUxPArZQAAiBdiKmImjky5e+hRAABAHoipiMlkMhocHNSvf/3r0KMAAIA8EFMRw8KdAADECzEVMaw1BQBAvBBTEZPNZiVJR44cCTwJAADIBzEVMZlMRmZGTAEAEBPEVMRUV1dr5cqVxBQAADFBTEVQU1MTMQUAQEwQUxHU1NSkV199NfQYAAAgD8RUBE0cmWLhTgAAoo+YiqCmpiadP39ep06dCj0KAACYBTEVQU1NTZJYHgEAgDggpiKoublZEjEFAEAcEFMRxJEpAADig5iKoIaGBlVXV/ONPgAAYoCYiqCKigplMhmOTAEAEAPEVESxcCcAAPFATEUUMQUAQDwQUxHV3Nyso0ePanR0NPQoAADgIoipiGpqatLw8LD6+vpCjwIAAC6CmIoolkcAACAeiKmIIqYAAIgHYiqiiCkAAOKBmIqoyy67TLW1tcQUAAARR0xFlJmpqamJVdABAIg4YirCWGsKAIDoI6YijJgCACD6iKkIa2pq0rFjxzQ0NBR6FAAAMANiKsKamprk7urt7Q09CgAAmAExFWHNzc2SWB4BAIAoyyumzOxmMztgZgfN7J5pHv9NM9thZiNm9vHij5lOE2tN8Y0+AACia9aYMrNKSQ9IukXSGkl3mNmaKbu9KunTkv6q2AOmGQt3AgAQfVV57HOdpIPufliSzOwRSbdJen5iB3d/OffYWAlmTK0lS5Zo6dKlxBQAABGWz2m+jKTJ/zXvyW2bMzO7y8y6zay7v79/Pk+ROiyPAABAtOUTUzbNNp/Pi7n7g+7e6e6d9fX183mK1CGmAACItnxiqkdS06T7WUl8V79MmpubuQAdAIAIyyemtklabWZXmNkCSbdL2lzasTChqalJp06d0rlz50KPAgAApjFrTLn7iKS7JT0maZ+kR919r5ndZ2a3SpKZvcfMeiR9QtK3zGxvKYdOE77RBwBAtOXzbT65+xZJW6Zs++Kkn7dp/PQfimxyTF111VWBpwEAAFOxAnrEcWQKAIBoI6YiLpsdP+BHTAEAEE3EVMQtXLhQDQ0NfKMPAICIIqZigLWmAACILmIqBogpAACii5iKgYmYcp/XwvMAAKCEiKkYaGpq0htvvKGBgYHQowAAgCmIqRhobm6WJC5CBwAggoipGGCtKQAAoouYioGJmOLIFAAA0UNMxUBjY6MWL16s/fv3hx4FAABMQUzFQEVFhdasWaM9e/aEHgUAAExBTMVES0uL9u7dG3oMAAAwBTEVE62trerr69OJEydCjwIAACYhpmKipaVFkjg6BQBAxBBTMdHa2iqJmAIAIGqIqZjIZDKqq6vjInQAACKGmIoJM+MidAAAIoiYipHW1lbt3buXX3gMAECEEFMx0tLSopMnT6qvry/0KAAAIIeYihEuQgcAIHqIqRiZWB6Bi9ABAIgOYipGGhoadNlll3FkCgCACCGmYsTMLlyEDgAAooGYipmWlhbt2bOHb/QBABARxFTMtLa26vTp0zp69GjoUQAAgIip2OEidAAAooWYihl+4TEAANFCTMXM8uXL1dDQQEwBABARxFQMTVyEDgAAwiOmYqi1tVXPP/+8xsbGQo8CAEDqEVMx1NLSorNnz+rVV18NPQoAAKlHTMUQ3+gDACA6iKkY4ht9AABEBzEVQ0uXLlUmk+HIFAAAEUBMxRS/ow8AgGggpmKqpaVF+/bt0+joaOhRAABINWIqplpaWjQ4OKjDhw+HHgUAgFQjpmKqtbVVEhehAwAQGjEVU2vWrJEk7d69O/AkAACkGzEVU0uWLFFHR4d+/OMfhx4FAIBUI6ZibNOmTdq2bZsOHDgQehQAAFKLmIqxO+64QxUVFXr44YdDjwIAQGoRUzG2cuVK3XTTTfr+97/PLz0GACAQYirmNm3apJdffln/+I//GHoUAABSiZiKuY985CNavHgxp/oAAAiEmIq5xYsX66Mf/ageffRRDQ4Ohh4HAIDUIaYSYNOmTRoYGNBPfvKT0KMAAJA6xFQC3HjjjVq5ciWn+gAACICYSoDKykp98pOf1JYtW3Ty5MnQ4wAAkCrEVEJs2rRJw8PDevTRR0OPAgBAqhBTCdHe3q7W1lZO9QEAUGbEVEKYme68805t3bpVhw4dCj0OAACpQUwlyCc/+UmZmb773e+GHgUAgNQgphIkm83qwx/+sL72ta/xy48BACgTYiphvvnNb6qmpubCBekAAKC0iKmEaWxs1Le//W11d3fry1/+cuhxAABIPGIqgT760Y/qM5/5jO6//349/fTToccBACDRiKmE+vrXv64rr7xSd955pwYGBkKPAwBAYhFTCfWOd7xD3/ve99TT06M/+qM/Cj0OAACJRUwl2Pr163Xvvffq4Ycf1g9+8IPQ4wAAkEjEVMLde++92rBhg+68807df//9GhsbCz0SAACJQkwlXFVVlX7605/qYx/7mL7whS/olltuUV9fX+ixAABIDGIqBS655BI98sgj+ta3vqWnnnpKa9eu1ZNPPhl6LAAAEoGYSgkz01133aVf/epXWrp0qT7wgQ/onnvu4Zt+AAAUiJhKmXe/+93q7u7Wpz/9aX3lK1/RqlWrdN999xFVAADMEzGVQosXL9ZDDz2kHTt26P3vf7++9KUvEVUAAMwTMZViHR0d+tu//du3RFVjY6M+9KEP6Rvf+IYOHDggdw89JgAAkWb5/MfSzG6W9HVJlZL+wt3/85THF0r6S0nrJJ2U9Hvu/vLFnrOzs9O7u7vnOTZKYefOnfrOd76jxx9/XC+++KIkqbm5WTfeeKNaW1t11VVX6V3vepeuuOIKVVVVBZ4WAIDyMbPt7t457WOzxZSZVUp6QdJNknokbZN0h7s/P2mffympzd3/wMxul/QRd/+9iz0vMRVtL730kh5//HE9/vjjeuqpp3TixIkLj1VXV+vKK69UY2OjGhoatGLFCjU0NKihoUGXXnqpLrnkkrfclixZopqaGplZwL8jAADmr9CY2iDpy+6+MXf/85Lk7vdP2uex3D5bzaxK0jFJ9X6RJyem4uXkyZM6cOCAXnjh
BR04cEAvvviijh07pr6+PvX19enMmTOzPkdtba1qa2u1ePFi1dTUaNGiRW+5LVy4UAsWLHjbrbq6WtXV1W/5uaqq6i23ysrKC3+d+vPUW0VFxYx/nbhNvW9mb7k/dZuZXbg/9efJ26a7P9MNABAdF4upfM7VZCQdmXS/R9J7Z9rH3UfMbEDSMkknhERYtmyZurq61NXVNe3j58+f1/Hjx/X666/r9ddf18DAwIXb2bNnde7cuQu3s2fP6vz58xocHLxwO3XqlAYHBzU8PKw333xTQ0NDF27Dw8MaHh7W0NBQmf+uw5susGbbNvn+xM+z/bmp+8302Ez7Td1/usdne558/kwhz53P883ltaO67WLbi/06xfyzhb5OOfYL+TrlmjHU8xX6nJ/97Gf1iU98oojTzE0+MTXd393UI0757CMzu0vSXdL4tThIjpqaGl1++eW6/LH3c7oAAAXwSURBVPLLS/Ya7q7R0VENDw9rdHRUIyMjb7lNbBsdHX3bz5NvY2NjGhsbe8s2d3/b9snbxsbGLrz+5O2Tt03cJrZP/nnytunu53Ob+GdwsW2T70/8PNufm7rfTI/NtN/U/ad7fLbnyefPFPLc+TzfXF47qtsutr3Yr1PMP1vo68y0Xz77Fvolm2LPXew/W4w/X+rnK8ZzDg4OFmmS+cknpnokNU26n5XUO8M+PbnTfJdIOjX1idz9QUkPSuOn+eYzMNLLzC6c1gMAICryWRphm6TVZnaFmS2QdLukzVP22SzpU7mfPy7pyYtdLwUAAJAUs/4vfu4aqLslPabxpREecve9ZnafpG533yzpO5IeNrODGj8idXsphwYAAIiKvM6XuPsWSVumbPvipJ8HJYW78gsAACAQVkAHAAAoADEFAABQAGIKAACgAMQUAABAAYgpAACAAhBTAAAABSCmAAAACkBMAQAAFICYAgAAKAAxBQAAUABiCgAAoADEFAAAQAGIKQAAgAIQUwAAAAUgpgAAAApg7h7mhc36Jb1S4pdZLulEiV8D88N7E028L9HFexNNvC/RVez35nJ3r5/ugWAxVQ5m1u3unaHnwNvx3kQT70t08d5EE+9LdJXzveE0HwAAQAGIKQAAgAIkPaYeDD0AZsR7E028L9HFexNNvC/RVbb3JtHXTAEAAJRa0o9MAQAAlFRiY8rMbjazA2Z20MzuCT1PWplZk5n9g5ntM7O9ZvbHue2XmdkTZvZi7q+Xhp41rcys0sx2mtnf5e5fYWa/zL03f21mC0LPmDZmttTMfmRm+3OfnQ18ZqLBzP517t9le8zsB2a2iM9MGGb2kJkdN7M9k7ZN+zmxcd/INcFzZnZtMWdJZEyZWaWkByTdImmNpDvMbE3YqVJrRNKfuvs1ktZL+sPce3GPpL9399WS/j53H2H8saR9k+5/RdKf596bX0v6bJCp0u3rkv6Pu18tqV3j7w+fmcDMLCPpc5I63b1VUqWk28VnJpT/IenmKdtm+pzcIml17naXpG8Wc5BExpSk6yQddPfD7j4k6RFJtwWeKZXc/TV335H7+YzG/6OQ0fj78d3cbt+V9LthJkw3M8tK+h1Jf5G7b5J+W9KPcrvw3pSZmdVJ+k1J35Ekdx9y99fFZyYqqiTVmFmVpFpJr4nPTBDu/pSkU1M2z/Q5uU3SX/q4X0haamYrizVLUmMqI+nIpPs9uW0IyMxWSeqQ9EtJDe7+mjQeXJJWhJss1f6rpH8raSx3f5mk1919JHefz075XSmpX9J/z51+/QszWyw+M8G5+1FJX5P0qsYjakDSdvGZiZKZPicl7YKkxpRNs42vLQZkZksk/U9J/8rdT4eeB5KZfUjScXffPnnzNLvy2SmvKknXSvqmu3dIOitO6UVC7vqb2yRdIalR0mKNnz6ais9M9JT0321JjakeSU2T7mcl9QaaJfXMrFrjIfV9d/+b3Oa+iUOsub8eDzVfil0v6VYze1njp8J/W+NHqpbmTmFIfHZC6JHU4+6/zN3/kcbjis9MeB+Q9JK797v7sKS/kdQlPjNRMtPnpKRdkNSY2iZpde4bFgs0foHg5sAzpVLuGpzvSNrn7n826aHNkj6V+/lTkv53uWdLO3f/vLtn3X2Vxj8jT7r7P5f0D5I+ntuN96bM3P2YpCNmdlVu042SnhefmSh4VdJ6M6vN/btt4r3hMxMdM31ONkv6F7lv9a2XNDBxOrAYErtop5n9M43/X3alpIfc/T8FHimVzOwGSU9L2q3/f13OFzR+3dSjkpo1/i+oT7j71AsJUSZm9n5J/8bdP2RmV2r8SNVlknZK2uTub4acL23MbK3GvxSwQNJhSZ/R+P/88pkJzMz+g6Tf0/g3lXdK+n2NX3vDZ6bMzOwHkt4vabmkPklfkvS/NM3nJBe//03j3/47J+kz7t5dtFmSGlMAAADlkNTTfAAAAGVBTAEAABSAmAIAACgAMQUAAFAAYgoAAKAAxBQAAEABiCkAAIACEFMAAAAF+H/5PxHPlDMpjAAAAABJRU5ErkJggg==\n", 138 | "text/plain": [ 139 | "
" 140 | ] 141 | }, 142 | "metadata": { 143 | "needs_background": "light" 144 | }, 145 | "output_type": "display_data" 146 | } 147 | ], 148 | "source": [ 149 | "#### Run training\n", 150 | "Y_train_onehot = tf.squeeze(tf.one_hot(tf.cast(Y_train, tf.int32), num_classes))\n", 151 | "train_history = model.fit(x=X_train, y=Y_train_onehot, epochs=100, batch_size=10, verbose=0)\n", 152 | "\n", 153 | "plt.plot(train_history.history['loss'], 'k')" 154 | ] 155 | }, 156 | { 157 | "cell_type": "code", 158 | "execution_count": 27, 159 | "metadata": {}, 160 | "outputs": [ 161 | { 162 | "data": { 163 | "text/plain": [ 164 | "Text(0.5, 1.0, 'Observed Output')" 165 | ] 166 | }, 167 | "execution_count": 27, 168 | "metadata": {}, 169 | "output_type": "execute_result" 170 | }, 171 | { 172 | "data": { 173 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAqwAAADjCAYAAABAU0agAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADh0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uMy4xLjMsIGh0dHA6Ly9tYXRwbG90bGliLm9yZy+AADFEAAANfUlEQVR4nO3dfdDlZV3H8c8Xl0CyIgNTEWHKQGEmmUmTHlQmTCVtcIBQRwkV7UlkLEdtMAydqHAczYfUSckKpEEQ1MIoxCBzRGZkZPIPyDQeFRIEBSQSuPrj9zt59s5lEXb3fPfm9ZrZue+9f+f87uvsnuuc97nOde/WGCMAANDVDqseAAAA3BvBCgBAa4IVAIDWBCsAAK0JVgAAWhOsAAC0JlgBtpKqeklV/euqxwGrUlUnVtVpqx7H98O87UmwbkNVdWVVPWMrf4/t7sEBFuY5ckdV3bb0690rHM+FVfXyrXj+narqT6rq6vl2f6mqXltVdR+vv3dVjarasIXGs0XPx/o3x92/VdW3q+r6qnpvVe266nFtTebtaqzrGwdsl351jPHJVQ9iGzkzySOT/EqSy5M8KcmpSfZMctwKxwWbVVWvSfK6JEcnuSDJHknek+T8qvqFMcb/bKNxbBhj3LUtvtfMvF0BK6wrsHi7oareWlU3V9V/VtUhS8cvnF+9XVJV36yqj1XVw+djB1XVtWvOd2VVPaOqnp3k+CTPn1emLtu2twy2nnnl5qyl359cVRfU5KCquraqjq+qG+c58aKly+40z7erq+qGqnpfVT106fihVfWFqvpWVX25qp5dVScleWqSdy+v9FbV46vq/Kr6RlVdUVVHLp3nx6rq4/N5Lknyk/dyew5O8swkh48xvjjGuGuMcXGSFyd5ZVU9br7cRu/MrHkX5V/mj7fMY/y5+fHlM1X1rvnx4/L5e+X+nO8+/NXwIFRVP5zkTUleNcY4b4zxnTHGlUmOTLJXpvvxws5VdUZV3VpVl1bVE5fO8/qqum4+dsXivlpVO1TV78/z8aaq+vDS8+BiRfGYqro6yaeq6ryqOnbNGC+rqsPmz83b7ZxgXZ2nJLkiyW5J3pLklKqN3k749SQvS/LoJHcleefmTjjGOC/JHyc5Y4zxsDHGEzd3HdiOvCbJT88P7E9NckySo8d3/3/pR2aaT3tkWvH5i6radz52cpJ9khyQ5HHzZd6YJFX1s0n+Jslrk+ya5GlJrhxjvCHJp5McO8+nY6vqB5Ocn+T0JI9I8sIk76mq/efv8+dJ/jvJozLN35fdy+355SSfG2Ncs/zFMcbnklyb5ODvea2NPW3+uOs8xs/Ov39Kkq/Mfx5/mOTsxZP9/TwfrPXzSXZOcvbyF8cYtyX5h0z374VDM61KPjzT3PloVe04z89jkzx5jPFDSZ6V5Mr5OscleV6Sp2d6Hrw50/xa9vQkT5ivd3qm+Zgkqar9MoXzuebt+iBYV+eqMcb7xxh3J/nrTBPlx5eOnzq/ers9yQlJjqyqh6xioLCNfbSqbln69YokGWN8O9MqxtuSnJZpZefaNdc9YYxx5xjjoiTnZpo3leQVSX53jPGNMcatmV7YvWC+zjFJ/nKMcf4Y454xxnVjjMs3MbbnZorZD84rK5cm+UiSI+b5eXiSN44xbh9jfDHT3N6U3ZJ8bRPHvjYfv7/+K8mfzateZ2R6cfycB3A+WGu3JDdu4q34tfffz48xzhpjfCfT/N05yYFJ7k6yU5L9qmrHMcaVY4wvz9f5zSRvGGNcO8a4M8mJmebZ8lbGE+e5dkeSc5IcUFV7zcdelOTs+brm7TpgD+vqXL/4ZIzx7Xlx9WFLx5dfvV2VZMc8sIkA24vnbWoP6xjjkqr6SqZVkg+vOXzz/AJv4apMKzO7J9klyeeX3sSoJIsXgHsm+cR9HNteSZ5SVbcsfW1Dpv1ru8+fr527m3Jjkp/axLFHzcfvr+uWVp4X43j0AzgfrHVjkt3qe+8fXXv//b85Mca4p6ZtbY8eY3y6ql6dKUb3r6p/TPJ7Y4yvZppr51TVPUvnuTsbL+wsn/fWqjo30wvRk+ePvzEfNm/XASusfe259Pljk3wn00S4PdOTb5JkfnW4+9Jll+/ssK5U1Sszrch8NdMPeyz70fmtv4XHzpe7MckdSfYfY+w6//qRMcbiBeI12fSetbXz6ZokFy2dZ/EW3G8n+Xqm7Ttr5+6mfDLTk+jy5RdbFPZM8qn5SxvN+UxbHzY1voU91mwxWvxZ3N/zwVqfTXJnksOWvzjPwUMy/RDWwp5Lx3dI8pjM98cxxuljjF/MFJUjU2wm01w7ZM1c23mMcd3SedfeX/82yQvnPZwPTfLPS+cyb7dzgrWvF1fVflW1S5I3Jzlr3j7w75k2sD+nqnZM8geZnsAXbkiy9/ygAOtGVe2T5I8ybQs4KsnrquqANRd7U1X9wLzH9blJzhxj3JPk/UneXlWPmM+1R1U9a77OKUleWlUHzz/osUdVPX4+dkOSn1g6/98n2aeqjpr34O1YVU+uqifM8/PsJCdW1S7zHrqjN3V75lXkC5J8pKr2r6qHVNWBST6U5L1jjC/NF/1CkhfM3+tJSY5YOs3Xk9yzZozJtAJ93HydX8u0z2+xinx/zgcbGWN8M9MPXb2rph9S3LGq9s60V/XaTKuXCz9TVYfNb+e/OlPoXlxV+1bVL1XVTpn2kN6RaRU1Sd6X5KTFW/xVtXtVHbqZYX0iU/i
+OdPPcixWZ83bdUDU9HVqkr/KtHVg58z/VMb8IPE7ST6Q5LpMr7qW9/GdOX+8qaou3VaDhS3o72rjf4f1nPmJ7rQkJ48xLpufFI5Pcur8ZJdMc+XmTCsSH0ryW0t7UV+f5D8yPUl+K9Mqyb7JtM0gyUuTvD3JN5NclOlJL0nekWmf281V9c55/+szM73d+NX5e56c775oPDbT1p7rM83fD27mth6eaRXovCS3zbfxlCSvWrrMCZlWgG/OFAinLw7M+3pPSvKZeb/vgfOhz2V62/LG+fgRY4ybHsD54P8ZY7wl0zx8a5JvZbrfXZPk4Hnv6MLHkjw/033uqCSHzftZd0ryp5nup9dnCrbj5+u8I8nHk/xTVd2a5OJMP5R0b+O5M1N8PiMb36/N23WgNt4uQQdVdWGS08YYH1j1WGB7UFUHZZozj1n1WFatql6S5OXz26zAdsC83TwrrAAAtCZYAQBozZYAAABas8IKAEBrghUAgNbu9X+6qir7BWDJGKM2f6nVMWdhY53nrPkKG7u3+WqFFQCA1gQrAACtCVYAAFoTrAAAtCZYAQBoTbACANCaYAUAoDXBCgBAa4IVAIDWBCsAAK0JVgAAWhOsAAC0JlgBAGhNsAIA0JpgBQCgNcEKAEBrghUAgNYEKwAArQlWAABaE6wAALQmWAEAaE2wAgDQmmAFAKA1wQoAQGuCFQCA1gQrAACtCVYAAFoTrAAAtCZYAQBoTbACANCaYAUAoDXBCgBAa4IVAIDWBCsAAK0JVgAAWhOsAAC0JlgBAGhNsAIA0JpgBQCgNcEKAEBrghUAgNYEKwAArQlWAABaE6wAALQmWAEAaE2wAgDQmmAFAKA1wQoAQGuCFQCA1gQrAACtCVYAAFoTrAAAtCZYAQBoTbACANCaYAUAoDXBCgBAa4IVAIDWBCsAAK0JVgAAWhOsAAC0JlgBAGhNsAIA0JpgBQCgNcEKAEBrghUAgNYEKwAArQlWAABaE6wAALQmWAEAaE2wAgDQmmAFAKA1wQoAQGuCFQCA1gQrAACtCVYAAFoTrAAAtCZYAQBoTbACANDahlUPgM0bY2zR81XVFj0fAMDWZIUVAIDWBCsAAK0JVgAAWhOsAAC0JlgBAGhNsAIA0JpgBQCgNcEKAEBrghUAgNYEKwAArQlWAABaE6wAALQmWAEAaE2wAgDQmmAFAKA1wQoAQGuCFQCA1gQrAACtbVj1ANi8qtqi5xtjbNHzbenxwSpsyXlhzsLWZb4++FhhBQCgNcEKAEBrghUAgNYEKwAArQlWAABaE6wAALQmWAEAaE2wAgDQmmAFAKA1wQoAQGuCFQCA1gQrAACtCVYAAFoTrAAAtCZYAQBoTbACANCaYAUAoDXBCgBAa4IVAIDWNqx6AOvVGGPVQwC+D1W1xc5l/sPWZb4++FhhBQCgNcEKAEBrghUAgNYEKwAArQlWAABaE6wAALQmWAEAaE2wAgDQmmAFAKA1wQoAQGuCFQCA1gQrAACtCVYAAFoTrAAAtCZYAQBoTbACANCaYAUAoDXBCgBAa4IVAIDWNqx6AOtVVa16CMA64fEEth/m69ZhhRUAgNYEKwAArQlWAABaE6wAALQmWAEAaE2wAgDQmmAFAKA1wQoAQGuCFQCA1gQrAACtCVYAAFoTrAAAtCZYAQBoTbACANCaYAUAoDXBCgBAa4IVAIDWBCsAAK0JVgAAWtuw6gEArDdVteohAPeR+bp9sMIKAEBrghUAgNYEKwAArQlWAABaE6wAALQmWAEAaE2wAgDQmmAFAKA1wQoAQGuCFQCA1gQrAACtCVYAAFoTrAAAtCZYAQBoTbACANCaYAUAoDXBCgBAa4IVAIDWBCsAAK0JVgAAWhOsAAC0JlgBAGhNsAIA0JpgBQCgNcEKAEBrghUAgNYEKwAArQlWAABaE6wAALQmWAEAaE2wAgDQmmAFAKA1wQoAQGuCFQCA1gQrAACtCVYAAFoTrAAAtCZYAQBorcYYqx4DAABskhVWAABaE6wAALQmWAEAaE2wAgDQmmAFAKA1wQoAQGv/C6j834mXtpccAAAAAElFTkSuQmCC\n", 174 | "text/plain": [ 175 | "
" 176 | ] 177 | }, 178 | "metadata": { 179 | "needs_background": "light" 180 | }, 181 | "output_type": "display_data" 182 | } 183 | ], 184 | "source": [ 185 | "### Plot results\n", 186 | "\n", 187 | "## Generate testing data\n", 188 | "X_test = tf.convert_to_tensor(np.moveaxis(np.dstack([make_glider(10), make_glider(10)]), 2, 0), tf.float32)\n", 189 | "# X_test = tf.convert_to_tensor(make_glider(10), tf.float32)[tf.newaxis, ...]\n", 190 | "Y_test = gol(X_test)\n", 191 | "X_test = X_test[..., tf.newaxis]\n", 192 | "Y_test = Y_test[..., tf.newaxis]\n", 193 | "\n", 194 | "Y_pred = logit_to_pred(model(X_test), shape=(-1, wspan, hspan))\n", 195 | "\n", 196 | "plt.figure(figsize=(12,4))\n", 197 | "\n", 198 | "plt.subplot(1,3,1)\n", 199 | "plt.imshow(tf.squeeze(X_test[0]))\n", 200 | "plt.axis('off')\n", 201 | "plt.title(\"Input\")\n", 202 | "\n", 203 | "plt.subplot(1,3,2)\n", 204 | "plt.imshow(tf.squeeze(Y_test[0]))\n", 205 | "plt.axis('off')\n", 206 | "plt.title(\"Expected Output\")\n", 207 | "\n", 208 | "plt.subplot(1,3,3)\n", 209 | "plt.imshow(tf.squeeze(Y_pred[0]))\n", 210 | "plt.axis('off')\n", 211 | "plt.title(\"Observed Output\")\n", 212 | "\n" 213 | ] 214 | }, 215 | { 216 | "cell_type": "code", 217 | "execution_count": 345, 218 | "metadata": {}, 219 | "outputs": [], 220 | "source": [ 221 | "### Save and load a model\n", 222 | "model.save('path_to_my_model.h5')\n", 223 | "del model\n", 224 | "#model = tf.keras.models.load_model('path_to_my_model.h5', custom_objects={'Wraparound2D': Wraparound2D})" 225 | ] 226 | }, 227 | { 228 | "cell_type": "markdown", 229 | "metadata": {}, 230 | "source": [ 231 | "# Show activation patterns of hidden layers" 232 | ] 233 | }, 234 | { 235 | "cell_type": "code", 236 | "execution_count": 28, 237 | "metadata": {}, 238 | "outputs": [ 239 | { 240 | "ename": "IndexError", 241 | "evalue": "index 2 is out of bounds for axis 2 with size 2", 242 | "output_type": "error", 243 | "traceback": [ 244 | "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m", 245 | "\u001b[0;31mIndexError\u001b[0m Traceback (most recent call last)", 246 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m\u001b[0m\n\u001b[1;32m 16\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mj\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m5\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mj\u001b[0m\u001b[0;34m==\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 18\u001b[0;31m \u001b[0mlayer_im\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhstack\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mmin_max_scaler\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlayer_outs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m...\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m10\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 19\u001b[0m 
\u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[0mpattern\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlayer_outs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mwspan\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhspan\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 247 | "\u001b[0;32m\u001b[0m in \u001b[0;36m\u001b[0;34m(.0)\u001b[0m\n\u001b[1;32m 16\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mj\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;36m5\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 17\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mj\u001b[0m\u001b[0;34m==\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 18\u001b[0;31m \u001b[0mlayer_im\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mhstack\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mmin_max_scaler\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlayer_outs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m...\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mi\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mi\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mrange\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;36m10\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 19\u001b[0m \u001b[0;32melse\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 20\u001b[0m \u001b[0mpattern\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mlayer_outs\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mj\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m(\u001b[0m\u001b[0mwspan\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mhspan\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m-\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n", 248 | "\u001b[0;31mIndexError\u001b[0m: index 2 is out of bounds for axis 2 with size 2" 249 | ] 250 | } 251 | ], 252 | "source": [ 253 | "import tensorflow.keras.backend as K\n", 254 | "\n", 255 | "inp = model.input # input placeholder\n", 256 | "outputs = [layer.output for layer in model.layers] # all layer outputs\n", 257 | "functor = K.function(inp, outputs) # evaluation function\n", 258 | "\n", 259 | "layer_outs = functor([X_test, 1.])\n", 260 | "\n", 261 | "\n", 262 | "\n", 263 | "# Plot activations of different neurons in different layers \n", 264 | "all_layer_activations = list()\n", 265 | "\n", 266 | "min_max_scaler = lambda x : (x - np.min(x))/(np.max(x) - np.min(x))\n", 267 | "# min_max_scaler = lambda x : (x - np.mean(x))\n", 268 | "for j 
in range(1, 5):\n", 269 | " if j==1:\n", 270 | " layer_im = np.hstack([min_max_scaler(layer_outs[1][0][..., i]) for i in range(10)])\n", 271 | " else:\n", 272 | " pattern = np.reshape(layer_outs[j][0], (wspan, hspan, -1))\n", 273 | " layer_im = np.hstack([min_max_scaler(pattern[..., i]) for i in range(10)])\n", 274 | " all_layer_activations.append(layer_im)\n", 275 | "\n", 276 | " \n", 277 | "plt.figure()\n", 278 | "plt.imshow(np.vstack(all_layer_activations))\n", 279 | "plt.title(\"Activations of hidden layers given \\\"Glider\\\" input\")\n", 280 | "\n", 281 | "plt.figure()\n", 282 | "plt.imshow(np.squeeze(np.dstack(model.layers[1].weights[0].numpy())))\n", 283 | "plt.title(\"Convolutional filters\")" 284 | ] 285 | }, 286 | { 287 | "cell_type": "code", 288 | "execution_count": null, 289 | "metadata": {}, 290 | "outputs": [], 291 | "source": [] 292 | }, 293 | { 294 | "cell_type": "code", 295 | "execution_count": null, 296 | "metadata": {}, 297 | "outputs": [], 298 | "source": [] 299 | } 300 | ], 301 | "metadata": { 302 | "kernelspec": { 303 | "display_name": "Python 3", 304 | "language": "python", 305 | "name": "python3" 306 | }, 307 | "language_info": { 308 | "codemirror_mode": { 309 | "name": "ipython", 310 | "version": 3 311 | }, 312 | "file_extension": ".py", 313 | "mimetype": "text/x-python", 314 | "name": "python", 315 | "nbconvert_exporter": "python", 316 | "pygments_lexer": "ipython3", 317 | "version": "3.6.9" 318 | } 319 | }, 320 | "nbformat": 4, 321 | "nbformat_minor": 4 322 | } 323 | --------------------------------------------------------------------------------