├── .gitignore ├── LICENSE ├── README.md ├── exp3S.py ├── generate_data.py ├── img ├── associative_recall_ntm_12_0.png ├── associative_recall_ntm_6_0.png ├── copy_ntm_20_0.png ├── copy_ntm_40_1.png ├── ntm_copy_read_head.png └── ntm_copy_write_head.png ├── ntm.py ├── produce_heat_maps.py ├── run_tasks.py └── utils.py /.gitignore: -------------------------------------------------------------------------------- 1 | head_logs 2 | 3 | # Byte-compiled / optimized / DLL files 4 | __pycache__/ 5 | *.py[cod] 6 | *$py.class 7 | 8 | # C extensions 9 | *.so 10 | 11 | # Distribution / packaging 12 | .Python 13 | env/ 14 | build/ 15 | develop-eggs/ 16 | dist/ 17 | downloads/ 18 | eggs/ 19 | .eggs/ 20 | lib/ 21 | lib64/ 22 | parts/ 23 | sdist/ 24 | var/ 25 | wheels/ 26 | *.egg-info/ 27 | .installed.cfg 28 | *.egg 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .coverage 44 | .coverage.* 45 | .cache 46 | nosetests.xml 47 | coverage.xml 48 | *.cover 49 | .hypothesis/ 50 | 51 | # Translations 52 | *.mo 53 | *.pot 54 | 55 | # Django stuff: 56 | *.log 57 | local_settings.py 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # dotenv 85 | .env 86 | 87 | # virtualenv 88 | .venv 89 | venv/ 90 | ENV/ 91 | 92 | # Spyder project settings 93 | .spyderproject 94 | .spyproject 95 | 96 | # Rope project settings 97 | .ropeproject 98 | 99 | # mkdocs documentation 100 | /site 101 | 102 | # mypy 103 | .mypy_cache/ 104 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | GNU LESSER GENERAL PUBLIC LICENSE 2 | Version 3, 29 June 2007 3 | 4 | Copyright (C) 2007 Free Software Foundation, Inc. 5 | Everyone is permitted to copy and distribute verbatim copies 6 | of this license document, but changing it is not allowed. 7 | 8 | 9 | This version of the GNU Lesser General Public License incorporates 10 | the terms and conditions of version 3 of the GNU General Public 11 | License, supplemented by the additional permissions listed below. 12 | 13 | 0. Additional Definitions. 14 | 15 | As used herein, "this License" refers to version 3 of the GNU Lesser 16 | General Public License, and the "GNU GPL" refers to version 3 of the GNU 17 | General Public License. 18 | 19 | "The Library" refers to a covered work governed by this License, 20 | other than an Application or a Combined Work as defined below. 21 | 22 | An "Application" is any work that makes use of an interface provided 23 | by the Library, but which is not otherwise based on the Library. 24 | Defining a subclass of a class defined by the Library is deemed a mode 25 | of using an interface provided by the Library. 26 | 27 | A "Combined Work" is a work produced by combining or linking an 28 | Application with the Library. 
The particular version of the Library 29 | with which the Combined Work was made is also called the "Linked 30 | Version". 31 | 32 | The "Minimal Corresponding Source" for a Combined Work means the 33 | Corresponding Source for the Combined Work, excluding any source code 34 | for portions of the Combined Work that, considered in isolation, are 35 | based on the Application, and not on the Linked Version. 36 | 37 | The "Corresponding Application Code" for a Combined Work means the 38 | object code and/or source code for the Application, including any data 39 | and utility programs needed for reproducing the Combined Work from the 40 | Application, but excluding the System Libraries of the Combined Work. 41 | 42 | 1. Exception to Section 3 of the GNU GPL. 43 | 44 | You may convey a covered work under sections 3 and 4 of this License 45 | without being bound by section 3 of the GNU GPL. 46 | 47 | 2. Conveying Modified Versions. 48 | 49 | If you modify a copy of the Library, and, in your modifications, a 50 | facility refers to a function or data to be supplied by an Application 51 | that uses the facility (other than as an argument passed when the 52 | facility is invoked), then you may convey a copy of the modified 53 | version: 54 | 55 | a) under this License, provided that you make a good faith effort to 56 | ensure that, in the event an Application does not supply the 57 | function or data, the facility still operates, and performs 58 | whatever part of its purpose remains meaningful, or 59 | 60 | b) under the GNU GPL, with none of the additional permissions of 61 | this License applicable to that copy. 62 | 63 | 3. Object Code Incorporating Material from Library Header Files. 64 | 65 | The object code form of an Application may incorporate material from 66 | a header file that is part of the Library. You may convey such object 67 | code under terms of your choice, provided that, if the incorporated 68 | material is not limited to numerical parameters, data structure 69 | layouts and accessors, or small macros, inline functions and templates 70 | (ten or fewer lines in length), you do both of the following: 71 | 72 | a) Give prominent notice with each copy of the object code that the 73 | Library is used in it and that the Library and its use are 74 | covered by this License. 75 | 76 | b) Accompany the object code with a copy of the GNU GPL and this license 77 | document. 78 | 79 | 4. Combined Works. 80 | 81 | You may convey a Combined Work under terms of your choice that, 82 | taken together, effectively do not restrict modification of the 83 | portions of the Library contained in the Combined Work and reverse 84 | engineering for debugging such modifications, if you also do each of 85 | the following: 86 | 87 | a) Give prominent notice with each copy of the Combined Work that 88 | the Library is used in it and that the Library and its use are 89 | covered by this License. 90 | 91 | b) Accompany the Combined Work with a copy of the GNU GPL and this license 92 | document. 93 | 94 | c) For a Combined Work that displays copyright notices during 95 | execution, include the copyright notice for the Library among 96 | these notices, as well as a reference directing the user to the 97 | copies of the GNU GPL and this license document. 
98 | 99 | d) Do one of the following: 100 | 101 | 0) Convey the Minimal Corresponding Source under the terms of this 102 | License, and the Corresponding Application Code in a form 103 | suitable for, and under terms that permit, the user to 104 | recombine or relink the Application with a modified version of 105 | the Linked Version to produce a modified Combined Work, in the 106 | manner specified by section 6 of the GNU GPL for conveying 107 | Corresponding Source. 108 | 109 | 1) Use a suitable shared library mechanism for linking with the 110 | Library. A suitable mechanism is one that (a) uses at run time 111 | a copy of the Library already present on the user's computer 112 | system, and (b) will operate properly with a modified version 113 | of the Library that is interface-compatible with the Linked 114 | Version. 115 | 116 | e) Provide Installation Information, but only if you would otherwise 117 | be required to provide such information under section 6 of the 118 | GNU GPL, and only to the extent that such information is 119 | necessary to install and execute a modified version of the 120 | Combined Work produced by recombining or relinking the 121 | Application with a modified version of the Linked Version. (If 122 | you use option 4d0, the Installation Information must accompany 123 | the Minimal Corresponding Source and Corresponding Application 124 | Code. If you use option 4d1, you must provide the Installation 125 | Information in the manner specified by section 6 of the GNU GPL 126 | for conveying Corresponding Source.) 127 | 128 | 5. Combined Libraries. 129 | 130 | You may place library facilities that are a work based on the 131 | Library side by side in a single library together with other library 132 | facilities that are not Applications and are not covered by this 133 | License, and convey such a combined library under terms of your 134 | choice, if you do both of the following: 135 | 136 | a) Accompany the combined library with a copy of the same work based 137 | on the Library, uncombined with any other library facilities, 138 | conveyed under the terms of this License. 139 | 140 | b) Give prominent notice with the combined library that part of it 141 | is a work based on the Library, and explaining where to find the 142 | accompanying uncombined form of the same work. 143 | 144 | 6. Revised Versions of the GNU Lesser General Public License. 145 | 146 | The Free Software Foundation may publish revised and/or new versions 147 | of the GNU Lesser General Public License from time to time. Such new 148 | versions will be similar in spirit to the present version, but may 149 | differ in detail to address new problems or concerns. 150 | 151 | Each version is given a distinguishing version number. If the 152 | Library as you received it specifies that a certain numbered version 153 | of the GNU Lesser General Public License "or any later version" 154 | applies to it, you have the option of following the terms and 155 | conditions either of that published version or of any later version 156 | published by the Free Software Foundation. If the Library as you 157 | received it does not specify a version number of the GNU Lesser 158 | General Public License, you may choose any version of the GNU Lesser 159 | General Public License ever published by the Free Software Foundation. 
160 | 161 | If the Library as you received it specifies that a proxy can decide 162 | whether future versions of the GNU Lesser General Public License shall 163 | apply, that proxy's public statement of acceptance of any version is 164 | permanent authorization for you to choose that version for the 165 | Library. 166 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | **Update 2019-05-26: Google has integrated our NTM implementation into the official TensorFlow release. For more details read here: https://www.scss.tcd.ie/joeran.beel/blog/2019/05/25/google-integrates-our-neural-turing-machine-implementation-in-tensorflow/** 2 | 3 | For a description of our implementation and experimental results, please see the pre-print of our paper, which will appear as a conference paper at ICANN 2018: https://arxiv.org/abs/1807.08518 4 | 5 | Our key contribution is not to implement a Neural Turing Machine in code but to make training stable and reliable. We do not observe the slow learning or gradients becoming NaN that other implementations have reported. 6 | 7 | You can cite the paper as follows: 8 | 9 | ``` 10 | @article{collierbeel2018ntms, title={Implementing Neural Turing Machines}, 11 | author={Collier, Mark and Beel, Joeran}, 12 | journal={International Conference on Artificial Neural Networks, ICANN.}, year={2018}} 13 | ``` 14 | 15 | This work was done with [Joeran Beel](https://www.scss.tcd.ie/joeran.beel/), Ussher Assistant Professor in Intelligent Systems 16 | at the [Adapt Centre, Trinity College Dublin](https://www.adaptcentre.ie/), as part of my undergraduate thesis at Trinity College Dublin. 17 | 18 | # Neural Turing Machine 19 | 20 | This repository contains a stable, successful TensorFlow implementation of a Neural Turing Machine, which has been tested on the Copy, Repeat Copy and Associative Recall tasks from the [original paper](https://arxiv.org/abs/1410.5401). 21 | 22 | ## Usage 23 | 24 | ```python 25 | from ntm import NTMCell 26 | 27 | cell = NTMCell(num_controller_layers, num_controller_units, num_memory_locations, memory_size, 28 | num_read_heads, num_write_heads, shift_range=3, output_dim=num_bits_per_output_vector, 29 | clip_value=clip_controller_output_to_value) 30 | 31 | outputs, _ = tf.nn.dynamic_rnn( 32 | cell=cell, 33 | inputs=inputs, 34 | time_major=False) 35 | ``` 36 | 37 | The implementation is derived from https://github.com/snowkylin/ntm, another open source NTM implementation. We make small but meaningful changes to the linked code that have a large effect on making our implementation more reliable to train and faster to converge, as well as easier to integrate with TensorFlow. Our contributions are: 38 | - We compare three different memory initialization schemes and find that initializing the memory contents of a Neural Turing Machine to small constant values works much better than random initialization or backpropagating through memory initialization. 39 | - We clip the outputs from the NTM controller into a range, which helps with optimization difficulties. 40 | - The NTMCell implements the [TensorFlow RNNCell interface](https://www.tensorflow.org/api_docs/python/tf/contrib/rnn/RNNCell) so it can be used directly with [tf.nn.dynamic_rnn](https://www.tensorflow.org/api_docs/python/tf/nn/dynamic_rnn), etc. 41 | - We never see the loss go to NaN, as some other implementations report. 42 | - We implement 3 of the 5 tasks from the NTM paper.
We run many experiments and report convergence speed and generalization performance for our implementation, compared to an LSTM and a DNC, and across the three memory-contents initialization schemes. 43 | 44 | ## Sample Outputs 45 | 46 | Below are some sample outputs on the Copy and Associative Recall tasks. We replicated the hyperparameters from the [original paper](https://arxiv.org/abs/1410.5401) for the two tasks: 47 | 48 | - Memory Size: 128 x 20 49 | - Controller: LSTM - 100 units 50 | - Optimizer: RMSProp - learning rate = 10^-4 51 | 52 | The Copy task network was trained on sequences of length sampled from Uniform(1,20) with 8-dimensional random bit vectors. The Associative Recall task network was trained on sequences with the number of items sampled from Uniform(2,6); each item consisted of three 6-dimensional random bit vectors. 53 | 54 | #### Example performance of NTM on Copy task with sequence length = 20 (output is perfect): 55 | ![Neural Turing Machine Copy Task - Seq len=20](/img/copy_ntm_20_0.png) 56 | 57 | #### Example performance of NTM on Copy task with sequence length = 40 (the network was only trained on sequences of length up to 20 - performance degrades after the 36th input): 58 | ![Neural Turing Machine Copy Task - Seq len=40](/img/copy_ntm_40_1.png) 59 | 60 | #### Example performance of NTM on Associative Recall task with 6 items (output is perfect): 61 | ![Neural Turing Machine Associative Recall Task - Seq len=6 items](/img/associative_recall_ntm_6_0.png) 62 | 63 | #### Example performance of NTM on Associative Recall task with 12 items (despite only being trained on sequences of up to 6 items, the network generalizes perfectly to 12 items): 64 | ![Neural Turing Machine Associative Recall Task - Seq len=12 items](/img/associative_recall_ntm_12_0.png) 65 | 66 | In order to interpret how the NTM used its external memory, we trained a network with 32 memory locations on the Copy task and graphed the read and write head address locations over time. 67 | 68 | As you can see from the graphs below, the network first writes the sequence to memory and then reads it back in the same order it wrote it. This uses both the content-based and location-based addressing capabilities of the NTM. The pattern of writes followed by reads is what we would expect of a reasonable solution to the Copy task. 69 | 70 | #### Write head locations of NTM with 32 memory locations trained on Copy task: 71 | ![Write head locations of NTM with 32 memory locations trained on Copy task](/img/ntm_copy_write_head.png) 72 | 73 | #### Read head locations of NTM with 32 memory locations trained on Copy task: 74 | ![Read head locations of NTM with 32 memory locations trained on Copy task](/img/ntm_copy_read_head.png) 75 | 76 | Further results on the memory initialization comparison and learning curves to come...
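
## End-to-End Training Sketch

To make the Usage snippet above concrete, the sketch below wires the cell into a full training step for the Copy task. It is condensed from `run_tasks.py` and `generate_data.py` in this repository and assumes TensorFlow 1.x; the constructor keywords follow the signature in `ntm.py`, while the variable names, step count and session loop are illustrative only, not the exact training script.

```python
import tensorflow as tf
from ntm import NTMCell
from generate_data import CopyTaskData

batch_size, num_bits, max_seq_len = 32, 8, 20

# Inputs carry one extra end-of-sequence channel; targets are the raw bit vectors.
inputs_ph = tf.placeholder(tf.float32, shape=(batch_size, None, num_bits + 1))
targets_ph = tf.placeholder(tf.float32, shape=(batch_size, None, num_bits))
seq_len_ph = tf.placeholder(tf.int32)

cell = NTMCell(controller_layers=1, controller_units=100,
               memory_size=128, memory_vector_dim=20,
               read_head_num=1, write_head_num=1,
               shift_range=1, output_dim=num_bits,
               clip_value=20, init_mode='constant')

outputs, _ = tf.nn.dynamic_rnn(cell=cell, inputs=inputs_ph,
                               time_major=False, dtype=tf.float32)

# Only the part of the unrolled sequence after the end-of-sequence marker is scored.
logits = outputs[:, seq_len_ph + 1:, :]
loss = tf.reduce_sum(
    tf.nn.sigmoid_cross_entropy_with_logits(labels=targets_ph, logits=logits)) / batch_size

tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(loss, tvars), 50)
train_op = tf.train.AdamOptimizer(1e-3).apply_gradients(zip(grads, tvars))

data = CopyTaskData()
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(10000):  # illustrative number of steps
        cur_len, batch_inputs, batch_targets = data.generate_batches(
            1, batch_size, bits_per_vector=num_bits,
            max_seq_len=max_seq_len, curriculum='uniform')[0]
        train_loss, _ = sess.run([loss, train_op],
                                 feed_dict={inputs_ph: batch_inputs,
                                            targets_ph: batch_targets,
                                            seq_len_ph: cur_len})
```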
77 | -------------------------------------------------------------------------------- /exp3S.py: -------------------------------------------------------------------------------- 1 | from __future__ import print_function 2 | import bisect 3 | import numpy as np 4 | 5 | class Exp3S: 6 | def __init__(self, num_tasks, eta, beta, eps): 7 | self.N = num_tasks 8 | self.w = np.zeros(num_tasks) 9 | self.eta = eta 10 | self.beta = beta 11 | self.eps = eps 12 | self.rewards = [] 13 | self.t = 1 14 | self.w = np.zeros(self.N) 15 | self.max_rewards = 50000 16 | 17 | def draw_task(self): 18 | self.pi = (1 - self.eps) * self._softmax(self.w) + (self.eps / float(self.N)) 19 | self.k = np.random.choice(self.N, p=self.pi) 20 | return self.k 21 | 22 | def update_w(self, v, t): 23 | '''v is learning progress, t is time to process batch that provided v''' 24 | r_ = v/t 25 | self._reservoir_sample(r_) 26 | q_lo, q_hi = self._quantiles() 27 | self.r = self._r(q_lo, q_hi, r_) 28 | 29 | alpha_t = 1/float(self.t) 30 | r_b_t = np.asarray([((self.r if i == self.k else 0) + self.beta)/self.pi[i] for i in range(self.N)]) 31 | tmp = np.exp(self.w + self.eta * r_b_t) 32 | for i in range(self.N): 33 | s = 0 34 | for j in range(self.N): 35 | if i != j: 36 | s += tmp[j] 37 | self.w[i] = np.log((1 - alpha_t) * tmp[i] + (alpha_t/(self.N - 1)) * s) 38 | 39 | self.t += 1 40 | 41 | def _quantiles(self): 42 | q_lo_pos = int(0.2 * len(self.rewards)) 43 | q_hi_pos = int(0.8 * len(self.rewards)) - 1 44 | return self.rewards[q_lo_pos], self.rewards[q_hi_pos] 45 | 46 | def _reservoir_sample(self, r_): 47 | insert = False 48 | if len(self.rewards) >= self.max_rewards and np.random.random_sample() < 10.0/float(self.t): 49 | pos = np.random.randint(0, high=len(self.rewards)) 50 | del self.rewards[pos] 51 | insert = True 52 | if insert or len(self.rewards) < self.max_rewards: 53 | pos = bisect.bisect_left(self.rewards, r_) 54 | self.rewards.insert(pos, r_) 55 | 56 | def _r(self, q_lo, q_hi, r_): 57 | if r_ < q_lo: 58 | return -1.0 59 | elif r_ >= q_hi: 60 | return 1.0 61 | else: 62 | return (2.0*(r_ - q_lo)/float(q_hi - q_lo)) - 1.0 63 | 64 | def _softmax(self, w): 65 | e_w = np.exp(w) 66 | return e_w / np.sum(e_w) 67 | 68 | def test(): 69 | exp3s = Exp3S(20, 0.001, 0, 0.05) 70 | rewards = [float(i)/sum([j for j in range(20)]) for i in range(20)] 71 | 72 | for i in range(100000): 73 | k = exp3s.draw_task() 74 | reward = rewards[k] 75 | exp3s.update_w(reward, 1) 76 | 77 | if i % 10000 == 0: 78 | print('task:', k, 'reward:', reward, 'scaled_reward', exp3s.r) 79 | print('weights:', exp3s.w) 80 | print('prob:', exp3s.pi) 81 | 82 | print('final weights:', exp3s.w) 83 | 84 | # test() 85 | -------------------------------------------------------------------------------- /generate_data.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | from scipy import spatial 3 | import random 4 | 5 | def set_random_seed(seed): 6 | np.random.seed(seed) 7 | 8 | snap_boolean = np.vectorize(lambda x: 1.0 if x > 0.5 else 0.0) 9 | 10 | class CopyTaskData: 11 | def generate_batches(self, num_batches, batch_size, bits_per_vector=8, curriculum_point=20, max_seq_len=20, 12 | curriculum='uniform', pad_to_max_seq_len=False): 13 | batches = [] 14 | for i in range(num_batches): 15 | if curriculum == 'deterministic_uniform': 16 | seq_len = 1 + (i % max_seq_len) 17 | elif curriculum == 'uniform': 18 | seq_len = np.random.randint(low=1, high=max_seq_len+1) 19 | elif curriculum == 'none': 20 | seq_len = max_seq_len 21 
| elif curriculum in ('naive', 'prediction_gain'): 22 | seq_len = curriculum_point 23 | elif curriculum == 'look_back': 24 | seq_len = curriculum_point if np.random.random_sample() < 0.9 else np.random.randint(low=1, high=curriculum_point+1) 25 | elif curriculum == 'look_back_and_forward': 26 | seq_len = curriculum_point if np.random.random_sample() < 0.8 else np.random.randint(low=1, high=max_seq_len+1) 27 | 28 | pad_to_len = max_seq_len if pad_to_max_seq_len else seq_len 29 | 30 | def generate_sequence(): 31 | return np.asarray([snap_boolean(np.append(np.random.rand(bits_per_vector), 0)) for _ in range(seq_len)] \ 32 | + [np.zeros(bits_per_vector+1) for _ in range(pad_to_len - seq_len)]) 33 | 34 | inputs = np.asarray([generate_sequence() for _ in range(batch_size)]).astype(np.float32) 35 | eos = np.ones([batch_size, 1, bits_per_vector + 1]) 36 | output_inputs = np.zeros_like(inputs) 37 | 38 | full_inputs = np.concatenate((inputs, eos, output_inputs), axis=1) 39 | 40 | batches.append((pad_to_len, full_inputs, inputs[:, :, :bits_per_vector])) 41 | return batches 42 | 43 | def error_per_seq(self, labels, outputs, num_seq): 44 | outputs[outputs >= 0.5] = 1.0 45 | outputs[outputs < 0.5] = 0.0 46 | bit_errors = np.sum(np.abs(labels - outputs)) 47 | return bit_errors/num_seq 48 | 49 | class RepeatCopyTaskData: 50 | def __init__(self, max_seq_len, max_repeats): 51 | self.max_seq_len = max_seq_len 52 | self.max_repeats = max_repeats 53 | 54 | def _normalize_num_repeats(self, n): 55 | return float(n)/self.max_repeats 56 | 57 | def generate_batches(self, num_batches, batch_size, bits_per_vector=8, curriculum_point=20, max_seq_len=20, 58 | curriculum='uniform', pad_to_max_seq_len=False): 59 | batches = [] 60 | for i in range(num_batches): 61 | if curriculum == 'deterministic_uniform': 62 | seq_len = 1 + (i % self.max_seq_len) 63 | num_repeats = 1 + (i % self.max_repeats) 64 | elif curriculum == 'uniform': 65 | seq_len = np.random.randint(low=1, high=self.max_seq_len+1) 66 | num_repeats = np.random.randint(low=1, high=self.max_repeats+1) 67 | elif curriculum == 'none': 68 | seq_len = self.max_seq_len 69 | num_repeats = self.max_repeats 70 | elif curriculum in ('naive', 'prediction_gain_bandit', 'prediction_gain_teacher'): 71 | seq_len, num_repeats = curriculum_point 72 | elif curriculum == 'look_back': 73 | seq_len = curriculum_point[0] if np.random.random_sample() < 0.9 else np.random.randint(low=1, high=curriculum_point[0]+1) 74 | num_repeats = curriculum_point[1] if np.random.random_sample() < 0.9 else np.random.randint(low=1, high=curriculum_point[1]+1) 75 | elif curriculum == 'look_back_and_forward': 76 | seq_len = curriculum_point[0] if np.random.random_sample() < 0.8 else np.random.randint(low=1, high=self.max_seq_len+1) 77 | num_repeats = curriculum_point[1] if np.random.random_sample() < 0.8 else np.random.randint(low=1, high=self.max_repeats+1) 78 | 79 | self.seq_len = seq_len 80 | self.num_repeats = num_repeats 81 | 82 | pad_to_len = self.max_seq_len if pad_to_max_seq_len else seq_len 83 | 84 | def generate_sequence(): 85 | return np.asarray([snap_boolean(np.append(np.random.rand(bits_per_vector), (0,0))) for _ in range(seq_len)] \ 86 | + [np.zeros(bits_per_vector+2) for _ in range(pad_to_len - seq_len)]) 87 | 88 | inputs = np.asarray([generate_sequence() for _ in range(batch_size)]).astype(np.float32) 89 | 90 | eos = np.ones([batch_size, 1, bits_per_vector + 2]) 91 | eos[:, :, bits_per_vector+1] = self._normalize_num_repeats(num_repeats) 92 | output_inputs = np.tile(np.zeros_like(inputs), 
(1, num_repeats, 1)) 93 | 94 | full_inputs = np.concatenate((inputs, eos, output_inputs), axis=1) 95 | 96 | targets = np.tile(inputs[:, :, :bits_per_vector], (1, num_repeats, 1)) 97 | 98 | batches.append((pad_to_len, full_inputs, targets)) 99 | return batches 100 | 101 | def error_per_seq(self, labels, outputs, num_seq): 102 | outputs[outputs >= 0.5] = 1.0 103 | outputs[outputs < 0.5] = 0.0 104 | bit_errors = np.sum(np.abs(labels - outputs)) 105 | return bit_errors/num_seq 106 | 107 | def last_seq_len(self): 108 | return self.seq_len 109 | 110 | class AssociativeRecallData: 111 | def generate_batches(self, num_batches, batch_size, bits_per_vector=6, curriculum_point=6, max_seq_len=6, 112 | curriculum='uniform', pad_to_max_seq_len=False): 113 | NUM_VECTORS_PER_ITEM = 3 114 | batches = [] 115 | for i in range(num_batches): 116 | if curriculum == 'deterministic_uniform': 117 | seq_len = 2 + (i % max_seq_len) 118 | elif curriculum == 'uniform': 119 | seq_len = np.random.randint(low=2, high=max_seq_len+1) 120 | elif curriculum == 'none': 121 | seq_len = max_seq_len 122 | elif curriculum in ('naive', 'prediction_gain_bandit', 'prediction_gain_teacher'): 123 | seq_len = curriculum_point 124 | elif curriculum == 'look_back': 125 | seq_len = curriculum_point if np.random.random_sample() < 0.9 else np.random.randint(low=2, high=curriculum_point+1) 126 | elif curriculum == 'look_back_and_forward': 127 | seq_len = curriculum_point if np.random.random_sample() < 0.8 else np.random.randint(low=2, high=max_seq_len+1) 128 | 129 | self.seq_len = seq_len 130 | 131 | pad_to_len = max_seq_len if pad_to_max_seq_len else seq_len 132 | 133 | def generate_item(seq_len): 134 | items = [[snap_boolean(np.append(np.random.rand(bits_per_vector), 0)) for _ in range(NUM_VECTORS_PER_ITEM)] for _ in range(seq_len)] 135 | 136 | # pick a query item uniformly among all but the last item 137 | query_item_num = np.random.randint(low=0, high=seq_len-1) 138 | query_item = items[query_item_num] 139 | output_item = items[query_item_num+1] 140 | 141 | inputs = [sub_item for item in items for sub_item in item] 142 | 143 | return inputs, query_item, map(lambda sub_item: sub_item[:bits_per_vector], output_item) 144 | 145 | batch_inputs = [] 146 | batch_queries = [] 147 | batch_outputs = [] 148 | for _ in range(batch_size): 149 | inputs, query_item, output_item = generate_item(seq_len) 150 | batch_inputs.append(inputs) 151 | batch_queries.append(query_item) 152 | batch_outputs.append(output_item) 153 | 154 | batch_inputs = np.asarray(batch_inputs).astype(np.float32) 155 | batch_queries = np.asarray(batch_queries).astype(np.float32) 156 | batch_outputs = np.asarray(batch_outputs).astype(np.float32) 157 | eos = np.ones([batch_size, 1, bits_per_vector + 1]) 158 | eos[:, :, bits_per_vector] = 1 159 | output_inputs = np.zeros([batch_size, NUM_VECTORS_PER_ITEM, bits_per_vector + 1]) 160 | 161 | if pad_to_max_seq_len: 162 | full_inputs = np.concatenate((batch_inputs, eos, batch_queries, eos, np.zeros([batch_size, pad_to_len - seq_len, bits_per_vector + 1]), output_inputs), axis=1) 163 | else: 164 | full_inputs = np.concatenate((batch_inputs, eos, batch_queries, eos, output_inputs), axis=1) 165 | 166 | batches.append((pad_to_len, full_inputs, batch_outputs)) 167 | return batches 168 | 169 | def error_per_seq(self, labels, outputs, num_seq): 170 | outputs[outputs >= 0.5] = 1.0 171 | outputs[outputs < 0.5] = 0.0 172 | bit_errors = np.sum(np.abs(labels - outputs)) 173 | return bit_errors/num_seq 174 | 175 | def last_seq_len(self): 176 | return self.seq_len 176 | 177 | def
graph_label_to_one_hot(label): 178 | res = np.zeros(30) 179 | 180 | if label == -1: 181 | return res 182 | 183 | hundreds = label/100 184 | tens = (label % 100)/10 185 | singles = (label % 100) % 10 186 | 187 | res[hundreds] = 1 188 | res[tens + 10] = 1 189 | res[singles + 20] = 1 190 | return res 191 | 192 | def label_from_vectors(vectors): 193 | return map(lambda batch: np.argmax(batch, axis=-1), np.split(vectors, 9, axis=-1)) 194 | 195 | def generate_random_graph(num_nodes, k): 196 | node_labels = 1 + np.random.choice(999, num_nodes, replace=False) 197 | edge_label_candidates = 1 + np.random.choice(999, num_nodes, replace=False) 198 | tree = spatial.KDTree(np.random.uniform(size=(num_nodes, 2))) 199 | 200 | graph = {} 201 | graph_des_vectors = [] 202 | for node_idx in range(num_nodes): 203 | node_k = k[node_idx] 204 | _, indexes = tree.query(tree.data[node_idx], k=node_k+1) 205 | indexes = indexes[1:] 206 | connected_nodes = map(lambda idx: node_labels[idx], indexes) 207 | edge_labels = np.random.choice(edge_label_candidates, node_k, replace=False) 208 | 209 | graph[node_labels[node_idx]] = zip(connected_nodes, edge_labels) 210 | 211 | for connected_node, edge_label in zip(connected_nodes, edge_labels): 212 | graph_des_vectors.append( 213 | np.concatenate( 214 | (graph_label_to_one_hot(node_labels[node_idx]), 215 | graph_label_to_one_hot(edge_label), 216 | graph_label_to_one_hot(connected_node), 217 | np.zeros(2)) 218 | )) 219 | 220 | return graph, graph_des_vectors 221 | 222 | def generate_random_graphs(num_graphs, node_range=(3,10), out_degree=(2,4)): 223 | num_nodes = np.random.randint(low=node_range[0], high=node_range[1]+1) 224 | k = np.random.randint(low=out_degree[0], high=min(num_nodes-1, out_degree[1])+1, size=num_nodes) 225 | 226 | return [generate_random_graph(num_nodes, k) for _ in range(num_graphs)] 227 | 228 | class TraversalData: 229 | def __init__(self): 230 | self.lessons = [ 231 | ((3,10), (2,4), (1,1)), 232 | ((3,10), (2,4), (1,2)), 233 | ((5,10), (2,4), (1,3)), 234 | ((5,10), (2,4), (1,4)), 235 | ((10,15), (2,4), (1,4)), 236 | ((10,15), (2,4), (1,5)), 237 | ((10,20), (2,4), (1,5)), 238 | ((10,20), (2,4), (1,6)), 239 | ((10,30), (2,4), (1,6)), 240 | ((10,30), (2,4), (1,7)), 241 | ((10,30), (2,4), (1,8)), 242 | ((10,30), (2,4), (1,9)), 243 | ((10,40), (2,6), (1,10)), 244 | ((10,40), (2,6), (1,20)) 245 | ] 246 | self.num_lessons = len(self.lessons) 247 | self.termination_pattern = np.concatenate(( 248 | graph_label_to_one_hot(0), 249 | graph_label_to_one_hot(0), 250 | graph_label_to_one_hot(0) 251 | )) 252 | 253 | def generate_item(self, graph, graph_des_vectors, path_length): 254 | random.shuffle(graph_des_vectors) 255 | 256 | path = [] 257 | cur_node = random.choice(graph.keys()) 258 | for _ in range(path_length): 259 | next_node, edge_label = random.choice(graph[cur_node]) 260 | path.append((cur_node, next_node, edge_label)) 261 | cur_node = next_node 262 | 263 | outputs = map(lambda t: 264 | np.concatenate(( 265 | graph_label_to_one_hot(t[0]), 266 | graph_label_to_one_hot(t[1]), 267 | graph_label_to_one_hot(t[2]) 268 | )), 269 | path) 270 | 271 | first_query = np.concatenate(( 272 | graph_label_to_one_hot(path[0][0]), 273 | graph_label_to_one_hot(-1), 274 | graph_label_to_one_hot(path[0][2]), 275 | [1, 0] 276 | )) 277 | 278 | other_queries = map(lambda t: 279 | np.concatenate(( 280 | graph_label_to_one_hot(-1), 281 | graph_label_to_one_hot(-1), 282 | graph_label_to_one_hot(t[2]), 283 | [1, 0] 284 | )), 285 | path[1:]) 286 | 287 | query = [first_query] + 
other_queries 288 | 289 | output_inputs = [] 290 | for _ in range(len(outputs)): 291 | res = np.zeros(92) 292 | res[-1] = 1 293 | output_inputs.append(res) 294 | 295 | inputs = graph_des_vectors + query + output_inputs 296 | 297 | return inputs, outputs 298 | 299 | def generate_items(self, num_items, node_range, out_degree, path_length): 300 | batch_inputs = [] 301 | batch_outputs = [] 302 | 303 | path_length = np.random.randint(low=path_length[0], high=path_length[1]+1) 304 | for graph, graph_des_vectors in generate_random_graphs(num_items, node_range, out_degree): 305 | inputs, outputs = self.generate_item(graph, graph_des_vectors, path_length) 306 | batch_inputs.append(inputs) 307 | batch_outputs.append(outputs) 308 | 309 | return batch_inputs, batch_outputs 310 | 311 | def generate_batches(self, num_batches, batch_size, curriculum_point=1, curriculum='uniform', 312 | bits_per_vector=None, max_seq_len=None, pad_to_max_seq_len=None): 313 | batches = [] 314 | for i in range(num_batches): 315 | if curriculum == 'deterministic_uniform': 316 | lesson = ((i + 1) % self.num_lessons) 317 | elif curriculum == 'uniform': 318 | lesson = np.random.randint(low=1, high=self.num_lessons+1) 319 | elif curriculum == 'none': 320 | lesson = self.num_lessons 321 | elif curriculum in ('naive', 'prediction_gain_bandit', 'prediction_gain_teacher'): 322 | lesson = curriculum_point 323 | elif curriculum == 'look_back': 324 | lesson = curriculum_point if np.random.random_sample() < 0.9 else np.random.randint(low=1, high=curriculum_point+1) 325 | elif curriculum == 'look_back_and_forward': 326 | lesson = curriculum_point if np.random.random_sample() < 0.8 else np.random.randint(low=1, high=self.num_lessons+1) 327 | 328 | batch_inputs, batch_outputs = self.generate_items(batch_size, *self.lessons[lesson-1]) 329 | 330 | batch_outputs = map(lambda outputs: outputs + [self.termination_pattern], batch_outputs) # eos 331 | max_output_len = max(map(len, batch_outputs)) 332 | 333 | batch_inputs = np.asarray(batch_inputs).astype(np.float32) 334 | batch_outputs = np.asarray(batch_outputs).astype(np.float32) 335 | 336 | batches.append((max_output_len, batch_inputs, batch_outputs)) 337 | 338 | return batches 339 | 340 | def error_per_seq(self, labels, outputs, num_seq): 341 | seq_len = labels.shape[1] 342 | 343 | target_digits = label_from_vectors(labels) 344 | pred_digits = label_from_vectors(outputs) 345 | 346 | def create_labels(digits): 347 | labels = [] 348 | for i in range(num_seq): 349 | tmp = [] 350 | for j in range(seq_len): 351 | tmp.append(( 352 | digits[0][i][j], 353 | digits[1][i][j], 354 | digits[2][i][j] 355 | )) 356 | labels.append(tmp) 357 | return labels 358 | 359 | target_source_digits, pred_source_digits = create_labels(target_digits[:3]), create_labels(pred_digits[:3]) 360 | target_dest_digits, pred_dest_digits = create_labels(target_digits[3:6]), create_labels(pred_digits[3:6]) 361 | target_edge_digits, pred_edge_digits = create_labels(target_digits[6:9]), create_labels(pred_digits[6:9]) 362 | 363 | errors = 0 364 | for i in range(num_seq): 365 | source_digit_same = all(map(lambda t: t[0] == t[1], zip(target_source_digits[i], pred_source_digits[i]))) 366 | if not source_digit_same: 367 | errors += 1 368 | continue 369 | target_digit_same = all(map(lambda t: t[0] == t[1], zip(target_dest_digits[i], pred_dest_digits[i]))) 370 | if not target_digit_same: 371 | errors += 1 372 | continue 373 | target_edge_same = all(map(lambda t: t[0] == t[1], zip(target_edge_digits[i], pred_edge_digits[i]))) 374 | if not 
target_edge_same: 375 | errors += 1 376 | continue 377 | 378 | return errors/float(num_seq) 379 | 380 | -------------------------------------------------------------------------------- /img/associative_recall_ntm_12_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarkPKCollier/NeuralTuringMachine/5644e3be24469a3a0e9ffab0ea7c1922860e9223/img/associative_recall_ntm_12_0.png -------------------------------------------------------------------------------- /img/associative_recall_ntm_6_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarkPKCollier/NeuralTuringMachine/5644e3be24469a3a0e9ffab0ea7c1922860e9223/img/associative_recall_ntm_6_0.png -------------------------------------------------------------------------------- /img/copy_ntm_20_0.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarkPKCollier/NeuralTuringMachine/5644e3be24469a3a0e9ffab0ea7c1922860e9223/img/copy_ntm_20_0.png -------------------------------------------------------------------------------- /img/copy_ntm_40_1.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarkPKCollier/NeuralTuringMachine/5644e3be24469a3a0e9ffab0ea7c1922860e9223/img/copy_ntm_40_1.png -------------------------------------------------------------------------------- /img/ntm_copy_read_head.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarkPKCollier/NeuralTuringMachine/5644e3be24469a3a0e9ffab0ea7c1922860e9223/img/ntm_copy_read_head.png -------------------------------------------------------------------------------- /img/ntm_copy_write_head.png: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/MarkPKCollier/NeuralTuringMachine/5644e3be24469a3a0e9ffab0ea7c1922860e9223/img/ntm_copy_write_head.png -------------------------------------------------------------------------------- /ntm.py: -------------------------------------------------------------------------------- 1 | # credit: this code is derived from https://github.com/snowkylin/ntm 2 | # the major changes made are to make this compatible with the abstract class tf.contrib.rnn.RNNCell 3 | # an LSTM controller is used instead of a RNN controller 4 | # 3 memory inititialization schemes are offered instead of 1 5 | # the outputs of the controller heads are clipped to an absolute value 6 | # we find that our modification result in more reliable training (we never observe gradients going to NaN) and faster convergence 7 | 8 | import numpy as np 9 | import tensorflow as tf 10 | from tensorflow.python.util import nest 11 | import collections 12 | from utils import expand, learned_init, create_linear_initializer 13 | 14 | NTMControllerState = collections.namedtuple('NTMControllerState', ('controller_state', 'read_vector_list', 'w_list', 'M')) 15 | 16 | class NTMCell(tf.contrib.rnn.RNNCell): 17 | def __init__(self, controller_layers, controller_units, memory_size, memory_vector_dim, read_head_num, write_head_num, 18 | addressing_mode='content_and_location', shift_range=1, reuse=False, output_dim=None, clip_value=20, 19 | init_mode='constant'): 20 | self.controller_layers = controller_layers 21 | self.controller_units = controller_units 22 | self.memory_size = memory_size 23 | 
self.memory_vector_dim = memory_vector_dim 24 | self.read_head_num = read_head_num 25 | self.write_head_num = write_head_num 26 | self.addressing_mode = addressing_mode 27 | self.reuse = reuse 28 | self.clip_value = clip_value 29 | 30 | def single_cell(num_units): 31 | return tf.contrib.rnn.BasicLSTMCell(num_units, forget_bias=1.0) 32 | 33 | self.controller = tf.contrib.rnn.MultiRNNCell([single_cell(self.controller_units) for _ in range(self.controller_layers)]) 34 | 35 | self.init_mode = init_mode 36 | 37 | self.step = 0 38 | self.output_dim = output_dim 39 | self.shift_range = shift_range 40 | 41 | self.o2p_initializer = create_linear_initializer(self.controller_units) 42 | self.o2o_initializer = create_linear_initializer(self.controller_units + self.memory_vector_dim * self.read_head_num) 43 | 44 | def __call__(self, x, prev_state): 45 | prev_read_vector_list = prev_state.read_vector_list 46 | 47 | controller_input = tf.concat([x] + prev_read_vector_list, axis=1) 48 | with tf.variable_scope('controller', reuse=self.reuse): 49 | controller_output, controller_state = self.controller(controller_input, prev_state.controller_state) 50 | 51 | num_parameters_per_head = self.memory_vector_dim + 1 + 1 + (self.shift_range * 2 + 1) + 1 52 | num_heads = self.read_head_num + self.write_head_num 53 | total_parameter_num = num_parameters_per_head * num_heads + self.memory_vector_dim * 2 * self.write_head_num 54 | with tf.variable_scope("o2p", reuse=(self.step > 0) or self.reuse): 55 | parameters = tf.contrib.layers.fully_connected( 56 | controller_output, total_parameter_num, activation_fn=None, 57 | weights_initializer=self.o2p_initializer) 58 | parameters = tf.clip_by_value(parameters, -self.clip_value, self.clip_value) 59 | head_parameter_list = tf.split(parameters[:, :num_parameters_per_head * num_heads], num_heads, axis=1) 60 | erase_add_list = tf.split(parameters[:, num_parameters_per_head * num_heads:], 2 * self.write_head_num, axis=1) 61 | 62 | prev_w_list = prev_state.w_list 63 | prev_M = prev_state.M 64 | w_list = [] 65 | for i, head_parameter in enumerate(head_parameter_list): 66 | k = tf.tanh(head_parameter[:, 0:self.memory_vector_dim]) 67 | beta = tf.nn.softplus(head_parameter[:, self.memory_vector_dim]) 68 | g = tf.sigmoid(head_parameter[:, self.memory_vector_dim + 1]) 69 | s = tf.nn.softmax( 70 | head_parameter[:, self.memory_vector_dim + 2:self.memory_vector_dim + 2 + (self.shift_range * 2 + 1)] 71 | ) 72 | gamma = tf.nn.softplus(head_parameter[:, -1]) + 1 73 | with tf.variable_scope('addressing_head_%d' % i): 74 | w = self.addressing(k, beta, g, s, gamma, prev_M, prev_w_list[i]) 75 | w_list.append(w) 76 | 77 | # Reading (Sec 3.1) 78 | 79 | read_w_list = w_list[:self.read_head_num] 80 | read_vector_list = [] 81 | for i in range(self.read_head_num): 82 | read_vector = tf.reduce_sum(tf.expand_dims(read_w_list[i], dim=2) * prev_M, axis=1) 83 | read_vector_list.append(read_vector) 84 | 85 | # Writing (Sec 3.2) 86 | 87 | write_w_list = w_list[self.read_head_num:] 88 | M = prev_M 89 | for i in range(self.write_head_num): 90 | w = tf.expand_dims(write_w_list[i], axis=2) 91 | erase_vector = tf.expand_dims(tf.sigmoid(erase_add_list[i * 2]), axis=1) 92 | add_vector = tf.expand_dims(tf.tanh(erase_add_list[i * 2 + 1]), axis=1) 93 | M = M * (tf.ones(M.get_shape()) - tf.matmul(w, erase_vector)) + tf.matmul(w, add_vector) 94 | 95 | if not self.output_dim: 96 | output_dim = x.get_shape()[1] 97 | else: 98 | output_dim = self.output_dim 99 | with tf.variable_scope("o2o", reuse=(self.step > 0) or 
self.reuse): 100 | NTM_output = tf.contrib.layers.fully_connected( 101 | tf.concat([controller_output] + read_vector_list, axis=1), output_dim, activation_fn=None, 102 | weights_initializer=self.o2o_initializer) 103 | NTM_output = tf.clip_by_value(NTM_output, -self.clip_value, self.clip_value) 104 | 105 | self.step += 1 106 | return NTM_output, NTMControllerState( 107 | controller_state=controller_state, read_vector_list=read_vector_list, w_list=w_list, M=M) 108 | 109 | def addressing(self, k, beta, g, s, gamma, prev_M, prev_w): 110 | 111 | # Sec 3.3.1 Focusing by Content 112 | 113 | # Cosine Similarity 114 | 115 | k = tf.expand_dims(k, axis=2) 116 | inner_product = tf.matmul(prev_M, k) 117 | k_norm = tf.sqrt(tf.reduce_sum(tf.square(k), axis=1, keep_dims=True)) 118 | M_norm = tf.sqrt(tf.reduce_sum(tf.square(prev_M), axis=2, keep_dims=True)) 119 | norm_product = M_norm * k_norm 120 | K = tf.squeeze(inner_product / (norm_product + 1e-8)) # eq (6) 121 | 122 | # Calculating w^c 123 | 124 | K_amplified = tf.exp(tf.expand_dims(beta, axis=1) * K) 125 | w_c = K_amplified / tf.reduce_sum(K_amplified, axis=1, keep_dims=True) # eq (5) 126 | 127 | if self.addressing_mode == 'content': # Only focus on content 128 | return w_c 129 | 130 | # Sec 3.3.2 Focusing by Location 131 | 132 | g = tf.expand_dims(g, axis=1) 133 | w_g = g * w_c + (1 - g) * prev_w # eq (7) 134 | 135 | s = tf.concat([s[:, :self.shift_range + 1], 136 | tf.zeros([s.get_shape()[0], self.memory_size - (self.shift_range * 2 + 1)]), 137 | s[:, -self.shift_range:]], axis=1) 138 | t = tf.concat([tf.reverse(s, axis=[1]), tf.reverse(s, axis=[1])], axis=1) 139 | s_matrix = tf.stack( 140 | [t[:, self.memory_size - i - 1:self.memory_size * 2 - i - 1] for i in range(self.memory_size)], 141 | axis=1 142 | ) 143 | w_ = tf.reduce_sum(tf.expand_dims(w_g, axis=1) * s_matrix, axis=2) # eq (8) 144 | w_sharpen = tf.pow(w_, tf.expand_dims(gamma, axis=1)) 145 | w = w_sharpen / tf.reduce_sum(w_sharpen, axis=1, keep_dims=True) # eq (9) 146 | 147 | return w 148 | 149 | def zero_state(self, batch_size, dtype): 150 | with tf.variable_scope('init', reuse=self.reuse): 151 | read_vector_list = [expand(tf.tanh(learned_init(self.memory_vector_dim)), dim=0, N=batch_size) 152 | for i in range(self.read_head_num)] 153 | 154 | w_list = [expand(tf.nn.softmax(learned_init(self.memory_size)), dim=0, N=batch_size) 155 | for i in range(self.read_head_num + self.write_head_num)] 156 | 157 | controller_init_state = self.controller.zero_state(batch_size, dtype) 158 | 159 | if self.init_mode == 'learned': 160 | M = expand(tf.tanh( 161 | tf.reshape( 162 | learned_init(self.memory_size * self.memory_vector_dim), 163 | [self.memory_size, self.memory_vector_dim]) 164 | ), dim=0, N=batch_size) 165 | elif self.init_mode == 'random': 166 | M = expand( 167 | tf.tanh(tf.get_variable('init_M', [self.memory_size, self.memory_vector_dim], 168 | initializer=tf.random_normal_initializer(mean=0.0, stddev=0.5))), 169 | dim=0, N=batch_size) 170 | elif self.init_mode == 'constant': 171 | M = expand( 172 | tf.get_variable('init_M', [self.memory_size, self.memory_vector_dim], 173 | initializer=tf.constant_initializer(1e-6)), 174 | dim=0, N=batch_size) 175 | 176 | return NTMControllerState( 177 | controller_state=controller_init_state, 178 | read_vector_list=read_vector_list, 179 | w_list=w_list, 180 | M=M) 181 | 182 | @property 183 | def state_size(self): 184 | return NTMControllerState( 185 | controller_state=self.controller.state_size, 186 | read_vector_list=[self.memory_vector_dim for _ in 
range(self.read_head_num)], 187 | w_list=[self.memory_size for _ in range(self.read_head_num + self.write_head_num)], 188 | M=tf.TensorShape([self.memory_size * self.memory_vector_dim])) 189 | 190 | @property 191 | def output_size(self): 192 | return self.output_dim 193 | -------------------------------------------------------------------------------- /produce_heat_maps.py: -------------------------------------------------------------------------------- 1 | import matplotlib 2 | matplotlib.use('macosx') 3 | import pickle 4 | import seaborn 5 | import matplotlib.pyplot as plt 6 | 7 | EXPERIMENT_NAME = 'copy_ntm' 8 | MANN = 'NTM' 9 | TASK = 'Copy' 10 | 11 | HEAD_LOG_FILE = 'head_logs/{0}.p'.format(EXPERIMENT_NAME) 12 | GENERALIZATION_HEAD_LOG_FILE = 'head_logs/generalization_{0}.p'.format(EXPERIMENT_NAME) 13 | 14 | outputs = pickle.load(open(HEAD_LOG_FILE, "rb")) 15 | outputs.update(pickle.load(open(GENERALIZATION_HEAD_LOG_FILE, "rb"))) 16 | 17 | def plot_figures(figures, nrows=1, ncols=1, width_ratios=None): 18 | fig, axeslist = plt.subplots(ncols=ncols, nrows=nrows, gridspec_kw={'width_ratios': width_ratios}) 19 | 20 | for ind, (title, fig) in enumerate(figures): 21 | axeslist.ravel()[ind].imshow(fig, cmap='gray', interpolation='nearest') 22 | axeslist.ravel()[ind].set_title(title) 23 | if TASK != 'Associative Recall' or ind == 0: 24 | axeslist.ravel()[ind].set_xlabel('Time ------->') 25 | 26 | if TASK == 'Associative Recall': 27 | plt.sca(axeslist[1]) 28 | plt.xticks([0, 1, 2]) 29 | plt.sca(axeslist[2]) 30 | plt.xticks([0, 1, 2]) 31 | 32 | if TASK == 'Copy': 33 | plt.sca(axeslist[1]) 34 | plt.yticks([]) 35 | 36 | plt.tight_layout() 37 | 38 | for seq_len, heat_maps_list in outputs.iteritems(): 39 | for step, heat_maps in enumerate(heat_maps_list[-2:] if len(heat_maps_list) >= 2 else heat_maps_list): 40 | inputs = heat_maps['inputs'].T 41 | labels = heat_maps['labels'].T 42 | outputs = heat_maps['outputs'].T 43 | 44 | if TASK == 'Copy': 45 | plot_figures([('{0} - {1} - Inputs'.format(MANN, TASK), inputs), ('Outputs', outputs)], 1, 2, width_ratios=[2, 1.1]) 46 | plt.savefig('head_logs/img/{0}_{1}_{2}'.format(EXPERIMENT_NAME, seq_len, step), bbox_inches='tight') 47 | plt.close() 48 | elif TASK == 'Associative Recall': 49 | plot_figures([('{0} - {1} - Inputs'.format(MANN, TASK), inputs), ('Labels', labels), ('Outputs', outputs)], 1, 3, width_ratios=[seq_len+2, 1, 1]) 50 | plt.savefig('head_logs/img/{0}_{1}_{2}'.format(EXPERIMENT_NAME, seq_len, step), bbox_inches='tight') 51 | plt.close() 52 | -------------------------------------------------------------------------------- /run_tasks.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | from generate_data import CopyTaskData, AssociativeRecallData 4 | from utils import expand, learned_init 5 | from exp3S import Exp3S 6 | 7 | import logging 8 | logging.basicConfig(level=logging.INFO) 9 | logger = logging.getLogger(__name__) 10 | 11 | import argparse 12 | 13 | parser = argparse.ArgumentParser() 14 | 15 | def str2bool(v): 16 | return v.lower() in ("yes", "true", "t", "1") 17 | 18 | parser.add_argument('--mann', type=str, default='ntm', help='none | ntm') 19 | parser.add_argument('--num_layers', type=int, default=1) 20 | parser.add_argument('--num_units', type=int, default=100) 21 | parser.add_argument('--num_memory_locations', type=int, default=128) 22 | parser.add_argument('--memory_size', type=int, default=20) 23 | parser.add_argument('--num_read_heads', 
type=int, default=1) 24 | parser.add_argument('--num_write_heads', type=int, default=1) 25 | parser.add_argument('--conv_shift_range', type=int, default=1, help='only necessary for ntm') 26 | parser.add_argument('--clip_value', type=int, default=20, help='Maximum absolute value of controller and outputs.') 27 | parser.add_argument('--init_mode', type=str, default='learned', help='learned | constant | random') 28 | 29 | parser.add_argument('--optimizer', type=str, default='Adam', help='RMSProp | Adam') 30 | parser.add_argument('--learning_rate', type=float, default=0.001) 31 | parser.add_argument('--max_grad_norm', type=float, default=50) 32 | parser.add_argument('--num_train_steps', type=int, default=31250) 33 | parser.add_argument('--batch_size', type=int, default=32) 34 | parser.add_argument('--eval_batch_size', type=int, default=640) 35 | 36 | parser.add_argument('--curriculum', type=str, default='none', help='none | uniform | naive | look_back | look_back_and_forward | prediction_gain') 37 | parser.add_argument('--pad_to_max_seq_len', type=str2bool, default=False) 38 | 39 | parser.add_argument('--task', type=str, default='copy', help='copy | associative_recall') 40 | parser.add_argument('--num_bits_per_vector', type=int, default=8) 41 | parser.add_argument('--max_seq_len', type=int, default=20) 42 | 43 | parser.add_argument('--verbose', type=str2bool, default=True, help='if true prints lots of feedback') 44 | parser.add_argument('--experiment_name', type=str, required=True) 45 | parser.add_argument('--job-dir', type=str, required=False) 46 | parser.add_argument('--steps_per_eval', type=int, default=200) 47 | parser.add_argument('--use_local_impl', type=str2bool, default=True, help='whether to use the repos local NTM implementation or the TF contrib version') 48 | 49 | args = parser.parse_args() 50 | 51 | if args.mann == 'ntm': 52 | if args.use_local_impl: 53 | from ntm import NTMCell 54 | else: 55 | from tensorflow.contrib.rnn.python.ops.rnn_cell import NTMCell 56 | 57 | if args.verbose: 58 | import pickle 59 | HEAD_LOG_FILE = 'head_logs/{0}.p'.format(args.experiment_name) 60 | GENERALIZATION_HEAD_LOG_FILE = 'head_logs/generalization_{0}.p'.format(args.experiment_name) 61 | 62 | class BuildModel(object): 63 | def __init__(self, max_seq_len, inputs): 64 | self.max_seq_len = max_seq_len 65 | self.inputs = inputs 66 | self._build_model() 67 | 68 | def _build_model(self): 69 | if args.mann == 'none': 70 | def single_cell(num_units): 71 | return tf.contrib.rnn.BasicLSTMCell(num_units, forget_bias=1.0) 72 | 73 | cell = tf.contrib.rnn.OutputProjectionWrapper( 74 | tf.contrib.rnn.MultiRNNCell([single_cell(args.num_units) for _ in range(args.num_layers)]), 75 | args.num_bits_per_vector, 76 | activation=None) 77 | 78 | initial_state = tuple(tf.contrib.rnn.LSTMStateTuple( 79 | c=expand(tf.tanh(learned_init(args.num_units)), dim=0, N=args.batch_size), 80 | h=expand(tf.tanh(learned_init(args.num_units)), dim=0, N=args.batch_size)) 81 | for _ in range(args.num_layers)) 82 | 83 | elif args.mann == 'ntm': 84 | if args.use_local_impl: 85 | cell = NTMCell(args.num_layers, args.num_units, args.num_memory_locations, args.memory_size, 86 | args.num_read_heads, args.num_write_heads, addressing_mode='content_and_location', 87 | shift_range=args.conv_shift_range, reuse=False, output_dim=args.num_bits_per_vector, 88 | clip_value=args.clip_value, init_mode=args.init_mode) 89 | else: 90 | def single_cell(num_units): 91 | return tf.contrib.rnn.BasicLSTMCell(num_units, forget_bias=1.0) 92 | 93 | controller = 
tf.contrib.rnn.MultiRNNCell( 94 | [single_cell(args.num_units) for _ in range(args.num_layers)]) 95 | 96 | cell = NTMCell(controller, args.num_memory_locations, args.memory_size, 97 | args.num_read_heads, args.num_write_heads, shift_range=args.conv_shift_range, 98 | output_dim=args.num_bits_per_vector, 99 | clip_value=args.clip_value) 100 | 101 | output_sequence, _ = tf.nn.dynamic_rnn( 102 | cell=cell, 103 | inputs=self.inputs, 104 | time_major=False, 105 | dtype=tf.float32, 106 | initial_state=initial_state if args.mann == 'none' else None) 107 | 108 | if args.task == 'copy': 109 | self.output_logits = output_sequence[:, self.max_seq_len+1:, :] 110 | elif args.task == 'associative_recall': 111 | self.output_logits = output_sequence[:, 3*(self.max_seq_len+1)+2:, :] 112 | 113 | if args.task in ('copy', 'associative_recall'): 114 | self.outputs = tf.sigmoid(self.output_logits) 115 | 116 | class BuildTModel(BuildModel): 117 | def __init__(self, max_seq_len, inputs, outputs): 118 | super(BuildTModel, self).__init__(max_seq_len, inputs) 119 | 120 | if args.task in ('copy', 'associative_recall'): 121 | cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(labels=outputs, logits=self.output_logits) 122 | self.loss = tf.reduce_sum(cross_entropy)/args.batch_size 123 | 124 | if args.optimizer == 'RMSProp': 125 | optimizer = tf.train.RMSPropOptimizer(args.learning_rate, momentum=0.9, decay=0.9) 126 | elif args.optimizer == 'Adam': 127 | optimizer = tf.train.AdamOptimizer(learning_rate=args.learning_rate) 128 | 129 | trainable_variables = tf.trainable_variables() 130 | grads, _ = tf.clip_by_global_norm(tf.gradients(self.loss, trainable_variables), args.max_grad_norm) 131 | self.train_op = optimizer.apply_gradients(zip(grads, trainable_variables)) 132 | 133 | with tf.variable_scope('root'): 134 | max_seq_len_placeholder = tf.placeholder(tf.int32) 135 | inputs_placeholder = tf.placeholder(tf.float32, shape=(args.batch_size, None, args.num_bits_per_vector+1)) 136 | outputs_placeholder = tf.placeholder(tf.float32, shape=(args.batch_size, None, args.num_bits_per_vector)) 137 | model = BuildTModel(max_seq_len_placeholder, inputs_placeholder, outputs_placeholder) 138 | initializer = tf.global_variables_initializer() 139 | 140 | # training 141 | 142 | convergence_on_target_task = None 143 | convergence_on_multi_task = None 144 | performance_on_target_task = None 145 | performance_on_multi_task = None 146 | generalization_from_target_task = None 147 | generalization_from_multi_task = None 148 | if args.task == 'copy': 149 | data_generator = CopyTaskData() 150 | target_point = args.max_seq_len 151 | curriculum_point = 1 if args.curriculum not in ('prediction_gain', 'none') else target_point 152 | progress_error = 1.0 153 | convergence_error = 0.1 154 | 155 | if args.curriculum == 'prediction_gain': 156 | exp3s = Exp3S(args.max_seq_len, 0.001, 0, 0.05) 157 | elif args.task == 'associative_recall': 158 | data_generator = AssociativeRecallData() 159 | target_point = args.max_seq_len 160 | curriculum_point = 2 if args.curriculum not in ('prediction_gain', 'none') else target_point 161 | progress_error = 1.0 162 | convergence_error = 0.1 163 | 164 | if args.curriculum == 'prediction_gain': 165 | exp3s = Exp3S(args.max_seq_len-1, 0.001, 0, 0.05) 166 | 167 | sess = tf.Session() 168 | sess.run(initializer) 169 | 170 | if args.verbose: 171 | pickle.dump({target_point: []}, open(HEAD_LOG_FILE, "wb")) 172 | pickle.dump({}, open(GENERALIZATION_HEAD_LOG_FILE, "wb")) 173 | 174 | def run_eval(batches, store_heat_maps=False, 
generalization_num=None): 175 | task_loss = 0 176 | task_error = 0 177 | num_batches = len(batches) 178 | for seq_len, inputs, labels in batches: 179 | task_loss_, outputs = sess.run([model.loss, model.outputs], 180 | feed_dict={ 181 | inputs_placeholder: inputs, 182 | outputs_placeholder: labels, 183 | max_seq_len_placeholder: seq_len 184 | }) 185 | 186 | task_loss += task_loss_ 187 | task_error += data_generator.error_per_seq(labels, outputs, args.batch_size) 188 | 189 | if store_heat_maps: 190 | if generalization_num is None: 191 | tmp = pickle.load(open(HEAD_LOG_FILE, "rb")) 192 | tmp[target_point].append({ 193 | 'labels': labels[0], 194 | 'outputs': outputs[0], 195 | 'inputs': inputs[0] 196 | }) 197 | pickle.dump(tmp, open(HEAD_LOG_FILE, "wb")) 198 | else: 199 | tmp = pickle.load(open(GENERALIZATION_HEAD_LOG_FILE, "rb")) 200 | if tmp.get(generalization_num) is None: 201 | tmp[generalization_num] = [] 202 | tmp[generalization_num].append({ 203 | 'labels': labels[0], 204 | 'outputs': outputs[0], 205 | 'inputs': inputs[0] 206 | }) 207 | pickle.dump(tmp, open(GENERALIZATION_HEAD_LOG_FILE, "wb")) 208 | 209 | 210 | task_loss /= float(num_batches) 211 | task_error /= float(num_batches) 212 | return task_loss, task_error 213 | 214 | def eval_performance(curriculum_point, store_heat_maps=False): 215 | # target task 216 | batches = data_generator.generate_batches( 217 | int(int(args.eval_batch_size/2)/args.batch_size), 218 | args.batch_size, 219 | bits_per_vector=args.num_bits_per_vector, 220 | curriculum_point=None, 221 | max_seq_len=args.max_seq_len, 222 | curriculum='none', 223 | pad_to_max_seq_len=args.pad_to_max_seq_len 224 | ) 225 | 226 | target_task_loss, target_task_error = run_eval(batches, store_heat_maps=store_heat_maps) 227 | 228 | # multi-task 229 | 230 | batches = data_generator.generate_batches( 231 | int(args.eval_batch_size/args.batch_size), 232 | args.batch_size, 233 | bits_per_vector=args.num_bits_per_vector, 234 | curriculum_point=None, 235 | max_seq_len=args.max_seq_len, 236 | curriculum='deterministic_uniform', 237 | pad_to_max_seq_len=args.pad_to_max_seq_len 238 | ) 239 | 240 | multi_task_loss, multi_task_error = run_eval(batches) 241 | 242 | # curriculum point 243 | if curriculum_point is not None: 244 | batches = data_generator.generate_batches( 245 | int(int(args.eval_batch_size/4)/args.batch_size), 246 | args.batch_size, 247 | bits_per_vector=args.num_bits_per_vector, 248 | curriculum_point=curriculum_point, 249 | max_seq_len=args.max_seq_len, 250 | curriculum='naive', 251 | pad_to_max_seq_len=args.pad_to_max_seq_len 252 | ) 253 | 254 | curriculum_point_loss, curriculum_point_error = run_eval(batches) 255 | else: 256 | curriculum_point_error = curriculum_point_loss = None 257 | 258 | return target_task_error, target_task_loss, multi_task_error, multi_task_loss, curriculum_point_error, curriculum_point_loss 259 | 260 | def eval_generalization(): 261 | res = [] 262 | if args.task == 'copy': 263 | seq_lens = [40, 60, 80, 100, 120] 264 | elif args.task == 'associative_recall': 265 | seq_lens = [7, 8, 9, 10, 11, 12] 266 | 267 | for i in seq_lens: 268 | batches = data_generator.generate_batches( 269 | 6, 270 | args.batch_size, 271 | bits_per_vector=args.num_bits_per_vector, 272 | curriculum_point=i, 273 | max_seq_len=args.max_seq_len, 274 | curriculum='naive', 275 | pad_to_max_seq_len=False 276 | ) 277 | 278 | loss, error = run_eval(batches, store_heat_maps=args.verbose, generalization_num=i) 279 | res.append(error) 280 | return res 281 | 282 | for i in 
range(args.num_train_steps): 283 | if args.curriculum == 'prediction_gain':  # let the Exp3.S bandit choose the next training length 284 | if args.task == 'copy': 285 | task = 1 + exp3s.draw_task() 286 | elif args.task == 'associative_recall': 287 | task = 2 + exp3s.draw_task() 288 | 289 | seq_len, inputs, labels = data_generator.generate_batches( 290 | 1, 291 | args.batch_size, 292 | bits_per_vector=args.num_bits_per_vector, 293 | curriculum_point=curriculum_point if args.curriculum != 'prediction_gain' else task, 294 | max_seq_len=args.max_seq_len, 295 | curriculum=args.curriculum, 296 | pad_to_max_seq_len=args.pad_to_max_seq_len 297 | )[0] 298 | 299 | train_loss, _, outputs = sess.run([model.loss, model.train_op, model.outputs], 300 | feed_dict={ 301 | inputs_placeholder: inputs, 302 | outputs_placeholder: labels, 303 | max_seq_len_placeholder: seq_len 304 | }) 305 | 306 | if args.curriculum == 'prediction_gain': 307 | loss, _ = run_eval([(seq_len, inputs, labels)])  # re-evaluate the same batch after the gradient step 308 | v = train_loss - loss  # prediction gain: loss before the update minus loss after it 309 | exp3s.update_w(v, seq_len)  # reward the bandit with the observed gain 310 | 311 | avg_errors_per_seq = data_generator.error_per_seq(labels, outputs, args.batch_size) 312 | 313 | if args.verbose: 314 | logger.info('Train loss ({0}): {1}'.format(i, train_loss)) 315 | logger.info('curriculum_point: {0}'.format(curriculum_point)) 316 | logger.info('Average errors/sequence: {0}'.format(avg_errors_per_seq)) 317 | logger.info('TRAIN_PARSABLE: {0},{1},{2},{3}'.format(i, curriculum_point, train_loss, avg_errors_per_seq)) 318 | 319 | if i % args.steps_per_eval == 0: 320 | target_task_error, target_task_loss, multi_task_error, multi_task_loss, curriculum_point_error, \ 321 | curriculum_point_loss = eval_performance(curriculum_point if args.curriculum != 'prediction_gain' else None, store_heat_maps=args.verbose) 322 | 323 | if convergence_on_multi_task is None and multi_task_error < convergence_error: 324 | convergence_on_multi_task = i 325 | 326 | if convergence_on_target_task is None and target_task_error < convergence_error: 327 | convergence_on_target_task = i 328 | 329 | gen_evaled = False 330 | if convergence_on_multi_task is not None and (performance_on_multi_task is None or multi_task_error < performance_on_multi_task): 331 | performance_on_multi_task = multi_task_error 332 | generalization_from_multi_task = eval_generalization() 333 | gen_evaled = True 334 | 335 | if convergence_on_target_task is not None and (performance_on_target_task is None or target_task_error < performance_on_target_task): 336 | performance_on_target_task = target_task_error 337 | if gen_evaled: 338 | generalization_from_target_task = generalization_from_multi_task 339 | else: 340 | generalization_from_target_task = eval_generalization() 341 | 342 | if curriculum_point_error is not None and curriculum_point_error < progress_error:  # advance the curriculum once the current point is solved; guard against None when no curriculum point was evaluated 343 | if args.task == 'copy': 344 | curriculum_point = min(target_point, 2 * curriculum_point) 345 | elif args.task == 'associative_recall': 346 | curriculum_point = min(target_point, curriculum_point+1) 347 | 348 | logger.info('----EVAL----') 349 | logger.info('target task error/loss: {0},{1}'.format(target_task_error, target_task_loss)) 350 | logger.info('multi task error/loss: {0},{1}'.format(multi_task_error, multi_task_loss)) 351 | logger.info('curriculum point error/loss ({0}): {1},{2}'.format(curriculum_point, curriculum_point_error, curriculum_point_loss)) 352 | logger.info('EVAL_PARSABLE: {0},{1},{2},{3},{4},{5},{6},{7}'.format(i, target_task_error, target_task_loss, 353 | multi_task_error, multi_task_loss, curriculum_point, curriculum_point_error, curriculum_point_loss)) 354 | 355 | if convergence_on_multi_task is None:
356 | performance_on_multi_task = multi_task_error 357 | generalization_from_multi_task = eval_generalization() 358 | 359 | if convergence_on_target_task is None: 360 | performance_on_target_task = target_task_error 361 | generalization_from_target_task = eval_generalization() 362 | 363 | logger.info('----SUMMARY----') 364 | logger.info('convergence_on_target_task: {0}'.format(convergence_on_target_task)) 365 | logger.info('performance_on_target_task: {0}'.format(performance_on_target_task)) 366 | logger.info('convergence_on_multi_task: {0}'.format(convergence_on_multi_task)) 367 | logger.info('performance_on_multi_task: {0}'.format(performance_on_multi_task)) 368 | 369 | logger.info('SUMMARY_PARSABLE: {0},{1},{2},{3}'.format(convergence_on_target_task, performance_on_target_task, 370 | convergence_on_multi_task, performance_on_multi_task)) 371 | 372 | logger.info('generalization_from_target_task: {0}'.format(','.join(map(str, generalization_from_target_task)) if generalization_from_target_task is not None else None)) 373 | logger.info('generalization_from_multi_task: {0}'.format(','.join(map(str, generalization_from_multi_task)) if generalization_from_multi_task is not None else None)) 374 | 375 | -------------------------------------------------------------------------------- /utils.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | 4 | def expand(x, dim, N):  # stack N copies of x along a new axis at position dim 5 | return tf.concat([tf.expand_dims(x, dim) for _ in range(N)], axis=dim) 6 | 7 | def learned_init(units):  # a trainable vector of length units, produced by a bias-free dense layer on a constant input 8 | return tf.squeeze(tf.contrib.layers.fully_connected(tf.ones([1, 1]), units, 9 | activation_fn=None, biases_initializer=None)) 10 | 11 | def create_linear_initializer(input_size, dtype=tf.float32):  # truncated-normal initializer with stddev 1/sqrt(fan_in) 12 | stddev = 1.0 / np.sqrt(input_size) 13 | return tf.truncated_normal_initializer(stddev=stddev, dtype=dtype) --------------------------------------------------------------------------------
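A minimal sketch of how the two utils.py helpers are typically combined, assuming the usual pattern of building a trainable initial state that is tiled across the batch (the actual call sites are in ntm.py, which is not shown here; batch_size and num_units below are illustrative placeholders):

import tensorflow as tf
from utils import expand, learned_init

batch_size = 32   # illustrative placeholder
num_units = 100   # illustrative placeholder

# learned_init(num_units) yields a trainable vector of shape [num_units];
# expand(..., dim=0, N=batch_size) stacks batch_size copies of it into a
# [batch_size, num_units] tensor, so every sequence in the batch starts
# from the same learned initial state.
init_state = expand(learned_init(num_units), dim=0, N=batch_size)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(init_state).shape)  # (32, 100)

Tiling a single learned vector, rather than creating one variable per batch element, keeps the initial state independent of the batch size while still letting it be trained end to end.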