├── .gitignore
├── README.md
├── attention.py
├── data.py
├── data
│   └── toy.txt
├── gru.py
├── lstm.py
├── main_minibatch.py
├── rnn.py
├── sent_decoder.py
├── sent_encoder.py
├── updates.py
├── utils_pg.py
├── word_decoder.py
└── word_encoder.py

/.gitignore:
--------------------------------------------------------------------------------
1 | *.pyc
2 | /data/*
3 | /model/*
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | ## Hierarchical Encoder-Decoder
2 | ### Features
3 | - Sequence modelling for words, sentences, paragraphs and documents
4 | - LSTM and GRU in Theano
5 | - Attention modelling (plays the role of "copy" in the encoder-decoder framework)
6 |
7 | ### References
8 | - Li, Jiwei, Minh-Thang Luong, and Dan Jurafsky. "A hierarchical neural autoencoder for paragraphs and documents." arXiv preprint arXiv:1506.01057 (2015).
9 |
--------------------------------------------------------------------------------
/attention.py:
--------------------------------------------------------------------------------
1 | #pylint: skip-file
2 | import numpy as np
3 | import theano
4 | import theano.tensor as T
5 | from utils_pg import *
6 |
7 | class AttentionLayer(object):
8 |     def __init__(self, layer_id, shape, sent_encs, sent_decs):
9 |         prefix = "AttentionLayer_"
10 |         layer_id = "_" + layer_id
11 |         self.num_sents, self.out_size = shape
12 |
13 |         # TODO: fix the attention layer using the standard attention modeling method
14 |         self.W_a1 = init_weights((self.out_size, self.out_size), prefix + "W_a1" + layer_id)
15 |         self.W_a2 = init_weights((self.out_size, self.out_size), prefix + "W_a2" + layer_id)
16 |         self.W_a3 = init_weights((self.out_size, self.out_size), prefix + "W_a3" + layer_id)
17 |         self.W_a4 = init_weights((self.out_size, self.out_size), prefix + "W_a4" + layer_id)
18 |         self.U_a = init_weights((self.out_size, self.num_sents), prefix + "U_a" + layer_id)
19 |
20 |         strength = T.dot(T.nnet.sigmoid(T.dot(sent_decs, self.W_a1) + T.dot(sent_encs, self.W_a2)), self.U_a)
21 |         a = T.nnet.softmax(strength)
22 |         c = T.dot(a, sent_encs)
23 |         self.activation = T.tanh(T.dot(sent_decs, self.W_a3) + T.dot(c, self.W_a4))
24 |
25 |         self.params = [self.W_a1, self.W_a2, self.W_a3, self.W_a4, self.U_a]
26 |
--------------------------------------------------------------------------------
/data.py:
--------------------------------------------------------------------------------
1 | # -*- coding: utf-8 -*-
2 | #pylint: skip-file
3 | import sys
4 | import os
5 | import numpy as np
6 | import theano
7 | import theano.tensor as T
8 | import cPickle, gzip
9 |
10 | curr_path = os.path.dirname(os.path.abspath(os.path.expanduser(__file__)))
11 |
12 | def load_stop_words(f_path = None):
13 |     stop_words = {}
14 |     if f_path == None:
15 |         f = open(curr_path + "/data/stopwords.txt", "r")
16 |     else:
17 |         f = open(curr_path + "/" + f_path, "r")
18 |     for line in f:
19 |         line = line.strip('\n').lower()
20 |         stop_words[line] = 1
21 |
22 |     return stop_words
23 |
24 | def word_sequence(f_path, batch_size = 1):
25 |     seqs = []
26 |     i2w = {}
27 |     w2i = {}
28 |     lines = []
29 |     tf = {}
30 |     f = open(curr_path + "/" + f_path, "r")
31 |     for line in f:
32 |         line = line.strip('\n').lower()
33 |         words = line.split()
34 |         words.append("") # end symbol
35 |         if len(words) < 3 or line == "====":
36 |             continue
37 |         lines.append(words)
38 |         for w in words:
39 |             if w not in w2i:
40 |                 i2w[len(w2i)] = w
41 |                 w2i[w] =
len(w2i) 42 | tf[w] = 1 43 | else: 44 | tf[w] += 1 45 | f.close() 46 | 47 | for i in range(0, len(lines)): 48 | words = lines[i] 49 | x = np.zeros((len(words), len(w2i)), dtype = theano.config.floatX) 50 | for j in range(0, len(words)): 51 | x[j, w2i[words[j]]] = 1 52 | seqs.append(np.asmatrix(x)) 53 | 54 | data_xy = batch_sequences(seqs, i2w, w2i, batch_size) 55 | print "#dic = " + str(len(w2i)) 56 | return seqs, i2w, w2i, data_xy 57 | 58 | def batch_sequences(seqs, i2w, w2i, batch_size): 59 | data_xy = {} 60 | batch_x = [] 61 | batch_y = [] 62 | seqs_len = [] 63 | batch_id = 0 64 | dim = len(w2i) 65 | zeros_m = np.zeros((1, dim), dtype = theano.config.floatX) 66 | for i in xrange(len(seqs)): 67 | seq = seqs[i]; 68 | X = seq[0 : len(seq), ] 69 | Y = seq[0 : len(seq), ] 70 | batch_x.append(X) 71 | seqs_len.append(X.shape[0]) 72 | batch_y.append(Y) 73 | 74 | if len(batch_x) == batch_size or (i == len(seqs) - 1): 75 | max_len = np.max(seqs_len); 76 | mask = np.zeros((max_len, len(batch_x)), dtype = theano.config.floatX) 77 | 78 | concat_X = np.zeros((max_len, len(batch_x) * dim), dtype = theano.config.floatX) 79 | concat_Y = concat_X.copy() 80 | for b_i in xrange(len(batch_x)): 81 | X = batch_x[b_i] 82 | Y = batch_y[b_i] 83 | mask[0 : X.shape[0], b_i] = 1 84 | for r in xrange(max_len - X.shape[0]): 85 | X = np.concatenate((X, zeros_m), axis=0) 86 | Y = np.concatenate((Y, zeros_m), axis=0) 87 | concat_X[:, b_i * dim : (b_i + 1) * dim] = X 88 | concat_Y[:, b_i * dim : (b_i + 1) * dim] = Y 89 | data_xy[batch_id] = [concat_X, concat_Y, mask, len(batch_x)] 90 | batch_x = [] 91 | batch_y = [] 92 | seqs_len = [] 93 | batch_id += 1 94 | return data_xy 95 | 96 | -------------------------------------------------------------------------------- /data/toy.txt: -------------------------------------------------------------------------------- 1 | Palantir Technologies is a mission-driven company, and a core component of that mission is protecting our fundamental rights to privacy and civil liberties. 2 | Since its inception, Palantir has invested its intellectual and financial capital in engineering technology that can be used to solve the world’s hardest problems while simultaneously protecting individual liberty. 3 | Robust privacy and civil liberties protections are essential to building public confidence in the management of data, and thus are an essential part of any information system that uses Palantir software. 4 | For ten years, we’ve built our company by being unconventional and open-minded. 5 | We’ve been willing to try things that people said would never work, and we’ve adapted when we’ve been wrong. 6 | Where we’ve succeeded, it’s because we’ve been different. 7 | To succeed, we need the very best ideas of all kinds. 8 | To access the broadest and fullest set of ideas, our community must attract and encourage people of diverse backgrounds, perspectives, and life experiences. 9 | We work every day to build a truly diverse workforce, and to foster an environment that is respectful and receptive to new ideas. 10 | We celebrate difference and diversity — of background, approach, and identity. 
11 | -------------------------------------------------------------------------------- /gru.py: -------------------------------------------------------------------------------- 1 | #pylint: skip-file 2 | import numpy as np 3 | import theano 4 | import theano.tensor as T 5 | from utils_pg import * 6 | 7 | class GRULayer(object): 8 | def __init__(self, rng, layer_id, shape, X, mask, is_train = 1, batch_size = 1, p = 0.5): 9 | prefix = "GRU_" 10 | layer_id = "_" + layer_id 11 | self.in_size, self.out_size = shape 12 | 13 | self.W_xr = init_weights((self.in_size, self.out_size), prefix + "W_xr" + layer_id) 14 | self.W_hr = init_weights((self.out_size, self.out_size), prefix + "W_hr" + layer_id) 15 | self.b_r = init_bias(self.out_size, prefix + "b_r" + layer_id) 16 | 17 | self.W_xz = init_weights((self.in_size, self.out_size), prefix + "W_xz" + layer_id) 18 | self.W_hz = init_weights((self.out_size, self.out_size), prefix + "W_hz" + layer_id) 19 | self.b_z = init_bias(self.out_size, prefix + "b_z" + layer_id) 20 | 21 | self.W_xh = init_weights((self.in_size, self.out_size), prefix + "W_xh" + layer_id) 22 | self.W_hh = init_weights((self.out_size, self.out_size), prefix + "W_hh" + layer_id) 23 | self.b_h = init_bias(self.out_size, prefix + "b_h" + layer_id) 24 | 25 | self.X = X 26 | self.M = mask 27 | 28 | def _active_mask(x, m, pre_h): 29 | x = T.reshape(x, (batch_size, self.in_size)) 30 | pre_h = T.reshape(pre_h, (batch_size, self.out_size)) 31 | 32 | r = T.nnet.sigmoid(T.dot(x, self.W_xr) + T.dot(pre_h, self.W_hr) + self.b_r) 33 | z = T.nnet.sigmoid(T.dot(x, self.W_xz) + T.dot(pre_h, self.W_hz) + self.b_z) 34 | gh = T.tanh(T.dot(x, self.W_xh) + T.dot(r * pre_h, self.W_hh) + self.b_h) 35 | h = (1 - z) * pre_h + z * gh 36 | 37 | h = h * m[:, None] + (1 - m[:, None]) * pre_h 38 | 39 | h = T.reshape(h, (1, batch_size * self.out_size)) 40 | return h 41 | h, updates = theano.scan(_active_mask, sequences = [self.X, self.M], 42 | outputs_info = [T.alloc(floatX(0.), 1, batch_size * self.out_size)]) 43 | # dic to matrix 44 | h = T.reshape(h, (self.X.shape[0], batch_size * self.out_size)) 45 | 46 | # dropout 47 | if p > 0: 48 | srng = T.shared_randomstreams.RandomStreams(rng.randint(999999)) 49 | drop_mask = srng.binomial(n = 1, p = 1-p, size = h.shape, dtype = theano.config.floatX) 50 | self.activation = T.switch(T.eq(is_train, 1), h * drop_mask, h * (1 - p)) 51 | else: 52 | self.activation = T.switch(T.eq(is_train, 1), h, h) 53 | 54 | self.params = [self.W_xr, self.W_hr, self.b_r, 55 | self.W_xz, self.W_hz, self.b_z, 56 | self.W_xh, self.W_hh, self.b_h] 57 | 58 | def _active(self, x, pre_h): 59 | r = T.nnet.sigmoid(T.dot(x, self.W_xr) + T.dot(pre_h, self.W_hr) + self.b_r) 60 | z = T.nnet.sigmoid(T.dot(x, self.W_xz) + T.dot(pre_h, self.W_hz) + self.b_z) 61 | gh = T.tanh(T.dot(x, self.W_xh) + T.dot(r * pre_h, self.W_hh) + self.b_h) 62 | h = z * pre_h + (1 - z) * gh 63 | return h 64 | 65 | 66 | class BdGRU(object): 67 | # Bidirectional GRU Layer. 
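    # The wrapper runs one GRULayer forward over (X, mask) and a second one over
    # the time-reversed X[::-1] and mask[::-1]; the backward activations are
    # reversed again so both directions are aligned per time step, then
    # concatenated along the feature axis, giving 2 * batch_size * out_size
    # features per step.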
68 | def __init__(self, rng, layer_id, shape, X, mask, is_train = 1, batch_size = 1, p = 0.5): 69 | fwd = GRULayer(rng, "_fwd_" + layer_id, shape, X, mask, is_train, batch_size, p) 70 | bwd = GRULayer(rng, "_bwd_" + layer_id, shape, X[::-1], mask[::-1], is_train, batch_size, p) 71 | self.params = fwd.params + bwd.params 72 | self.activation = T.concatenate([fwd.activation, bwd.activation[::-1]], axis=1) 73 | 74 | -------------------------------------------------------------------------------- /lstm.py: -------------------------------------------------------------------------------- 1 | #pylint: skip-file 2 | import numpy as np 3 | import theano 4 | import theano.tensor as T 5 | from utils_pg import * 6 | 7 | class LSTMLayer(object): 8 | def __init__(self, rng, layer_id, shape, X, mask, is_train = 1, batch_size = 1, p = 0.5): 9 | prefix = "LSTM_" 10 | layer_id = "_" + layer_id 11 | self.in_size, self.out_size = shape 12 | 13 | self.W_xi = init_weights((self.in_size, self.out_size), prefix + "W_xi" + layer_id) 14 | self.W_hi = init_weights((self.out_size, self.out_size), prefix + "W_hi" + layer_id) 15 | self.W_ci = init_weights((self.out_size, self.out_size), prefix + "W_ci" + layer_id) 16 | self.b_i = init_bias(self.out_size, prefix + "b_i" + layer_id) 17 | 18 | self.W_xf = init_weights((self.in_size, self.out_size), prefix + "W_xf" + layer_id) 19 | self.W_hf = init_weights((self.out_size, self.out_size), prefix + "W_hf" + layer_id) 20 | self.W_cf = init_weights((self.out_size, self.out_size), prefix + "W_cf" + layer_id) 21 | self.b_f = init_bias(self.out_size, prefix + "b_f" + layer_id) 22 | 23 | self.W_xc = init_weights((self.in_size, self.out_size), prefix + "W_xc" + layer_id) 24 | self.W_hc = init_weights((self.out_size, self.out_size), prefix + "W_hc" + layer_id) 25 | self.b_c = init_bias(self.out_size, prefix + "b_c" + layer_id) 26 | 27 | self.W_xo = init_weights((self.in_size, self.out_size), prefix + "W_xo" + layer_id) 28 | self.W_ho = init_weights((self.out_size, self.out_size), prefix + "W_ho" + layer_id) 29 | self.W_co = init_weights((self.out_size, self.out_size), prefix + "W_co" + layer_id) 30 | self.b_o = init_bias(self.out_size, prefix + "b_o" + layer_id) 31 | 32 | self.X = X 33 | self.M = mask 34 | 35 | 36 | def _active_mask(x, m, pre_h, pre_c): 37 | x = T.reshape(x, (batch_size, self.in_size)) 38 | pre_h = T.reshape(pre_h, (batch_size, self.out_size)) 39 | pre_c = T.reshape(pre_c, (batch_size, self.out_size)) 40 | 41 | i = T.nnet.sigmoid(T.dot(x, self.W_xi) + T.dot(pre_h, self.W_hi) + T.dot(pre_c, self.W_ci) + self.b_i) 42 | f = T.nnet.sigmoid(T.dot(x, self.W_xf) + T.dot(pre_h, self.W_hf) + T.dot(pre_c, self.W_cf) + self.b_f) 43 | gc = T.tanh(T.dot(x, self.W_xc) + T.dot(pre_h, self.W_hc) + self.b_c) 44 | c = f * pre_c + i * gc 45 | o = T.nnet.sigmoid(T.dot(x, self.W_xo) + T.dot(pre_h, self.W_ho) + T.dot(c, self.W_co) + self.b_o) 46 | h = o * T.tanh(c) 47 | 48 | c = c * m[:, None] 49 | h = h * m[:, None] 50 | c = T.reshape(c, (1, batch_size * self.out_size)) 51 | h = T.reshape(h, (1, batch_size * self.out_size)) 52 | return h, c 53 | [h, c], updates = theano.scan(_active_mask, 54 | sequences = [self.X, self.M], 55 | outputs_info = [T.alloc(floatX(0.), 1, batch_size * self.out_size), 56 | T.alloc(floatX(0.), 1, batch_size * self.out_size)]) 57 | 58 | h = T.reshape(h, (self.X.shape[0], batch_size * self.out_size)) 59 | # dropout 60 | if p > 0: 61 | srng = T.shared_randomstreams.RandomStreams(rng.randint(999999)) 62 | drop_mask = srng.binomial(n = 1, p = 1-p, size = h.shape, 
dtype = theano.config.floatX) 63 | self.activation = T.switch(T.eq(is_train, 1), h * drop_mask, h * (1 - p)) 64 | else: 65 | self.activation = T.switch(T.eq(is_train, 1), h, h) 66 | 67 | self.params = [self.W_xi, self.W_hi, self.W_ci, self.b_i, 68 | self.W_xf, self.W_hf, self.W_cf, self.b_f, 69 | self.W_xc, self.W_hc, self.b_c, 70 | self.W_xo, self.W_ho, self.W_co, self.b_o] 71 | 72 | def _active(self, x, pre_h, pre_c): 73 | i = T.nnet.sigmoid(T.dot(x, self.W_xi) + T.dot(pre_h, self.W_hi) + T.dot(pre_c, self.W_ci) + self.b_i) 74 | f = T.nnet.sigmoid(T.dot(x, self.W_xf) + T.dot(pre_h, self.W_hf) + T.dot(pre_c, self.W_cf) + self.b_f) 75 | gc = T.tanh(T.dot(x, self.W_xc) + T.dot(pre_h, self.W_hc) + self.b_c) 76 | c = f * pre_c + i * gc 77 | o = T.nnet.sigmoid(T.dot(x, self.W_xo) + T.dot(pre_h, self.W_ho) + T.dot(c, self.W_co) + self.b_o) 78 | h = o * T.tanh(c) 79 | return h, c 80 | 81 | class BdLSTM(object): 82 | # Bidirectional LSTM Layer. 83 | def __init__(self, rng, layer_id, shape, X, mask, is_train = 1, batch_size = 1, p = 0.5): 84 | fwd = LSTMLayer(rng, "_fwd_" + layer_id, shape, X, mask, is_train, batch_size, p) 85 | bwd = LSTMLayer(rng, "_bwd_" + layer_id, shape, X[::-1], mask[::-1], is_train, batch_size, p) 86 | self.params = fwd.params + bwd.params 87 | self.activation = T.concatenate([fwd.activation, bwd.activation[::-1]], axis=1) 88 | 89 | -------------------------------------------------------------------------------- /main_minibatch.py: -------------------------------------------------------------------------------- 1 | #pylint: skip-file 2 | import time 3 | import sys 4 | import numpy as np 5 | import theano 6 | import theano.tensor as T 7 | from utils_pg import * 8 | from rnn import * 9 | import data 10 | 11 | use_gpu(0) # -1:cpu; 0,1,2,..: gpu 12 | 13 | e = 0.01 14 | lr = 0.1 15 | drop_rate = 0. 16 | batch_size = 1000 17 | hidden_size = [500] 18 | # try: gru, lstm 19 | cell = "gru" 20 | # try: sgd, momentum, rmsprop, adagrad, adadelta, adam, nesterov_momentum 21 | optimizer = "nesterov_momentum" 22 | 23 | seqs, i2w, w2i, data_xy = data.word_sequence("/data/toy.txt", batch_size) 24 | dim_x = len(w2i) 25 | dim_y = len(w2i) 26 | num_sents = data_xy[0][3] 27 | print "#features = ", dim_x, "#labels = ", dim_y 28 | 29 | print "compiling..." 30 | model = RNN(dim_x, dim_y, hidden_size, cell, optimizer, drop_rate, num_sents) 31 | 32 | print "training..." 33 | start = time.time() 34 | g_error = 9999.9999 35 | for i in xrange(2000): 36 | error = 0.0 37 | in_start = time.time() 38 | for batch_id, xy in data_xy.items(): 39 | X = xy[0] 40 | mask = xy[2] 41 | local_batch_size = xy[3] 42 | cost, sents = model.train(X, mask, lr, local_batch_size) 43 | error += cost 44 | #print i, g_error, (batch_id + 1), "/", len(data_xy), cost 45 | in_time = time.time() - in_start 46 | 47 | for s in xrange(int(sents.shape[1] / dim_y)): 48 | xs = sents[:, s * dim_y : (s + 1) * dim_y] 49 | for w_i in xrange(xs.shape[0]): 50 | w = i2w[np.argmax(xs[w_i, :])] 51 | if w == "": 52 | break 53 | print w, 54 | print "\n" 55 | 56 | error /= len(data_xy); 57 | if error < g_error: 58 | g_error = error 59 | 60 | print "Iter = " + str(i) + ", Error = " + str(error) + ", Time = " + str(in_time) 61 | if error <= e: 62 | break 63 | 64 | print "Finished. Time = " + str(time.time() - start) 65 | 66 | print "save model..." 
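# save_model (defined in utils_pg.py) pickles every parameter by name into the
# given path; ./model/ is git-ignored (/model/* in .gitignore), so the directory
# must exist before the open(f, "wb") inside save_model can succeed.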
67 | save_model("./model/hed.model", model) 68 | 69 | -------------------------------------------------------------------------------- /rnn.py: -------------------------------------------------------------------------------- 1 | #pylint: skip-file 2 | import numpy as np 3 | import theano 4 | import theano.tensor as T 5 | 6 | from gru import * 7 | from lstm import * 8 | from word_encoder import * 9 | from sent_encoder import * 10 | from sent_decoder import * 11 | from attention import * 12 | from word_decoder import * 13 | from updates import * 14 | 15 | class RNN(object): 16 | def __init__(self, in_size, out_size, hidden_size, 17 | cell = "gru", optimizer = "rmsprop", p = 0.5, num_sents = 1): 18 | 19 | self.X = T.matrix("X") 20 | self.in_size = in_size 21 | self.out_size = out_size 22 | self.hidden_size = hidden_size 23 | self.cell = cell 24 | self.drop_rate = p 25 | self.num_sents = num_sents 26 | self.is_train = T.iscalar('is_train') # for dropout 27 | self.batch_size = T.iscalar('batch_size') # for mini-batch training 28 | self.mask = T.matrix("mask") 29 | self.optimizer = optimizer 30 | self.define_layers() 31 | self.define_train_test_funcs() 32 | 33 | def define_layers(self): 34 | self.layers = [] 35 | self.params = [] 36 | rng = np.random.RandomState(1234) 37 | # LM layers 38 | word_encoder_layer = WordEncoderLayer(rng, self.X, self.in_size, self.out_size, self.hidden_size, 39 | self.cell, self.optimizer, self.drop_rate, 40 | self.is_train, self.batch_size, self.mask) 41 | self.layers += word_encoder_layer.layers 42 | self.params += word_encoder_layer.params 43 | 44 | i = len(self.layers) - 1 45 | 46 | # encoder layer 47 | layer_input = word_encoder_layer.activation 48 | encoder_layer = SentEncoderLayer(self.cell, rng, str(i + 1), (word_encoder_layer.hidden_size, word_encoder_layer.hidden_size), 49 | layer_input, self.mask, self.is_train, self.batch_size, self.drop_rate) 50 | self.layers.append(encoder_layer) 51 | self.params += encoder_layer.params 52 | 53 | # codes is a vector 54 | codes = encoder_layer.activation 55 | codes = T.reshape(codes, (1, encoder_layer.out_size)) 56 | # sentence decoder 57 | sent_decoder_layer = SentDecoderLayer(self.cell, rng, str(i + 2), (encoder_layer.out_size, encoder_layer.in_size), 58 | codes, self.mask, self.is_train, self.batch_size, self.drop_rate) 59 | self.layers.append(sent_decoder_layer) 60 | self.params += sent_decoder_layer.params 61 | 62 | # attention layer (syncrhonous update) 63 | sent_encs = encoder_layer.sent_encs 64 | sent_decs = sent_decoder_layer.activation 65 | attention_layer = AttentionLayer(str(i + 3), (self.num_sents, sent_decoder_layer.out_size), sent_encs, sent_decs) 66 | 67 | # reshape to a row with num_sentences samples 68 | sents_codes = attention_layer.activation 69 | sents_codes = T.reshape(sents_codes, (1, self.batch_size * sent_decoder_layer.out_size)) 70 | 71 | # word decoder 72 | word_decoder_layer = WordDecoderLayer(self.cell, rng, str(i + 4), (sent_decoder_layer.out_size, self.out_size), 73 | sents_codes, self.mask, self.is_train, self.batch_size, self.drop_rate) 74 | self.layers.append(word_decoder_layer) 75 | self.params += word_decoder_layer.params 76 | 77 | self.activation = word_decoder_layer.activation 78 | 79 | # https://github.com/fchollet/keras/pull/9/files 80 | self.epsilon = 1.0e-15 81 | def categorical_crossentropy(self, y_pred, y_true): 82 | y_pred = T.clip(y_pred, self.epsilon, 1.0 - self.epsilon) 83 | m = T.reshape(self.mask, (self.mask.shape[0] * self.batch_size, 1)) 84 | ce = 
T.nnet.categorical_crossentropy(y_pred, y_true) 85 | ce = T.reshape(ce, (self.mask.shape[0] * self.batch_size, 1)) 86 | return T.sum(ce * m) / T.sum(m) 87 | 88 | 89 | def define_train_test_funcs(self): 90 | pYs = T.reshape(self.activation, (self.mask.shape[0] * self.batch_size, self.out_size)) 91 | tYs = T.reshape(self.X, (self.mask.shape[0] * self.batch_size, self.out_size)) 92 | cost = self.categorical_crossentropy(pYs, tYs) 93 | 94 | gparams = [] 95 | for param in self.params: 96 | gparam = T.grad(cost, param) 97 | gparams.append(gparam) 98 | 99 | lr = T.scalar("lr") 100 | # eval(): string to function 101 | optimizer = eval(self.optimizer) 102 | updates = optimizer(self.params, gparams, lr) 103 | 104 | #updates = sgd(self.params, gparams, lr) 105 | #updates = momentum(self.params, gparams, lr) 106 | #updates = rmsprop(self.params, gparams, lr) 107 | #updates = adagrad(self.params, gparams, lr) 108 | #updates = adadelta(self.params, gparams, lr) 109 | #updates = adam(self.params, gparams, lr) 110 | 111 | self.train = theano.function(inputs = [self.X, self.mask, lr, self.batch_size], 112 | givens = {self.is_train : np.cast['int32'](1)}, 113 | outputs = [cost, self.activation], 114 | updates = updates) 115 | 116 | -------------------------------------------------------------------------------- /sent_decoder.py: -------------------------------------------------------------------------------- 1 | #pylint: skip-file 2 | import numpy as np 3 | import theano 4 | import theano.tensor as T 5 | from utils_pg import * 6 | from lstm import * 7 | from gru import * 8 | 9 | class SentDecoderLayer(object): 10 | def __init__(self, cell, rng, layer_id, shape, X, mask, is_train = 1, batch_size = 1, p = 0.5): 11 | prefix = "SentDecoderLayer_" 12 | layer_id = "_" + layer_id 13 | self.in_size, self.out_size = shape 14 | self.X = X 15 | self.summs = batch_size 16 | 17 | self.W_hy = init_weights((self.in_size, self.out_size), prefix + "W_hy" + layer_id) 18 | self.b_y = init_bias(self.out_size, prefix + "b_y" + layer_id) 19 | 20 | if cell == "gru": 21 | self.decoder = GRULayer(rng, prefix + layer_id, shape, self.X, mask, is_train, 1, p) 22 | def _active(pre_h, x): 23 | h = self.decoder._active(x, pre_h) 24 | y = T.tanh(T.dot(h, self.W_hy) + self.b_y) 25 | return h, y 26 | [h, y], updates = theano.scan(_active, n_steps = self.summs, sequences = [], 27 | outputs_info = [{'initial':self.X, 'taps':[-1]}, 28 | T.alloc(floatX(0.), 1, self.out_size)]) 29 | elif cell == "lstm": 30 | self.decoder = LSTMLayer(rng, prefix + layer_id, shape, self.X, mask, is_train, 1, p) 31 | def _active(pre_h, pre_c, x): 32 | h, c = self.decoder._active(x, pre_h, pre_c) 33 | y = T.tanh(T.dot(h, self.W_hy) + self.b_y) 34 | return h, c, y 35 | [h, c, y], updates = theano.scan(_active, n_steps = self.summs, sequences = [], 36 | outputs_info = [{'initial':self.X, 'taps':[-1]}, 37 | {'initial':self.X, 'taps':[-1]}, 38 | T.alloc(floatX(0.), 1, self.out_size)]) 39 | 40 | y = T.reshape(y, (self.summs, self.out_size)) 41 | self.activation = y 42 | 43 | self.params = self.decoder.params + [self.W_hy, self.b_y] 44 | 45 | -------------------------------------------------------------------------------- /sent_encoder.py: -------------------------------------------------------------------------------- 1 | #pylint: skip-file 2 | import numpy as np 3 | import theano 4 | import theano.tensor as T 5 | from utils_pg import * 6 | from gru import * 7 | from lstm import * 8 | 9 | 10 | class SentEncoderLayer(object): 11 | def __init__(self, cell, rng, layer_id, 
shape, X, mask, is_train = 1, batch_size = 1, p = 0.5): 12 | prefix = "SentEncoder_" 13 | self.in_size, self.out_size = shape 14 | 15 | ''' 16 | def code(j): 17 | i = mask[:, j].sum() - 1 18 | i = T.cast(i, 'int32') 19 | sent_x = X[i, j * self.in_size : (j + 1) * self.in_size] 20 | return sent_x 21 | sent_X, updates = theano.scan(lambda i: code(i), sequences=[T.arange(mask.shape[1])]) 22 | ''' 23 | sent_X = T.reshape(X[X.shape[0] - 1, :], (batch_size, self.in_size)) 24 | mask = T.reshape(T.ones_like(sent_X)[:,0], (batch_size, 1)) 25 | 26 | if cell == "gru": 27 | self.encoder = GRULayer(rng, prefix + layer_id, shape, sent_X, mask, is_train, 1, p) 28 | elif cell == "lstm": 29 | self.encoder = LSTMLayer(rng, prefix + layer_id, shape, sent_X, mask, is_train, 1, p) 30 | 31 | self.activation = self.encoder.activation[self.encoder.activation.shape[0] - 1,:] 32 | self.sent_encs = sent_X 33 | self.params = self.encoder.params 34 | -------------------------------------------------------------------------------- /updates.py: -------------------------------------------------------------------------------- 1 | #pylint: skip-file 2 | #https://github.com/Lasagne/Lasagne/blob/master/lasagne/updates.py 3 | import numpy as np 4 | import theano 5 | import theano.tensor as T 6 | 7 | ''' 8 | def clip_norm(g, c, n): 9 | if c > 0: 10 | g = K.switch(n >= c, g * c / n, g) 11 | return g 12 | 13 | def clip(x, min_value, max_value): 14 | if max_value < min_value: 15 | max_value = min_value 16 | return T.clip(x, min_value, max_value) 17 | ''' 18 | 19 | def sgd(params, gparams, learning_rate = 0.1): 20 | updates = [] 21 | for p, g in zip(params, gparams): 22 | updates.append((p, p - learning_rate * g)) 23 | return updates 24 | 25 | def momentum(params, gparams, learning_rate = 0.1, momentum = 0.9): 26 | updates = [] 27 | for p, g in zip(params, gparams): 28 | v = p.get_value(borrow = True) 29 | velocity = theano.shared(np.zeros(v.shape, dtype = v.dtype), broadcastable = p.broadcastable) 30 | x = momentum * velocity - learning_rate * g 31 | updates.append((velocity, x)) 32 | updates.append((p, p + x)) 33 | return updates 34 | 35 | def nesterov_momentum(params, gparams, learning_rate = 0.1, momentum = 0.9): 36 | updates = [] 37 | for p, g in zip(params, gparams): 38 | v = p.get_value(borrow = True) 39 | velocity = theano.shared(np.zeros(v.shape, dtype = v.dtype), broadcastable = p.broadcastable) 40 | x = momentum * velocity - learning_rate * g 41 | updates.append((velocity, x)) 42 | inc = momentum * x - learning_rate * g 43 | updates.append((p, p + inc)) 44 | return updates 45 | 46 | def rmsprop(params, gparams, learning_rate = 0.001, rho = 0.9, epsilon = 1e-6): 47 | updates = [] 48 | for p, g in zip(params, gparams): 49 | v = p.get_value(borrow = True) 50 | acc = theano.shared(np.zeros(v.shape, dtype = v.dtype), broadcastable = p.broadcastable) 51 | acc_new = rho * acc + (1 - rho) * g ** 2 52 | updates.append((acc, acc_new)) 53 | updates.append((p, p - learning_rate * g / T.sqrt(acc_new + epsilon))) 54 | return updates 55 | 56 | def adagrad(params, gparams, learning_rate = 0.01, epsilon = 1e-6): 57 | updates = [] 58 | for p, g in zip(params, gparams): 59 | v = p.get_value(borrow = True) 60 | acc = theano.shared(np.zeros(v.shape, dtype = v.dtype), broadcastable = p.broadcastable) 61 | acc_new = acc + g ** 2 62 | updates.append((acc, acc_new)) 63 | updates.append((p, p - learning_rate * g / T.sqrt(acc_new + epsilon))) 64 | return updates 65 | 66 | def adadelta(params, gparams, learning_rate = 1.0, rho = 0.95, epsilon = 
1e-6): 67 | updates = [] 68 | for p, g in zip(params, gparams): 69 | v = p.get_value(borrow = True) 70 | acc = theano.shared(np.zeros(v.shape, dtype = v.dtype), broadcastable = p.broadcastable) 71 | delta_acc = theano.shared(np.zeros(v.shape, dtype = v.dtype), broadcastable = p.broadcastable) 72 | 73 | acc_new = rho * acc + (1 - rho) * g ** 2 74 | updates.append((acc, acc_new)) 75 | 76 | update = (g * T.sqrt(delta_acc + epsilon) / T.sqrt(acc_new + epsilon)) 77 | updates.append((p, p - learning_rate * update)) 78 | 79 | delta_acc_new = rho * delta_acc + (1 - rho) * update ** 2 80 | updates.append((delta_acc, delta_acc_new)) 81 | return updates 82 | 83 | def adam(params, gparams, learning_rate = 0.001, beta1 = 0.9, beta2 = 0.999, epsilon = 1e-8): 84 | updates = [] 85 | t_pre = theano.shared(np.asarray(.0, dtype=theano.config.floatX)) 86 | t = t_pre + 1 87 | a_t = learning_rate * T.sqrt(1 - beta2 ** t) / (1 - beta1 ** t) 88 | for p, g in zip(params, gparams): 89 | v = p.get_value(borrow = True) 90 | m_pre = theano.shared(np.zeros(v.shape, dtype = v.dtype), broadcastable = p.broadcastable) 91 | v_pre = theano.shared(np.zeros(v.shape, dtype = v.dtype), broadcastable = p.broadcastable) 92 | 93 | m_t = beta1 * m_pre + (1 - beta1) * g 94 | v_t = beta2 * v_pre + (1 - beta2) * g ** 2 95 | step = a_t * m_t / (T.sqrt(v_t) + epsilon) 96 | 97 | updates.append((m_pre, m_t)) 98 | updates.append((v_pre, v_t)) 99 | updates.append((p, p - step)) 100 | 101 | updates.append((t_pre, t)) 102 | return updates 103 | -------------------------------------------------------------------------------- /utils_pg.py: -------------------------------------------------------------------------------- 1 | #pylint: skip-file 2 | import numpy as np 3 | import theano 4 | import theano.tensor as T 5 | import cPickle as pickle 6 | 7 | # set use gpu programatically 8 | import theano.sandbox.cuda 9 | def use_gpu(gpu_id): 10 | if gpu_id > -1: 11 | theano.sandbox.cuda.use("gpu" + str(gpu_id)) 12 | 13 | def floatX(X): 14 | return np.asarray(X, dtype=theano.config.floatX) 15 | 16 | def init_weights(shape, name): 17 | return theano.shared(floatX(np.random.randn(*shape) * 0.1), name) 18 | 19 | def init_gradws(shape, name): 20 | return theano.shared(floatX(np.zeros(shape)), name) 21 | 22 | def init_bias(size, name): 23 | return theano.shared(floatX(np.zeros((size,))), name) 24 | 25 | def save_model(f, model): 26 | ps = {} 27 | for p in model.params: 28 | ps[p.name] = p.get_value() 29 | pickle.dump(ps, open(f, "wb")) 30 | 31 | def load_model(f, model): 32 | ps = pickle.load(open(f, "rb")) 33 | for p in model.params: 34 | p.set_value(ps[p.name]) 35 | return model 36 | -------------------------------------------------------------------------------- /word_decoder.py: -------------------------------------------------------------------------------- 1 | #pylint: skip-file 2 | import numpy as np 3 | import theano 4 | import theano.tensor as T 5 | from utils_pg import * 6 | from lstm import * 7 | from gru import * 8 | 9 | class WordDecoderLayer(object): 10 | def __init__(self, cell, rng, layer_id, shape, X, mask, is_train = 1, batch_size = 1, p = 0.5): 11 | prefix = "WordDecoderLayer_" 12 | layer_id = "_" + layer_id 13 | self.out_size, self.in_size = shape 14 | self.mask = mask 15 | self.X = X 16 | self.words = mask.shape[0] 17 | 18 | self.W_hy = init_weights((self.out_size, self.in_size), prefix + "W_hy" + layer_id) 19 | self.b_y = init_bias(self.in_size, prefix + "b_y" + layer_id) 20 | if cell == "gru": 21 | self.decoder = GRULayer(rng, prefix + 
layer_id, (self.in_size, self.out_size), self.X, mask, is_train, batch_size, p) 22 | def _active(m, pre_h, x): 23 | x = T.reshape(x, (batch_size, self.in_size)) 24 | pre_h = T.reshape(pre_h, (batch_size, self.out_size)) 25 | 26 | h = self.decoder._active(x, pre_h) 27 | y = T.nnet.softmax(T.dot(h, self.W_hy) + self.b_y) 28 | y = y * m[:, None] 29 | 30 | h = T.reshape(h, (1, batch_size * self.out_size)) 31 | y = T.reshape(y, (1, batch_size * self.in_size)) 32 | return h, y 33 | [h, y], updates = theano.scan(_active, #n_steps = self.words, 34 | sequences = [self.mask], 35 | outputs_info = [{'initial':self.X, 'taps':[-1]}, 36 | T.alloc(floatX(0.), 1, batch_size * self.in_size)]) 37 | elif cell == "lstm": 38 | self.decoder = LSTMLayer(rng, prefix + layer_id, (self.in_size, self.out_size), self.X, mask, is_train, batch_size, p) 39 | def _active(m, pre_h, pre_c, x): 40 | x = T.reshape(x, (batch_size, self.in_size)) 41 | pre_h = T.reshape(pre_h, (batch_size, self.out_size)) 42 | pre_c = T.reshape(pre_c, (batch_size, self.out_size)) 43 | 44 | h, c = self.decoder._active(x, pre_h, pre_c) 45 | 46 | y = T.nnet.softmax(T.dot(h, self.W_hy) + self.b_y) 47 | y = y * m[:, None] 48 | 49 | h = T.reshape(h, (1, batch_size * self.out_size)) 50 | c = T.reshape(c, (1, batch_size * self.out_size)) 51 | y = T.reshape(y, (1, batch_size * self.in_size)) 52 | return h, c, y 53 | [h, c, y], updates = theano.scan(_active, #n_steps = self.words, 54 | sequences = [self.mask], 55 | outputs_info = [{'initial':self.X, 'taps':[-1]}, 56 | {'initial':self.X, 'taps':[-1]}, 57 | T.alloc(floatX(0.), 1, batch_size * self.in_size)]) 58 | 59 | y = T.reshape(y, (self.words, batch_size * self.in_size)) 60 | self.activation = y 61 | self.params = self.decoder.params + [self.W_hy, self.b_y] 62 | 63 | -------------------------------------------------------------------------------- /word_encoder.py: -------------------------------------------------------------------------------- 1 | #pylint: skip-file 2 | import numpy as np 3 | import theano 4 | import theano.tensor as T 5 | 6 | from gru import * 7 | from lstm import * 8 | from updates import * 9 | 10 | class WordEncoderLayer(object): 11 | def __init__(self, rng, X, in_size, out_size, hidden_size, 12 | cell, optimizer, p, is_train, batch_size, mask): 13 | self.X = X 14 | self.in_size = in_size 15 | self.out_size = out_size 16 | self.hidden_size_list = hidden_size 17 | self.cell = cell 18 | self.drop_rate = p 19 | self.is_train = is_train 20 | self.batch_size = batch_size 21 | self.mask = mask 22 | self.rng = rng 23 | self.num_hds = len(hidden_size) 24 | 25 | self.define_layers() 26 | 27 | def define_layers(self): 28 | self.layers = [] 29 | self.params = [] 30 | # hidden layers 31 | for i in xrange(self.num_hds): 32 | if i == 0: 33 | layer_input = self.X 34 | shape = (self.in_size, self.hidden_size_list[0]) 35 | else: 36 | layer_input = self.layers[i - 1].activation 37 | shape = (self.hidden_size_list[i - 1], self.hidden_size_list[i]) 38 | 39 | if self.cell == "gru": 40 | hidden_layer = GRULayer(self.rng, str(i), shape, layer_input, 41 | self.mask, self.is_train, self.batch_size, self.drop_rate) 42 | elif self.cell == "lstm": 43 | hidden_layer = LSTMLayer(self.rng, str(i), shape, layer_input, 44 | self.mask, self.is_train, self.batch_size, self.drop_rate) 45 | 46 | self.layers.append(hidden_layer) 47 | self.params += hidden_layer.params 48 | 49 | self.activation = hidden_layer.activation 50 | self.hidden_size = hidden_layer.out_size 51 | 
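# WordEncoderLayer stacks the recurrent layers listed in hidden_size and exposes
# the top layer's output: self.activation has shape (seq_len, batch_size * hidden_size[-1]),
# and self.hidden_size is that top width, which rnn.py passes on to SentEncoderLayer.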
--------------------------------------------------------------------------------
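For reference, the gate arithmetic that GRULayer._active_mask implements with Theano can be restated as a small, framework-free NumPy sketch. This is only an illustration: the parameter names mirror the shared variables in gru.py, but the sigmoid helper, the toy shapes, and the random data below are assumptions made for the example, not part of the repository. (Note that GRULayer._active, the step function reused by SentDecoderLayer, blends pre_h and the candidate with the roles of z swapped relative to _active_mask.)

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

def gru_step(x, pre_h, W_xr, W_hr, b_r, W_xz, W_hz, b_z, W_xh, W_hh, b_h):
    # reset gate, update gate and candidate state, as in GRULayer._active_mask
    r = sigmoid(x.dot(W_xr) + pre_h.dot(W_hr) + b_r)
    z = sigmoid(x.dot(W_xz) + pre_h.dot(W_hz) + b_z)
    gh = np.tanh(x.dot(W_xh) + (r * pre_h).dot(W_hh) + b_h)
    # keep part of the previous state and blend in the candidate
    return (1 - z) * pre_h + z * gh

# toy dimensions (assumed for the example): batch of 2, in_size = 4, out_size = 3
rng = np.random.RandomState(0)
in_size, out_size = 4, 3
def W(n_rows, n_cols):
    return rng.randn(n_rows, n_cols) * 0.1
params = (W(in_size, out_size), W(out_size, out_size), np.zeros(out_size),   # reset gate
          W(in_size, out_size), W(out_size, out_size), np.zeros(out_size),   # update gate
          W(in_size, out_size), W(out_size, out_size), np.zeros(out_size))   # candidate
x = rng.randn(2, in_size)
h = np.zeros((2, out_size))
h = gru_step(x, h, *params)
print(h.shape)  # (2, 3)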