├── .gitignore ├── Graph ├── __pycache__ │ └── utils.cpython-34.pyc ├── practice.py └── utils.py ├── Variable ├── README.md ├── __pycache__ │ └── utils.cpython-34.pyc ├── utils.py └── variable.py ├── dropconnect ├── README.md ├── __pycache__ │ └── utils.cpython-34.pyc ├── input.txt ├── main.py └── utils.py ├── get_variable ├── .ipynb_checkpoints │ └── Untitled-checkpoint.ipynb ├── README.md ├── __pycache__ │ └── utils.cpython-34.pyc ├── utils.py ├── with_get_variable_correct1.py ├── with_get_variable_correct2.py ├── with_get_variable_wrong.py └── without_get_variable.py ├── initializer ├── .ipynb_checkpoints │ └── example-checkpoint.ipynb └── example.ipynb ├── rnn_handmade ├── README.md ├── __pycache__ │ └── utils.cpython-34.pyc ├── basic_rnn_impl.py ├── lstm_impl.py └── utils.py ├── rnn_states ├── README.md └── rnn.py ├── save_app1 ├── README.md ├── linear_restore1.py ├── linear_restore2.py ├── linear_save.py └── save │ ├── checkpoint │ ├── linear-1100.data-00000-of-00001 │ ├── linear-1100.index │ ├── linear-1100.meta │ ├── linear-1200.data-00000-of-00001 │ ├── linear-1200.index │ ├── linear-1200.meta │ ├── linear-1300.data-00000-of-00001 │ ├── linear-1300.index │ ├── linear-1300.meta │ ├── linear-1400.data-00000-of-00001 │ ├── linear-1400.index │ ├── linear-1400.meta │ ├── linear-1500.data-00000-of-00001 │ ├── linear-1500.index │ ├── linear-1500.meta │ ├── linear-1600.data-00000-of-00001 │ ├── linear-1600.index │ ├── linear-1600.meta │ ├── linear-1700.data-00000-of-00001 │ ├── linear-1700.index │ ├── linear-1700.meta │ ├── linear-1800.data-00000-of-00001 │ ├── linear-1800.index │ ├── linear-1800.meta │ ├── linear-1900.data-00000-of-00001 │ ├── linear-1900.index │ ├── linear-1900.meta │ ├── linear-2000.data-00000-of-00001 │ ├── linear-2000.index │ ├── linear-2000.meta │ ├── linear.data-00000-of-00001 │ ├── linear.index │ └── linear.meta ├── save_app2 ├── README.md ├── __pycache__ │ └── utils.cpython-34.pyc ├── cnn.py ├── cnn_restore.py ├── save │ ├── checkpoint │ 
├── my-model.data-00000-of-00001 │ ├── my-model.index │ └── my-model.meta └── utils.py ├── save_app3 ├── README.md ├── __pycache__ │ ├── utils.cpython-34.pyc │ └── utils.cpython-36.pyc ├── rnn_restore.ipynb ├── rnn_save.ipynb ├── save │ ├── checkpoint │ ├── rnn.data-00000-of-00001 │ ├── rnn.index │ └── rnn.meta └── utils.py ├── save_basic ├── .ipynb_checkpoints │ ├── restore-checkpoint.ipynb │ └── save-checkpoint.ipynb ├── README.md ├── restore.ipynb ├── save.ipynb └── save │ ├── checkpoint │ ├── model.data-00000-of-00001 │ ├── model.index │ └── model.meta ├── simple_rnn ├── README.md ├── rnn_with_static_rnn.py └── rnn_without_static_rnn.py ├── tensorboard ├── README.md ├── log │ └── events.out.tfevents.1490860755.Frankinstein └── practice.py └── tf.app.flags ├── README.md ├── implement.py └── practice.py /.gitignore: -------------------------------------------------------------------------------- 1 | __pycache__ 2 | *.swp 3 | *.pyc 4 | -------------------------------------------------------------------------------- /Graph/__pycache__/utils.cpython-34.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/Graph/__pycache__/utils.cpython-34.pyc -------------------------------------------------------------------------------- /Graph/practice.py: -------------------------------------------------------------------------------- 1 | from utils import * 2 | import sys 3 | tf.reset_default_graph() 4 | 5 | g1 = tf.Graph() 6 | with g1.as_default() as g: 7 | with g.name_scope( "g1" ) as scope: 8 | matrix1 = tf.constant([[3., 3.]], name = 'matrix1') 9 | matrix2 = tf.constant([[2.],[2.]], name = 'matrix2') 10 | product = tf.matmul(matrix1, matrix2, name = "product") 11 | 12 | tf.reset_default_graph() 13 | 14 | g2 = tf.Graph() 15 | with g2.as_default() as g: 16 | with g.name_scope( "g2" ) as scope: 17 | matrix1 = tf.constant([[4., 4.]], name = 
'matrix1') 18 | matrix2 = tf.constant([[5.],[5.]], name = 'matrix2') 19 | product = tf.matmul(matrix1, matrix2, name = "product") 20 | 21 | tf.reset_default_graph() 22 | #print("Default graph") 23 | #print(tf.get_default_graph().as_graph_def()) 24 | print("Graph g1") 25 | print(g1.as_graph_def()) 26 | #print_graph_properties(g1) 27 | sys.exit() 28 | 29 | #print(product) 30 | print_nodes(g1) 31 | print_nodes(g2) 32 | with tf.Session(graph = g1)as sess: 33 | product = g1.get_tensor_by_name("g1/product:0") 34 | print(sess.run(product)) 35 | #print(matrix1.name) 36 | print(product.name) 37 | -------------------------------------------------------------------------------- /Graph/utils.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import sys 3 | 4 | def print_keys(string): 5 | print("Collection name : {}".format(string)) 6 | i = 0 7 | while True: 8 | try: 9 | print(tf.get_collection(string)[i]) 10 | i+=1 11 | except IndexError: 12 | break; 13 | def print_nodes(graph): 14 | print("Graph : {}".format(graph)) 15 | temp = [n.name for n in graph.as_graph_def().node] 16 | for i in range(len(temp)): 17 | print(temp[i]) 18 | 19 | def print_graph_properties(graph): 20 | print("building_function : {}".format(graph.building_function)) 21 | print("finalized : {}".format(graph.finalized)) 22 | print("graph_def_versions : {}".format(graph.graph_def_versions)) 23 | print("seed : {}".format(graph.seed)) 24 | print("version : {}".format(graph.version)) 25 | 26 | -------------------------------------------------------------------------------- /Variable/README.md: -------------------------------------------------------------------------------- 1 | # How to view the variables, and trainable variables 2 | How to handle them 3 | -------------------------------------------------------------------------------- /Variable/__pycache__/utils.cpython-34.pyc: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/Variable/__pycache__/utils.cpython-34.pyc -------------------------------------------------------------------------------- /Variable/utils.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import sys 3 | 4 | class Utility: 5 | @staticmethod 6 | def print_keys(string): 7 | print("Collection name : {}".format(string)) 8 | i = 0 9 | while True: 10 | try: 11 | print(tf.get_collection(string)[i]) 12 | i+=1 13 | except IndexError: 14 | break; 15 | 16 | -------------------------------------------------------------------------------- /Variable/variable.py: -------------------------------------------------------------------------------- 1 | from utils import * 2 | 3 | x_data = [1., 2., 3., 4.] 4 | y_data = [2., 4., 6., 8.] 5 | 6 | W = tf.Variable(tf.random_uniform([1], -100., 100.), name = 'weights') 7 | b = tf.Variable(tf.random_uniform([1], -100., 100.), name = 'biases') 8 | 9 | X = tf.placeholder(tf.float32) 10 | Y = tf.placeholder(tf.float32) 11 | 12 | hypothesis = W * X + b 13 | 14 | cost = tf.reduce_mean(tf.square(hypothesis - Y)) 15 | 16 | Utility.print_keys("variables") 17 | Utility.print_keys("trainable_variables") 18 | # hypothesis, cost is just operation 19 | sys.exit() 20 | 21 | -------------------------------------------------------------------------------- /dropconnect/README.md: -------------------------------------------------------------------------------- 1 | # Dropconnect 2 | * simple dropconnect implementation 3 | -------------------------------------------------------------------------------- /dropconnect/__pycache__/utils.cpython-34.pyc: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/dropconnect/__pycache__/utils.cpython-34.pyc -------------------------------------------------------------------------------- /dropconnect/input.txt: -------------------------------------------------------------------------------- 1 | #x0 x1 x2 y 2 | 1 1 0 1 3 | 1 0 2 2 4 | 1 3 0 3 5 | 1 0 4 4 6 | 1 5 0 5 7 | -------------------------------------------------------------------------------- /dropconnect/main.py: -------------------------------------------------------------------------------- 1 | from utils import * 2 | 3 | xy = np.loadtxt('input.txt', unpack=True, dtype='float32') 4 | x_data = np.transpose(xy[0:-1]) 5 | y_data = xy[-1] 6 | 7 | nfeatures = 3 8 | 9 | x = tf.placeholder(tf.float32, [None, nfeatures]) 10 | y = tf.placeholder(tf.float32, [None]) 11 | 12 | W = tf.Variable(tf.random_uniform([nfeatures, 1], -1, 1), name = 'weights') 13 | 14 | W_wrap = dropconnect_wrapper(W, 0.5) 15 | 16 | y_hat = tf.matmul(x, W_wrap) 17 | 18 | cost = tf.reduce_mean(tf.square(y_hat - y_data)) 19 | 20 | train = tf.train.GradientDescentOptimizer(0.1).minimize(cost) 21 | 22 | sess = tf.Session() 23 | sess.run(tf.global_variables_initializer()) 24 | 25 | print_variables("trainable_variables") 26 | 27 | for step in range(5): 28 | sess.run(train, feed_dict = {x : x_data, y : y_data}) 29 | print(" w : {}".format(sess.run(W))) 30 | -------------------------------------------------------------------------------- /dropconnect/utils.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | import sys 4 | 5 | def dropconnect_wrapper(w, keep_prob = 1.0): 6 | ''' 7 | input : 8 | w : any tensor 9 | keep_prob : float default to be 1.0 10 | 11 | selector : same shape of w, to be 1 with probability with keep_prob otherwise 0 12 | 13 | return : 14 | keep the value of w with probability keep_prob 15 | ''' 16 | 
17 | selector = tf.sign(keep_prob - tf.random_uniform(get_size(w) 18 | , minval = 0 19 | , maxval=1 20 | , dtype = tf.float32)) 21 | 22 | selector = (selector + 1)/2 23 | 24 | return selector*w 25 | 26 | def get_size(w): 27 | return w.get_shape().as_list() 28 | 29 | def sample(prob): 30 | ''' 31 | input : 32 | prob 2D tensor 33 | return: 34 | sample 1 accroding to the probability 35 | ''' 36 | return (tf.sign(prob - tf.random_uniform(prob.get_shape(),minval = 0, maxval=1, dtype = tf.float32)) + 1)/2 37 | 38 | 39 | 40 | def print_variables(keys): 41 | i = 0 42 | print(keys) 43 | while True: 44 | try: 45 | print(tf.get_collection(keys)[i]) 46 | i+=1 47 | except IndexError: 48 | break; 49 | -------------------------------------------------------------------------------- /get_variable/.ipynb_checkpoints/Untitled-checkpoint.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [], 3 | "metadata": {}, 4 | "nbformat": 4, 5 | "nbformat_minor": 2 6 | } 7 | -------------------------------------------------------------------------------- /get_variable/README.md: -------------------------------------------------------------------------------- 1 | # Explanation 2 | Explain the correct methods to use tf.get_Variable 3 | -------------------------------------------------------------------------------- /get_variable/__pycache__/utils.cpython-34.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/get_variable/__pycache__/utils.cpython-34.pyc -------------------------------------------------------------------------------- /get_variable/utils.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import sys 3 | 4 | class Utility: 5 | def print_keys(self, string): 6 | print("Collection name : {}".format(string)) 7 | i = 0 8 | while 
True: 9 | try: 10 | print(tf.get_collection(string)[i]) 11 | i+=1 12 | except IndexError: 13 | break; 14 | 15 | -------------------------------------------------------------------------------- /get_variable/with_get_variable_correct1.py: -------------------------------------------------------------------------------- 1 | from utils import * 2 | 3 | x_data = [1.] 4 | y_data = [2.] 5 | 6 | def linear(x): 7 | w = tf.get_variable('weights', shape = [1], initializer = tf.constant_initializer(0.0)) 8 | b = tf.get_variable('biases', shape = [1], initializer = tf.constant_initializer(0.0)) 9 | return x*w + b 10 | 11 | x = tf.placeholder(tf.float32, shape = [None]) 12 | y = tf.placeholder(tf.float32, shape = [None]) 13 | 14 | with tf.variable_scope("layer1"): 15 | hypo = linear(x) 16 | 17 | with tf.variable_scope("layer1", reuse= True): 18 | hypothesis = linear(linear(x)) 19 | 20 | cost = tf.reduce_mean(tf.square(hypothesis - y)) 21 | train = tf.train.GradientDescentOptimizer(1).minimize(cost) 22 | 23 | util = Utility() 24 | util.print_keys("trainable_variables") 25 | sess = tf.Session() 26 | sess.run(tf.global_variables_initializer()) 27 | 28 | for step in range(10): 29 | with tf.variable_scope("layer1", reuse=True): 30 | w = tf.get_variable('weights',[1]) 31 | print(sess.run(w)) 32 | _, error = sess.run([train, cost], feed_dict={x : x_data, y : y_data}) 33 | print("cost : %.4f"%error) 34 | -------------------------------------------------------------------------------- /get_variable/with_get_variable_correct2.py: -------------------------------------------------------------------------------- 1 | from utils import * 2 | 3 | x_data = [1.] 4 | y_data = [2.] 
5 | 6 | def linear(x): 7 | w = tf.get_variable('weights', shape = [1], initializer = tf.constant_initializer(0.0)) 8 | b = tf.get_variable('biases', shape = [1], initializer = tf.constant_initializer(0.0)) 9 | return x*w + b 10 | 11 | x = tf.placeholder(tf.float32, shape = [None]) 12 | y = tf.placeholder(tf.float32, shape = [None]) 13 | 14 | with tf.variable_scope("layer1"): 15 | temp = linear(x) 16 | print(tf.get_variable_scope().name) 17 | tf.get_variable_scope().reuse_variables() 18 | hypothesis = linear(linear(x)) 19 | 20 | cost = tf.reduce_mean(tf.square(hypothesis - y)) 21 | train = tf.train.GradientDescentOptimizer(1).minimize(cost) 22 | 23 | util = Utility() 24 | util.print_keys("trainable_variables") 25 | sess = tf.Session() 26 | sess.run(tf.global_variables_initializer()) 27 | 28 | for step in range(10): 29 | with tf.variable_scope("layer1", reuse=True): 30 | w = tf.get_variable('weights',[1]) 31 | print(sess.run(w)) 32 | _, error = sess.run([train, cost], feed_dict={x : x_data, y : y_data}) 33 | print("cost : %.4f"%error) 34 | -------------------------------------------------------------------------------- /get_variable/with_get_variable_wrong.py: -------------------------------------------------------------------------------- 1 | from utils import * 2 | 3 | x_data = [1.] 4 | y_data = [2.] 
5 | 6 | def linear(x): 7 | w = tf.get_variable('weights', shape = [1], initializer = tf.constant_initializer(0.0)) 8 | b = tf.get_variable('biases', shape = [1], initializer = tf.constant_initializer(0.0)) 9 | return x*w + b 10 | 11 | x = tf.placeholder(tf.float32, shape = [None]) 12 | y = tf.placeholder(tf.float32, shape = [None]) 13 | 14 | with tf.variable_scope("layer1"): 15 | temp = linear(x) 16 | 17 | with tf.variable_scope("layer2"): 18 | hypothesis = linear(temp) 19 | 20 | cost = tf.reduce_mean(tf.square(hypothesis - y)) 21 | train = tf.train.GradientDescentOptimizer(1).minimize(cost) 22 | 23 | util = Utility() 24 | util.print_keys("trainable_variables") 25 | sess = tf.Session() 26 | sess.run(tf.global_variables_initializer()) 27 | 28 | for step in range(10): 29 | with tf.variable_scope("layer1", reuse=True): 30 | w = tf.get_variable('weights',[1]) 31 | print(sess.run(w)) 32 | _, error = sess.run([train, cost], feed_dict={x : x_data, y : y_data}) 33 | print("cost : %.4f"%error) 34 | -------------------------------------------------------------------------------- /get_variable/without_get_variable.py: -------------------------------------------------------------------------------- 1 | from utils import * 2 | 3 | x_data = [1.] 4 | y_data = [2.] 
5 | 6 | var_dict = { 7 | 'weights' : tf.Variable(tf.zeros([1]), name = 'weights') 8 | ,'biases' : tf.Variable(tf.zeros([1]), name = 'biases') 9 | } 10 | 11 | def linear(x, variable_dict): 12 | return x*variable_dict['weights']+variable_dict['biases'] 13 | 14 | x = tf.placeholder(tf.float32, shape = [None]) 15 | y = tf.placeholder(tf.float32, shape = [None]) 16 | 17 | hypothesis = linear(linear(x, var_dict), var_dict) 18 | 19 | cost = tf.reduce_mean(tf.square(hypothesis - y)) 20 | train = tf.train.GradientDescentOptimizer(1).minimize(cost) 21 | 22 | util = Utility() 23 | util.print_keys("trainable_variables") 24 | sess = tf.Session() 25 | sess.run(tf.global_variables_initializer()) 26 | 27 | for step in range(10): 28 | print(sess.run([var_dict['weights'], var_dict['biases']])) 29 | _, error = sess.run([train, cost], feed_dict={x : x_data, y : y_data}) 30 | print("cost : %.4f"%error) 31 | -------------------------------------------------------------------------------- /initializer/.ipynb_checkpoints/example-checkpoint.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "collapsed": true 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "import tensorflow as tf\n", 12 | "import numpy as np" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": 2, 18 | "metadata": { 19 | "collapsed": true 20 | }, 21 | "outputs": [], 22 | "source": [ 23 | "x = tf.random_normal_initializer()\n", 24 | "y = x(shape=[5,5], dtype=None, partition_info=None)" 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": 3, 30 | "metadata": { 31 | "collapsed": true 32 | }, 33 | "outputs": [], 34 | "source": [ 35 | "def random_normal_initializer(mean = 0.0, stddev = 1.0):\n", 36 | " def random_normal(shape, dtype = tf.float32, partition_info = None):\n", 37 | " return tf.random_normal(shape=shape, mean=mean, stddev=stddev)\n", 38 | " 
return random_normal\n", 39 | "def identity_initializer(tensor):\n", 40 | " def identity(shape, dtype = tf.float32, partition_info = None):\n", 41 | " return tf.identity(tensor)\n", 42 | " return identity" 43 | ] 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": 4, 48 | "metadata": { 49 | "collapsed": true 50 | }, 51 | "outputs": [], 52 | "source": [ 53 | "range_ = tf.cast(tf.range(5), dtype = tf.float32)" 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": 6, 59 | "metadata": {}, 60 | "outputs": [], 61 | "source": [ 62 | "with tf.variable_scope(\"ex\") as scope:\n", 63 | " w = tf.get_variable(\"weight\", shape=[2, 2], dtype=tf.float32, initializer=random_normal_initializer())\n", 64 | " b = tf.get_variable(\"bias\", shape=[2], dtype=tf.float32, initializer=identity_initializer(range_) )" 65 | ] 66 | }, 67 | { 68 | "cell_type": "code", 69 | "execution_count": 8, 70 | "metadata": {}, 71 | "outputs": [ 72 | { 73 | "name": "stdout", 74 | "output_type": "stream", 75 | "text": [ 76 | "[[-1.19857717 1.12051129]\n", 77 | " [-0.52025568 -0.5067327 ]]\n", 78 | "[ 0. 1. 2. 3. 4.]\n" 79 | ] 80 | } 81 | ], 82 | "source": [ 83 | "sess = tf.Session()\n", 84 | "sess.run(tf.global_variables_initializer())\n", 85 | "print(sess.run(w))\n", 86 | "print(sess.run(b))" 87 | ] 88 | }, 89 | { 90 | "cell_type": "code", 91 | "execution_count": 9, 92 | "metadata": {}, 93 | "outputs": [ 94 | { 95 | "name": "stdout", 96 | "output_type": "stream", 97 | "text": [ 98 | "[ 0. 1. 2. 3. 
4.]\n" 99 | ] 100 | } 101 | ], 102 | "source": [ 103 | "print(sess.run(range_))" 104 | ] 105 | } 106 | ], 107 | "metadata": { 108 | "kernelspec": { 109 | "display_name": "Python 3", 110 | "language": "python", 111 | "name": "python3" 112 | }, 113 | "language_info": { 114 | "codemirror_mode": { 115 | "name": "ipython", 116 | "version": 3 117 | }, 118 | "file_extension": ".py", 119 | "mimetype": "text/x-python", 120 | "name": "python", 121 | "nbconvert_exporter": "python", 122 | "pygments_lexer": "ipython3", 123 | "version": "3.5.3" 124 | } 125 | }, 126 | "nbformat": 4, 127 | "nbformat_minor": 2 128 | } 129 | -------------------------------------------------------------------------------- /initializer/example.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "collapsed": true 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "import tensorflow as tf\n", 12 | "import numpy as np" 13 | ] 14 | }, 15 | { 16 | "cell_type": "code", 17 | "execution_count": 2, 18 | "metadata": { 19 | "collapsed": true 20 | }, 21 | "outputs": [], 22 | "source": [ 23 | "x = tf.random_normal_initializer()\n", 24 | "y = x(shape=[5,5], dtype=None, partition_info=None)" 25 | ] 26 | }, 27 | { 28 | "cell_type": "code", 29 | "execution_count": 3, 30 | "metadata": { 31 | "collapsed": true 32 | }, 33 | "outputs": [], 34 | "source": [ 35 | "def random_normal_initializer(mean = 0.0, stddev = 1.0):\n", 36 | " def random_normal(shape, dtype = tf.float32, partition_info = None):\n", 37 | " return tf.random_normal(shape=shape, mean=mean, stddev=stddev)\n", 38 | " return random_normal\n", 39 | "def identity_initializer(tensor):\n", 40 | " def identity(shape, dtype = tf.float32, partition_info = None):\n", 41 | " return tf.identity(tensor)\n", 42 | " return identity" 43 | ] 44 | }, 45 | { 46 | "cell_type": "code", 47 | "execution_count": 4, 48 | "metadata": { 49 | "collapsed": 
true 50 | }, 51 | "outputs": [], 52 | "source": [ 53 | "range_ = tf.cast(tf.range(5), dtype = tf.float32)" 54 | ] 55 | }, 56 | { 57 | "cell_type": "code", 58 | "execution_count": 6, 59 | "metadata": {}, 60 | "outputs": [], 61 | "source": [ 62 | "with tf.variable_scope(\"ex\") as scope:\n", 63 | " w = tf.get_variable(\"weight\", shape=[2, 2], dtype=tf.float32, initializer=random_normal_initializer())\n", 64 | " b = tf.get_variable(\"bias\", shape=[2], dtype=tf.float32, initializer=identity_initializer(range_) )" 65 | ] 66 | }, 67 | { 68 | "cell_type": "code", 69 | "execution_count": 8, 70 | "metadata": {}, 71 | "outputs": [ 72 | { 73 | "name": "stdout", 74 | "output_type": "stream", 75 | "text": [ 76 | "[[-1.19857717 1.12051129]\n", 77 | " [-0.52025568 -0.5067327 ]]\n", 78 | "[ 0. 1. 2. 3. 4.]\n" 79 | ] 80 | } 81 | ], 82 | "source": [ 83 | "sess = tf.Session()\n", 84 | "sess.run(tf.global_variables_initializer())\n", 85 | "print(sess.run(w))\n", 86 | "print(sess.run(b))" 87 | ] 88 | }, 89 | { 90 | "cell_type": "code", 91 | "execution_count": 9, 92 | "metadata": {}, 93 | "outputs": [ 94 | { 95 | "name": "stdout", 96 | "output_type": "stream", 97 | "text": [ 98 | "[ 0. 1. 2. 3. 
4.]\n" 99 | ] 100 | } 101 | ], 102 | "source": [ 103 | "print(sess.run(range_))" 104 | ] 105 | } 106 | ], 107 | "metadata": { 108 | "kernelspec": { 109 | "display_name": "Python 3", 110 | "language": "python", 111 | "name": "python3" 112 | }, 113 | "language_info": { 114 | "codemirror_mode": { 115 | "name": "ipython", 116 | "version": 3 117 | }, 118 | "file_extension": ".py", 119 | "mimetype": "text/x-python", 120 | "name": "python", 121 | "nbconvert_exporter": "python", 122 | "pygments_lexer": "ipython3", 123 | "version": "3.5.3" 124 | } 125 | }, 126 | "nbformat": 4, 127 | "nbformat_minor": 2 128 | } 129 | -------------------------------------------------------------------------------- /rnn_handmade/README.md: -------------------------------------------------------------------------------- 1 | # Hand made rnn cell 2 | * basic rnn cell is completed with MaeRNN 3 | * lstm cell is completed with MaeLSTM 4 | * [RNN 과 LSTM](http://www.whydsp.org/280) 5 | -------------------------------------------------------------------------------- /rnn_handmade/__pycache__/utils.cpython-34.pyc: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/rnn_handmade/__pycache__/utils.cpython-34.pyc -------------------------------------------------------------------------------- /rnn_handmade/basic_rnn_impl.py: -------------------------------------------------------------------------------- 1 | from utils import * 2 | 3 | # learning data 4 | rnn_size = 10 5 | train_steps = 10000 6 | test_data_size = 10 7 | 8 | data_size = 5 9 | num_data = 10 10 | 11 | x_data = [] 12 | y_data = [] 13 | 14 | for i in range(num_data): 15 | input_temp = [] 16 | for j in range(i,i+data_size): 17 | input_temp.append(j) 18 | output_temp = i+data_size 19 | 20 | x_data.append(input_temp) 21 | y_data.append(output_temp) 22 | 23 | x_data = np.array(x_data, dtype = np.float32) 24 | x_data 
= np.transpose(x_data, [1,0]) 25 | y_data = np.array(y_data, dtype = np.float32) 26 | 27 | print(x_data) 28 | print(y_data) 29 | print(x_data.shape) 30 | print(y_data.shape) 31 | 32 | train_x = tf.placeholder('float', [data_size, num_data]) 33 | train_y = tf.placeholder('float', [num_data]) 34 | rnn_size = 1 35 | maernn = MaeRNN(rnn_size) 36 | 37 | temp_x = tf.unstack(train_x) 38 | 39 | outputs = list() 40 | with tf.variable_scope("rnn") as scope: 41 | for i in range(data_size): 42 | if i==0: 43 | state = maernn(tf.expand_dims(temp_x[i],1)) 44 | outputs.append(state) 45 | else: 46 | scope.reuse_variables() 47 | state = maernn(tf.expand_dims(temp_x[i],1), state) 48 | outputs.append(state) 49 | 50 | layer = {'weights':tf.Variable(tf.random_normal([rnn_size, 1]), name = 'output_weight'), 51 | 'biases':tf.Variable(tf.random_normal([1]), name = 'output_bias')} 52 | prediction = tf.reshape(tf.matmul(outputs[-1], layer['weights'])+ layer['biases'],[-1]) 53 | error = tf.reduce_mean(tf.square(prediction - train_y)) 54 | global_step = tf.Variable(0.0, trainable = False) 55 | learning_rate = tf.train.exponential_decay(learning_rate= 1e-2, global_step= global_step, 56 | decay_steps = 1000, decay_rate = 0.1, staircase=True) 57 | optimizer = tf.train.AdamOptimizer(1e-2).minimize(loss = error, global_step = global_step) 58 | 59 | util = Utility() 60 | util.print_keys("trainable_variables") 61 | 62 | sess = tf.Session() 63 | sess.run(tf.global_variables_initializer()) 64 | 65 | for i in range(3000): 66 | _,c = sess.run([optimizer, error],feed_dict = {train_x : x_data, train_y : y_data}) 67 | if i%500==0: 68 | print(sess.run(learning_rate)) 69 | print("{}th step cost : {}".format(i, c)) 70 | 71 | print(sess.run(prediction, feed_dict = {train_x : x_data})) 72 | -------------------------------------------------------------------------------- /rnn_handmade/lstm_impl.py: -------------------------------------------------------------------------------- 1 | from utils import * 2 | 3 | # 
learning data 4 | rnn_size = 5 5 | train_steps = 10000 6 | test_data_size = 10 7 | 8 | data_size = 5 9 | num_data = 10 10 | 11 | x_data = [] 12 | y_data = [] 13 | 14 | for i in range(num_data): 15 | input_temp = [] 16 | for j in range(i,i+data_size): 17 | input_temp.append(j) 18 | output_temp = i+data_size 19 | 20 | x_data.append(input_temp) 21 | y_data.append(output_temp) 22 | 23 | x_data = np.array(x_data, dtype = np.float32) 24 | x_data = np.transpose(x_data, [1,0]) 25 | y_data = np.array(y_data, dtype = np.float32) 26 | 27 | print(x_data) 28 | print(y_data) 29 | print(x_data.shape) 30 | print(y_data.shape) 31 | 32 | train_x = tf.placeholder('float', [data_size, num_data]) 33 | train_y = tf.placeholder('float', [num_data]) 34 | rnn_size = 1 35 | lstm = MaeLSTM(rnn_size) 36 | 37 | temp_x = tf.unstack(train_x) 38 | 39 | outputs = list() 40 | with tf.variable_scope("rnn") as scope: 41 | for i in range(data_size): 42 | if i==0: 43 | cell, hidden = lstm(tf.expand_dims(temp_x[i],1)) 44 | outputs.append(hidden) 45 | else: 46 | scope.reuse_variables() 47 | cell, hidden = lstm(tf.expand_dims(temp_x[i],1), cell, hidden) 48 | outputs.append(hidden) 49 | 50 | layer = {'weights':tf.Variable(tf.random_normal([rnn_size, 1]), name = 'output_weight'), 51 | 'biases':tf.Variable(tf.random_normal([1]), name = 'output_bias')} 52 | 53 | prediction = tf.reshape(tf.matmul(outputs[-1], layer['weights'])+ layer['biases'],[-1]) 54 | error = tf.reduce_mean(tf.square(prediction - train_y)) 55 | global_step = tf.Variable(0.0, trainable = False) 56 | learning_rate = tf.train.exponential_decay(learning_rate= 1e-2, global_step= global_step, 57 | decay_steps = 1000, decay_rate = 0.1, staircase=True) 58 | optimizer = tf.train.AdamOptimizer(1e-2).minimize(loss = error, global_step = global_step) 59 | 60 | util = Utility() 61 | util.print_keys("trainable_variables") 62 | 63 | sess = tf.Session() 64 | sess.run(tf.global_variables_initializer()) 65 | 66 | for i in range(3000): 67 | _,c = 
sess.run([optimizer, error],feed_dict = {train_x : x_data, train_y : y_data}) 68 | if i%500==0: 69 | print(sess.run(learning_rate)) 70 | print("{}th step cost : {}".format(i, c)) 71 | 72 | print(sess.run(prediction, feed_dict = {train_x : x_data})) 73 | -------------------------------------------------------------------------------- /rnn_handmade/utils.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | import numpy as np 3 | import sys 4 | 5 | class Utility: 6 | def print_keys(self, string): 7 | print("Collection name : {}".format(string)) 8 | i = 0 9 | while True: 10 | try: 11 | print(tf.get_collection(string)[i]) 12 | i+=1 13 | except IndexError: 14 | break; 15 | 16 | class MaeLSTM: 17 | def __init__(self, size): 18 | self.rnn_size = size 19 | 20 | def __call__(self, x, c = None, h = None): 21 | xw = tf.get_variable(name = "input_weights" 22 | , shape = [x.get_shape()[1], 4*self.rnn_size] 23 | , initializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)) 24 | if c ==None and h == None: 25 | h = tf.zeros([x.get_shape()[0], self.rnn_size]) 26 | c = tf.zeros([x.get_shape()[0], self.rnn_size]) 27 | 28 | hw = tf.get_variable(name = "state_weights" 29 | , shape = [self.rnn_size, 4*self.rnn_size] 30 | , initializer = tf.constant_initializer(0.0)) 31 | 32 | b = tf.get_variable(name = "biases" 33 | , shape = [4*self.rnn_size] 34 | , initializer = tf.truncated_normal_initializer(mean = 0.0, stddev = 0.01)) 35 | 36 | concat = tf.matmul(x, xw) + tf.matmul(h,hw) + b 37 | f, i, c_, o = tf.split(concat, 4, axis = 1) 38 | 39 | new_c = c*tf.sigmoid(f) + tf.sigmoid(i)*tf.nn.tanh(c_) 40 | new_h = o*tf.nn.tanh(new_c) 41 | 42 | return new_c, new_h 43 | 44 | class MaeRNN: 45 | def __init__(self, size = 10): 46 | self.rnn_size = size 47 | 48 | def __call__(self, i, s=None): 49 | ''' 50 | input : 51 | i = [batch, input_size] 52 | return : 53 | [batch, rnn_size] 54 | ''' 55 | iw = tf.get_variable(name = 
"input_weights" 56 | , shape = [i.get_shape()[1], self.rnn_size] 57 | , initializer = tf.random_normal_initializer(mean=0.0, stddev=0.01)) 58 | if s==None: 59 | s = tf.zeros([i.get_shape()[0], self.rnn_size]) 60 | 61 | sw = tf.get_variable(name = "state_weights" 62 | , shape = [self.rnn_size, self.rnn_size] 63 | , initializer = tf.constant_initializer(0.0)) 64 | 65 | b = tf.get_variable(name = "biases" 66 | , shape = [self.rnn_size] 67 | , initializer = tf.truncated_normal_initializer(mean = 0.0, stddev = 0.01)) 68 | 69 | return tf.nn.tanh(tf.matmul(i, iw) + tf.matmul(s,sw) + b) 70 | 71 | -------------------------------------------------------------------------------- /rnn_states/README.md: -------------------------------------------------------------------------------- 1 | # Use for study about rnn states 2 | -------------------------------------------------------------------------------- /rnn_states/rnn.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | import pandas as pd 4 | from tensorflow.python.ops import rnn, rnn_cell 5 | import sys 6 | 7 | # learning data 8 | rnn_size = 2 9 | train_steps = 10000 10 | test_data_size = 2 11 | 12 | data_size = 5 13 | num_data = 3 14 | 15 | x_data = [] 16 | y_data = [] 17 | 18 | normalizer = 1 19 | 20 | for i in range(num_data): 21 | input_temp = [] 22 | for j in range(i,i+data_size): 23 | input_temp.append(j*normalizer) 24 | x_data.append(input_temp) 25 | output_temp = normalizer*(i+data_size) 26 | y_data.append(output_temp) 27 | 28 | x_data = np.array(x_data, dtype = np.float32) 29 | x_data = np.reshape(x_data, [num_data,data_size,1]) 30 | y_data = np.array(y_data, dtype = np.float32) 31 | y_data = np.reshape(y_data,[-1,1]) 32 | 33 | #print(x_data) 34 | #print(y_data) 35 | #print(x_data.shape) 36 | #print(y_data.shape) 37 | 38 | test_x_data = [] 39 | for i in range(3,3+test_data_size): 40 | test_x_data.append(i) 41 | 42 | test_x_data = 
np.array(test_x_data, dtype = np.float32) 43 | test_x_data = np.reshape(test_x_data, [-1,test_data_size,1]) 44 | 45 | print(test_x_data) 46 | print(test_x_data.shape) 47 | # train 48 | train_x = tf.placeholder('float', [None, data_size,1 ]) 49 | train_y = tf.placeholder('float', [None,1]) 50 | test_x = tf.placeholder('float', [1,test_data_size,1]) 51 | test_y = tf.placeholder('float', [1]) 52 | 53 | train_x_temp = tf.transpose(train_x, [1,0,2]) 54 | train_x_temp = tf.reshape(train_x_temp, [-1,1]) 55 | train_x_temp =tf.split(0, data_size, train_x_temp) 56 | 57 | test_x_temp = tf.transpose(test_x, [1,0,2]) 58 | test_x_temp = tf.reshape(test_x_temp, [-1,1]) 59 | test_x_temp =tf.split(0, test_data_size, test_x_temp) 60 | 61 | layer = {'weights':tf.Variable(tf.random_normal([rnn_size, 1])), 62 | 'biases':tf.Variable(tf.random_normal([1]))} 63 | 64 | with tf.variable_scope("rnn") as scope: 65 | lstm_cell = rnn_cell.LSTMCell(rnn_size) 66 | train_outputs, train_states = rnn.rnn(lstm_cell, train_x_temp, dtype=tf.float32) 67 | train_output = tf.matmul(train_outputs[-1],layer['weights']) + layer['biases'] 68 | scope.reuse_variables() 69 | test_outputs, test_states = rnn.rnn(lstm_cell, test_x_temp, dtype=tf.float32) 70 | test_output = tf.matmul(test_outputs[-1],layer['weights']) + layer['biases'] 71 | 72 | error = tf.reduce_mean(tf.square(train_output-train_y)) 73 | optimizer = tf.train.AdamOptimizer().minimize(error) 74 | 75 | sess = tf.Session() 76 | sess.run(tf.global_variables_initializer()) 77 | for i in range(train_steps+1): 78 | a,c = sess.run([optimizer, error],feed_dict = {train_x : x_data, train_y : y_data}) 79 | if i%500==0: 80 | print(c) 81 | 82 | print("States") 83 | print(sess.run(train_states, feed_dict = {train_x : x_data, train_y : y_data})) 84 | print("Outputs") 85 | print(sess.run(train_outputs, feed_dict = {train_x : x_data, train_y : y_data})) 86 | print("Last output") 87 | print(sess.run(train_outputs[-1], feed_dict = {train_x : x_data, train_y : 
y_data})) 88 | print("value") 89 | print(sess.run([layer['weights'], layer['biases']])) 90 | print("final result") 91 | print(sess.run(train_output, feed_dict = {train_x : x_data, train_y : y_data})) 92 | 93 | 94 | print("-----------------------------------------------") 95 | print(test_x_data) 96 | print(sess.run(test_output, feed_dict = {test_x : test_x_data})) 97 | #print(sess.run(train_output, feed_dict = {train_x : x_data, train_y : y_data})) 98 | -------------------------------------------------------------------------------- /save_app1/README.md: -------------------------------------------------------------------------------- 1 | # Save app1 2 | save and restore example for simple linear regression case 3 | 4 | ## Environment 5 | python=3.4 6 | tensorflow = 1.1.1 7 | 8 | ## Application 9 | Linear regression 10 | 11 | ## linear_save.py 12 | * Train the linear regression model 13 | * Save the trained model. 14 | 15 | ## linear_restore1.py 16 | * Build the model 17 | * Restore only variable 18 | 19 | ## linear_restore2.py 20 | * Restore the entire graph 21 | * It requires to get collection predefined before to get variable 22 | -------------------------------------------------------------------------------- /save_app1/linear_restore1.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | # Only requires for name to be same 4 | W_r = tf.Variable(tf.random_uniform([1], -1., 1.), name = "W") 5 | b_r = tf.Variable(tf.random_uniform([1], -1., 1.), name = "b") 6 | 7 | X = tf.placeholder(tf.float32) 8 | Y = tf.placeholder(tf.float32) 9 | 10 | hypothesis = W_r * X + b_r 11 | 12 | sess = tf.Session() 13 | sess.run(tf.global_variables_initializer()) 14 | saver = tf.train.Saver() 15 | 16 | print("tf.train.latest_checkpoint('./save/') = {}".format(tf.train.latest_checkpoint('./save/'))) 17 | 18 | print("Before restoration : tf.global_variables()") 19 | print(["{} : {}".format(v.name, sess.run(v)) for v in 
import tensorflow as tf

# Rebuild the full graph definition from the .meta file, then load the
# most recent checkpoint's variable values into it.
sess = tf.Session()
saver = tf.train.import_meta_graph('./save/linear.meta')
saver.restore(sess, tf.train.latest_checkpoint('./save/'))

print("tf.global_variables")
print([variable.name for variable in tf.global_variables()])

print("tf.get_collection('vars')")
print(tf.get_collection('vars'))

# The saving script registered the placeholders in the 'input' collection:
# index 0 is X, index 1 is Y.
restored_inputs = tf.get_collection('input')
X, Y = restored_inputs[0], restored_inputs[1]

# The prediction op was stored under the 'hypo' collection.
hypothesis = tf.get_collection('hypo')[0]
print(sess.run(hypothesis, feed_dict={X: 5}))
print(sess.run(hypothesis, feed_dict={X: 2.5}))
sess.close()
5 | 6 | W = tf.Variable(tf.random_uniform([1], -1., 1.), name = "W") 7 | b = tf.Variable(tf.random_uniform([1], -1., 1.), name = "b") 8 | 9 | X = tf.placeholder(tf.float32) 10 | Y = tf.placeholder(tf.float32) 11 | 12 | hypothesis = W * X + b 13 | 14 | tf.add_to_collection('hypo', hypothesis) 15 | tf.add_to_collection('input', X) 16 | tf.add_to_collection('input', Y) 17 | tf.add_to_collection('vars', W) 18 | tf.add_to_collection('vars', b) 19 | 20 | cost = tf.reduce_mean(tf.square(hypothesis - Y)) 21 | 22 | train = tf.train.GradientDescentOptimizer(1e-3).minimize(cost) 23 | 24 | sess = tf.Session() 25 | sess.run(tf.global_variables_initializer()) 26 | saver = tf.train.Saver(tf.global_variables(), max_to_keep = 10) 27 | 28 | # fit the line 29 | for step in range(2001): 30 | sess.run(train, feed_dict={X: x_data, Y: y_data}) 31 | if step % 100 == 0: 32 | print("{} step cost : {}".format(step, sess.run(cost, feed_dict={X: x_data, Y: y_data}), sess.run(W), sess.run(b))) 33 | saver.save(sess, './save/linear', global_step = step) 34 | 35 | print(sess.run(hypothesis, feed_dict={X: 5})) 36 | print(sess.run(hypothesis, feed_dict={X: 2.5})) 37 | -------------------------------------------------------------------------------- /save_app1/save/checkpoint: -------------------------------------------------------------------------------- 1 | model_checkpoint_path: "linear-2000" 2 | all_model_checkpoint_paths: "linear-1100" 3 | all_model_checkpoint_paths: "linear-1200" 4 | all_model_checkpoint_paths: "linear-1300" 5 | all_model_checkpoint_paths: "linear-1400" 6 | all_model_checkpoint_paths: "linear-1500" 7 | all_model_checkpoint_paths: "linear-1600" 8 | all_model_checkpoint_paths: "linear-1700" 9 | all_model_checkpoint_paths: "linear-1800" 10 | all_model_checkpoint_paths: "linear-1900" 11 | all_model_checkpoint_paths: "linear-2000" 12 | -------------------------------------------------------------------------------- /save_app1/save/linear-1100.data-00000-of-00001: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1100.data-00000-of-00001 -------------------------------------------------------------------------------- /save_app1/save/linear-1100.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1100.index -------------------------------------------------------------------------------- /save_app1/save/linear-1100.meta: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1100.meta -------------------------------------------------------------------------------- /save_app1/save/linear-1200.data-00000-of-00001: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1200.data-00000-of-00001 -------------------------------------------------------------------------------- /save_app1/save/linear-1200.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1200.index -------------------------------------------------------------------------------- /save_app1/save/linear-1200.meta: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1200.meta 
-------------------------------------------------------------------------------- /save_app1/save/linear-1300.data-00000-of-00001: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1300.data-00000-of-00001 -------------------------------------------------------------------------------- /save_app1/save/linear-1300.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1300.index -------------------------------------------------------------------------------- /save_app1/save/linear-1300.meta: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1300.meta -------------------------------------------------------------------------------- /save_app1/save/linear-1400.data-00000-of-00001: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1400.data-00000-of-00001 -------------------------------------------------------------------------------- /save_app1/save/linear-1400.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1400.index -------------------------------------------------------------------------------- /save_app1/save/linear-1400.meta: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1400.meta -------------------------------------------------------------------------------- /save_app1/save/linear-1500.data-00000-of-00001: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1500.data-00000-of-00001 -------------------------------------------------------------------------------- /save_app1/save/linear-1500.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1500.index -------------------------------------------------------------------------------- /save_app1/save/linear-1500.meta: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1500.meta -------------------------------------------------------------------------------- /save_app1/save/linear-1600.data-00000-of-00001: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1600.data-00000-of-00001 -------------------------------------------------------------------------------- /save_app1/save/linear-1600.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1600.index -------------------------------------------------------------------------------- /save_app1/save/linear-1600.meta: 
-------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1600.meta -------------------------------------------------------------------------------- /save_app1/save/linear-1700.data-00000-of-00001: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1700.data-00000-of-00001 -------------------------------------------------------------------------------- /save_app1/save/linear-1700.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1700.index -------------------------------------------------------------------------------- /save_app1/save/linear-1700.meta: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1700.meta -------------------------------------------------------------------------------- /save_app1/save/linear-1800.data-00000-of-00001: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1800.data-00000-of-00001 -------------------------------------------------------------------------------- /save_app1/save/linear-1800.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1800.index 
-------------------------------------------------------------------------------- /save_app1/save/linear-1800.meta: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1800.meta -------------------------------------------------------------------------------- /save_app1/save/linear-1900.data-00000-of-00001: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1900.data-00000-of-00001 -------------------------------------------------------------------------------- /save_app1/save/linear-1900.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1900.index -------------------------------------------------------------------------------- /save_app1/save/linear-1900.meta: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-1900.meta -------------------------------------------------------------------------------- /save_app1/save/linear-2000.data-00000-of-00001: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-2000.data-00000-of-00001 -------------------------------------------------------------------------------- /save_app1/save/linear-2000.index: -------------------------------------------------------------------------------- 
https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-2000.index -------------------------------------------------------------------------------- /save_app1/save/linear-2000.meta: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear-2000.meta -------------------------------------------------------------------------------- /save_app1/save/linear.data-00000-of-00001: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear.data-00000-of-00001 -------------------------------------------------------------------------------- /save_app1/save/linear.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear.index -------------------------------------------------------------------------------- /save_app1/save/linear.meta: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_app1/save/linear.meta -------------------------------------------------------------------------------- /save_app2/README.md: -------------------------------------------------------------------------------- 1 | # Explanation 2 | cnn_save.py 3 | Use MNIST data and save the model 4 | cnn_restore.py 5 | 1. Load the trained model 6 | 2. Stack up the model 7 | 3. Train all the model again. 
from utils import *

# Train a small CNN on MNIST, then save the whole model so that
# cnn_restore.py can reload it and stack additional layers on top.
mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True)
x = tf.placeholder(tf.float32, shape=[None, 784], name='x')
y_ = tf.placeholder(tf.float32, shape=[None, 10], name='y_')

x_image = tf.reshape(x, [-1, 28, 28, 1])
h_pool2 = my_image_filter(x_image)

# Register the tensors cnn_restore.py looks up by collection name.
tf.add_to_collection("conv2", h_pool2)
tf.add_to_collection("input", x)

with tf.variable_scope("var1"):
    result1 = tf.reshape(h_pool2, [-1, 5*5*50])
    y_temp = tf.nn.relu(fully_connected_layer(result1, 5*5*50, 500))
with tf.variable_scope("var2"):
    y_hat = tf.nn.softmax(fully_connected_layer(y_temp, 500, 10))

# BUG FIX: tf.log(0) yields -inf, so the loss turns NaN as soon as any
# softmax output saturates to exactly 0; clip probabilities before log.
cross_entropy = - tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y_hat, 1e-10, 1.0)))
train_step = tf.train.GradientDescentOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_hat, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())

epoch = 2
for j in range(epoch):
    for i in range(550):  # 550 batches of 100 = one pass over the 55k train set
        batch = mnist.train.next_batch(100)
        train_step.run(feed_dict={x: batch[0], y_: batch[1]})
        if i % 50 == 49:
            train_accuracy = accuracy.eval(feed_dict={x: batch[0], y_: batch[1]})
            print("train_accuracy : {}".format(train_accuracy))

test_accuracy = accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels})
print("test accuracy = {}".format(test_accuracy))
saver = tf.train.Saver()
saver.save(sess, './save/my-model')
# saver.export_meta_graph(filename = './save/my-model.meta')
tf.reshape(my_image_filter(h_pool2), [-1, 2*2*100]) 44 | y_hat = tf.nn.softmax(fully_connected_layer(result1 ,2*2*100, 10)) 45 | 46 | with tf.variable_scope("final"): 47 | error = - tf.reduce_sum(y_*tf.log(y_hat)) 48 | train = tf.train.AdamOptimizer(1e-4).minimize(loss = error, var_list = tf.get_collection("local")) 49 | prediction = tf.equal(tf.argmax(y_hat,1), tf.argmax(y_,1)) 50 | correct = tf.reduce_mean(tf.cast(prediction, tf.float32)) 51 | ''' 52 | i = 0 53 | while True: 54 | try: 55 | print(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES)[i]) 56 | i+=1 57 | except IndexError: 58 | break; 59 | i = 0 60 | while True: 61 | try: 62 | print(tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)[i]) 63 | i+=1 64 | except IndexError: 65 | break; 66 | ''' 67 | # tf.global_variables_initailizer() = tf.variables_initializer(tf.get_collection(tf.GraphKeys.GLOBAL_VARIBLES) 68 | #tf.variables_initailizer(tf.global_variables()) 69 | 70 | sess.run(tf.variables_initializer(tf.get_collection("local"))) 71 | sess.run(tf.variables_initializer([v for v in tf.get_collection("variables") if v.name.startswith("final")])) 72 | epoch = 1 73 | for j in range(epoch): 74 | for i in range(550): 75 | batch = mnist.train.next_batch(100) 76 | train.run(feed_dict={x: batch[0], y_: batch[1]}) 77 | if i%50 == 49: 78 | train_accuracy = correct.eval(feed_dict={x:batch[0], y_: batch[1]}) 79 | print("train_accuracy : {}".format(train_accuracy)) 80 | 81 | test_accuracy = correct.eval(feed_dict={x: mnist.test.images, y_:mnist.test.labels}) 82 | print("test accuracy = {}".format(test_accuracy)) 83 | 84 | -------------------------------------------------------------------------------- /save_app2/save/checkpoint: -------------------------------------------------------------------------------- 1 | model_checkpoint_path: "my-model" 2 | all_model_checkpoint_paths: "my-model" 3 | -------------------------------------------------------------------------------- /save_app2/save/my-model.data-00000-of-00001: 
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np

def conv_relu(input, kernel_shape, pool_shape, bias_shape):
    """Convolution + ReLU + max-pool building block.

    Args:
        input: 4-D input tensor (NHWC).
        kernel_shape: shape of the convolution kernel variable.
        pool_shape: used as both ksize and strides for max pooling.
        bias_shape: shape of the bias variable.

    Returns:
        The pooled activation tensor.
    """
    kernel = tf.Variable(tf.random_normal(kernel_shape, stddev=0.1), name='weights')
    bias = tf.Variable(tf.constant(0.1, shape=bias_shape), name='biases')
    activation = tf.nn.relu(
        tf.nn.conv2d(input, kernel, strides=[1, 1, 1, 1], padding='VALID') + bias)
    # pool_shape doubles as the pooling window and its stride.
    return tf.nn.max_pool(activation, ksize=pool_shape,
                          strides=pool_shape, padding='SAME')

def my_image_filter(input_images):
    """Two stacked conv_relu blocks under scopes "conv1" and "conv2"."""
    with tf.variable_scope("conv1"):
        first = conv_relu(input_images, [5, 5, 1, 30], [1, 2, 2, 1], [30])
    with tf.variable_scope("conv2"):
        second = conv_relu(first, [3, 3, 30, 50], [1, 2, 2, 1], [50])
    return second

def fully_connected_layer(input, input_size, output_size):
    """Affine layer: input @ W + b (no activation applied)."""
    weight = tf.Variable(tf.random_normal([input_size, output_size], stddev=0.1), name='fc_weights')
    offset = tf.Variable(tf.constant(0.1, shape=[output_size]), name="fc_biases")
    return tf.matmul(input, weight) + offset
"stdout", 37 | "output_type": "stream", 38 | "text": [ 39 | "(3, 9, 1)\n", 40 | "(3, 1)\n" 41 | ] 42 | } 43 | ], 44 | "source": [ 45 | "rnn_size = 1\n", 46 | "\n", 47 | "data_size = 9\n", 48 | "num_data = 3\n", 49 | "\n", 50 | "x_data = []\n", 51 | "y_data = []\n", 52 | "\n", 53 | "normalizer = 1\n", 54 | "\n", 55 | "for i in range(num_data):\n", 56 | " input_temp = []\n", 57 | " for j in range(i,i+data_size):\n", 58 | " input_temp.append(j*normalizer)\n", 59 | " x_data.append(input_temp)\n", 60 | " output_temp = normalizer*(i+data_size)\n", 61 | " y_data.append(output_temp) \n", 62 | "\n", 63 | "x_data = np.array(x_data, dtype = np.float32)\n", 64 | "x_data = np.reshape(x_data, [num_data,data_size,1])\n", 65 | "y_data = np.array(y_data, dtype = np.float32)\n", 66 | "y_data = np.reshape(y_data,[-1,1])\n", 67 | "\n", 68 | "print(x_data.shape)\n", 69 | "print(y_data.shape)" 70 | ] 71 | }, 72 | { 73 | "cell_type": "markdown", 74 | "metadata": { 75 | "deletable": true, 76 | "editable": true 77 | }, 78 | "source": [ 79 | "# Graph restore" 80 | ] 81 | }, 82 | { 83 | "cell_type": "code", 84 | "execution_count": 3, 85 | "metadata": { 86 | "collapsed": false, 87 | "deletable": true, 88 | "editable": true, 89 | "scrolled": true 90 | }, 91 | "outputs": [ 92 | { 93 | "name": "stdout", 94 | "output_type": "stream", 95 | "text": [ 96 | "Collection name : trainable_variables\n", 97 | "Tensor(\"Variable/read:0\", shape=(1, 1), dtype=float32)\n", 98 | "Tensor(\"Variable_1/read:0\", shape=(1,), dtype=float32)\n", 99 | "Tensor(\"rnn/rnn/lstm_cell/weights/read:0\", shape=(2, 4), dtype=float32)\n", 100 | "Tensor(\"rnn/rnn/lstm_cell/biases/read:0\", shape=(4,), dtype=float32)\n" 101 | ] 102 | } 103 | ], 104 | "source": [ 105 | "sess = tf.Session()\n", 106 | "saver = tf.train.import_meta_graph('./save/rnn.meta')\n", 107 | "saver.restore(sess, './save/rnn')\n", 108 | "print_keys(\"trainable_variables\")" 109 | ] 110 | }, 111 | { 112 | "cell_type": "markdown", 113 | "metadata": { 114 | 
"deletable": true, 115 | "editable": true 116 | }, 117 | "source": [ 118 | "# Graph print" 119 | ] 120 | }, 121 | { 122 | "cell_type": "code", 123 | "execution_count": 4, 124 | "metadata": { 125 | "collapsed": false, 126 | "deletable": true, 127 | "editable": true 128 | }, 129 | "outputs": [ 130 | { 131 | "name": "stdout", 132 | "output_type": "stream", 133 | "text": [ 134 | "Graph : \n", 135 | "Placeholder\n", 136 | "Placeholder_1\n", 137 | "transpose/perm\n", 138 | "transpose\n", 139 | "Reshape/shape\n", 140 | "Reshape\n", 141 | "split/split_dim\n", 142 | "split\n", 143 | "random_normal/shape\n", 144 | "random_normal/mean\n", 145 | "random_normal/stddev\n", 146 | "random_normal/RandomStandardNormal\n", 147 | "random_normal/mul\n", 148 | "random_normal\n", 149 | "Variable\n", 150 | "Variable/Assign\n", 151 | "Variable/read\n", 152 | "random_normal_1/shape\n", 153 | "random_normal_1/mean\n", 154 | "random_normal_1/stddev\n", 155 | "random_normal_1/RandomStandardNormal\n", 156 | "random_normal_1/mul\n", 157 | "random_normal_1\n", 158 | "Variable_1\n", 159 | "Variable_1/Assign\n", 160 | "Variable_1/read\n", 161 | "rnn/rnn/Shape\n", 162 | "rnn/rnn/strided_slice/stack\n", 163 | "rnn/rnn/strided_slice/stack_1\n", 164 | "rnn/rnn/strided_slice/stack_2\n", 165 | "rnn/rnn/strided_slice\n", 166 | "rnn/rnn/stack/1\n", 167 | "rnn/rnn/stack\n", 168 | "rnn/rnn/zeros/Const\n", 169 | "rnn/rnn/zeros\n", 170 | "rnn/rnn/stack_1/1\n", 171 | "rnn/rnn/stack_1\n", 172 | "rnn/rnn/zeros_1/Const\n", 173 | "rnn/rnn/zeros_1\n", 174 | "rnn/rnn/lstm_cell/weights/Initializer/random_uniform/shape\n", 175 | "rnn/rnn/lstm_cell/weights/Initializer/random_uniform/min\n", 176 | "rnn/rnn/lstm_cell/weights/Initializer/random_uniform/max\n", 177 | "rnn/rnn/lstm_cell/weights/Initializer/random_uniform/RandomUniform\n", 178 | "rnn/rnn/lstm_cell/weights/Initializer/random_uniform/sub\n", 179 | "rnn/rnn/lstm_cell/weights/Initializer/random_uniform/mul\n", 180 | 
"rnn/rnn/lstm_cell/weights/Initializer/random_uniform\n", 181 | "rnn/rnn/lstm_cell/weights\n", 182 | "rnn/rnn/lstm_cell/weights/Assign\n", 183 | "rnn/rnn/lstm_cell/weights/read\n", 184 | "rnn/rnn/lstm_cell/lstm_cell/concat/axis\n", 185 | "rnn/rnn/lstm_cell/lstm_cell/concat\n", 186 | "rnn/rnn/lstm_cell/lstm_cell/MatMul\n", 187 | "rnn/rnn/lstm_cell/biases/Initializer/Const\n", 188 | "rnn/rnn/lstm_cell/biases\n", 189 | "rnn/rnn/lstm_cell/biases/Assign\n", 190 | "rnn/rnn/lstm_cell/biases/read\n", 191 | "rnn/rnn/lstm_cell/BiasAdd\n", 192 | "rnn/rnn/lstm_cell/split/split_dim\n", 193 | "rnn/rnn/lstm_cell/split\n", 194 | "rnn/rnn/lstm_cell/add/y\n", 195 | "rnn/rnn/lstm_cell/add\n", 196 | "rnn/rnn/lstm_cell/Sigmoid\n", 197 | "rnn/rnn/lstm_cell/mul\n", 198 | "rnn/rnn/lstm_cell/Sigmoid_1\n", 199 | "rnn/rnn/lstm_cell/Tanh\n", 200 | "rnn/rnn/lstm_cell/mul_1\n", 201 | "rnn/rnn/lstm_cell/add_1\n", 202 | "rnn/rnn/lstm_cell/Sigmoid_2\n", 203 | "rnn/rnn/lstm_cell/Tanh_1\n", 204 | "rnn/rnn/lstm_cell/mul_2\n", 205 | "rnn/rnn/lstm_cell_1/lstm_cell/concat/axis\n", 206 | "rnn/rnn/lstm_cell_1/lstm_cell/concat\n", 207 | "rnn/rnn/lstm_cell_1/lstm_cell/MatMul\n", 208 | "rnn/rnn/lstm_cell_1/BiasAdd\n", 209 | "rnn/rnn/lstm_cell_1/split/split_dim\n", 210 | "rnn/rnn/lstm_cell_1/split\n", 211 | "rnn/rnn/lstm_cell_1/add/y\n", 212 | "rnn/rnn/lstm_cell_1/add\n", 213 | "rnn/rnn/lstm_cell_1/Sigmoid\n", 214 | "rnn/rnn/lstm_cell_1/mul\n", 215 | "rnn/rnn/lstm_cell_1/Sigmoid_1\n", 216 | "rnn/rnn/lstm_cell_1/Tanh\n", 217 | "rnn/rnn/lstm_cell_1/mul_1\n", 218 | "rnn/rnn/lstm_cell_1/add_1\n", 219 | "rnn/rnn/lstm_cell_1/Sigmoid_2\n", 220 | "rnn/rnn/lstm_cell_1/Tanh_1\n", 221 | "rnn/rnn/lstm_cell_1/mul_2\n", 222 | "rnn/rnn/lstm_cell_2/lstm_cell/concat/axis\n", 223 | "rnn/rnn/lstm_cell_2/lstm_cell/concat\n", 224 | "rnn/rnn/lstm_cell_2/lstm_cell/MatMul\n", 225 | "rnn/rnn/lstm_cell_2/BiasAdd\n", 226 | "rnn/rnn/lstm_cell_2/split/split_dim\n", 227 | "rnn/rnn/lstm_cell_2/split\n", 228 | "rnn/rnn/lstm_cell_2/add/y\n", 
229 | "rnn/rnn/lstm_cell_2/add\n", 230 | "rnn/rnn/lstm_cell_2/Sigmoid\n", 231 | "rnn/rnn/lstm_cell_2/mul\n", 232 | "rnn/rnn/lstm_cell_2/Sigmoid_1\n", 233 | "rnn/rnn/lstm_cell_2/Tanh\n", 234 | "rnn/rnn/lstm_cell_2/mul_1\n", 235 | "rnn/rnn/lstm_cell_2/add_1\n", 236 | "rnn/rnn/lstm_cell_2/Sigmoid_2\n", 237 | "rnn/rnn/lstm_cell_2/Tanh_1\n", 238 | "rnn/rnn/lstm_cell_2/mul_2\n", 239 | "rnn/rnn/lstm_cell_3/lstm_cell/concat/axis\n", 240 | "rnn/rnn/lstm_cell_3/lstm_cell/concat\n", 241 | "rnn/rnn/lstm_cell_3/lstm_cell/MatMul\n", 242 | "rnn/rnn/lstm_cell_3/BiasAdd\n", 243 | "rnn/rnn/lstm_cell_3/split/split_dim\n", 244 | "rnn/rnn/lstm_cell_3/split\n", 245 | "rnn/rnn/lstm_cell_3/add/y\n", 246 | "rnn/rnn/lstm_cell_3/add\n", 247 | "rnn/rnn/lstm_cell_3/Sigmoid\n", 248 | "rnn/rnn/lstm_cell_3/mul\n", 249 | "rnn/rnn/lstm_cell_3/Sigmoid_1\n", 250 | "rnn/rnn/lstm_cell_3/Tanh\n", 251 | "rnn/rnn/lstm_cell_3/mul_1\n", 252 | "rnn/rnn/lstm_cell_3/add_1\n", 253 | "rnn/rnn/lstm_cell_3/Sigmoid_2\n", 254 | "rnn/rnn/lstm_cell_3/Tanh_1\n", 255 | "rnn/rnn/lstm_cell_3/mul_2\n", 256 | "rnn/rnn/lstm_cell_4/lstm_cell/concat/axis\n", 257 | "rnn/rnn/lstm_cell_4/lstm_cell/concat\n", 258 | "rnn/rnn/lstm_cell_4/lstm_cell/MatMul\n", 259 | "rnn/rnn/lstm_cell_4/BiasAdd\n", 260 | "rnn/rnn/lstm_cell_4/split/split_dim\n", 261 | "rnn/rnn/lstm_cell_4/split\n", 262 | "rnn/rnn/lstm_cell_4/add/y\n", 263 | "rnn/rnn/lstm_cell_4/add\n", 264 | "rnn/rnn/lstm_cell_4/Sigmoid\n", 265 | "rnn/rnn/lstm_cell_4/mul\n", 266 | "rnn/rnn/lstm_cell_4/Sigmoid_1\n", 267 | "rnn/rnn/lstm_cell_4/Tanh\n", 268 | "rnn/rnn/lstm_cell_4/mul_1\n", 269 | "rnn/rnn/lstm_cell_4/add_1\n", 270 | "rnn/rnn/lstm_cell_4/Sigmoid_2\n", 271 | "rnn/rnn/lstm_cell_4/Tanh_1\n", 272 | "rnn/rnn/lstm_cell_4/mul_2\n", 273 | "rnn/rnn/lstm_cell_5/lstm_cell/concat/axis\n", 274 | "rnn/rnn/lstm_cell_5/lstm_cell/concat\n", 275 | "rnn/rnn/lstm_cell_5/lstm_cell/MatMul\n", 276 | "rnn/rnn/lstm_cell_5/BiasAdd\n", 277 | "rnn/rnn/lstm_cell_5/split/split_dim\n", 278 | 
"rnn/rnn/lstm_cell_5/split\n", 279 | "rnn/rnn/lstm_cell_5/add/y\n", 280 | "rnn/rnn/lstm_cell_5/add\n", 281 | "rnn/rnn/lstm_cell_5/Sigmoid\n", 282 | "rnn/rnn/lstm_cell_5/mul\n", 283 | "rnn/rnn/lstm_cell_5/Sigmoid_1\n", 284 | "rnn/rnn/lstm_cell_5/Tanh\n", 285 | "rnn/rnn/lstm_cell_5/mul_1\n", 286 | "rnn/rnn/lstm_cell_5/add_1\n", 287 | "rnn/rnn/lstm_cell_5/Sigmoid_2\n", 288 | "rnn/rnn/lstm_cell_5/Tanh_1\n", 289 | "rnn/rnn/lstm_cell_5/mul_2\n", 290 | "rnn/rnn/lstm_cell_6/lstm_cell/concat/axis\n", 291 | "rnn/rnn/lstm_cell_6/lstm_cell/concat\n", 292 | "rnn/rnn/lstm_cell_6/lstm_cell/MatMul\n", 293 | "rnn/rnn/lstm_cell_6/BiasAdd\n", 294 | "rnn/rnn/lstm_cell_6/split/split_dim\n", 295 | "rnn/rnn/lstm_cell_6/split\n", 296 | "rnn/rnn/lstm_cell_6/add/y\n", 297 | "rnn/rnn/lstm_cell_6/add\n", 298 | "rnn/rnn/lstm_cell_6/Sigmoid\n", 299 | "rnn/rnn/lstm_cell_6/mul\n", 300 | "rnn/rnn/lstm_cell_6/Sigmoid_1\n", 301 | "rnn/rnn/lstm_cell_6/Tanh\n", 302 | "rnn/rnn/lstm_cell_6/mul_1\n", 303 | "rnn/rnn/lstm_cell_6/add_1\n", 304 | "rnn/rnn/lstm_cell_6/Sigmoid_2\n", 305 | "rnn/rnn/lstm_cell_6/Tanh_1\n", 306 | "rnn/rnn/lstm_cell_6/mul_2\n", 307 | "rnn/rnn/lstm_cell_7/lstm_cell/concat/axis\n", 308 | "rnn/rnn/lstm_cell_7/lstm_cell/concat\n", 309 | "rnn/rnn/lstm_cell_7/lstm_cell/MatMul\n", 310 | "rnn/rnn/lstm_cell_7/BiasAdd\n", 311 | "rnn/rnn/lstm_cell_7/split/split_dim\n", 312 | "rnn/rnn/lstm_cell_7/split\n", 313 | "rnn/rnn/lstm_cell_7/add/y\n", 314 | "rnn/rnn/lstm_cell_7/add\n", 315 | "rnn/rnn/lstm_cell_7/Sigmoid\n", 316 | "rnn/rnn/lstm_cell_7/mul\n", 317 | "rnn/rnn/lstm_cell_7/Sigmoid_1\n", 318 | "rnn/rnn/lstm_cell_7/Tanh\n", 319 | "rnn/rnn/lstm_cell_7/mul_1\n", 320 | "rnn/rnn/lstm_cell_7/add_1\n", 321 | "rnn/rnn/lstm_cell_7/Sigmoid_2\n", 322 | "rnn/rnn/lstm_cell_7/Tanh_1\n", 323 | "rnn/rnn/lstm_cell_7/mul_2\n", 324 | "rnn/rnn/lstm_cell_8/lstm_cell/concat/axis\n", 325 | "rnn/rnn/lstm_cell_8/lstm_cell/concat\n", 326 | "rnn/rnn/lstm_cell_8/lstm_cell/MatMul\n", 327 | 
"rnn/rnn/lstm_cell_8/BiasAdd\n", 328 | "rnn/rnn/lstm_cell_8/split/split_dim\n", 329 | "rnn/rnn/lstm_cell_8/split\n", 330 | "rnn/rnn/lstm_cell_8/add/y\n", 331 | "rnn/rnn/lstm_cell_8/add\n", 332 | "rnn/rnn/lstm_cell_8/Sigmoid\n", 333 | "rnn/rnn/lstm_cell_8/mul\n", 334 | "rnn/rnn/lstm_cell_8/Sigmoid_1\n", 335 | "rnn/rnn/lstm_cell_8/Tanh\n", 336 | "rnn/rnn/lstm_cell_8/mul_1\n", 337 | "rnn/rnn/lstm_cell_8/add_1\n", 338 | "rnn/rnn/lstm_cell_8/Sigmoid_2\n", 339 | "rnn/rnn/lstm_cell_8/Tanh_1\n", 340 | "rnn/rnn/lstm_cell_8/mul_2\n", 341 | "rnn/rnn/lstm_cell_9/lstm_cell/concat/axis\n", 342 | "rnn/rnn/lstm_cell_9/lstm_cell/concat\n", 343 | "rnn/rnn/lstm_cell_9/lstm_cell/MatMul\n", 344 | "rnn/rnn/lstm_cell_9/BiasAdd\n", 345 | "rnn/rnn/lstm_cell_9/split/split_dim\n", 346 | "rnn/rnn/lstm_cell_9/split\n", 347 | "rnn/rnn/lstm_cell_9/add/y\n", 348 | "rnn/rnn/lstm_cell_9/add\n", 349 | "rnn/rnn/lstm_cell_9/Sigmoid\n", 350 | "rnn/rnn/lstm_cell_9/mul\n", 351 | "rnn/rnn/lstm_cell_9/Sigmoid_1\n", 352 | "rnn/rnn/lstm_cell_9/Tanh\n", 353 | "rnn/rnn/lstm_cell_9/mul_1\n", 354 | "rnn/rnn/lstm_cell_9/add_1\n", 355 | "rnn/rnn/lstm_cell_9/Sigmoid_2\n", 356 | "rnn/rnn/lstm_cell_9/Tanh_1\n", 357 | "rnn/rnn/lstm_cell_9/mul_2\n", 358 | "rnn/MatMul\n", 359 | "rnn/add\n", 360 | "sub\n", 361 | "Square\n", 362 | "Const\n", 363 | "Mean\n", 364 | "gradients/Shape\n", 365 | "gradients/Const\n", 366 | "gradients/Fill\n", 367 | "gradients/Mean_grad/Reshape/shape\n", 368 | "gradients/Mean_grad/Reshape\n", 369 | "gradients/Mean_grad/Shape\n", 370 | "gradients/Mean_grad/Tile\n", 371 | "gradients/Mean_grad/Shape_1\n", 372 | "gradients/Mean_grad/Shape_2\n", 373 | "gradients/Mean_grad/Const\n", 374 | "gradients/Mean_grad/Prod\n", 375 | "gradients/Mean_grad/Const_1\n", 376 | "gradients/Mean_grad/Prod_1\n", 377 | "gradients/Mean_grad/Maximum/y\n", 378 | "gradients/Mean_grad/Maximum\n", 379 | "gradients/Mean_grad/floordiv\n", 380 | "gradients/Mean_grad/Cast\n", 381 | "gradients/Mean_grad/truediv\n", 382 | 
"gradients/Square_grad/mul/x\n", 383 | "gradients/Square_grad/mul\n", 384 | "gradients/Square_grad/mul_1\n", 385 | "gradients/sub_grad/Shape\n", 386 | "gradients/sub_grad/Shape_1\n", 387 | "gradients/sub_grad/BroadcastGradientArgs\n", 388 | "gradients/sub_grad/Sum\n", 389 | "gradients/sub_grad/Reshape\n", 390 | "gradients/sub_grad/Sum_1\n", 391 | "gradients/sub_grad/Neg\n", 392 | "gradients/sub_grad/Reshape_1\n", 393 | "gradients/sub_grad/tuple/group_deps\n", 394 | "gradients/sub_grad/tuple/control_dependency\n", 395 | "gradients/sub_grad/tuple/control_dependency_1\n", 396 | "gradients/rnn/add_grad/Shape\n", 397 | "gradients/rnn/add_grad/Shape_1\n", 398 | "gradients/rnn/add_grad/BroadcastGradientArgs\n", 399 | "gradients/rnn/add_grad/Sum\n", 400 | "gradients/rnn/add_grad/Reshape\n", 401 | "gradients/rnn/add_grad/Sum_1\n", 402 | "gradients/rnn/add_grad/Reshape_1\n", 403 | "gradients/rnn/add_grad/tuple/group_deps\n", 404 | "gradients/rnn/add_grad/tuple/control_dependency\n", 405 | "gradients/rnn/add_grad/tuple/control_dependency_1\n", 406 | "gradients/rnn/MatMul_grad/MatMul\n", 407 | "gradients/rnn/MatMul_grad/MatMul_1\n", 408 | "gradients/rnn/MatMul_grad/tuple/group_deps\n", 409 | "gradients/rnn/MatMul_grad/tuple/control_dependency\n", 410 | "gradients/rnn/MatMul_grad/tuple/control_dependency_1\n", 411 | "gradients/rnn/rnn/lstm_cell_9/mul_2_grad/Shape\n", 412 | "gradients/rnn/rnn/lstm_cell_9/mul_2_grad/Shape_1\n", 413 | "gradients/rnn/rnn/lstm_cell_9/mul_2_grad/BroadcastGradientArgs\n", 414 | "gradients/rnn/rnn/lstm_cell_9/mul_2_grad/mul\n", 415 | "gradients/rnn/rnn/lstm_cell_9/mul_2_grad/Sum\n", 416 | "gradients/rnn/rnn/lstm_cell_9/mul_2_grad/Reshape\n", 417 | "gradients/rnn/rnn/lstm_cell_9/mul_2_grad/mul_1\n", 418 | "gradients/rnn/rnn/lstm_cell_9/mul_2_grad/Sum_1\n", 419 | "gradients/rnn/rnn/lstm_cell_9/mul_2_grad/Reshape_1\n", 420 | "gradients/rnn/rnn/lstm_cell_9/mul_2_grad/tuple/group_deps\n", 421 | 
"gradients/rnn/rnn/lstm_cell_9/mul_2_grad/tuple/control_dependency\n", 422 | "gradients/rnn/rnn/lstm_cell_9/mul_2_grad/tuple/control_dependency_1\n", 423 | "gradients/rnn/rnn/lstm_cell_9/Sigmoid_2_grad/SigmoidGrad\n", 424 | "gradients/rnn/rnn/lstm_cell_9/Tanh_1_grad/TanhGrad\n", 425 | "gradients/rnn/rnn/lstm_cell_9/add_1_grad/Shape\n", 426 | "gradients/rnn/rnn/lstm_cell_9/add_1_grad/Shape_1\n", 427 | "gradients/rnn/rnn/lstm_cell_9/add_1_grad/BroadcastGradientArgs\n", 428 | "gradients/rnn/rnn/lstm_cell_9/add_1_grad/Sum\n", 429 | "gradients/rnn/rnn/lstm_cell_9/add_1_grad/Reshape\n", 430 | "gradients/rnn/rnn/lstm_cell_9/add_1_grad/Sum_1\n", 431 | "gradients/rnn/rnn/lstm_cell_9/add_1_grad/Reshape_1\n", 432 | "gradients/rnn/rnn/lstm_cell_9/add_1_grad/tuple/group_deps\n", 433 | "gradients/rnn/rnn/lstm_cell_9/add_1_grad/tuple/control_dependency\n", 434 | "gradients/rnn/rnn/lstm_cell_9/add_1_grad/tuple/control_dependency_1\n", 435 | "gradients/rnn/rnn/lstm_cell_9/mul_grad/Shape\n", 436 | "gradients/rnn/rnn/lstm_cell_9/mul_grad/Shape_1\n", 437 | "gradients/rnn/rnn/lstm_cell_9/mul_grad/BroadcastGradientArgs\n", 438 | "gradients/rnn/rnn/lstm_cell_9/mul_grad/mul\n", 439 | "gradients/rnn/rnn/lstm_cell_9/mul_grad/Sum\n", 440 | "gradients/rnn/rnn/lstm_cell_9/mul_grad/Reshape\n", 441 | "gradients/rnn/rnn/lstm_cell_9/mul_grad/mul_1\n", 442 | "gradients/rnn/rnn/lstm_cell_9/mul_grad/Sum_1\n", 443 | "gradients/rnn/rnn/lstm_cell_9/mul_grad/Reshape_1\n", 444 | "gradients/rnn/rnn/lstm_cell_9/mul_grad/tuple/group_deps\n", 445 | "gradients/rnn/rnn/lstm_cell_9/mul_grad/tuple/control_dependency\n", 446 | "gradients/rnn/rnn/lstm_cell_9/mul_grad/tuple/control_dependency_1\n", 447 | "gradients/rnn/rnn/lstm_cell_9/mul_1_grad/Shape\n", 448 | "gradients/rnn/rnn/lstm_cell_9/mul_1_grad/Shape_1\n", 449 | "gradients/rnn/rnn/lstm_cell_9/mul_1_grad/BroadcastGradientArgs\n", 450 | "gradients/rnn/rnn/lstm_cell_9/mul_1_grad/mul\n", 451 | "gradients/rnn/rnn/lstm_cell_9/mul_1_grad/Sum\n", 452 | 
"gradients/rnn/rnn/lstm_cell_9/mul_1_grad/Reshape\n", 453 | "gradients/rnn/rnn/lstm_cell_9/mul_1_grad/mul_1\n", 454 | "gradients/rnn/rnn/lstm_cell_9/mul_1_grad/Sum_1\n", 455 | "gradients/rnn/rnn/lstm_cell_9/mul_1_grad/Reshape_1\n", 456 | "gradients/rnn/rnn/lstm_cell_9/mul_1_grad/tuple/group_deps\n", 457 | "gradients/rnn/rnn/lstm_cell_9/mul_1_grad/tuple/control_dependency\n", 458 | "gradients/rnn/rnn/lstm_cell_9/mul_1_grad/tuple/control_dependency_1\n", 459 | "gradients/rnn/rnn/lstm_cell_9/Sigmoid_grad/SigmoidGrad\n", 460 | "gradients/rnn/rnn/lstm_cell_9/Sigmoid_1_grad/SigmoidGrad\n", 461 | "gradients/rnn/rnn/lstm_cell_9/Tanh_grad/TanhGrad\n", 462 | "gradients/rnn/rnn/lstm_cell_9/add_grad/Shape\n", 463 | "gradients/rnn/rnn/lstm_cell_9/add_grad/Shape_1\n", 464 | "gradients/rnn/rnn/lstm_cell_9/add_grad/BroadcastGradientArgs\n", 465 | "gradients/rnn/rnn/lstm_cell_9/add_grad/Sum\n", 466 | "gradients/rnn/rnn/lstm_cell_9/add_grad/Reshape\n", 467 | "gradients/rnn/rnn/lstm_cell_9/add_grad/Sum_1\n", 468 | "gradients/rnn/rnn/lstm_cell_9/add_grad/Reshape_1\n", 469 | "gradients/rnn/rnn/lstm_cell_9/add_grad/tuple/group_deps\n", 470 | "gradients/rnn/rnn/lstm_cell_9/add_grad/tuple/control_dependency\n", 471 | "gradients/rnn/rnn/lstm_cell_9/add_grad/tuple/control_dependency_1\n", 472 | "gradients/rnn/rnn/lstm_cell_9/split_grad/concat\n", 473 | "gradients/rnn/rnn/lstm_cell_9/BiasAdd_grad/BiasAddGrad\n", 474 | "gradients/rnn/rnn/lstm_cell_9/BiasAdd_grad/tuple/group_deps\n", 475 | "gradients/rnn/rnn/lstm_cell_9/BiasAdd_grad/tuple/control_dependency\n", 476 | "gradients/rnn/rnn/lstm_cell_9/BiasAdd_grad/tuple/control_dependency_1\n", 477 | "gradients/rnn/rnn/lstm_cell_9/lstm_cell/MatMul_grad/MatMul\n", 478 | "gradients/rnn/rnn/lstm_cell_9/lstm_cell/MatMul_grad/MatMul_1\n", 479 | "gradients/rnn/rnn/lstm_cell_9/lstm_cell/MatMul_grad/tuple/group_deps\n", 480 | "gradients/rnn/rnn/lstm_cell_9/lstm_cell/MatMul_grad/tuple/control_dependency\n", 481 | 
"gradients/rnn/rnn/lstm_cell_9/lstm_cell/MatMul_grad/tuple/control_dependency_1\n", 482 | "gradients/rnn/rnn/lstm_cell_9/lstm_cell/concat_grad/Rank\n", 483 | "gradients/rnn/rnn/lstm_cell_9/lstm_cell/concat_grad/mod\n", 484 | "gradients/rnn/rnn/lstm_cell_9/lstm_cell/concat_grad/Shape\n", 485 | "gradients/rnn/rnn/lstm_cell_9/lstm_cell/concat_grad/ShapeN\n", 486 | "gradients/rnn/rnn/lstm_cell_9/lstm_cell/concat_grad/ConcatOffset\n", 487 | "gradients/rnn/rnn/lstm_cell_9/lstm_cell/concat_grad/Slice\n", 488 | "gradients/rnn/rnn/lstm_cell_9/lstm_cell/concat_grad/Slice_1\n", 489 | "gradients/rnn/rnn/lstm_cell_9/lstm_cell/concat_grad/tuple/group_deps\n", 490 | "gradients/rnn/rnn/lstm_cell_9/lstm_cell/concat_grad/tuple/control_dependency\n", 491 | "gradients/rnn/rnn/lstm_cell_9/lstm_cell/concat_grad/tuple/control_dependency_1\n", 492 | "gradients/rnn/rnn/lstm_cell_8/mul_2_grad/Shape\n", 493 | "gradients/rnn/rnn/lstm_cell_8/mul_2_grad/Shape_1\n", 494 | "gradients/rnn/rnn/lstm_cell_8/mul_2_grad/BroadcastGradientArgs\n", 495 | "gradients/rnn/rnn/lstm_cell_8/mul_2_grad/mul\n", 496 | "gradients/rnn/rnn/lstm_cell_8/mul_2_grad/Sum\n", 497 | "gradients/rnn/rnn/lstm_cell_8/mul_2_grad/Reshape\n", 498 | "gradients/rnn/rnn/lstm_cell_8/mul_2_grad/mul_1\n", 499 | "gradients/rnn/rnn/lstm_cell_8/mul_2_grad/Sum_1\n", 500 | "gradients/rnn/rnn/lstm_cell_8/mul_2_grad/Reshape_1\n", 501 | "gradients/rnn/rnn/lstm_cell_8/mul_2_grad/tuple/group_deps\n", 502 | "gradients/rnn/rnn/lstm_cell_8/mul_2_grad/tuple/control_dependency\n", 503 | "gradients/rnn/rnn/lstm_cell_8/mul_2_grad/tuple/control_dependency_1\n", 504 | "gradients/rnn/rnn/lstm_cell_8/Sigmoid_2_grad/SigmoidGrad\n", 505 | "gradients/rnn/rnn/lstm_cell_8/Tanh_1_grad/TanhGrad\n", 506 | "gradients/AddN\n", 507 | "gradients/rnn/rnn/lstm_cell_8/add_1_grad/Shape\n", 508 | "gradients/rnn/rnn/lstm_cell_8/add_1_grad/Shape_1\n", 509 | "gradients/rnn/rnn/lstm_cell_8/add_1_grad/BroadcastGradientArgs\n", 510 | 
"gradients/rnn/rnn/lstm_cell_8/add_1_grad/Sum\n", 511 | "gradients/rnn/rnn/lstm_cell_8/add_1_grad/Reshape\n", 512 | "gradients/rnn/rnn/lstm_cell_8/add_1_grad/Sum_1\n", 513 | "gradients/rnn/rnn/lstm_cell_8/add_1_grad/Reshape_1\n", 514 | "gradients/rnn/rnn/lstm_cell_8/add_1_grad/tuple/group_deps\n", 515 | "gradients/rnn/rnn/lstm_cell_8/add_1_grad/tuple/control_dependency\n", 516 | "gradients/rnn/rnn/lstm_cell_8/add_1_grad/tuple/control_dependency_1\n", 517 | "gradients/rnn/rnn/lstm_cell_8/mul_grad/Shape\n", 518 | "gradients/rnn/rnn/lstm_cell_8/mul_grad/Shape_1\n", 519 | "gradients/rnn/rnn/lstm_cell_8/mul_grad/BroadcastGradientArgs\n", 520 | "gradients/rnn/rnn/lstm_cell_8/mul_grad/mul\n", 521 | "gradients/rnn/rnn/lstm_cell_8/mul_grad/Sum\n", 522 | "gradients/rnn/rnn/lstm_cell_8/mul_grad/Reshape\n", 523 | "gradients/rnn/rnn/lstm_cell_8/mul_grad/mul_1\n", 524 | "gradients/rnn/rnn/lstm_cell_8/mul_grad/Sum_1\n", 525 | "gradients/rnn/rnn/lstm_cell_8/mul_grad/Reshape_1\n", 526 | "gradients/rnn/rnn/lstm_cell_8/mul_grad/tuple/group_deps\n", 527 | "gradients/rnn/rnn/lstm_cell_8/mul_grad/tuple/control_dependency\n", 528 | "gradients/rnn/rnn/lstm_cell_8/mul_grad/tuple/control_dependency_1\n", 529 | "gradients/rnn/rnn/lstm_cell_8/mul_1_grad/Shape\n", 530 | "gradients/rnn/rnn/lstm_cell_8/mul_1_grad/Shape_1\n", 531 | "gradients/rnn/rnn/lstm_cell_8/mul_1_grad/BroadcastGradientArgs\n", 532 | "gradients/rnn/rnn/lstm_cell_8/mul_1_grad/mul\n", 533 | "gradients/rnn/rnn/lstm_cell_8/mul_1_grad/Sum\n", 534 | "gradients/rnn/rnn/lstm_cell_8/mul_1_grad/Reshape\n", 535 | "gradients/rnn/rnn/lstm_cell_8/mul_1_grad/mul_1\n", 536 | "gradients/rnn/rnn/lstm_cell_8/mul_1_grad/Sum_1\n", 537 | "gradients/rnn/rnn/lstm_cell_8/mul_1_grad/Reshape_1\n", 538 | "gradients/rnn/rnn/lstm_cell_8/mul_1_grad/tuple/group_deps\n", 539 | "gradients/rnn/rnn/lstm_cell_8/mul_1_grad/tuple/control_dependency\n", 540 | "gradients/rnn/rnn/lstm_cell_8/mul_1_grad/tuple/control_dependency_1\n", 541 | 
"gradients/rnn/rnn/lstm_cell_8/Sigmoid_grad/SigmoidGrad\n", 542 | "gradients/rnn/rnn/lstm_cell_8/Sigmoid_1_grad/SigmoidGrad\n", 543 | "gradients/rnn/rnn/lstm_cell_8/Tanh_grad/TanhGrad\n", 544 | "gradients/rnn/rnn/lstm_cell_8/add_grad/Shape\n", 545 | "gradients/rnn/rnn/lstm_cell_8/add_grad/Shape_1\n", 546 | "gradients/rnn/rnn/lstm_cell_8/add_grad/BroadcastGradientArgs\n", 547 | "gradients/rnn/rnn/lstm_cell_8/add_grad/Sum\n", 548 | "gradients/rnn/rnn/lstm_cell_8/add_grad/Reshape\n", 549 | "gradients/rnn/rnn/lstm_cell_8/add_grad/Sum_1\n", 550 | "gradients/rnn/rnn/lstm_cell_8/add_grad/Reshape_1\n", 551 | "gradients/rnn/rnn/lstm_cell_8/add_grad/tuple/group_deps\n", 552 | "gradients/rnn/rnn/lstm_cell_8/add_grad/tuple/control_dependency\n", 553 | "gradients/rnn/rnn/lstm_cell_8/add_grad/tuple/control_dependency_1\n", 554 | "gradients/rnn/rnn/lstm_cell_8/split_grad/concat\n", 555 | "gradients/rnn/rnn/lstm_cell_8/BiasAdd_grad/BiasAddGrad\n", 556 | "gradients/rnn/rnn/lstm_cell_8/BiasAdd_grad/tuple/group_deps\n", 557 | "gradients/rnn/rnn/lstm_cell_8/BiasAdd_grad/tuple/control_dependency\n", 558 | "gradients/rnn/rnn/lstm_cell_8/BiasAdd_grad/tuple/control_dependency_1\n", 559 | "gradients/rnn/rnn/lstm_cell_8/lstm_cell/MatMul_grad/MatMul\n", 560 | "gradients/rnn/rnn/lstm_cell_8/lstm_cell/MatMul_grad/MatMul_1\n", 561 | "gradients/rnn/rnn/lstm_cell_8/lstm_cell/MatMul_grad/tuple/group_deps\n", 562 | "gradients/rnn/rnn/lstm_cell_8/lstm_cell/MatMul_grad/tuple/control_dependency\n", 563 | "gradients/rnn/rnn/lstm_cell_8/lstm_cell/MatMul_grad/tuple/control_dependency_1\n", 564 | "gradients/rnn/rnn/lstm_cell_8/lstm_cell/concat_grad/Rank\n", 565 | "gradients/rnn/rnn/lstm_cell_8/lstm_cell/concat_grad/mod\n", 566 | "gradients/rnn/rnn/lstm_cell_8/lstm_cell/concat_grad/Shape\n", 567 | "gradients/rnn/rnn/lstm_cell_8/lstm_cell/concat_grad/ShapeN\n", 568 | "gradients/rnn/rnn/lstm_cell_8/lstm_cell/concat_grad/ConcatOffset\n", 569 | "gradients/rnn/rnn/lstm_cell_8/lstm_cell/concat_grad/Slice\n", 570 
| "gradients/rnn/rnn/lstm_cell_8/lstm_cell/concat_grad/Slice_1\n", 571 | "gradients/rnn/rnn/lstm_cell_8/lstm_cell/concat_grad/tuple/group_deps\n", 572 | "gradients/rnn/rnn/lstm_cell_8/lstm_cell/concat_grad/tuple/control_dependency\n", 573 | "gradients/rnn/rnn/lstm_cell_8/lstm_cell/concat_grad/tuple/control_dependency_1\n", 574 | "gradients/rnn/rnn/lstm_cell_7/mul_2_grad/Shape\n", 575 | "gradients/rnn/rnn/lstm_cell_7/mul_2_grad/Shape_1\n", 576 | "gradients/rnn/rnn/lstm_cell_7/mul_2_grad/BroadcastGradientArgs\n", 577 | "gradients/rnn/rnn/lstm_cell_7/mul_2_grad/mul\n", 578 | "gradients/rnn/rnn/lstm_cell_7/mul_2_grad/Sum\n", 579 | "gradients/rnn/rnn/lstm_cell_7/mul_2_grad/Reshape\n", 580 | "gradients/rnn/rnn/lstm_cell_7/mul_2_grad/mul_1\n", 581 | "gradients/rnn/rnn/lstm_cell_7/mul_2_grad/Sum_1\n", 582 | "gradients/rnn/rnn/lstm_cell_7/mul_2_grad/Reshape_1\n", 583 | "gradients/rnn/rnn/lstm_cell_7/mul_2_grad/tuple/group_deps\n", 584 | "gradients/rnn/rnn/lstm_cell_7/mul_2_grad/tuple/control_dependency\n", 585 | "gradients/rnn/rnn/lstm_cell_7/mul_2_grad/tuple/control_dependency_1\n", 586 | "gradients/rnn/rnn/lstm_cell_7/Sigmoid_2_grad/SigmoidGrad\n", 587 | "gradients/rnn/rnn/lstm_cell_7/Tanh_1_grad/TanhGrad\n", 588 | "gradients/AddN_1\n", 589 | "gradients/rnn/rnn/lstm_cell_7/add_1_grad/Shape\n", 590 | "gradients/rnn/rnn/lstm_cell_7/add_1_grad/Shape_1\n", 591 | "gradients/rnn/rnn/lstm_cell_7/add_1_grad/BroadcastGradientArgs\n", 592 | "gradients/rnn/rnn/lstm_cell_7/add_1_grad/Sum\n", 593 | "gradients/rnn/rnn/lstm_cell_7/add_1_grad/Reshape\n", 594 | "gradients/rnn/rnn/lstm_cell_7/add_1_grad/Sum_1\n", 595 | "gradients/rnn/rnn/lstm_cell_7/add_1_grad/Reshape_1\n", 596 | "gradients/rnn/rnn/lstm_cell_7/add_1_grad/tuple/group_deps\n", 597 | "gradients/rnn/rnn/lstm_cell_7/add_1_grad/tuple/control_dependency\n", 598 | "gradients/rnn/rnn/lstm_cell_7/add_1_grad/tuple/control_dependency_1\n", 599 | "gradients/rnn/rnn/lstm_cell_7/mul_grad/Shape\n", 600 | 
"gradients/rnn/rnn/lstm_cell_7/mul_grad/Shape_1\n", 601 | "gradients/rnn/rnn/lstm_cell_7/mul_grad/BroadcastGradientArgs\n", 602 | "gradients/rnn/rnn/lstm_cell_7/mul_grad/mul\n", 603 | "gradients/rnn/rnn/lstm_cell_7/mul_grad/Sum\n", 604 | "gradients/rnn/rnn/lstm_cell_7/mul_grad/Reshape\n", 605 | "gradients/rnn/rnn/lstm_cell_7/mul_grad/mul_1\n", 606 | "gradients/rnn/rnn/lstm_cell_7/mul_grad/Sum_1\n", 607 | "gradients/rnn/rnn/lstm_cell_7/mul_grad/Reshape_1\n", 608 | "gradients/rnn/rnn/lstm_cell_7/mul_grad/tuple/group_deps\n", 609 | "gradients/rnn/rnn/lstm_cell_7/mul_grad/tuple/control_dependency\n", 610 | "gradients/rnn/rnn/lstm_cell_7/mul_grad/tuple/control_dependency_1\n", 611 | "gradients/rnn/rnn/lstm_cell_7/mul_1_grad/Shape\n", 612 | "gradients/rnn/rnn/lstm_cell_7/mul_1_grad/Shape_1\n", 613 | "gradients/rnn/rnn/lstm_cell_7/mul_1_grad/BroadcastGradientArgs\n", 614 | "gradients/rnn/rnn/lstm_cell_7/mul_1_grad/mul\n", 615 | "gradients/rnn/rnn/lstm_cell_7/mul_1_grad/Sum\n", 616 | "gradients/rnn/rnn/lstm_cell_7/mul_1_grad/Reshape\n", 617 | "gradients/rnn/rnn/lstm_cell_7/mul_1_grad/mul_1\n", 618 | "gradients/rnn/rnn/lstm_cell_7/mul_1_grad/Sum_1\n", 619 | "gradients/rnn/rnn/lstm_cell_7/mul_1_grad/Reshape_1\n", 620 | "gradients/rnn/rnn/lstm_cell_7/mul_1_grad/tuple/group_deps\n", 621 | "gradients/rnn/rnn/lstm_cell_7/mul_1_grad/tuple/control_dependency\n", 622 | "gradients/rnn/rnn/lstm_cell_7/mul_1_grad/tuple/control_dependency_1\n", 623 | "gradients/rnn/rnn/lstm_cell_7/Sigmoid_grad/SigmoidGrad\n", 624 | "gradients/rnn/rnn/lstm_cell_7/Sigmoid_1_grad/SigmoidGrad\n", 625 | "gradients/rnn/rnn/lstm_cell_7/Tanh_grad/TanhGrad\n", 626 | "gradients/rnn/rnn/lstm_cell_7/add_grad/Shape\n", 627 | "gradients/rnn/rnn/lstm_cell_7/add_grad/Shape_1\n", 628 | "gradients/rnn/rnn/lstm_cell_7/add_grad/BroadcastGradientArgs\n", 629 | "gradients/rnn/rnn/lstm_cell_7/add_grad/Sum\n", 630 | "gradients/rnn/rnn/lstm_cell_7/add_grad/Reshape\n", 631 | "gradients/rnn/rnn/lstm_cell_7/add_grad/Sum_1\n", 632 
| "gradients/rnn/rnn/lstm_cell_7/add_grad/Reshape_1\n", 633 | "gradients/rnn/rnn/lstm_cell_7/add_grad/tuple/group_deps\n", 634 | "gradients/rnn/rnn/lstm_cell_7/add_grad/tuple/control_dependency\n", 635 | "gradients/rnn/rnn/lstm_cell_7/add_grad/tuple/control_dependency_1\n", 636 | "gradients/rnn/rnn/lstm_cell_7/split_grad/concat\n", 637 | "gradients/rnn/rnn/lstm_cell_7/BiasAdd_grad/BiasAddGrad\n", 638 | "gradients/rnn/rnn/lstm_cell_7/BiasAdd_grad/tuple/group_deps\n", 639 | "gradients/rnn/rnn/lstm_cell_7/BiasAdd_grad/tuple/control_dependency\n", 640 | "gradients/rnn/rnn/lstm_cell_7/BiasAdd_grad/tuple/control_dependency_1\n", 641 | "gradients/rnn/rnn/lstm_cell_7/lstm_cell/MatMul_grad/MatMul\n", 642 | "gradients/rnn/rnn/lstm_cell_7/lstm_cell/MatMul_grad/MatMul_1\n", 643 | "gradients/rnn/rnn/lstm_cell_7/lstm_cell/MatMul_grad/tuple/group_deps\n", 644 | "gradients/rnn/rnn/lstm_cell_7/lstm_cell/MatMul_grad/tuple/control_dependency\n", 645 | "gradients/rnn/rnn/lstm_cell_7/lstm_cell/MatMul_grad/tuple/control_dependency_1\n", 646 | "gradients/rnn/rnn/lstm_cell_7/lstm_cell/concat_grad/Rank\n", 647 | "gradients/rnn/rnn/lstm_cell_7/lstm_cell/concat_grad/mod\n", 648 | "gradients/rnn/rnn/lstm_cell_7/lstm_cell/concat_grad/Shape\n", 649 | "gradients/rnn/rnn/lstm_cell_7/lstm_cell/concat_grad/ShapeN\n", 650 | "gradients/rnn/rnn/lstm_cell_7/lstm_cell/concat_grad/ConcatOffset\n", 651 | "gradients/rnn/rnn/lstm_cell_7/lstm_cell/concat_grad/Slice\n", 652 | "gradients/rnn/rnn/lstm_cell_7/lstm_cell/concat_grad/Slice_1\n", 653 | "gradients/rnn/rnn/lstm_cell_7/lstm_cell/concat_grad/tuple/group_deps\n", 654 | "gradients/rnn/rnn/lstm_cell_7/lstm_cell/concat_grad/tuple/control_dependency\n", 655 | "gradients/rnn/rnn/lstm_cell_7/lstm_cell/concat_grad/tuple/control_dependency_1\n", 656 | "gradients/rnn/rnn/lstm_cell_6/mul_2_grad/Shape\n", 657 | "gradients/rnn/rnn/lstm_cell_6/mul_2_grad/Shape_1\n", 658 | "gradients/rnn/rnn/lstm_cell_6/mul_2_grad/BroadcastGradientArgs\n", 659 | 
"gradients/rnn/rnn/lstm_cell_6/mul_2_grad/mul\n", 660 | "gradients/rnn/rnn/lstm_cell_6/mul_2_grad/Sum\n", 661 | "gradients/rnn/rnn/lstm_cell_6/mul_2_grad/Reshape\n", 662 | "gradients/rnn/rnn/lstm_cell_6/mul_2_grad/mul_1\n", 663 | "gradients/rnn/rnn/lstm_cell_6/mul_2_grad/Sum_1\n", 664 | "gradients/rnn/rnn/lstm_cell_6/mul_2_grad/Reshape_1\n", 665 | "gradients/rnn/rnn/lstm_cell_6/mul_2_grad/tuple/group_deps\n", 666 | "gradients/rnn/rnn/lstm_cell_6/mul_2_grad/tuple/control_dependency\n", 667 | "gradients/rnn/rnn/lstm_cell_6/mul_2_grad/tuple/control_dependency_1\n", 668 | "gradients/rnn/rnn/lstm_cell_6/Sigmoid_2_grad/SigmoidGrad\n", 669 | "gradients/rnn/rnn/lstm_cell_6/Tanh_1_grad/TanhGrad\n", 670 | "gradients/AddN_2\n", 671 | "gradients/rnn/rnn/lstm_cell_6/add_1_grad/Shape\n", 672 | "gradients/rnn/rnn/lstm_cell_6/add_1_grad/Shape_1\n", 673 | "gradients/rnn/rnn/lstm_cell_6/add_1_grad/BroadcastGradientArgs\n", 674 | "gradients/rnn/rnn/lstm_cell_6/add_1_grad/Sum\n", 675 | "gradients/rnn/rnn/lstm_cell_6/add_1_grad/Reshape\n", 676 | "gradients/rnn/rnn/lstm_cell_6/add_1_grad/Sum_1\n", 677 | "gradients/rnn/rnn/lstm_cell_6/add_1_grad/Reshape_1\n", 678 | "gradients/rnn/rnn/lstm_cell_6/add_1_grad/tuple/group_deps\n", 679 | "gradients/rnn/rnn/lstm_cell_6/add_1_grad/tuple/control_dependency\n", 680 | "gradients/rnn/rnn/lstm_cell_6/add_1_grad/tuple/control_dependency_1\n", 681 | "gradients/rnn/rnn/lstm_cell_6/mul_grad/Shape\n", 682 | "gradients/rnn/rnn/lstm_cell_6/mul_grad/Shape_1\n", 683 | "gradients/rnn/rnn/lstm_cell_6/mul_grad/BroadcastGradientArgs\n", 684 | "gradients/rnn/rnn/lstm_cell_6/mul_grad/mul\n", 685 | "gradients/rnn/rnn/lstm_cell_6/mul_grad/Sum\n", 686 | "gradients/rnn/rnn/lstm_cell_6/mul_grad/Reshape\n", 687 | "gradients/rnn/rnn/lstm_cell_6/mul_grad/mul_1\n", 688 | "gradients/rnn/rnn/lstm_cell_6/mul_grad/Sum_1\n", 689 | "gradients/rnn/rnn/lstm_cell_6/mul_grad/Reshape_1\n", 690 | "gradients/rnn/rnn/lstm_cell_6/mul_grad/tuple/group_deps\n", 691 | 
"gradients/rnn/rnn/lstm_cell_6/mul_grad/tuple/control_dependency\n", 692 | "gradients/rnn/rnn/lstm_cell_6/mul_grad/tuple/control_dependency_1\n", 693 | "gradients/rnn/rnn/lstm_cell_6/mul_1_grad/Shape\n", 694 | "gradients/rnn/rnn/lstm_cell_6/mul_1_grad/Shape_1\n", 695 | "gradients/rnn/rnn/lstm_cell_6/mul_1_grad/BroadcastGradientArgs\n", 696 | "gradients/rnn/rnn/lstm_cell_6/mul_1_grad/mul\n", 697 | "gradients/rnn/rnn/lstm_cell_6/mul_1_grad/Sum\n", 698 | "gradients/rnn/rnn/lstm_cell_6/mul_1_grad/Reshape\n", 699 | "gradients/rnn/rnn/lstm_cell_6/mul_1_grad/mul_1\n", 700 | "gradients/rnn/rnn/lstm_cell_6/mul_1_grad/Sum_1\n", 701 | "gradients/rnn/rnn/lstm_cell_6/mul_1_grad/Reshape_1\n", 702 | "gradients/rnn/rnn/lstm_cell_6/mul_1_grad/tuple/group_deps\n", 703 | "gradients/rnn/rnn/lstm_cell_6/mul_1_grad/tuple/control_dependency\n", 704 | "gradients/rnn/rnn/lstm_cell_6/mul_1_grad/tuple/control_dependency_1\n", 705 | "gradients/rnn/rnn/lstm_cell_6/Sigmoid_grad/SigmoidGrad\n", 706 | "gradients/rnn/rnn/lstm_cell_6/Sigmoid_1_grad/SigmoidGrad\n", 707 | "gradients/rnn/rnn/lstm_cell_6/Tanh_grad/TanhGrad\n", 708 | "gradients/rnn/rnn/lstm_cell_6/add_grad/Shape\n", 709 | "gradients/rnn/rnn/lstm_cell_6/add_grad/Shape_1\n", 710 | "gradients/rnn/rnn/lstm_cell_6/add_grad/BroadcastGradientArgs\n", 711 | "gradients/rnn/rnn/lstm_cell_6/add_grad/Sum\n", 712 | "gradients/rnn/rnn/lstm_cell_6/add_grad/Reshape\n", 713 | "gradients/rnn/rnn/lstm_cell_6/add_grad/Sum_1\n", 714 | "gradients/rnn/rnn/lstm_cell_6/add_grad/Reshape_1\n", 715 | "gradients/rnn/rnn/lstm_cell_6/add_grad/tuple/group_deps\n", 716 | "gradients/rnn/rnn/lstm_cell_6/add_grad/tuple/control_dependency\n", 717 | "gradients/rnn/rnn/lstm_cell_6/add_grad/tuple/control_dependency_1\n", 718 | "gradients/rnn/rnn/lstm_cell_6/split_grad/concat\n", 719 | "gradients/rnn/rnn/lstm_cell_6/BiasAdd_grad/BiasAddGrad\n", 720 | "gradients/rnn/rnn/lstm_cell_6/BiasAdd_grad/tuple/group_deps\n", 721 | 
"gradients/rnn/rnn/lstm_cell_6/BiasAdd_grad/tuple/control_dependency\n", 722 | "gradients/rnn/rnn/lstm_cell_6/BiasAdd_grad/tuple/control_dependency_1\n", 723 | "gradients/rnn/rnn/lstm_cell_6/lstm_cell/MatMul_grad/MatMul\n", 724 | "gradients/rnn/rnn/lstm_cell_6/lstm_cell/MatMul_grad/MatMul_1\n", 725 | "gradients/rnn/rnn/lstm_cell_6/lstm_cell/MatMul_grad/tuple/group_deps\n", 726 | "gradients/rnn/rnn/lstm_cell_6/lstm_cell/MatMul_grad/tuple/control_dependency\n", 727 | "gradients/rnn/rnn/lstm_cell_6/lstm_cell/MatMul_grad/tuple/control_dependency_1\n", 728 | "gradients/rnn/rnn/lstm_cell_6/lstm_cell/concat_grad/Rank\n", 729 | "gradients/rnn/rnn/lstm_cell_6/lstm_cell/concat_grad/mod\n", 730 | "gradients/rnn/rnn/lstm_cell_6/lstm_cell/concat_grad/Shape\n", 731 | "gradients/rnn/rnn/lstm_cell_6/lstm_cell/concat_grad/ShapeN\n", 732 | "gradients/rnn/rnn/lstm_cell_6/lstm_cell/concat_grad/ConcatOffset\n", 733 | "gradients/rnn/rnn/lstm_cell_6/lstm_cell/concat_grad/Slice\n", 734 | "gradients/rnn/rnn/lstm_cell_6/lstm_cell/concat_grad/Slice_1\n", 735 | "gradients/rnn/rnn/lstm_cell_6/lstm_cell/concat_grad/tuple/group_deps\n", 736 | "gradients/rnn/rnn/lstm_cell_6/lstm_cell/concat_grad/tuple/control_dependency\n", 737 | "gradients/rnn/rnn/lstm_cell_6/lstm_cell/concat_grad/tuple/control_dependency_1\n", 738 | "gradients/rnn/rnn/lstm_cell_5/mul_2_grad/Shape\n", 739 | "gradients/rnn/rnn/lstm_cell_5/mul_2_grad/Shape_1\n", 740 | "gradients/rnn/rnn/lstm_cell_5/mul_2_grad/BroadcastGradientArgs\n", 741 | "gradients/rnn/rnn/lstm_cell_5/mul_2_grad/mul\n", 742 | "gradients/rnn/rnn/lstm_cell_5/mul_2_grad/Sum\n", 743 | "gradients/rnn/rnn/lstm_cell_5/mul_2_grad/Reshape\n", 744 | "gradients/rnn/rnn/lstm_cell_5/mul_2_grad/mul_1\n", 745 | "gradients/rnn/rnn/lstm_cell_5/mul_2_grad/Sum_1\n", 746 | "gradients/rnn/rnn/lstm_cell_5/mul_2_grad/Reshape_1\n", 747 | "gradients/rnn/rnn/lstm_cell_5/mul_2_grad/tuple/group_deps\n", 748 | "gradients/rnn/rnn/lstm_cell_5/mul_2_grad/tuple/control_dependency\n", 749 | 
"gradients/rnn/rnn/lstm_cell_5/mul_2_grad/tuple/control_dependency_1\n", 750 | "gradients/rnn/rnn/lstm_cell_5/Sigmoid_2_grad/SigmoidGrad\n", 751 | "gradients/rnn/rnn/lstm_cell_5/Tanh_1_grad/TanhGrad\n", 752 | "gradients/AddN_3\n", 753 | "gradients/rnn/rnn/lstm_cell_5/add_1_grad/Shape\n", 754 | "gradients/rnn/rnn/lstm_cell_5/add_1_grad/Shape_1\n", 755 | "gradients/rnn/rnn/lstm_cell_5/add_1_grad/BroadcastGradientArgs\n", 756 | "gradients/rnn/rnn/lstm_cell_5/add_1_grad/Sum\n", 757 | "gradients/rnn/rnn/lstm_cell_5/add_1_grad/Reshape\n", 758 | "gradients/rnn/rnn/lstm_cell_5/add_1_grad/Sum_1\n", 759 | "gradients/rnn/rnn/lstm_cell_5/add_1_grad/Reshape_1\n", 760 | "gradients/rnn/rnn/lstm_cell_5/add_1_grad/tuple/group_deps\n", 761 | "gradients/rnn/rnn/lstm_cell_5/add_1_grad/tuple/control_dependency\n", 762 | "gradients/rnn/rnn/lstm_cell_5/add_1_grad/tuple/control_dependency_1\n", 763 | "gradients/rnn/rnn/lstm_cell_5/mul_grad/Shape\n", 764 | "gradients/rnn/rnn/lstm_cell_5/mul_grad/Shape_1\n", 765 | "gradients/rnn/rnn/lstm_cell_5/mul_grad/BroadcastGradientArgs\n", 766 | "gradients/rnn/rnn/lstm_cell_5/mul_grad/mul\n", 767 | "gradients/rnn/rnn/lstm_cell_5/mul_grad/Sum\n", 768 | "gradients/rnn/rnn/lstm_cell_5/mul_grad/Reshape\n", 769 | "gradients/rnn/rnn/lstm_cell_5/mul_grad/mul_1\n", 770 | "gradients/rnn/rnn/lstm_cell_5/mul_grad/Sum_1\n", 771 | "gradients/rnn/rnn/lstm_cell_5/mul_grad/Reshape_1\n", 772 | "gradients/rnn/rnn/lstm_cell_5/mul_grad/tuple/group_deps\n", 773 | "gradients/rnn/rnn/lstm_cell_5/mul_grad/tuple/control_dependency\n", 774 | "gradients/rnn/rnn/lstm_cell_5/mul_grad/tuple/control_dependency_1\n", 775 | "gradients/rnn/rnn/lstm_cell_5/mul_1_grad/Shape\n", 776 | "gradients/rnn/rnn/lstm_cell_5/mul_1_grad/Shape_1\n", 777 | "gradients/rnn/rnn/lstm_cell_5/mul_1_grad/BroadcastGradientArgs\n", 778 | "gradients/rnn/rnn/lstm_cell_5/mul_1_grad/mul\n", 779 | "gradients/rnn/rnn/lstm_cell_5/mul_1_grad/Sum\n", 780 | "gradients/rnn/rnn/lstm_cell_5/mul_1_grad/Reshape\n", 781 | 
"gradients/rnn/rnn/lstm_cell_5/mul_1_grad/mul_1\n", 782 | "gradients/rnn/rnn/lstm_cell_5/mul_1_grad/Sum_1\n", 783 | "gradients/rnn/rnn/lstm_cell_5/mul_1_grad/Reshape_1\n", 784 | "gradients/rnn/rnn/lstm_cell_5/mul_1_grad/tuple/group_deps\n", 785 | "gradients/rnn/rnn/lstm_cell_5/mul_1_grad/tuple/control_dependency\n", 786 | "gradients/rnn/rnn/lstm_cell_5/mul_1_grad/tuple/control_dependency_1\n", 787 | "gradients/rnn/rnn/lstm_cell_5/Sigmoid_grad/SigmoidGrad\n", 788 | "gradients/rnn/rnn/lstm_cell_5/Sigmoid_1_grad/SigmoidGrad\n", 789 | "gradients/rnn/rnn/lstm_cell_5/Tanh_grad/TanhGrad\n", 790 | "gradients/rnn/rnn/lstm_cell_5/add_grad/Shape\n", 791 | "gradients/rnn/rnn/lstm_cell_5/add_grad/Shape_1\n", 792 | "gradients/rnn/rnn/lstm_cell_5/add_grad/BroadcastGradientArgs\n", 793 | "gradients/rnn/rnn/lstm_cell_5/add_grad/Sum\n", 794 | "gradients/rnn/rnn/lstm_cell_5/add_grad/Reshape\n", 795 | "gradients/rnn/rnn/lstm_cell_5/add_grad/Sum_1\n", 796 | "gradients/rnn/rnn/lstm_cell_5/add_grad/Reshape_1\n", 797 | "gradients/rnn/rnn/lstm_cell_5/add_grad/tuple/group_deps\n", 798 | "gradients/rnn/rnn/lstm_cell_5/add_grad/tuple/control_dependency\n", 799 | "gradients/rnn/rnn/lstm_cell_5/add_grad/tuple/control_dependency_1\n", 800 | "gradients/rnn/rnn/lstm_cell_5/split_grad/concat\n", 801 | "gradients/rnn/rnn/lstm_cell_5/BiasAdd_grad/BiasAddGrad\n", 802 | "gradients/rnn/rnn/lstm_cell_5/BiasAdd_grad/tuple/group_deps\n", 803 | "gradients/rnn/rnn/lstm_cell_5/BiasAdd_grad/tuple/control_dependency\n", 804 | "gradients/rnn/rnn/lstm_cell_5/BiasAdd_grad/tuple/control_dependency_1\n", 805 | "gradients/rnn/rnn/lstm_cell_5/lstm_cell/MatMul_grad/MatMul\n", 806 | "gradients/rnn/rnn/lstm_cell_5/lstm_cell/MatMul_grad/MatMul_1\n", 807 | "gradients/rnn/rnn/lstm_cell_5/lstm_cell/MatMul_grad/tuple/group_deps\n", 808 | "gradients/rnn/rnn/lstm_cell_5/lstm_cell/MatMul_grad/tuple/control_dependency\n", 809 | "gradients/rnn/rnn/lstm_cell_5/lstm_cell/MatMul_grad/tuple/control_dependency_1\n", 810 | 
"gradients/rnn/rnn/lstm_cell_5/lstm_cell/concat_grad/Rank\n", 811 | "gradients/rnn/rnn/lstm_cell_5/lstm_cell/concat_grad/mod\n", 812 | "gradients/rnn/rnn/lstm_cell_5/lstm_cell/concat_grad/Shape\n", 813 | "gradients/rnn/rnn/lstm_cell_5/lstm_cell/concat_grad/ShapeN\n", 814 | "gradients/rnn/rnn/lstm_cell_5/lstm_cell/concat_grad/ConcatOffset\n", 815 | "gradients/rnn/rnn/lstm_cell_5/lstm_cell/concat_grad/Slice\n", 816 | "gradients/rnn/rnn/lstm_cell_5/lstm_cell/concat_grad/Slice_1\n", 817 | "gradients/rnn/rnn/lstm_cell_5/lstm_cell/concat_grad/tuple/group_deps\n", 818 | "gradients/rnn/rnn/lstm_cell_5/lstm_cell/concat_grad/tuple/control_dependency\n", 819 | "gradients/rnn/rnn/lstm_cell_5/lstm_cell/concat_grad/tuple/control_dependency_1\n", 820 | "gradients/rnn/rnn/lstm_cell_4/mul_2_grad/Shape\n", 821 | "gradients/rnn/rnn/lstm_cell_4/mul_2_grad/Shape_1\n", 822 | "gradients/rnn/rnn/lstm_cell_4/mul_2_grad/BroadcastGradientArgs\n", 823 | "gradients/rnn/rnn/lstm_cell_4/mul_2_grad/mul\n", 824 | "gradients/rnn/rnn/lstm_cell_4/mul_2_grad/Sum\n", 825 | "gradients/rnn/rnn/lstm_cell_4/mul_2_grad/Reshape\n", 826 | "gradients/rnn/rnn/lstm_cell_4/mul_2_grad/mul_1\n", 827 | "gradients/rnn/rnn/lstm_cell_4/mul_2_grad/Sum_1\n", 828 | "gradients/rnn/rnn/lstm_cell_4/mul_2_grad/Reshape_1\n", 829 | "gradients/rnn/rnn/lstm_cell_4/mul_2_grad/tuple/group_deps\n", 830 | "gradients/rnn/rnn/lstm_cell_4/mul_2_grad/tuple/control_dependency\n", 831 | "gradients/rnn/rnn/lstm_cell_4/mul_2_grad/tuple/control_dependency_1\n", 832 | "gradients/rnn/rnn/lstm_cell_4/Sigmoid_2_grad/SigmoidGrad\n", 833 | "gradients/rnn/rnn/lstm_cell_4/Tanh_1_grad/TanhGrad\n", 834 | "gradients/AddN_4\n", 835 | "gradients/rnn/rnn/lstm_cell_4/add_1_grad/Shape\n", 836 | "gradients/rnn/rnn/lstm_cell_4/add_1_grad/Shape_1\n", 837 | "gradients/rnn/rnn/lstm_cell_4/add_1_grad/BroadcastGradientArgs\n", 838 | "gradients/rnn/rnn/lstm_cell_4/add_1_grad/Sum\n", 839 | "gradients/rnn/rnn/lstm_cell_4/add_1_grad/Reshape\n", 840 | 
"gradients/rnn/rnn/lstm_cell_4/add_1_grad/Sum_1\n", 841 | "gradients/rnn/rnn/lstm_cell_4/add_1_grad/Reshape_1\n", 842 | "gradients/rnn/rnn/lstm_cell_4/add_1_grad/tuple/group_deps\n", 843 | "gradients/rnn/rnn/lstm_cell_4/add_1_grad/tuple/control_dependency\n", 844 | "gradients/rnn/rnn/lstm_cell_4/add_1_grad/tuple/control_dependency_1\n", 845 | "gradients/rnn/rnn/lstm_cell_4/mul_grad/Shape\n", 846 | "gradients/rnn/rnn/lstm_cell_4/mul_grad/Shape_1\n", 847 | "gradients/rnn/rnn/lstm_cell_4/mul_grad/BroadcastGradientArgs\n", 848 | "gradients/rnn/rnn/lstm_cell_4/mul_grad/mul\n", 849 | "gradients/rnn/rnn/lstm_cell_4/mul_grad/Sum\n", 850 | "gradients/rnn/rnn/lstm_cell_4/mul_grad/Reshape\n", 851 | "gradients/rnn/rnn/lstm_cell_4/mul_grad/mul_1\n", 852 | "gradients/rnn/rnn/lstm_cell_4/mul_grad/Sum_1\n", 853 | "gradients/rnn/rnn/lstm_cell_4/mul_grad/Reshape_1\n", 854 | "gradients/rnn/rnn/lstm_cell_4/mul_grad/tuple/group_deps\n", 855 | "gradients/rnn/rnn/lstm_cell_4/mul_grad/tuple/control_dependency\n", 856 | "gradients/rnn/rnn/lstm_cell_4/mul_grad/tuple/control_dependency_1\n", 857 | "gradients/rnn/rnn/lstm_cell_4/mul_1_grad/Shape\n", 858 | "gradients/rnn/rnn/lstm_cell_4/mul_1_grad/Shape_1\n", 859 | "gradients/rnn/rnn/lstm_cell_4/mul_1_grad/BroadcastGradientArgs\n", 860 | "gradients/rnn/rnn/lstm_cell_4/mul_1_grad/mul\n", 861 | "gradients/rnn/rnn/lstm_cell_4/mul_1_grad/Sum\n", 862 | "gradients/rnn/rnn/lstm_cell_4/mul_1_grad/Reshape\n", 863 | "gradients/rnn/rnn/lstm_cell_4/mul_1_grad/mul_1\n", 864 | "gradients/rnn/rnn/lstm_cell_4/mul_1_grad/Sum_1\n", 865 | "gradients/rnn/rnn/lstm_cell_4/mul_1_grad/Reshape_1\n", 866 | "gradients/rnn/rnn/lstm_cell_4/mul_1_grad/tuple/group_deps\n", 867 | "gradients/rnn/rnn/lstm_cell_4/mul_1_grad/tuple/control_dependency\n", 868 | "gradients/rnn/rnn/lstm_cell_4/mul_1_grad/tuple/control_dependency_1\n", 869 | "gradients/rnn/rnn/lstm_cell_4/Sigmoid_grad/SigmoidGrad\n", 870 | "gradients/rnn/rnn/lstm_cell_4/Sigmoid_1_grad/SigmoidGrad\n", 871 | 
"gradients/rnn/rnn/lstm_cell_4/Tanh_grad/TanhGrad\n", 872 | "gradients/rnn/rnn/lstm_cell_4/add_grad/Shape\n", 873 | "gradients/rnn/rnn/lstm_cell_4/add_grad/Shape_1\n", 874 | "gradients/rnn/rnn/lstm_cell_4/add_grad/BroadcastGradientArgs\n", 875 | "gradients/rnn/rnn/lstm_cell_4/add_grad/Sum\n", 876 | "gradients/rnn/rnn/lstm_cell_4/add_grad/Reshape\n", 877 | "gradients/rnn/rnn/lstm_cell_4/add_grad/Sum_1\n", 878 | "gradients/rnn/rnn/lstm_cell_4/add_grad/Reshape_1\n", 879 | "gradients/rnn/rnn/lstm_cell_4/add_grad/tuple/group_deps\n", 880 | "gradients/rnn/rnn/lstm_cell_4/add_grad/tuple/control_dependency\n", 881 | "gradients/rnn/rnn/lstm_cell_4/add_grad/tuple/control_dependency_1\n", 882 | "gradients/rnn/rnn/lstm_cell_4/split_grad/concat\n", 883 | "gradients/rnn/rnn/lstm_cell_4/BiasAdd_grad/BiasAddGrad\n", 884 | "gradients/rnn/rnn/lstm_cell_4/BiasAdd_grad/tuple/group_deps\n", 885 | "gradients/rnn/rnn/lstm_cell_4/BiasAdd_grad/tuple/control_dependency\n", 886 | "gradients/rnn/rnn/lstm_cell_4/BiasAdd_grad/tuple/control_dependency_1\n", 887 | "gradients/rnn/rnn/lstm_cell_4/lstm_cell/MatMul_grad/MatMul\n", 888 | "gradients/rnn/rnn/lstm_cell_4/lstm_cell/MatMul_grad/MatMul_1\n", 889 | "gradients/rnn/rnn/lstm_cell_4/lstm_cell/MatMul_grad/tuple/group_deps\n", 890 | "gradients/rnn/rnn/lstm_cell_4/lstm_cell/MatMul_grad/tuple/control_dependency\n", 891 | "gradients/rnn/rnn/lstm_cell_4/lstm_cell/MatMul_grad/tuple/control_dependency_1\n", 892 | "gradients/rnn/rnn/lstm_cell_4/lstm_cell/concat_grad/Rank\n", 893 | "gradients/rnn/rnn/lstm_cell_4/lstm_cell/concat_grad/mod\n", 894 | "gradients/rnn/rnn/lstm_cell_4/lstm_cell/concat_grad/Shape\n", 895 | "gradients/rnn/rnn/lstm_cell_4/lstm_cell/concat_grad/ShapeN\n", 896 | "gradients/rnn/rnn/lstm_cell_4/lstm_cell/concat_grad/ConcatOffset\n", 897 | "gradients/rnn/rnn/lstm_cell_4/lstm_cell/concat_grad/Slice\n", 898 | "gradients/rnn/rnn/lstm_cell_4/lstm_cell/concat_grad/Slice_1\n", 899 | 
"gradients/rnn/rnn/lstm_cell_4/lstm_cell/concat_grad/tuple/group_deps\n", 900 | "gradients/rnn/rnn/lstm_cell_4/lstm_cell/concat_grad/tuple/control_dependency\n", 901 | "gradients/rnn/rnn/lstm_cell_4/lstm_cell/concat_grad/tuple/control_dependency_1\n", 902 | "gradients/rnn/rnn/lstm_cell_3/mul_2_grad/Shape\n", 903 | "gradients/rnn/rnn/lstm_cell_3/mul_2_grad/Shape_1\n", 904 | "gradients/rnn/rnn/lstm_cell_3/mul_2_grad/BroadcastGradientArgs\n", 905 | "gradients/rnn/rnn/lstm_cell_3/mul_2_grad/mul\n", 906 | "gradients/rnn/rnn/lstm_cell_3/mul_2_grad/Sum\n", 907 | "gradients/rnn/rnn/lstm_cell_3/mul_2_grad/Reshape\n", 908 | "gradients/rnn/rnn/lstm_cell_3/mul_2_grad/mul_1\n", 909 | "gradients/rnn/rnn/lstm_cell_3/mul_2_grad/Sum_1\n", 910 | "gradients/rnn/rnn/lstm_cell_3/mul_2_grad/Reshape_1\n", 911 | "gradients/rnn/rnn/lstm_cell_3/mul_2_grad/tuple/group_deps\n", 912 | "gradients/rnn/rnn/lstm_cell_3/mul_2_grad/tuple/control_dependency\n", 913 | "gradients/rnn/rnn/lstm_cell_3/mul_2_grad/tuple/control_dependency_1\n", 914 | "gradients/rnn/rnn/lstm_cell_3/Sigmoid_2_grad/SigmoidGrad\n", 915 | "gradients/rnn/rnn/lstm_cell_3/Tanh_1_grad/TanhGrad\n", 916 | "gradients/AddN_5\n", 917 | "gradients/rnn/rnn/lstm_cell_3/add_1_grad/Shape\n", 918 | "gradients/rnn/rnn/lstm_cell_3/add_1_grad/Shape_1\n", 919 | "gradients/rnn/rnn/lstm_cell_3/add_1_grad/BroadcastGradientArgs\n", 920 | "gradients/rnn/rnn/lstm_cell_3/add_1_grad/Sum\n", 921 | "gradients/rnn/rnn/lstm_cell_3/add_1_grad/Reshape\n", 922 | "gradients/rnn/rnn/lstm_cell_3/add_1_grad/Sum_1\n", 923 | "gradients/rnn/rnn/lstm_cell_3/add_1_grad/Reshape_1\n", 924 | "gradients/rnn/rnn/lstm_cell_3/add_1_grad/tuple/group_deps\n", 925 | "gradients/rnn/rnn/lstm_cell_3/add_1_grad/tuple/control_dependency\n", 926 | "gradients/rnn/rnn/lstm_cell_3/add_1_grad/tuple/control_dependency_1\n", 927 | "gradients/rnn/rnn/lstm_cell_3/mul_grad/Shape\n", 928 | "gradients/rnn/rnn/lstm_cell_3/mul_grad/Shape_1\n", 929 | 
"gradients/rnn/rnn/lstm_cell_3/mul_grad/BroadcastGradientArgs\n", 930 | "gradients/rnn/rnn/lstm_cell_3/mul_grad/mul\n", 931 | "gradients/rnn/rnn/lstm_cell_3/mul_grad/Sum\n", 932 | "gradients/rnn/rnn/lstm_cell_3/mul_grad/Reshape\n", 933 | "gradients/rnn/rnn/lstm_cell_3/mul_grad/mul_1\n", 934 | "gradients/rnn/rnn/lstm_cell_3/mul_grad/Sum_1\n", 935 | "gradients/rnn/rnn/lstm_cell_3/mul_grad/Reshape_1\n", 936 | "gradients/rnn/rnn/lstm_cell_3/mul_grad/tuple/group_deps\n", 937 | "gradients/rnn/rnn/lstm_cell_3/mul_grad/tuple/control_dependency\n", 938 | "gradients/rnn/rnn/lstm_cell_3/mul_grad/tuple/control_dependency_1\n", 939 | "gradients/rnn/rnn/lstm_cell_3/mul_1_grad/Shape\n", 940 | "gradients/rnn/rnn/lstm_cell_3/mul_1_grad/Shape_1\n", 941 | "gradients/rnn/rnn/lstm_cell_3/mul_1_grad/BroadcastGradientArgs\n", 942 | "gradients/rnn/rnn/lstm_cell_3/mul_1_grad/mul\n", 943 | "gradients/rnn/rnn/lstm_cell_3/mul_1_grad/Sum\n", 944 | "gradients/rnn/rnn/lstm_cell_3/mul_1_grad/Reshape\n", 945 | "gradients/rnn/rnn/lstm_cell_3/mul_1_grad/mul_1\n", 946 | "gradients/rnn/rnn/lstm_cell_3/mul_1_grad/Sum_1\n", 947 | "gradients/rnn/rnn/lstm_cell_3/mul_1_grad/Reshape_1\n", 948 | "gradients/rnn/rnn/lstm_cell_3/mul_1_grad/tuple/group_deps\n", 949 | "gradients/rnn/rnn/lstm_cell_3/mul_1_grad/tuple/control_dependency\n", 950 | "gradients/rnn/rnn/lstm_cell_3/mul_1_grad/tuple/control_dependency_1\n", 951 | "gradients/rnn/rnn/lstm_cell_3/Sigmoid_grad/SigmoidGrad\n", 952 | "gradients/rnn/rnn/lstm_cell_3/Sigmoid_1_grad/SigmoidGrad\n", 953 | "gradients/rnn/rnn/lstm_cell_3/Tanh_grad/TanhGrad\n", 954 | "gradients/rnn/rnn/lstm_cell_3/add_grad/Shape\n", 955 | "gradients/rnn/rnn/lstm_cell_3/add_grad/Shape_1\n", 956 | "gradients/rnn/rnn/lstm_cell_3/add_grad/BroadcastGradientArgs\n", 957 | "gradients/rnn/rnn/lstm_cell_3/add_grad/Sum\n", 958 | "gradients/rnn/rnn/lstm_cell_3/add_grad/Reshape\n", 959 | "gradients/rnn/rnn/lstm_cell_3/add_grad/Sum_1\n", 960 | "gradients/rnn/rnn/lstm_cell_3/add_grad/Reshape_1\n", 
961 | "gradients/rnn/rnn/lstm_cell_3/add_grad/tuple/group_deps\n", 962 | "gradients/rnn/rnn/lstm_cell_3/add_grad/tuple/control_dependency\n", 963 | "gradients/rnn/rnn/lstm_cell_3/add_grad/tuple/control_dependency_1\n", 964 | "gradients/rnn/rnn/lstm_cell_3/split_grad/concat\n", 965 | "gradients/rnn/rnn/lstm_cell_3/BiasAdd_grad/BiasAddGrad\n", 966 | "gradients/rnn/rnn/lstm_cell_3/BiasAdd_grad/tuple/group_deps\n", 967 | "gradients/rnn/rnn/lstm_cell_3/BiasAdd_grad/tuple/control_dependency\n", 968 | "gradients/rnn/rnn/lstm_cell_3/BiasAdd_grad/tuple/control_dependency_1\n", 969 | "gradients/rnn/rnn/lstm_cell_3/lstm_cell/MatMul_grad/MatMul\n", 970 | "gradients/rnn/rnn/lstm_cell_3/lstm_cell/MatMul_grad/MatMul_1\n", 971 | "gradients/rnn/rnn/lstm_cell_3/lstm_cell/MatMul_grad/tuple/group_deps\n", 972 | "gradients/rnn/rnn/lstm_cell_3/lstm_cell/MatMul_grad/tuple/control_dependency\n", 973 | "gradients/rnn/rnn/lstm_cell_3/lstm_cell/MatMul_grad/tuple/control_dependency_1\n", 974 | "gradients/rnn/rnn/lstm_cell_3/lstm_cell/concat_grad/Rank\n", 975 | "gradients/rnn/rnn/lstm_cell_3/lstm_cell/concat_grad/mod\n", 976 | "gradients/rnn/rnn/lstm_cell_3/lstm_cell/concat_grad/Shape\n", 977 | "gradients/rnn/rnn/lstm_cell_3/lstm_cell/concat_grad/ShapeN\n", 978 | "gradients/rnn/rnn/lstm_cell_3/lstm_cell/concat_grad/ConcatOffset\n", 979 | "gradients/rnn/rnn/lstm_cell_3/lstm_cell/concat_grad/Slice\n", 980 | "gradients/rnn/rnn/lstm_cell_3/lstm_cell/concat_grad/Slice_1\n", 981 | "gradients/rnn/rnn/lstm_cell_3/lstm_cell/concat_grad/tuple/group_deps\n", 982 | "gradients/rnn/rnn/lstm_cell_3/lstm_cell/concat_grad/tuple/control_dependency\n", 983 | "gradients/rnn/rnn/lstm_cell_3/lstm_cell/concat_grad/tuple/control_dependency_1\n", 984 | "gradients/rnn/rnn/lstm_cell_2/mul_2_grad/Shape\n", 985 | "gradients/rnn/rnn/lstm_cell_2/mul_2_grad/Shape_1\n", 986 | "gradients/rnn/rnn/lstm_cell_2/mul_2_grad/BroadcastGradientArgs\n", 987 | "gradients/rnn/rnn/lstm_cell_2/mul_2_grad/mul\n", 988 | 
"gradients/rnn/rnn/lstm_cell_2/mul_2_grad/Sum\n", 989 | "gradients/rnn/rnn/lstm_cell_2/mul_2_grad/Reshape\n", 990 | "gradients/rnn/rnn/lstm_cell_2/mul_2_grad/mul_1\n", 991 | "gradients/rnn/rnn/lstm_cell_2/mul_2_grad/Sum_1\n", 992 | "gradients/rnn/rnn/lstm_cell_2/mul_2_grad/Reshape_1\n", 993 | "gradients/rnn/rnn/lstm_cell_2/mul_2_grad/tuple/group_deps\n", 994 | "gradients/rnn/rnn/lstm_cell_2/mul_2_grad/tuple/control_dependency\n", 995 | "gradients/rnn/rnn/lstm_cell_2/mul_2_grad/tuple/control_dependency_1\n", 996 | "gradients/rnn/rnn/lstm_cell_2/Sigmoid_2_grad/SigmoidGrad\n", 997 | "gradients/rnn/rnn/lstm_cell_2/Tanh_1_grad/TanhGrad\n", 998 | "gradients/AddN_6\n", 999 | "gradients/rnn/rnn/lstm_cell_2/add_1_grad/Shape\n", 1000 | "gradients/rnn/rnn/lstm_cell_2/add_1_grad/Shape_1\n", 1001 | "gradients/rnn/rnn/lstm_cell_2/add_1_grad/BroadcastGradientArgs\n", 1002 | "gradients/rnn/rnn/lstm_cell_2/add_1_grad/Sum\n", 1003 | "gradients/rnn/rnn/lstm_cell_2/add_1_grad/Reshape\n", 1004 | "gradients/rnn/rnn/lstm_cell_2/add_1_grad/Sum_1\n", 1005 | "gradients/rnn/rnn/lstm_cell_2/add_1_grad/Reshape_1\n", 1006 | "gradients/rnn/rnn/lstm_cell_2/add_1_grad/tuple/group_deps\n", 1007 | "gradients/rnn/rnn/lstm_cell_2/add_1_grad/tuple/control_dependency\n", 1008 | "gradients/rnn/rnn/lstm_cell_2/add_1_grad/tuple/control_dependency_1\n", 1009 | "gradients/rnn/rnn/lstm_cell_2/mul_grad/Shape\n", 1010 | "gradients/rnn/rnn/lstm_cell_2/mul_grad/Shape_1\n", 1011 | "gradients/rnn/rnn/lstm_cell_2/mul_grad/BroadcastGradientArgs\n", 1012 | "gradients/rnn/rnn/lstm_cell_2/mul_grad/mul\n", 1013 | "gradients/rnn/rnn/lstm_cell_2/mul_grad/Sum\n", 1014 | "gradients/rnn/rnn/lstm_cell_2/mul_grad/Reshape\n", 1015 | "gradients/rnn/rnn/lstm_cell_2/mul_grad/mul_1\n", 1016 | "gradients/rnn/rnn/lstm_cell_2/mul_grad/Sum_1\n", 1017 | "gradients/rnn/rnn/lstm_cell_2/mul_grad/Reshape_1\n", 1018 | "gradients/rnn/rnn/lstm_cell_2/mul_grad/tuple/group_deps\n", 1019 | 
"gradients/rnn/rnn/lstm_cell_2/mul_grad/tuple/control_dependency\n", 1020 | "gradients/rnn/rnn/lstm_cell_2/mul_grad/tuple/control_dependency_1\n", 1021 | "gradients/rnn/rnn/lstm_cell_2/mul_1_grad/Shape\n", 1022 | "gradients/rnn/rnn/lstm_cell_2/mul_1_grad/Shape_1\n", 1023 | "gradients/rnn/rnn/lstm_cell_2/mul_1_grad/BroadcastGradientArgs\n", 1024 | "gradients/rnn/rnn/lstm_cell_2/mul_1_grad/mul\n", 1025 | "gradients/rnn/rnn/lstm_cell_2/mul_1_grad/Sum\n", 1026 | "gradients/rnn/rnn/lstm_cell_2/mul_1_grad/Reshape\n", 1027 | "gradients/rnn/rnn/lstm_cell_2/mul_1_grad/mul_1\n", 1028 | "gradients/rnn/rnn/lstm_cell_2/mul_1_grad/Sum_1\n", 1029 | "gradients/rnn/rnn/lstm_cell_2/mul_1_grad/Reshape_1\n", 1030 | "gradients/rnn/rnn/lstm_cell_2/mul_1_grad/tuple/group_deps\n", 1031 | "gradients/rnn/rnn/lstm_cell_2/mul_1_grad/tuple/control_dependency\n", 1032 | "gradients/rnn/rnn/lstm_cell_2/mul_1_grad/tuple/control_dependency_1\n", 1033 | "gradients/rnn/rnn/lstm_cell_2/Sigmoid_grad/SigmoidGrad\n", 1034 | "gradients/rnn/rnn/lstm_cell_2/Sigmoid_1_grad/SigmoidGrad\n", 1035 | "gradients/rnn/rnn/lstm_cell_2/Tanh_grad/TanhGrad\n", 1036 | "gradients/rnn/rnn/lstm_cell_2/add_grad/Shape\n", 1037 | "gradients/rnn/rnn/lstm_cell_2/add_grad/Shape_1\n", 1038 | "gradients/rnn/rnn/lstm_cell_2/add_grad/BroadcastGradientArgs\n", 1039 | "gradients/rnn/rnn/lstm_cell_2/add_grad/Sum\n", 1040 | "gradients/rnn/rnn/lstm_cell_2/add_grad/Reshape\n", 1041 | "gradients/rnn/rnn/lstm_cell_2/add_grad/Sum_1\n", 1042 | "gradients/rnn/rnn/lstm_cell_2/add_grad/Reshape_1\n", 1043 | "gradients/rnn/rnn/lstm_cell_2/add_grad/tuple/group_deps\n", 1044 | "gradients/rnn/rnn/lstm_cell_2/add_grad/tuple/control_dependency\n", 1045 | "gradients/rnn/rnn/lstm_cell_2/add_grad/tuple/control_dependency_1\n", 1046 | "gradients/rnn/rnn/lstm_cell_2/split_grad/concat\n", 1047 | "gradients/rnn/rnn/lstm_cell_2/BiasAdd_grad/BiasAddGrad\n", 1048 | "gradients/rnn/rnn/lstm_cell_2/BiasAdd_grad/tuple/group_deps\n", 1049 | 
"gradients/rnn/rnn/lstm_cell_2/BiasAdd_grad/tuple/control_dependency\n", 1050 | "gradients/rnn/rnn/lstm_cell_2/BiasAdd_grad/tuple/control_dependency_1\n", 1051 | "gradients/rnn/rnn/lstm_cell_2/lstm_cell/MatMul_grad/MatMul\n", 1052 | "gradients/rnn/rnn/lstm_cell_2/lstm_cell/MatMul_grad/MatMul_1\n", 1053 | "gradients/rnn/rnn/lstm_cell_2/lstm_cell/MatMul_grad/tuple/group_deps\n", 1054 | "gradients/rnn/rnn/lstm_cell_2/lstm_cell/MatMul_grad/tuple/control_dependency\n", 1055 | "gradients/rnn/rnn/lstm_cell_2/lstm_cell/MatMul_grad/tuple/control_dependency_1\n", 1056 | "gradients/rnn/rnn/lstm_cell_2/lstm_cell/concat_grad/Rank\n", 1057 | "gradients/rnn/rnn/lstm_cell_2/lstm_cell/concat_grad/mod\n", 1058 | "gradients/rnn/rnn/lstm_cell_2/lstm_cell/concat_grad/Shape\n", 1059 | "gradients/rnn/rnn/lstm_cell_2/lstm_cell/concat_grad/ShapeN\n", 1060 | "gradients/rnn/rnn/lstm_cell_2/lstm_cell/concat_grad/ConcatOffset\n", 1061 | "gradients/rnn/rnn/lstm_cell_2/lstm_cell/concat_grad/Slice\n", 1062 | "gradients/rnn/rnn/lstm_cell_2/lstm_cell/concat_grad/Slice_1\n", 1063 | "gradients/rnn/rnn/lstm_cell_2/lstm_cell/concat_grad/tuple/group_deps\n", 1064 | "gradients/rnn/rnn/lstm_cell_2/lstm_cell/concat_grad/tuple/control_dependency\n", 1065 | "gradients/rnn/rnn/lstm_cell_2/lstm_cell/concat_grad/tuple/control_dependency_1\n", 1066 | "gradients/rnn/rnn/lstm_cell_1/mul_2_grad/Shape\n", 1067 | "gradients/rnn/rnn/lstm_cell_1/mul_2_grad/Shape_1\n", 1068 | "gradients/rnn/rnn/lstm_cell_1/mul_2_grad/BroadcastGradientArgs\n", 1069 | "gradients/rnn/rnn/lstm_cell_1/mul_2_grad/mul\n", 1070 | "gradients/rnn/rnn/lstm_cell_1/mul_2_grad/Sum\n", 1071 | "gradients/rnn/rnn/lstm_cell_1/mul_2_grad/Reshape\n", 1072 | "gradients/rnn/rnn/lstm_cell_1/mul_2_grad/mul_1\n", 1073 | "gradients/rnn/rnn/lstm_cell_1/mul_2_grad/Sum_1\n", 1074 | "gradients/rnn/rnn/lstm_cell_1/mul_2_grad/Reshape_1\n", 1075 | "gradients/rnn/rnn/lstm_cell_1/mul_2_grad/tuple/group_deps\n", 1076 | 
"gradients/rnn/rnn/lstm_cell_1/mul_2_grad/tuple/control_dependency\n", 1077 | "gradients/rnn/rnn/lstm_cell_1/mul_2_grad/tuple/control_dependency_1\n", 1078 | "gradients/rnn/rnn/lstm_cell_1/Sigmoid_2_grad/SigmoidGrad\n", 1079 | "gradients/rnn/rnn/lstm_cell_1/Tanh_1_grad/TanhGrad\n", 1080 | "gradients/AddN_7\n", 1081 | "gradients/rnn/rnn/lstm_cell_1/add_1_grad/Shape\n", 1082 | "gradients/rnn/rnn/lstm_cell_1/add_1_grad/Shape_1\n", 1083 | "gradients/rnn/rnn/lstm_cell_1/add_1_grad/BroadcastGradientArgs\n", 1084 | "gradients/rnn/rnn/lstm_cell_1/add_1_grad/Sum\n", 1085 | "gradients/rnn/rnn/lstm_cell_1/add_1_grad/Reshape\n", 1086 | "gradients/rnn/rnn/lstm_cell_1/add_1_grad/Sum_1\n", 1087 | "gradients/rnn/rnn/lstm_cell_1/add_1_grad/Reshape_1\n", 1088 | "gradients/rnn/rnn/lstm_cell_1/add_1_grad/tuple/group_deps\n", 1089 | "gradients/rnn/rnn/lstm_cell_1/add_1_grad/tuple/control_dependency\n", 1090 | "gradients/rnn/rnn/lstm_cell_1/add_1_grad/tuple/control_dependency_1\n", 1091 | "gradients/rnn/rnn/lstm_cell_1/mul_grad/Shape\n", 1092 | "gradients/rnn/rnn/lstm_cell_1/mul_grad/Shape_1\n", 1093 | "gradients/rnn/rnn/lstm_cell_1/mul_grad/BroadcastGradientArgs\n", 1094 | "gradients/rnn/rnn/lstm_cell_1/mul_grad/mul\n", 1095 | "gradients/rnn/rnn/lstm_cell_1/mul_grad/Sum\n", 1096 | "gradients/rnn/rnn/lstm_cell_1/mul_grad/Reshape\n", 1097 | "gradients/rnn/rnn/lstm_cell_1/mul_grad/mul_1\n", 1098 | "gradients/rnn/rnn/lstm_cell_1/mul_grad/Sum_1\n", 1099 | "gradients/rnn/rnn/lstm_cell_1/mul_grad/Reshape_1\n", 1100 | "gradients/rnn/rnn/lstm_cell_1/mul_grad/tuple/group_deps\n", 1101 | "gradients/rnn/rnn/lstm_cell_1/mul_grad/tuple/control_dependency\n", 1102 | "gradients/rnn/rnn/lstm_cell_1/mul_grad/tuple/control_dependency_1\n", 1103 | "gradients/rnn/rnn/lstm_cell_1/mul_1_grad/Shape\n", 1104 | "gradients/rnn/rnn/lstm_cell_1/mul_1_grad/Shape_1\n", 1105 | "gradients/rnn/rnn/lstm_cell_1/mul_1_grad/BroadcastGradientArgs\n", 1106 | "gradients/rnn/rnn/lstm_cell_1/mul_1_grad/mul\n", 1107 | 
"gradients/rnn/rnn/lstm_cell_1/mul_1_grad/Sum\n", 1108 | "gradients/rnn/rnn/lstm_cell_1/mul_1_grad/Reshape\n", 1109 | "gradients/rnn/rnn/lstm_cell_1/mul_1_grad/mul_1\n", 1110 | "gradients/rnn/rnn/lstm_cell_1/mul_1_grad/Sum_1\n", 1111 | "gradients/rnn/rnn/lstm_cell_1/mul_1_grad/Reshape_1\n", 1112 | "gradients/rnn/rnn/lstm_cell_1/mul_1_grad/tuple/group_deps\n", 1113 | "gradients/rnn/rnn/lstm_cell_1/mul_1_grad/tuple/control_dependency\n", 1114 | "gradients/rnn/rnn/lstm_cell_1/mul_1_grad/tuple/control_dependency_1\n", 1115 | "gradients/rnn/rnn/lstm_cell_1/Sigmoid_grad/SigmoidGrad\n", 1116 | "gradients/rnn/rnn/lstm_cell_1/Sigmoid_1_grad/SigmoidGrad\n", 1117 | "gradients/rnn/rnn/lstm_cell_1/Tanh_grad/TanhGrad\n", 1118 | "gradients/rnn/rnn/lstm_cell_1/add_grad/Shape\n", 1119 | "gradients/rnn/rnn/lstm_cell_1/add_grad/Shape_1\n", 1120 | "gradients/rnn/rnn/lstm_cell_1/add_grad/BroadcastGradientArgs\n", 1121 | "gradients/rnn/rnn/lstm_cell_1/add_grad/Sum\n", 1122 | "gradients/rnn/rnn/lstm_cell_1/add_grad/Reshape\n", 1123 | "gradients/rnn/rnn/lstm_cell_1/add_grad/Sum_1\n", 1124 | "gradients/rnn/rnn/lstm_cell_1/add_grad/Reshape_1\n", 1125 | "gradients/rnn/rnn/lstm_cell_1/add_grad/tuple/group_deps\n", 1126 | "gradients/rnn/rnn/lstm_cell_1/add_grad/tuple/control_dependency\n", 1127 | "gradients/rnn/rnn/lstm_cell_1/add_grad/tuple/control_dependency_1\n", 1128 | "gradients/rnn/rnn/lstm_cell_1/split_grad/concat\n", 1129 | "gradients/rnn/rnn/lstm_cell_1/BiasAdd_grad/BiasAddGrad\n", 1130 | "gradients/rnn/rnn/lstm_cell_1/BiasAdd_grad/tuple/group_deps\n", 1131 | "gradients/rnn/rnn/lstm_cell_1/BiasAdd_grad/tuple/control_dependency\n", 1132 | "gradients/rnn/rnn/lstm_cell_1/BiasAdd_grad/tuple/control_dependency_1\n", 1133 | "gradients/rnn/rnn/lstm_cell_1/lstm_cell/MatMul_grad/MatMul\n", 1134 | "gradients/rnn/rnn/lstm_cell_1/lstm_cell/MatMul_grad/MatMul_1\n", 1135 | "gradients/rnn/rnn/lstm_cell_1/lstm_cell/MatMul_grad/tuple/group_deps\n", 1136 | 
"gradients/rnn/rnn/lstm_cell_1/lstm_cell/MatMul_grad/tuple/control_dependency\n", 1137 | "gradients/rnn/rnn/lstm_cell_1/lstm_cell/MatMul_grad/tuple/control_dependency_1\n", 1138 | "gradients/rnn/rnn/lstm_cell_1/lstm_cell/concat_grad/Rank\n", 1139 | "gradients/rnn/rnn/lstm_cell_1/lstm_cell/concat_grad/mod\n", 1140 | "gradients/rnn/rnn/lstm_cell_1/lstm_cell/concat_grad/Shape\n", 1141 | "gradients/rnn/rnn/lstm_cell_1/lstm_cell/concat_grad/ShapeN\n", 1142 | "gradients/rnn/rnn/lstm_cell_1/lstm_cell/concat_grad/ConcatOffset\n", 1143 | "gradients/rnn/rnn/lstm_cell_1/lstm_cell/concat_grad/Slice\n", 1144 | "gradients/rnn/rnn/lstm_cell_1/lstm_cell/concat_grad/Slice_1\n", 1145 | "gradients/rnn/rnn/lstm_cell_1/lstm_cell/concat_grad/tuple/group_deps\n", 1146 | "gradients/rnn/rnn/lstm_cell_1/lstm_cell/concat_grad/tuple/control_dependency\n", 1147 | "gradients/rnn/rnn/lstm_cell_1/lstm_cell/concat_grad/tuple/control_dependency_1\n", 1148 | "gradients/rnn/rnn/lstm_cell/mul_2_grad/Shape\n", 1149 | "gradients/rnn/rnn/lstm_cell/mul_2_grad/Shape_1\n", 1150 | "gradients/rnn/rnn/lstm_cell/mul_2_grad/BroadcastGradientArgs\n", 1151 | "gradients/rnn/rnn/lstm_cell/mul_2_grad/mul\n", 1152 | "gradients/rnn/rnn/lstm_cell/mul_2_grad/Sum\n", 1153 | "gradients/rnn/rnn/lstm_cell/mul_2_grad/Reshape\n", 1154 | "gradients/rnn/rnn/lstm_cell/mul_2_grad/mul_1\n", 1155 | "gradients/rnn/rnn/lstm_cell/mul_2_grad/Sum_1\n", 1156 | "gradients/rnn/rnn/lstm_cell/mul_2_grad/Reshape_1\n", 1157 | "gradients/rnn/rnn/lstm_cell/mul_2_grad/tuple/group_deps\n", 1158 | "gradients/rnn/rnn/lstm_cell/mul_2_grad/tuple/control_dependency\n", 1159 | "gradients/rnn/rnn/lstm_cell/mul_2_grad/tuple/control_dependency_1\n", 1160 | "gradients/rnn/rnn/lstm_cell/Sigmoid_2_grad/SigmoidGrad\n", 1161 | "gradients/rnn/rnn/lstm_cell/Tanh_1_grad/TanhGrad\n", 1162 | "gradients/AddN_8\n", 1163 | "gradients/rnn/rnn/lstm_cell/add_1_grad/Shape\n", 1164 | "gradients/rnn/rnn/lstm_cell/add_1_grad/Shape_1\n", 1165 | 
"gradients/rnn/rnn/lstm_cell/add_1_grad/BroadcastGradientArgs\n", 1166 | "gradients/rnn/rnn/lstm_cell/add_1_grad/Sum\n", 1167 | "gradients/rnn/rnn/lstm_cell/add_1_grad/Reshape\n", 1168 | "gradients/rnn/rnn/lstm_cell/add_1_grad/Sum_1\n", 1169 | "gradients/rnn/rnn/lstm_cell/add_1_grad/Reshape_1\n", 1170 | "gradients/rnn/rnn/lstm_cell/add_1_grad/tuple/group_deps\n", 1171 | "gradients/rnn/rnn/lstm_cell/add_1_grad/tuple/control_dependency\n", 1172 | "gradients/rnn/rnn/lstm_cell/add_1_grad/tuple/control_dependency_1\n", 1173 | "gradients/rnn/rnn/lstm_cell/mul_grad/Shape\n", 1174 | "gradients/rnn/rnn/lstm_cell/mul_grad/Shape_1\n", 1175 | "gradients/rnn/rnn/lstm_cell/mul_grad/BroadcastGradientArgs\n", 1176 | "gradients/rnn/rnn/lstm_cell/mul_grad/mul\n", 1177 | "gradients/rnn/rnn/lstm_cell/mul_grad/Sum\n", 1178 | "gradients/rnn/rnn/lstm_cell/mul_grad/Reshape\n", 1179 | "gradients/rnn/rnn/lstm_cell/mul_grad/mul_1\n", 1180 | "gradients/rnn/rnn/lstm_cell/mul_grad/Sum_1\n", 1181 | "gradients/rnn/rnn/lstm_cell/mul_grad/Reshape_1\n", 1182 | "gradients/rnn/rnn/lstm_cell/mul_grad/tuple/group_deps\n", 1183 | "gradients/rnn/rnn/lstm_cell/mul_grad/tuple/control_dependency\n", 1184 | "gradients/rnn/rnn/lstm_cell/mul_grad/tuple/control_dependency_1\n", 1185 | "gradients/rnn/rnn/lstm_cell/mul_1_grad/Shape\n", 1186 | "gradients/rnn/rnn/lstm_cell/mul_1_grad/Shape_1\n", 1187 | "gradients/rnn/rnn/lstm_cell/mul_1_grad/BroadcastGradientArgs\n", 1188 | "gradients/rnn/rnn/lstm_cell/mul_1_grad/mul\n", 1189 | "gradients/rnn/rnn/lstm_cell/mul_1_grad/Sum\n", 1190 | "gradients/rnn/rnn/lstm_cell/mul_1_grad/Reshape\n", 1191 | "gradients/rnn/rnn/lstm_cell/mul_1_grad/mul_1\n", 1192 | "gradients/rnn/rnn/lstm_cell/mul_1_grad/Sum_1\n", 1193 | "gradients/rnn/rnn/lstm_cell/mul_1_grad/Reshape_1\n", 1194 | "gradients/rnn/rnn/lstm_cell/mul_1_grad/tuple/group_deps\n", 1195 | "gradients/rnn/rnn/lstm_cell/mul_1_grad/tuple/control_dependency\n", 1196 | 
"gradients/rnn/rnn/lstm_cell/mul_1_grad/tuple/control_dependency_1\n", 1197 | "gradients/rnn/rnn/lstm_cell/Sigmoid_grad/SigmoidGrad\n", 1198 | "gradients/rnn/rnn/lstm_cell/Sigmoid_1_grad/SigmoidGrad\n", 1199 | "gradients/rnn/rnn/lstm_cell/Tanh_grad/TanhGrad\n", 1200 | "gradients/rnn/rnn/lstm_cell/add_grad/Shape\n", 1201 | "gradients/rnn/rnn/lstm_cell/add_grad/Shape_1\n", 1202 | "gradients/rnn/rnn/lstm_cell/add_grad/BroadcastGradientArgs\n", 1203 | "gradients/rnn/rnn/lstm_cell/add_grad/Sum\n", 1204 | "gradients/rnn/rnn/lstm_cell/add_grad/Reshape\n", 1205 | "gradients/rnn/rnn/lstm_cell/add_grad/Sum_1\n", 1206 | "gradients/rnn/rnn/lstm_cell/add_grad/Reshape_1\n", 1207 | "gradients/rnn/rnn/lstm_cell/add_grad/tuple/group_deps\n", 1208 | "gradients/rnn/rnn/lstm_cell/add_grad/tuple/control_dependency\n", 1209 | "gradients/rnn/rnn/lstm_cell/add_grad/tuple/control_dependency_1\n", 1210 | "gradients/rnn/rnn/lstm_cell/split_grad/concat\n", 1211 | "gradients/rnn/rnn/lstm_cell/BiasAdd_grad/BiasAddGrad\n", 1212 | "gradients/rnn/rnn/lstm_cell/BiasAdd_grad/tuple/group_deps\n", 1213 | "gradients/rnn/rnn/lstm_cell/BiasAdd_grad/tuple/control_dependency\n", 1214 | "gradients/rnn/rnn/lstm_cell/BiasAdd_grad/tuple/control_dependency_1\n", 1215 | "gradients/rnn/rnn/lstm_cell/lstm_cell/MatMul_grad/MatMul\n", 1216 | "gradients/rnn/rnn/lstm_cell/lstm_cell/MatMul_grad/MatMul_1\n", 1217 | "gradients/rnn/rnn/lstm_cell/lstm_cell/MatMul_grad/tuple/group_deps\n", 1218 | "gradients/rnn/rnn/lstm_cell/lstm_cell/MatMul_grad/tuple/control_dependency\n", 1219 | "gradients/rnn/rnn/lstm_cell/lstm_cell/MatMul_grad/tuple/control_dependency_1\n", 1220 | "gradients/AddN_9\n", 1221 | "gradients/AddN_10\n", 1222 | "beta1_power/initial_value\n", 1223 | "beta1_power\n", 1224 | "beta1_power/Assign\n", 1225 | "beta1_power/read\n", 1226 | "beta2_power/initial_value\n", 1227 | "beta2_power\n", 1228 | "beta2_power/Assign\n", 1229 | "beta2_power/read\n", 1230 | "zeros\n", 1231 | "Variable/Adam\n", 1232 | 
"Variable/Adam/Assign\n", 1233 | "Variable/Adam/read\n", 1234 | "zeros_1\n", 1235 | "Variable/Adam_1\n", 1236 | "Variable/Adam_1/Assign\n", 1237 | "Variable/Adam_1/read\n", 1238 | "zeros_2\n", 1239 | "Variable_1/Adam\n", 1240 | "Variable_1/Adam/Assign\n", 1241 | "Variable_1/Adam/read\n", 1242 | "zeros_3\n", 1243 | "Variable_1/Adam_1\n", 1244 | "Variable_1/Adam_1/Assign\n", 1245 | "Variable_1/Adam_1/read\n", 1246 | "zeros_4\n", 1247 | "rnn/rnn/lstm_cell/weights/Adam\n", 1248 | "rnn/rnn/lstm_cell/weights/Adam/Assign\n", 1249 | "rnn/rnn/lstm_cell/weights/Adam/read\n", 1250 | "zeros_5\n", 1251 | "rnn/rnn/lstm_cell/weights/Adam_1\n", 1252 | "rnn/rnn/lstm_cell/weights/Adam_1/Assign\n", 1253 | "rnn/rnn/lstm_cell/weights/Adam_1/read\n", 1254 | "zeros_6\n", 1255 | "rnn/rnn/lstm_cell/biases/Adam\n", 1256 | "rnn/rnn/lstm_cell/biases/Adam/Assign\n", 1257 | "rnn/rnn/lstm_cell/biases/Adam/read\n", 1258 | "zeros_7\n", 1259 | "rnn/rnn/lstm_cell/biases/Adam_1\n", 1260 | "rnn/rnn/lstm_cell/biases/Adam_1/Assign\n", 1261 | "rnn/rnn/lstm_cell/biases/Adam_1/read\n", 1262 | "Adam/learning_rate\n", 1263 | "Adam/beta1\n", 1264 | "Adam/beta2\n", 1265 | "Adam/epsilon\n", 1266 | "Adam/update_Variable/ApplyAdam\n", 1267 | "Adam/update_Variable_1/ApplyAdam\n", 1268 | "Adam/update_rnn/rnn/lstm_cell/weights/ApplyAdam\n", 1269 | "Adam/update_rnn/rnn/lstm_cell/biases/ApplyAdam\n", 1270 | "Adam/mul\n", 1271 | "Adam/Assign\n", 1272 | "Adam/mul_1\n", 1273 | "Adam/Assign_1\n", 1274 | "Adam\n", 1275 | "init\n", 1276 | "save/Const\n", 1277 | "save/SaveV2/tensor_names\n", 1278 | "save/SaveV2/shape_and_slices\n", 1279 | "save/SaveV2\n", 1280 | "save/control_dependency\n", 1281 | "save/RestoreV2/tensor_names\n", 1282 | "save/RestoreV2/shape_and_slices\n", 1283 | "save/RestoreV2\n", 1284 | "save/Assign\n", 1285 | "save/RestoreV2_1/tensor_names\n", 1286 | "save/RestoreV2_1/shape_and_slices\n", 1287 | "save/RestoreV2_1\n", 1288 | "save/Assign_1\n", 1289 | "save/RestoreV2_2/tensor_names\n", 1290 | 
"save/RestoreV2_2/shape_and_slices\n", 1291 | "save/RestoreV2_2\n", 1292 | "save/Assign_2\n", 1293 | "save/RestoreV2_3/tensor_names\n", 1294 | "save/RestoreV2_3/shape_and_slices\n", 1295 | "save/RestoreV2_3\n", 1296 | "save/Assign_3\n", 1297 | "save/restore_all\n" 1298 | ] 1299 | } 1300 | ], 1301 | "source": [ 1302 | "print_nodes(tf.get_default_graph())" 1303 | ] 1304 | }, 1305 | { 1306 | "cell_type": "markdown", 1307 | "metadata": { 1308 | "deletable": true, 1309 | "editable": true 1310 | }, 1311 | "source": [ 1312 | "# Example" 1313 | ] 1314 | }, 1315 | { 1316 | "cell_type": "code", 1317 | "execution_count": 5, 1318 | "metadata": { 1319 | "collapsed": false, 1320 | "deletable": true, 1321 | "editable": true 1322 | }, 1323 | "outputs": [ 1324 | { 1325 | "name": "stdout", 1326 | "output_type": "stream", 1327 | "text": [ 1328 | "[1, 1]\n", 1329 | "Variable\n", 1330 | "[[ 8.80031204]]\n" 1331 | ] 1332 | } 1333 | ], 1334 | "source": [ 1335 | "temp = tf.get_collection(\"trainable_variables\")[0]\n", 1336 | "print(temp.get_shape().as_list())\n", 1337 | "print(temp.op.name)\n", 1338 | "print(sess.run(temp))" 1339 | ] 1340 | }, 1341 | { 1342 | "cell_type": "markdown", 1343 | "metadata": { 1344 | "deletable": true, 1345 | "editable": true 1346 | }, 1347 | "source": [ 1348 | "# Create var_dict" 1349 | ] 1350 | }, 1351 | { 1352 | "cell_type": "code", 1353 | "execution_count": 6, 1354 | "metadata": { 1355 | "collapsed": false, 1356 | "deletable": true, 1357 | "editable": true 1358 | }, 1359 | "outputs": [], 1360 | "source": [ 1361 | "var_dict = get_var_dict(sess, \"trainable_variables\")" 1362 | ] 1363 | }, 1364 | { 1365 | "cell_type": "code", 1366 | "execution_count": 7, 1367 | "metadata": { 1368 | "collapsed": false, 1369 | "deletable": true, 1370 | "editable": true 1371 | }, 1372 | "outputs": [ 1373 | { 1374 | "name": "stdout", 1375 | "output_type": "stream", 1376 | "text": [ 1377 | "{'Variable': {'shape': [1, 1], 'value': array([[ 8.80031204]], dtype=float32)},\n", 1378 | 
" 'Variable_1': {'shape': [1], 'value': array([ 6.62012672], dtype=float32)},\n", 1379 | " 'rnn/rnn/lstm_cell/biases': {'shape': [4],\n", 1380 | " 'value': array([ 0.82305032, 1.11515999, 0.60988927, -2.59389567], dtype=float32)},\n", 1381 | " 'rnn/rnn/lstm_cell/weights': {'shape': [2, 4],\n", 1382 | " 'value': array([[-0.07742871, 0.6309644 , 1.12232792, 0.46602029],\n", 1383 | " [ 1.38758624, 2.42759705, 0.84666395, -2.32156873]], dtype=float32)}}\n" 1384 | ] 1385 | } 1386 | ], 1387 | "source": [ 1388 | "pprint.pprint(var_dict)" 1389 | ] 1390 | }, 1391 | { 1392 | "cell_type": "markdown", 1393 | "metadata": { 1394 | "deletable": true, 1395 | "editable": true 1396 | }, 1397 | "source": [ 1398 | "# Reset graph" 1399 | ] 1400 | }, 1401 | { 1402 | "cell_type": "code", 1403 | "execution_count": 8, 1404 | "metadata": { 1405 | "collapsed": false, 1406 | "deletable": true, 1407 | "editable": true 1408 | }, 1409 | "outputs": [ 1410 | { 1411 | "name": "stdout", 1412 | "output_type": "stream", 1413 | "text": [ 1414 | "Graph : \n", 1415 | "Collection name : trainable_variables\n" 1416 | ] 1417 | } 1418 | ], 1419 | "source": [ 1420 | "sess.close()\n", 1421 | "tf.reset_default_graph()\n", 1422 | "print_nodes(tf.get_default_graph())\n", 1423 | "print_keys(\"trainable_variables\")" 1424 | ] 1425 | }, 1426 | { 1427 | "cell_type": "markdown", 1428 | "metadata": { 1429 | "deletable": true, 1430 | "editable": true 1431 | }, 1432 | "source": [ 1433 | "# Basic variable restore" 1434 | ] 1435 | }, 1436 | { 1437 | "cell_type": "code", 1438 | "execution_count": 9, 1439 | "metadata": { 1440 | "collapsed": false, 1441 | "deletable": true, 1442 | "editable": true 1443 | }, 1444 | "outputs": [], 1445 | "source": [ 1446 | "weight = restore_with_var_dict(\"Variable\", var_dict)\n", 1447 | "bias = restore_with_var_dict(\"Variable_1\", var_dict)" 1448 | ] 1449 | }, 1450 | { 1451 | "cell_type": "markdown", 1452 | "metadata": { 1453 | "deletable": true, 1454 | "editable": true 1455 | }, 1456 | 
"source": [ 1457 | "# LSTM variable restore" 1458 | ] 1459 | }, 1460 | { 1461 | "cell_type": "code", 1462 | "execution_count": 10, 1463 | "metadata": { 1464 | "collapsed": true, 1465 | "deletable": true, 1466 | "editable": true 1467 | }, 1468 | "outputs": [], 1469 | "source": [ 1470 | "train_x = tf.placeholder('float', [None, data_size,1 ])\n", 1471 | "train_y = tf.placeholder('float', [None,1])\n", 1472 | "\n", 1473 | "train_x_temp = tf.transpose(train_x, [1,0,2])\n", 1474 | "train_x_temp = tf.reshape(train_x_temp, [-1,1])\n", 1475 | "train_x_temp =tf.split(train_x_temp, num_or_size_splits=int(data_size), axis=0)\n", 1476 | "\n", 1477 | "with tf.variable_scope(\"restore\") as scope:\n", 1478 | " lstm_cell = tf.contrib.rnn.LSTMCell(rnn_size) # varaible is not created here\n", 1479 | " \n", 1480 | " # Declare same variable with get_variable, name is very important\n", 1481 | " tf.get_variable(name = 'rnn/lstm_cell/weights'\n", 1482 | " , shape = var_dict['rnn/rnn/lstm_cell/weights']['shape']\n", 1483 | " , initializer = tf.constant_initializer(var_dict['rnn/rnn/lstm_cell/weights']['value']))\n", 1484 | " tf.get_variable(name = 'rnn/lstm_cell/biases'\n", 1485 | " , shape = var_dict['rnn/rnn/lstm_cell/biases']['shape']\n", 1486 | " , initializer = tf.constant_initializer(var_dict['rnn/rnn/lstm_cell/biases']['value']))\n", 1487 | " \n", 1488 | " # get_variable in here gets the variable declared before \n", 1489 | " scope.reuse_variables()\n", 1490 | " train_outputs, states = tf.contrib.rnn.static_rnn(lstm_cell, train_x_temp, dtype = tf.float32)\n", 1491 | " train_output = tf.matmul(train_outputs[-1], weight) + bias" 1492 | ] 1493 | }, 1494 | { 1495 | "cell_type": "code", 1496 | "execution_count": 11, 1497 | "metadata": { 1498 | "collapsed": false, 1499 | "deletable": true, 1500 | "editable": true 1501 | }, 1502 | "outputs": [ 1503 | { 1504 | "name": "stdout", 1505 | "output_type": "stream", 1506 | "text": [ 1507 | "Collection name : trainable_variables\n", 1508 | 
"Tensor(\"Variable/read:0\", shape=(1, 1), dtype=float32)\n", 1509 | "Tensor(\"Variable_1/read:0\", shape=(1,), dtype=float32)\n", 1510 | "Tensor(\"restore/rnn/lstm_cell/weights/read:0\", shape=(2, 4), dtype=float32)\n", 1511 | "Tensor(\"restore/rnn/lstm_cell/biases/read:0\", shape=(4,), dtype=float32)\n" 1512 | ] 1513 | } 1514 | ], 1515 | "source": [ 1516 | "print_keys(\"trainable_variables\")" 1517 | ] 1518 | }, 1519 | { 1520 | "cell_type": "markdown", 1521 | "metadata": { 1522 | "deletable": true, 1523 | "editable": true 1524 | }, 1525 | "source": [ 1526 | "# Initialize uninitailized variable\n", 1527 | "* In this step, the variables in lstm modules are restored using get_variable()" 1528 | ] 1529 | }, 1530 | { 1531 | "cell_type": "code", 1532 | "execution_count": 12, 1533 | "metadata": { 1534 | "collapsed": false, 1535 | "deletable": true, 1536 | "editable": true 1537 | }, 1538 | "outputs": [], 1539 | "source": [ 1540 | "sess = tf.Session()\n", 1541 | "sess.run(master_initializer(sess))" 1542 | ] 1543 | }, 1544 | { 1545 | "cell_type": "code", 1546 | "execution_count": 13, 1547 | "metadata": { 1548 | "collapsed": false, 1549 | "deletable": true, 1550 | "editable": true 1551 | }, 1552 | "outputs": [ 1553 | { 1554 | "name": "stdout", 1555 | "output_type": "stream", 1556 | "text": [ 1557 | "[[ 11.22189713]\n", 1558 | " [ 11.8597517 ]\n", 1559 | " [ 12.46911812]]\n", 1560 | "[[ 9.]\n", 1561 | " [ 10.]\n", 1562 | " [ 11.]]\n" 1563 | ] 1564 | } 1565 | ], 1566 | "source": [ 1567 | "print(sess.run(train_output, feed_dict = {train_x : x_data}))\n", 1568 | "print(y_data)" 1569 | ] 1570 | } 1571 | ], 1572 | "metadata": { 1573 | "kernelspec": { 1574 | "display_name": "Python 3", 1575 | "language": "python", 1576 | "name": "python3" 1577 | }, 1578 | "language_info": { 1579 | "codemirror_mode": { 1580 | "name": "ipython", 1581 | "version": 3 1582 | }, 1583 | "file_extension": ".py", 1584 | "mimetype": "text/x-python", 1585 | "name": "python", 1586 | "nbconvert_exporter": 
"python", 1587 | "pygments_lexer": "ipython3", 1588 | "version": "3.4.5" 1589 | } 1590 | }, 1591 | "nbformat": 4, 1592 | "nbformat_minor": 2 1593 | } 1594 | -------------------------------------------------------------------------------- /save_app3/rnn_save.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "collapsed": false 8 | }, 9 | "outputs": [], 10 | "source": [ 11 | "from utils import *" 12 | ] 13 | }, 14 | { 15 | "cell_type": "markdown", 16 | "metadata": {}, 17 | "source": [ 18 | "# Data generation" 19 | ] 20 | }, 21 | { 22 | "cell_type": "code", 23 | "execution_count": 2, 24 | "metadata": { 25 | "collapsed": false 26 | }, 27 | "outputs": [ 28 | { 29 | "name": "stdout", 30 | "output_type": "stream", 31 | "text": [ 32 | "(10, 10, 1)\n", 33 | "(10, 1)\n" 34 | ] 35 | } 36 | ], 37 | "source": [ 38 | "# learning data\n", 39 | "rnn_size = 1\n", 40 | "train_steps = 10000\n", 41 | "test_data_size = 10\n", 42 | "\n", 43 | "data_size = 10\n", 44 | "num_data = 10\n", 45 | "\n", 46 | "x_data = []\n", 47 | "y_data = []\n", 48 | "\n", 49 | "normalizer = 1\n", 50 | "\n", 51 | "for i in range(num_data):\n", 52 | " input_temp = []\n", 53 | " for j in range(i,i+data_size):\n", 54 | " input_temp.append(j*normalizer)\n", 55 | " x_data.append(input_temp)\n", 56 | " output_temp = normalizer*(i+data_size)\n", 57 | " y_data.append(output_temp) \n", 58 | "\n", 59 | "x_data = np.array(x_data, dtype = np.float32)\n", 60 | "x_data = np.reshape(x_data, [num_data,data_size,1])\n", 61 | "y_data = np.array(y_data, dtype = np.float32)\n", 62 | "y_data = np.reshape(y_data,[-1,1])\n", 63 | "\n", 64 | "#print(x_data)\n", 65 | "#print(y_data)\n", 66 | "print(x_data.shape)\n", 67 | "print(y_data.shape)" 68 | ] 69 | }, 70 | { 71 | "cell_type": "markdown", 72 | "metadata": {}, 73 | "source": [ 74 | "# Train" 75 | ] 76 | }, 77 | { 78 | "cell_type": "code", 79 | 
"execution_count": 3, 80 | "metadata": { 81 | "collapsed": false 82 | }, 83 | "outputs": [ 84 | { 85 | "name": "stdout", 86 | "output_type": "stream", 87 | "text": [ 88 | "cost = 232.19534301757812\n", 89 | "cost = 203.6514892578125\n", 90 | "cost = 109.11512756347656\n", 91 | "cost = 75.1176986694336\n", 92 | "cost = 50.50107955932617\n", 93 | "cost = 32.66964340209961\n", 94 | "cost = 20.551624298095703\n", 95 | "cost = 13.240986824035645\n", 96 | "cost = 9.616631507873535\n", 97 | "cost = 6.087812423706055\n", 98 | "cost = 3.693427562713623\n" 99 | ] 100 | } 101 | ], 102 | "source": [ 103 | "# train\n", 104 | "train_x = tf.placeholder('float', [None, data_size,1 ])\n", 105 | "train_y = tf.placeholder('float', [None,1])\n", 106 | "\n", 107 | "train_x_temp = tf.transpose(train_x, [1,0,2])\n", 108 | "train_x_temp = tf.reshape(train_x_temp, [-1,1])\n", 109 | "train_x_temp =tf.split(train_x_temp, num_or_size_splits=int(data_size), axis=0)\n", 110 | "\n", 111 | "layer = {'weights':tf.Variable(tf.random_normal([rnn_size, 1])),\n", 112 | " 'biases':tf.Variable(tf.random_normal([1]))}\n", 113 | " \n", 114 | "with tf.variable_scope(\"rnn\") as scope:\n", 115 | " lstm_cell = tf.contrib.rnn.LSTMCell(rnn_size)\n", 116 | " train_outputs, train_states = tf.contrib.rnn.static_rnn(lstm_cell, train_x_temp, dtype=tf.float32) \n", 117 | " train_output = tf.matmul(train_outputs[-1],layer['weights']) + layer['biases']\n", 118 | "\n", 119 | "error = tf.reduce_mean(tf.square(train_output-train_y))\n", 120 | "optimizer = tf.train.AdamOptimizer().minimize(error)\n", 121 | "\n", 122 | "sess = tf.Session()\n", 123 | "sess.run(tf.global_variables_initializer())\n", 124 | "for i in range(train_steps+1):\n", 125 | " a,c = sess.run([optimizer, error],feed_dict = {train_x : x_data, train_y : y_data})\n", 126 | " if i%1000==0:\n", 127 | " print(\"cost = {}\".format(c))" 128 | ] 129 | }, 130 | { 131 | "cell_type": "markdown", 132 | "metadata": {}, 133 | "source": [ 134 | "# Train result" 135 | ] 
136 | }, 137 | { 138 | "cell_type": "code", 139 | "execution_count": 4, 140 | "metadata": { 141 | "collapsed": false 142 | }, 143 | "outputs": [ 144 | { 145 | "name": "stdout", 146 | "output_type": "stream", 147 | "text": [ 148 | "[[ 11.85977364]\n", 149 | " [ 12.46920204]\n", 150 | " [ 13.0351553 ]\n", 151 | " [ 13.54467297]\n", 152 | " [ 13.98787403]\n", 153 | " [ 14.35908318]\n", 154 | " [ 14.65758991]\n", 155 | " [ 14.88770676]\n", 156 | " [ 15.05791187]\n", 157 | " [ 15.17908382]]\n" 158 | ] 159 | } 160 | ], 161 | "source": [ 162 | "print(sess.run(train_output, feed_dict = {train_x : x_data}))" 163 | ] 164 | }, 165 | { 166 | "cell_type": "markdown", 167 | "metadata": {}, 168 | "source": [ 169 | "# Save" 170 | ] 171 | }, 172 | { 173 | "cell_type": "code", 174 | "execution_count": 5, 175 | "metadata": { 176 | "collapsed": false 177 | }, 178 | "outputs": [], 179 | "source": [ 180 | "saver = tf.train.Saver(tf.trainable_variables())\n", 181 | "saver.save(sess, './save/rnn')\n", 182 | "sess.close()" 183 | ] 184 | } 185 | ], 186 | "metadata": { 187 | "kernelspec": { 188 | "display_name": "Python 3", 189 | "language": "python", 190 | "name": "python3" 191 | }, 192 | "language_info": { 193 | "codemirror_mode": { 194 | "name": "ipython", 195 | "version": 3 196 | }, 197 | "file_extension": ".py", 198 | "mimetype": "text/x-python", 199 | "name": "python", 200 | "nbconvert_exporter": "python", 201 | "pygments_lexer": "ipython3", 202 | "version": "3.4.5" 203 | } 204 | }, 205 | "nbformat": 4, 206 | "nbformat_minor": 2 207 | } 208 | -------------------------------------------------------------------------------- /save_app3/save/checkpoint: -------------------------------------------------------------------------------- 1 | model_checkpoint_path: "rnn" 2 | all_model_checkpoint_paths: "rnn" 3 | -------------------------------------------------------------------------------- /save_app3/save/rnn.data-00000-of-00001: 
import numpy as np
import tensorflow as tf
import pandas as pd
import sys
import pprint


def restore_with_var_dict(key, var_dict):
    """Recreate a trainable variable from a saved var_dict entry.

    Args:
        key: op-name of the variable to recreate (e.g. 'Variable').
        var_dict: mapping produced by get_var_dict:
            {op_name: {'shape': list, 'value': ndarray}}.

    Returns:
        The new tf.get_variable, initialized to the saved value, or
        None (after printing a message) when `key` is not in var_dict.
    """
    try:
        spec = var_dict[key]
    except KeyError:
        print("No such string")
        return None
    return tf.get_variable(
        key,
        shape=spec['shape'],
        initializer=tf.constant_initializer(spec['value']))


def get_var_dict(sess, string):
    """Snapshot every variable in collection `string`.

    Args:
        sess: active tf.Session used to evaluate each variable.
        string: collection name (e.g. 'trainable_variables').

    Returns:
        {op_name: {'shape': [...], 'value': ndarray}} for each variable.
    """
    # Iterate the collection directly instead of probing indices until
    # IndexError — same order, same contents, no exception control flow.
    var_dict = {}
    for var in tf.get_collection(string):
        var_dict[var.op.name] = {
            'shape': var.get_shape().as_list(),
            'value': sess.run(var),
        }
    return var_dict


def master_initializer(sess):
    """Build an init op covering only not-yet-initialized global variables.

    Evaluating an uninitialized variable raises FailedPreconditionError;
    we use that as the (TF 1.x idiomatic) probe so already-restored
    variables are left untouched.
    """
    uninitialized_variables = []
    for v in tf.global_variables():
        try:
            sess.run(v)  # raises iff v has no value yet
        except tf.errors.FailedPreconditionError:
            uninitialized_variables.append(v)
    return tf.variables_initializer(uninitialized_variables)


def print_keys(string):
    """Print every tensor registered under collection `string`."""
    print("Collection name : {}".format(string))
    for item in tf.get_collection(string):
        print(item)


def get_tensor_by_name(string):
    """Return the global variable whose full name equals `string`.

    Args:
        string: variable name including output index, e.g. 'v1:0'.

    Returns:
        The matching tf.Variable, or None (after printing a message)
        when no global variable has that name.
    """
    for v in tf.global_variables():
        if v.name == string:
            return v
    print("No such tensor")
    return None


def print_nodes(graph):
    """Print the name of every node in `graph`'s GraphDef."""
    print("Graph : {}".format(graph))
    for node in graph.as_graph_def().node:
        print(node.name)


def print_graph_properties(graph):
    """Print a handful of introspection properties of a tf.Graph."""
    print("building_function : {}".format(graph.building_function))
    print("finalized : {}".format(graph.finalized))
    print("graph_def_versions : {}".format(graph.graph_def_versions))
    print("seed : {}".format(graph.seed))
    print("version : {}".format(graph.version))
{}\".format(string))\n", 33 | " i = 0\n", 34 | " while True:\n", 35 | " try:\n", 36 | " print(tf.get_collection(string)[i])\n", 37 | " i+=1\n", 38 | " except IndexError:\n", 39 | " break;\n", 40 | "\n", 41 | "def get_tensor_by_name(string):\n", 42 | " i = 0\n", 43 | " while True:\n", 44 | " try:\n", 45 | " if tf.global_variables()[i].name == string:\n", 46 | " return tf.global_variables()[i]\n", 47 | " i+=1\n", 48 | " except IndexError:\n", 49 | " print(\"No such tensor\")\n", 50 | " return None" 51 | ] 52 | }, 53 | { 54 | "cell_type": "code", 55 | "execution_count": 3, 56 | "metadata": { 57 | "collapsed": false 58 | }, 59 | "outputs": [ 60 | { 61 | "name": "stdout", 62 | "output_type": "stream", 63 | "text": [ 64 | "tf.GraphKeys.GLOBAL_VARIABLES : variables\n", 65 | "\n", 66 | "tf.GraphKeys.TRAINABLE_VARIABLES : trainable_variables\n", 67 | "\n" 68 | ] 69 | } 70 | ], 71 | "source": [ 72 | "print(\"tf.GraphKeys.GLOBAL_VARIABLES : {}\".format(tf.GraphKeys.GLOBAL_VARIABLES))\n", 73 | "print(type(tf.GraphKeys.GLOBAL_VARIABLES))\n", 74 | "print(\"tf.GraphKeys.TRAINABLE_VARIABLES : {}\".format(tf.GraphKeys.TRAINABLE_VARIABLES))\n", 75 | "print(type(tf.GraphKeys.TRAINABLE_VARIABLES))" 76 | ] 77 | }, 78 | { 79 | "cell_type": "markdown", 80 | "metadata": {}, 81 | "source": [ 82 | "# Variables generation\n", 83 | " v3 : not trainable\n", 84 | " v4 : trainable" 85 | ] 86 | }, 87 | { 88 | "cell_type": "code", 89 | "execution_count": 4, 90 | "metadata": { 91 | "collapsed": true 92 | }, 93 | "outputs": [], 94 | "source": [ 95 | "v3 = tf.Variable(1.0, name=\"v3\", trainable=False )\n", 96 | "v4 = tf.Variable(2.0, name=\"v4\")" 97 | ] 98 | }, 99 | { 100 | "cell_type": "markdown", 101 | "metadata": {}, 102 | "source": [ 103 | "# Before restoration\n", 104 | "-Everything are pointers(?)" 105 | ] 106 | }, 107 | { 108 | "cell_type": "code", 109 | "execution_count": 5, 110 | "metadata": { 111 | "collapsed": false 112 | }, 113 | "outputs": [ 114 | { 115 | "name": "stdout", 116 | 
"output_type": "stream", 117 | "text": [ 118 | "tf.global_variables()\n", 119 | "[, ]\n", 120 | "tf.trainable_variables()\n", 121 | "[]\n", 122 | "tf.get_collection(\"variables\")\n", 123 | "[, ]\n", 124 | "tf.get_collection(\"trainable_variable\")\n", 125 | "[]\n", 126 | "Collection name : variables\n", 127 | "Tensor(\"v3/read:0\", shape=(), dtype=float32)\n", 128 | "Tensor(\"v4/read:0\", shape=(), dtype=float32)\n", 129 | "Collection name : trainable_variables\n", 130 | "Tensor(\"v4/read:0\", shape=(), dtype=float32)\n" 131 | ] 132 | } 133 | ], 134 | "source": [ 135 | "print(\"tf.global_variables()\")\n", 136 | "print(tf.global_variables())\n", 137 | "print(\"tf.trainable_variables()\")\n", 138 | "print(tf.trainable_variables())\n", 139 | "print('tf.get_collection(\"variables\")')\n", 140 | "print(tf.get_collection(\"variables\"))\n", 141 | "print('tf.get_collection(\"trainable_variable\")')\n", 142 | "print(tf.get_collection(\"trainable_variables\"))\n", 143 | "print_keys(\"variables\")\n", 144 | "print_keys(\"trainable_variables\")" 145 | ] 146 | }, 147 | { 148 | "cell_type": "markdown", 149 | "metadata": {}, 150 | "source": [ 151 | "# Restoration" 152 | ] 153 | }, 154 | { 155 | "cell_type": "code", 156 | "execution_count": 6, 157 | "metadata": { 158 | "collapsed": true 159 | }, 160 | "outputs": [], 161 | "source": [ 162 | "sess = tf.Session()\n", 163 | "saver = tf.train.import_meta_graph('./save/model.meta')" 164 | ] 165 | }, 166 | { 167 | "cell_type": "markdown", 168 | "metadata": {}, 169 | "source": [ 170 | "# After restoration" 171 | ] 172 | }, 173 | { 174 | "cell_type": "code", 175 | "execution_count": 7, 176 | "metadata": { 177 | "collapsed": false 178 | }, 179 | "outputs": [ 180 | { 181 | "name": "stdout", 182 | "output_type": "stream", 183 | "text": [ 184 | "Collection name : variables\n", 185 | "Tensor(\"v3/read:0\", shape=(), dtype=float32)\n", 186 | "Tensor(\"v4/read:0\", shape=(), dtype=float32)\n", 187 | "Tensor(\"v1/read:0\", shape=(), 
dtype=float32)\n", 188 | "Tensor(\"v2/read:0\", shape=(), dtype=float32)\n", 189 | "Collection name : trainable_variables\n", 190 | "Tensor(\"v4/read:0\", shape=(), dtype=float32)\n", 191 | "Tensor(\"v1/read:0\", shape=(), dtype=float32)\n", 192 | "Tensor(\"v2/read:0\", shape=(), dtype=float32)\n" 193 | ] 194 | } 195 | ], 196 | "source": [ 197 | "print_keys(\"variables\")\n", 198 | "print_keys(\"trainable_variables\")" 199 | ] 200 | }, 201 | { 202 | "cell_type": "code", 203 | "execution_count": 8, 204 | "metadata": { 205 | "collapsed": false 206 | }, 207 | "outputs": [ 208 | { 209 | "name": "stdout", 210 | "output_type": "stream", 211 | "text": [ 212 | "v3 : 1.0\n", 213 | "v4 : 2.0\n", 214 | "v1 : 1.3200000524520874\n", 215 | "v2 : 1.3300000429153442\n" 216 | ] 217 | } 218 | ], 219 | "source": [ 220 | "sess.run(master_initializer())\n", 221 | "saver.restore(sess, \"./save/model\")\n", 222 | "print(\"v3 : {}\".format(sess.run(v3)))\n", 223 | "print(\"v4 : {}\".format(sess.run(v4)))\n", 224 | "\n", 225 | "v1_restore = get_tensor_by_name(\"v1:0\")\n", 226 | "v2_restore = get_tensor_by_name(\"v2:0\")\n", 227 | "print(\"v1 : {}\".format(sess.run(v1_restore)))\n", 228 | "print(\"v2 : {}\".format(sess.run(v2_restore)))" 229 | ] 230 | }, 231 | { 232 | "cell_type": "markdown", 233 | "metadata": {}, 234 | "source": [ 235 | "# Other method to visualize variables " 236 | ] 237 | }, 238 | { 239 | "cell_type": "code", 240 | "execution_count": 9, 241 | "metadata": { 242 | "collapsed": false 243 | }, 244 | "outputs": [ 245 | { 246 | "name": "stdout", 247 | "output_type": "stream", 248 | "text": [ 249 | "['v4:0', 'v1:0', 'v2:0']\n", 250 | "Tensor(\"v3/read:0\", shape=(), dtype=float32)\n", 251 | "Tensor(\"v4/read:0\", shape=(), dtype=float32)\n", 252 | "Tensor(\"v1/read:0\", shape=(), dtype=float32)\n", 253 | "Tensor(\"v2/read:0\", shape=(), dtype=float32)\n" 254 | ] 255 | } 256 | ], 257 | "source": [ 258 | "print([v.name for v in tf.trainable_variables()])\n", 259 | "\n", 260 | 
"for v in tf.global_variables():\n", 261 | " print(v)" 262 | ] 263 | } 264 | ], 265 | "metadata": { 266 | "kernelspec": { 267 | "display_name": "Python 3", 268 | "language": "python", 269 | "name": "python3" 270 | }, 271 | "language_info": { 272 | "codemirror_mode": { 273 | "name": "ipython", 274 | "version": 3 275 | }, 276 | "file_extension": ".py", 277 | "mimetype": "text/x-python", 278 | "name": "python", 279 | "nbconvert_exporter": "python", 280 | "pygments_lexer": "ipython3", 281 | "version": "3.4.5" 282 | } 283 | }, 284 | "nbformat": 4, 285 | "nbformat_minor": 2 286 | } 287 | -------------------------------------------------------------------------------- /save_basic/.ipynb_checkpoints/save-checkpoint.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "collapsed": true, 8 | "deletable": true, 9 | "editable": true 10 | }, 11 | "outputs": [], 12 | "source": [ 13 | "import tensorflow as tf" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": 2, 19 | "metadata": { 20 | "collapsed": true, 21 | "deletable": true, 22 | "editable": true 23 | }, 24 | "outputs": [], 25 | "source": [ 26 | "def save_model(save_path):\n", 27 | " saver = tf.train.Saver()\n", 28 | " saver.save(sess, save_path)" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": 3, 34 | "metadata": { 35 | "collapsed": false, 36 | "deletable": true, 37 | "editable": true 38 | }, 39 | "outputs": [ 40 | { 41 | "name": "stdout", 42 | "output_type": "stream", 43 | "text": [ 44 | "v1 : 1.3200000524520874\n", 45 | "v2 : 1.3300000429153442\n" 46 | ] 47 | } 48 | ], 49 | "source": [ 50 | "v1 = tf.Variable(1.32, name=\"v1\")\n", 51 | "v2 = tf.Variable(1.33, name=\"v2\")\n", 52 | "\n", 53 | "init = tf.global_variables_initializer()\n", 54 | "\n", 55 | "save_path=\"./save/model\"\n", 56 | "\n", 57 | "sess = tf.Session()\n", 58 | 
"sess.run(init)\n", 59 | "\n", 60 | "print(\"v1 : {}\".format(sess.run(v1)))\n", 61 | "print(\"v2 : {}\".format(sess.run(v2)))\n", 62 | "\n", 63 | "save_model(save_path) " 64 | ] 65 | } 66 | ], 67 | "metadata": { 68 | "kernelspec": { 69 | "display_name": "Python 3", 70 | "language": "python", 71 | "name": "python3" 72 | }, 73 | "language_info": { 74 | "codemirror_mode": { 75 | "name": "ipython", 76 | "version": 3 77 | }, 78 | "file_extension": ".py", 79 | "mimetype": "text/x-python", 80 | "name": "python", 81 | "nbconvert_exporter": "python", 82 | "pygments_lexer": "ipython3", 83 | "version": "3.4.5" 84 | } 85 | }, 86 | "nbformat": 4, 87 | "nbformat_minor": 2 88 | } 89 | -------------------------------------------------------------------------------- /save_basic/README.md: -------------------------------------------------------------------------------- 1 | # Environment 2 | python = 3.4 3 | tensorflow 1.0.1 4 | 5 | # Object 6 | Basis for save and restore 7 | -------------------------------------------------------------------------------- /save_basic/restore.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "collapsed": true, 8 | "deletable": true, 9 | "editable": true 10 | }, 11 | "outputs": [], 12 | "source": [ 13 | "import tensorflow as tf" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": 2, 19 | "metadata": { 20 | "collapsed": true, 21 | "deletable": true, 22 | "editable": true 23 | }, 24 | "outputs": [], 25 | "source": [ 26 | "def master_initializer():\n", 27 | " uninitailized_variables=[] \n", 28 | " for v in tf.global_variables():\n", 29 | " try :\n", 30 | " sess.run(v)\n", 31 | " except tf.errors.FailedPreconditionError:\n", 32 | " uninitailized_variables.append(v)\n", 33 | " return tf.variables_initializer(uninitailized_variables)\n", 34 | "\n", 35 | "def print_keys(string):\n", 36 | " 
print(\"Collection name : {}\".format(string))\n", 37 | " i = 0\n", 38 | " while True:\n", 39 | " try:\n", 40 | " print(tf.get_collection(string)[i])\n", 41 | " i+=1\n", 42 | " except IndexError:\n", 43 | " break;\n", 44 | "\n", 45 | "def get_tensor_by_name(string):\n", 46 | " i = 0\n", 47 | " while True:\n", 48 | " try:\n", 49 | " if tf.global_variables()[i].name == string:\n", 50 | " return tf.global_variables()[i]\n", 51 | " i+=1\n", 52 | " except IndexError:\n", 53 | " print(\"No such tensor\")\n", 54 | " return None" 55 | ] 56 | }, 57 | { 58 | "cell_type": "code", 59 | "execution_count": 3, 60 | "metadata": { 61 | "collapsed": false, 62 | "deletable": true, 63 | "editable": true 64 | }, 65 | "outputs": [ 66 | { 67 | "name": "stdout", 68 | "output_type": "stream", 69 | "text": [ 70 | "tf.GraphKeys.GLOBAL_VARIABLES : variables\n", 71 | "\n", 72 | "tf.GraphKeys.TRAINABLE_VARIABLES : trainable_variables\n", 73 | "\n" 74 | ] 75 | } 76 | ], 77 | "source": [ 78 | "print(\"tf.GraphKeys.GLOBAL_VARIABLES : {}\".format(tf.GraphKeys.GLOBAL_VARIABLES))\n", 79 | "print(type(tf.GraphKeys.GLOBAL_VARIABLES))\n", 80 | "print(\"tf.GraphKeys.TRAINABLE_VARIABLES : {}\".format(tf.GraphKeys.TRAINABLE_VARIABLES))\n", 81 | "print(type(tf.GraphKeys.TRAINABLE_VARIABLES))" 82 | ] 83 | }, 84 | { 85 | "cell_type": "markdown", 86 | "metadata": { 87 | "deletable": true, 88 | "editable": true 89 | }, 90 | "source": [ 91 | "# Variables generation\n", 92 | " v3 : not trainable\n", 93 | " v4 : trainable" 94 | ] 95 | }, 96 | { 97 | "cell_type": "code", 98 | "execution_count": 4, 99 | "metadata": { 100 | "collapsed": true, 101 | "deletable": true, 102 | "editable": true 103 | }, 104 | "outputs": [], 105 | "source": [ 106 | "v3 = tf.Variable(1.0, name=\"v3\", trainable=False )\n", 107 | "v4 = tf.Variable(2.0, name=\"v4\")" 108 | ] 109 | }, 110 | { 111 | "cell_type": "markdown", 112 | "metadata": { 113 | "deletable": true, 114 | "editable": true 115 | }, 116 | "source": [ 117 | "# Before 
restoration\n", 118 | "-Everything are pointers(?)" 119 | ] 120 | }, 121 | { 122 | "cell_type": "code", 123 | "execution_count": 5, 124 | "metadata": { 125 | "collapsed": false, 126 | "deletable": true, 127 | "editable": true 128 | }, 129 | "outputs": [ 130 | { 131 | "name": "stdout", 132 | "output_type": "stream", 133 | "text": [ 134 | "tf.global_variables()\n", 135 | "[, ]\n", 136 | "tf.trainable_variables()\n", 137 | "[]\n", 138 | "tf.get_collection(\"variables\")\n", 139 | "[, ]\n", 140 | "tf.get_collection(\"trainable_variable\")\n", 141 | "[]\n", 142 | "Collection name : variables\n", 143 | "Tensor(\"v3/read:0\", shape=(), dtype=float32)\n", 144 | "Tensor(\"v4/read:0\", shape=(), dtype=float32)\n", 145 | "Collection name : trainable_variables\n", 146 | "Tensor(\"v4/read:0\", shape=(), dtype=float32)\n" 147 | ] 148 | } 149 | ], 150 | "source": [ 151 | "print(\"tf.global_variables()\")\n", 152 | "print(tf.global_variables())\n", 153 | "print(\"tf.trainable_variables()\")\n", 154 | "print(tf.trainable_variables())\n", 155 | "print('tf.get_collection(\"variables\")')\n", 156 | "print(tf.get_collection(\"variables\"))\n", 157 | "print('tf.get_collection(\"trainable_variable\")')\n", 158 | "print(tf.get_collection(\"trainable_variables\"))\n", 159 | "print_keys(\"variables\")\n", 160 | "print_keys(\"trainable_variables\")" 161 | ] 162 | }, 163 | { 164 | "cell_type": "markdown", 165 | "metadata": { 166 | "deletable": true, 167 | "editable": true 168 | }, 169 | "source": [ 170 | "# Restoration" 171 | ] 172 | }, 173 | { 174 | "cell_type": "code", 175 | "execution_count": 6, 176 | "metadata": { 177 | "collapsed": true, 178 | "deletable": true, 179 | "editable": true 180 | }, 181 | "outputs": [], 182 | "source": [ 183 | "sess = tf.Session()\n", 184 | "saver = tf.train.import_meta_graph('./save/model.meta')" 185 | ] 186 | }, 187 | { 188 | "cell_type": "markdown", 189 | "metadata": { 190 | "deletable": true, 191 | "editable": true 192 | }, 193 | "source": [ 194 | "# After 
restoration" 195 | ] 196 | }, 197 | { 198 | "cell_type": "code", 199 | "execution_count": 7, 200 | "metadata": { 201 | "collapsed": false, 202 | "deletable": true, 203 | "editable": true 204 | }, 205 | "outputs": [ 206 | { 207 | "name": "stdout", 208 | "output_type": "stream", 209 | "text": [ 210 | "Collection name : variables\n", 211 | "Tensor(\"v3/read:0\", shape=(), dtype=float32)\n", 212 | "Tensor(\"v4/read:0\", shape=(), dtype=float32)\n", 213 | "Tensor(\"v1/read:0\", shape=(), dtype=float32)\n", 214 | "Tensor(\"v2/read:0\", shape=(), dtype=float32)\n", 215 | "Collection name : trainable_variables\n", 216 | "Tensor(\"v4/read:0\", shape=(), dtype=float32)\n", 217 | "Tensor(\"v1/read:0\", shape=(), dtype=float32)\n", 218 | "Tensor(\"v2/read:0\", shape=(), dtype=float32)\n" 219 | ] 220 | } 221 | ], 222 | "source": [ 223 | "print_keys(\"variables\")\n", 224 | "print_keys(\"trainable_variables\")" 225 | ] 226 | }, 227 | { 228 | "cell_type": "code", 229 | "execution_count": 8, 230 | "metadata": { 231 | "collapsed": false, 232 | "deletable": true, 233 | "editable": true 234 | }, 235 | "outputs": [ 236 | { 237 | "name": "stdout", 238 | "output_type": "stream", 239 | "text": [ 240 | "v3 : 1.0\n", 241 | "v4 : 2.0\n", 242 | "v1 : 1.3200000524520874\n", 243 | "v2 : 1.3300000429153442\n" 244 | ] 245 | } 246 | ], 247 | "source": [ 248 | "sess.run(master_initializer())\n", 249 | "saver.restore(sess, \"./save/model\")\n", 250 | "print(\"v3 : {}\".format(sess.run(v3)))\n", 251 | "print(\"v4 : {}\".format(sess.run(v4)))\n", 252 | "\n", 253 | "v1_restore = get_tensor_by_name(\"v1:0\")\n", 254 | "v2_restore = get_tensor_by_name(\"v2:0\")\n", 255 | "print(\"v1 : {}\".format(sess.run(v1_restore)))\n", 256 | "print(\"v2 : {}\".format(sess.run(v2_restore)))" 257 | ] 258 | }, 259 | { 260 | "cell_type": "markdown", 261 | "metadata": { 262 | "deletable": true, 263 | "editable": true 264 | }, 265 | "source": [ 266 | "# Bonus restoration with get_variable " 267 | ] 268 | }, 269 | { 270 | 
"cell_type": "code", 271 | "execution_count": 9, 272 | "metadata": { 273 | "collapsed": false, 274 | "deletable": true, 275 | "editable": true 276 | }, 277 | "outputs": [], 278 | "source": [ 279 | "with tf.variable_scope(\"hi\"):\n", 280 | " temp = tf.get_collection(\"trainable_variables\")[1]\n", 281 | " temp2 = tf.get_variable(\"v1_restore\", shape = temp.get_shape(), initializer = tf.constant_initializer(sess.run(temp)))\n", 282 | " tf.get_variable_scope().reuse_variables()\n", 283 | " v1_restore2 = tf.get_variable(name=\"v1_restore\", shape = None)" 284 | ] 285 | }, 286 | { 287 | "cell_type": "code", 288 | "execution_count": 10, 289 | "metadata": { 290 | "collapsed": false, 291 | "deletable": true, 292 | "editable": true 293 | }, 294 | "outputs": [ 295 | { 296 | "name": "stdout", 297 | "output_type": "stream", 298 | "text": [ 299 | "v1 : 1.3200000524520874\n" 300 | ] 301 | } 302 | ], 303 | "source": [ 304 | "sess.run(master_initializer())\n", 305 | "print(\"v1 : {}\".format(sess.run(v1_restore2)))" 306 | ] 307 | }, 308 | { 309 | "cell_type": "markdown", 310 | "metadata": { 311 | "deletable": true, 312 | "editable": true 313 | }, 314 | "source": [ 315 | "# Other method to visualize variables " 316 | ] 317 | }, 318 | { 319 | "cell_type": "code", 320 | "execution_count": 11, 321 | "metadata": { 322 | "collapsed": false, 323 | "deletable": true, 324 | "editable": true 325 | }, 326 | "outputs": [ 327 | { 328 | "name": "stdout", 329 | "output_type": "stream", 330 | "text": [ 331 | "['v4:0', 'v1:0', 'v2:0', 'hi/v1_restore:0']\n", 332 | "Tensor(\"v3/read:0\", shape=(), dtype=float32)\n", 333 | "Tensor(\"v4/read:0\", shape=(), dtype=float32)\n", 334 | "Tensor(\"v1/read:0\", shape=(), dtype=float32)\n", 335 | "Tensor(\"v2/read:0\", shape=(), dtype=float32)\n", 336 | "Tensor(\"hi/v1_restore/read:0\", shape=(), dtype=float32)\n" 337 | ] 338 | } 339 | ], 340 | "source": [ 341 | "print([v.name for v in tf.trainable_variables()])\n", 342 | "\n", 343 | "for v in 
tf.global_variables():\n", 344 | " print(v)" 345 | ] 346 | } 347 | ], 348 | "metadata": { 349 | "kernelspec": { 350 | "display_name": "Python 3", 351 | "language": "python", 352 | "name": "python3" 353 | }, 354 | "language_info": { 355 | "codemirror_mode": { 356 | "name": "ipython", 357 | "version": 3 358 | }, 359 | "file_extension": ".py", 360 | "mimetype": "text/x-python", 361 | "name": "python", 362 | "nbconvert_exporter": "python", 363 | "pygments_lexer": "ipython3", 364 | "version": "3.6.0" 365 | } 366 | }, 367 | "nbformat": 4, 368 | "nbformat_minor": 2 369 | } 370 | -------------------------------------------------------------------------------- /save_basic/save.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 1, 6 | "metadata": { 7 | "collapsed": true, 8 | "deletable": true, 9 | "editable": true 10 | }, 11 | "outputs": [], 12 | "source": [ 13 | "import tensorflow as tf" 14 | ] 15 | }, 16 | { 17 | "cell_type": "code", 18 | "execution_count": 2, 19 | "metadata": { 20 | "collapsed": true, 21 | "deletable": true, 22 | "editable": true 23 | }, 24 | "outputs": [], 25 | "source": [ 26 | "def save_model(save_path):\n", 27 | " saver = tf.train.Saver()\n", 28 | " saver.save(sess, save_path)" 29 | ] 30 | }, 31 | { 32 | "cell_type": "code", 33 | "execution_count": 3, 34 | "metadata": { 35 | "collapsed": false, 36 | "deletable": true, 37 | "editable": true 38 | }, 39 | "outputs": [ 40 | { 41 | "name": "stdout", 42 | "output_type": "stream", 43 | "text": [ 44 | "v1 : 1.3200000524520874\n", 45 | "v2 : 1.3300000429153442\n" 46 | ] 47 | } 48 | ], 49 | "source": [ 50 | "v1 = tf.Variable(1.32, name=\"v1\")\n", 51 | "v2 = tf.Variable(1.33, name=\"v2\")\n", 52 | "\n", 53 | "init = tf.global_variables_initializer()\n", 54 | "\n", 55 | "save_path=\"./save/model\"\n", 56 | "\n", 57 | "sess = tf.Session()\n", 58 | "sess.run(init)\n", 59 | "\n", 60 | "print(\"v1 : 
{}\".format(sess.run(v1)))\n", 61 | "print(\"v2 : {}\".format(sess.run(v2)))\n", 62 | "\n", 63 | "save_model(save_path) " 64 | ] 65 | } 66 | ], 67 | "metadata": { 68 | "kernelspec": { 69 | "display_name": "Python 3", 70 | "language": "python", 71 | "name": "python3" 72 | }, 73 | "language_info": { 74 | "codemirror_mode": { 75 | "name": "ipython", 76 | "version": 3 77 | }, 78 | "file_extension": ".py", 79 | "mimetype": "text/x-python", 80 | "name": "python", 81 | "nbconvert_exporter": "python", 82 | "pygments_lexer": "ipython3", 83 | "version": "3.4.5" 84 | } 85 | }, 86 | "nbformat": 4, 87 | "nbformat_minor": 2 88 | } 89 | -------------------------------------------------------------------------------- /save_basic/save/checkpoint: -------------------------------------------------------------------------------- 1 | model_checkpoint_path: "model" 2 | all_model_checkpoint_paths: "model" 3 | -------------------------------------------------------------------------------- /save_basic/save/model.data-00000-of-00001: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_basic/save/model.data-00000-of-00001 -------------------------------------------------------------------------------- /save_basic/save/model.index: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_basic/save/model.index -------------------------------------------------------------------------------- /save_basic/save/model.meta: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/save_basic/save/model.meta -------------------------------------------------------------------------------- 
/simple_rnn/README.md: -------------------------------------------------------------------------------- 1 | # Requirement 2 | tensorflow version = 1.0.0 3 | python = 3.4 4 | 5 | # Explanation 6 | Using for loop instead of static_rnn module 7 | -------------------------------------------------------------------------------- /simple_rnn/rnn_with_static_rnn.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | import pandas as pd 4 | import sys 5 | 6 | # learning data 7 | rnn_size = 1 8 | train_steps = 100000 9 | test_data_size = 10 10 | 11 | data_size = 10 12 | num_data = 10 13 | 14 | x_data = [] 15 | y_data = [] 16 | 17 | normalizer = 1 18 | 19 | for i in range(num_data): 20 | input_temp = [] 21 | for j in range(i,i+data_size): 22 | input_temp.append(j*normalizer) 23 | x_data.append(input_temp) 24 | output_temp = normalizer*(i+data_size) 25 | y_data.append(output_temp) 26 | 27 | x_data = np.array(x_data, dtype = np.float32) 28 | x_data = np.reshape(x_data, [num_data,data_size,1]) 29 | y_data = np.array(y_data, dtype = np.float32) 30 | y_data = np.reshape(y_data,[-1,1]) 31 | 32 | print(x_data) 33 | print(y_data) 34 | print(x_data.shape) 35 | print(y_data.shape) 36 | 37 | # train 38 | train_x = tf.placeholder('float', [None, data_size,1 ]) 39 | train_y = tf.placeholder('float', [None,1]) 40 | 41 | train_x_temp = tf.transpose(train_x, [1,0,2]) 42 | train_x_temp = tf.reshape(train_x_temp, [-1,1]) 43 | train_x_temp =tf.split(train_x_temp, num_or_size_splits=int(data_size), axis=0) 44 | 45 | layer = {'weights':tf.Variable(tf.random_normal([rnn_size, 1])), 46 | 'biases':tf.Variable(tf.random_normal([1]))} 47 | 48 | with tf.variable_scope("rnn") as scope: 49 | lstm_cell = tf.contrib.rnn.LSTMCell(rnn_size) 50 | train_outputs, train_states = tf.contrib.rnn.static_rnn(lstm_cell, train_x_temp, dtype=tf.float32) 51 | train_output = tf.matmul(train_outputs[-1],layer['weights']) + layer['biases'] 
52 | 53 | error = tf.reduce_mean(tf.square(train_output-train_y)) 54 | optimizer = tf.train.AdamOptimizer().minimize(error) 55 | 56 | sess = tf.Session() 57 | sess.run(tf.global_variables_initializer()) 58 | for i in range(train_steps+1): 59 | a,c = sess.run([optimizer, error],feed_dict = {train_x : x_data, train_y : y_data}) 60 | if i%500==0: 61 | print("cost = {}".format(c)) 62 | 63 | print(sess.run(train_output, feed_dict = {train_x : x_data})) 64 | 65 | -------------------------------------------------------------------------------- /simple_rnn/rnn_without_static_rnn.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import tensorflow as tf 3 | import pandas as pd 4 | import sys 5 | 6 | # learning data 7 | rnn_size = 1 8 | train_steps = 100000 9 | test_data_size = 10 10 | 11 | data_size = 10 12 | num_data = 10 13 | 14 | x_data = [] 15 | y_data = [] 16 | 17 | normalizer = 1 18 | 19 | for i in range(num_data): 20 | input_temp = [] 21 | for j in range(i,i+data_size): 22 | input_temp.append(j*normalizer) 23 | x_data.append(input_temp) 24 | output_temp = normalizer*(i+data_size) 25 | y_data.append(output_temp) 26 | 27 | x_data = np.array(x_data, dtype = np.float32) 28 | x_data = np.reshape(x_data, [num_data,data_size,1]) 29 | y_data = np.array(y_data, dtype = np.float32) 30 | y_data = np.reshape(y_data,[-1,1]) 31 | 32 | print(x_data) 33 | print(y_data) 34 | print(x_data.shape) 35 | print(y_data.shape) 36 | 37 | batch_size = 1 38 | 39 | sess = tf.Session() 40 | # train 41 | train_x = tf.placeholder('float', [None, data_size, 1 ]) 42 | train_y = tf.placeholder('float', [None, 1]) 43 | 44 | train_x_temp = tf.transpose(train_x, [1,0,2]) 45 | train_x_temp = tf.reshape(train_x_temp, [-1, 1]) 46 | train_x_temp =tf.split(train_x_temp, num_or_size_splits=int(data_size), axis=0) 47 | print(sess.run(tf.shape(train_x_temp), feed_dict = {train_x : x_data})) 48 | 49 | layer = 
{'weights':tf.Variable(tf.random_normal([rnn_size, 1])), 50 | 'biases':tf.Variable(tf.random_normal([1]))} 51 | 52 | lstm_cell = tf.contrib.rnn.LSTMCell(rnn_size) 53 | with tf.variable_scope("rnn") as scope: 54 | state = lstm_cell.zero_state(num_data, tf.float32) 55 | train_outputs = [] 56 | flag = 0 57 | for input_ in train_x_temp: 58 | if flag==0: 59 | flag=1 60 | else : 61 | scope.reuse_variables() 62 | output, state = lstm_cell(input_, state) 63 | train_outputs.append(output) 64 | 65 | train_output = tf.matmul(train_outputs[-1],layer['weights']) + layer['biases'] 66 | 67 | error = tf.reduce_mean(tf.square(train_output-train_y)) 68 | optimizer = tf.train.AdamOptimizer().minimize(error) 69 | 70 | sess.run(tf.global_variables_initializer()) 71 | for i in range(train_steps+1): 72 | a,c = sess.run([optimizer, error],feed_dict = {train_x : x_data, train_y : y_data}) 73 | if i%500==0: 74 | print("cost = {}".format(c)) 75 | 76 | print(sess.run(train_output, feed_dict = {train_x : x_data})) 77 | 78 | -------------------------------------------------------------------------------- /tensorboard/README.md: -------------------------------------------------------------------------------- 1 | # How to use 2 | python practice.py 3 | tensorboard --logdir=./log 4 | -------------------------------------------------------------------------------- /tensorboard/log/events.out.tfevents.1490860755.Frankinstein: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/maestrojeong/tensorflow_basic/964904cbf244e3cfd801685c39cacf170689b5b4/tensorboard/log/events.out.tfevents.1490860755.Frankinstein -------------------------------------------------------------------------------- /tensorboard/practice.py: -------------------------------------------------------------------------------- 1 | # coding: utf-8 2 | import tensorflow as tf 3 | from tensorflow.examples.tutorials.mnist import input_data 4 | 5 | tf.reset_default_graph() 6 | 7 
| mnist = input_data.read_data_sets('../../MNIST_data', one_hot=True) 8 | 9 | batch_size = 100 10 | training_epochs = 5 11 | logs_path = "./log" 12 | 13 | with tf.name_scope('input'): 14 | x = tf.placeholder(tf.float32, shape=[None, 784], name="x-input") 15 | y_ = tf.placeholder(tf.float32, shape=[None, 10], name="y-input") 16 | 17 | with tf.name_scope("weights"): 18 | W = tf.Variable(tf.zeros([784, 10]), name = "weights") 19 | 20 | with tf.name_scope("biases"): 21 | b = tf.Variable(tf.zeros([10]), name = "biases") 22 | 23 | with tf.name_scope("softmax"): 24 | y = tf.nn.softmax(tf.matmul(x,W) + b) 25 | 26 | 27 | with tf.name_scope('cross_entropy'): 28 | cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1])) 29 | 30 | with tf.name_scope('train'): 31 | train_op = tf.train.GradientDescentOptimizer(1e-1).minimize(cross_entropy) 32 | 33 | with tf.name_scope('Accuracy'): 34 | correct_prediction = tf.equal(tf.argmax(y,1), tf.argmax(y_,1)) 35 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) 36 | 37 | tf.summary.scalar("cost_track", cross_entropy) 38 | tf.summary.scalar("accuracy", accuracy) 39 | tf.summary.histogram("weights_hist",W) 40 | tf.summary.histogram("biases_hist",b) 41 | 42 | summary_op = tf.summary.merge_all() 43 | 44 | with tf.Session() as sess: 45 | sess.run(tf.global_variables_initializer()) 46 | writer = tf.summary.FileWriter(logs_path, sess.graph) 47 | 48 | for epoch in range(training_epochs): 49 | batch_count = int(mnist.train.num_examples/batch_size) 50 | for i in range(batch_count): 51 | batch_x, batch_y = mnist.train.next_batch(batch_size) 52 | _, summary = sess.run([train_op, summary_op], feed_dict={x: batch_x, y_: batch_y}) 53 | writer.add_summary(summary, epoch * batch_count + i) 54 | if epoch % 5 == 0: 55 | print ("Epoch: ", epoch) 56 | print ("Accuracy: ", accuracy.eval(feed_dict={x: mnist.test.images, y_: mnist.test.labels})) 57 | print ("done") 58 | 
-------------------------------------------------------------------------------- /tf.app.flags/README.md: -------------------------------------------------------------------------------- 1 | # How to implement 2 | python practice.py --var1 0.03 3 | -------------------------------------------------------------------------------- /tf.app.flags/implement.py: -------------------------------------------------------------------------------- 1 | import os 2 | 3 | for i in range(3): 4 | os.system("python practice.py --var {}".format(i)) 5 | 6 | ''' 7 | 0.0 8 | {'__parsed': True, '__flags': {'var1': 0.0}} 9 | {'var1': 0.0} 10 | 1.0 11 | {'__parsed': True, '__flags': {'var1': 1.0}} 12 | {'var1': 1.0} 13 | 2.0 14 | {'__parsed': True, '__flags': {'var1': 2.0}} 15 | {'var1': 2.0} 16 | ''' 17 | 18 | -------------------------------------------------------------------------------- /tf.app.flags/practice.py: -------------------------------------------------------------------------------- 1 | import tensorflow as tf 2 | 3 | flags = tf.app.flags 4 | conf = flags.FLAGS 5 | flags.DEFINE_float("var1",0.01,"Document") 6 | 7 | print(conf.var1) 8 | print(conf.__dict__) 9 | print(conf.__flags) 10 | 11 | ''' 12 | 0.01 13 | {'__flags': {'var1': 0.01}, '__parsed': True} 14 | {'var1': 0.01} 15 | ''' 16 | --------------------------------------------------------------------------------