├── Damped Cubic oscillator --- 0% noise.ipynb
├── Damped Cubic oscillator --- 1% noise.ipynb
├── Damped Cubic oscillator --- 10% noise --- variable timesteps.ipynb
├── Damped Cubic oscillator --- 10% noise.ipynb
├── Damped Cubic oscillator --- 25% noise.ipynb
├── Damped Cubic oscillator --- 5% noise.ipynb
├── Double Pendulum --- 0% Noise.ipynb
├── Double Pendulum --- 1% Noise.ipynb
├── Double Pendulum --- 10% Noise.ipynb
├── Double Pendulum --- 5% Noise.ipynb
├── Lorenz - Multi run - 5 % noise.ipynb
├── Lorenz --- 0% noise.ipynb
├── Lorenz --- 1% Noise.ipynb
├── Lorenz --- 10% Noise.ipynb
├── Lorenz --- 15% Noise.ipynb
├── Lorenz --- 5% Noise.ipynb
├── Lorenz --- 5% noise --- Long time series.ipynb
├── Navier Stokes ODE --- 0% noise.ipynb
├── Navier Stokes ODE --- 1% Noise.ipynb
├── PODcoefficients_run1.mat
├── README.md
├── pendulum_data.dat
├── utils.py
└── utils_var_step.py

/PODcoefficients_run1.mat:
--------------------------------------------------------------------------------
https://raw.githubusercontent.com/snagcliffs/RKNN/90a14c0945124619aa1c10fa532cc5b351bfd6bd/PODcoefficients_run1.mat
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # RKNN
2 | 
3 | Code used in the paper "Deep learning of dynamics and signal noise decomposition with time-stepping constraints."
--------------------------------------------------------------------------------
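The README does not include a quick-start, so the following is a minimal usage sketch of how the utilities in utils.py (shown below) fit together for fixed-timestep data. It is not part of the repository: the synthetic data, the network size (2 hidden layers of 64 units), num_dt = 10, and lam = 10 are illustrative choices, and only the function names, placeholder keys, and slicing conventions come from utils.py. TensorFlow 1.x is assumed, since the module relies on tf.placeholder and tf.contrib.

import numpy as np
import tensorflow as tf
import utils

# Illustrative synthetic data: a damped oscillator sampled at a fixed step,
# with artificial measurement noise standing in for the notebooks' datasets.
np.random.seed(0)
dt = 0.02
T = np.arange(0, 10, dt)
X_true = np.vstack([np.exp(-0.1 * T) * np.cos(T),
                    -np.exp(-0.1 * T) * np.sin(T)])
Y = X_true + 0.01 * np.std(X_true) * np.random.randn(*X_true.shape)
n, m = Y.shape
num_dt = 10                                   # prediction steps forward and backward

# Initial smoothing-based noise estimate, trainable variables, and the graph
N_hat, X_smooth = utils.approximate_noise(Y, lam=10)
net_params = utils.get_network_variables(n, n_hidden=2, size_hidden=64, N_hat=N_hat)
optimizer, ph = utils.create_computational_graph(n, N_hat, net_params,
                                                 num_dt=num_dt, method='RK4')

# Y_0 holds the interior measurements; the j-th forward/backward targets are the
# same measurements shifted by j+1 samples in either direction.
feed_dict = {ph['Y_0']: Y[:, num_dt:m - num_dt],
             ph['T_0']: T.reshape(1, -1)[:, num_dt:m - num_dt],
             ph['h']: np.array([[dt]])}
for j in range(num_dt):
    feed_dict[ph['true_forward_Y'][j]] = Y[:, num_dt + 1 + j:m - num_dt + 1 + j]
    feed_dict[ph['true_backward_Y'][j]] = Y[:, num_dt - 1 - j:m - num_dt - 1 - j]

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    optimizer.minimize(sess, feed_dict=feed_dict)

    # Learned noise and denoised trajectory after training
    _, _, N = net_params
    X_denoised = Y - sess.run(N)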
/utils.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | from numpy.fft import fft, ifft, fftfreq
4 | 
5 | def RK_timestepper(x,t,f,h,weights,biases,direction='F',method = 'RK4'):
6 |     """
7 |     Explicit Runge-Kutta time integrator. Assumes no time dependence in f
8 |     """
9 | 
10 |     if method == 'RK4_38':
11 |         b = [1/8,3/8,3/8,1/8]
12 |         A = [[],[1/3],[-1/3, 1],[1,-1,1]]
13 | 
14 |     elif method == 'Euler':
15 |         b = [1]
16 |         A = [[]]
17 | 
18 |     elif method == 'Midpoint':
19 |         b = [0,1]
20 |         A = [[],[1/2]]
21 | 
22 |     elif method == 'Heun':
23 |         b = [1/2,1/2]
24 |         A = [[],[1]]
25 | 
26 |     elif method == 'Ralston':
27 |         b = [1/4,3/4]
28 |         A = [[],[2/3]]
29 | 
30 |     elif method == 'RK3':
31 |         b = [1/6,2/3,1/6]
32 |         A = [[],[1/2],[-1,2]]
33 | 
34 |     else:
35 |         b = [1/6,1/3,1/3,1/6]
36 |         A = [[],[1/2],[0, 1/2],[0,0,1]]
37 | 
38 |     steps = len(b)
39 | 
40 |     if direction == 'F':
41 |         K = [f(x, weights, biases)]
42 |         for i in range(1,steps):
43 |             K.append(f(tf.add_n([x]+[h*A[i][j]*K[j] for j in range(i) if A[i][j] != 0]), weights, biases))
44 |     else:
45 |         K = [-f(x, weights, biases)]
46 |         for i in range(1,steps):
47 |             K.append(-f(tf.add_n([x]+[h*A[i][j]*K[j] for j in range(i) if A[i][j] != 0]), weights, biases))
48 | 
49 |     return tf.add_n([x]+[h*b[j]*K[j] for j in range(steps)])
50 | 
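# --- Illustrative aside (not part of utils.py) --------------------------------
# The lists b and A above are the weights and the strictly lower-triangular rows
# of an explicit Butcher tableau; stage i evaluates f at x + h * sum_j A[i][j]*K[j].
# A quick NumPy check that the default ('RK4') tableau reproduces the classic
# fourth-order step for dx/dt = -x, whose exact one-step map is exp(-h):
import numpy as np

def rk_step(f, x, h, A, b):
    # Same recursion as RK_timestepper, but for a plain scalar ODE
    K = [f(x)]
    for i in range(1, len(b)):
        K.append(f(x + h * sum(A[i][j] * K[j] for j in range(i))))
    return x + h * sum(b[j] * K[j] for j in range(len(b)))

b_rk4 = [1/6, 1/3, 1/3, 1/6]
A_rk4 = [[], [1/2], [0, 1/2], [0, 0, 1]]
h_demo = 0.1
x1 = rk_step(lambda x: -x, 1.0, h_demo, A_rk4, b_rk4)
assert abs(x1 - np.exp(-h_demo)) < 1e-6   # local error is O(h^5) for a 4th-order method
# -------------------------------------------------------------------------------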
51 | def RK4_forward(x,t,f,h,weights,biases):
52 |     """
53 |     4th order Runge-Kutta time integrator
54 |     """
55 | 
56 |     return RK_timestepper(x,t,f,h,weights,biases,direction='F',method = 'RK4_classic')
57 | 
58 | def RK4_backward(x,t,f,h,weights,biases):
59 |     """
60 |     4th order Runge-Kutta time integrator - backwards in time
61 |     """
62 | 
63 |     return RK_timestepper(x,t,f,h,weights,biases,direction='B',method = 'RK4_classic')
64 | 
65 | def dense_layer(x, W, b, last = False):
66 |     x = tf.matmul(W,x)
67 |     x = tf.add(x,b)
68 | 
69 |     if last: return x
70 |     else: return tf.nn.elu(x)
71 | 
72 | def simple_net(x, weights, biases):
73 | 
74 |     layers = [x]
75 | 
76 |     for l in range(len(weights)-1):
77 |         layers.append(dense_layer(layers[l], weights[l], biases[l]))
78 | 
79 |     out = dense_layer(layers[-1], weights[-1], biases[-1], last = True)
80 | 
81 |     return out
82 | 
83 | def approximate_noise(Y, lam = 10):
84 | 
85 |     n,m = Y.shape
86 | 
87 |     D = np.zeros((m,m))
88 |     D[0,:4] = [2,-5,4,-1]
89 |     D[m-1,m-4:] = [-1,4,-5,2]
90 | 
91 |     for i in range(1,m-1):
92 |         D[i,i] = -2
93 |         D[i,i+1] = 1
94 |         D[i,i-1] = 1
95 | 
96 |     D = D.dot(D)
97 | 
98 |     X_smooth = np.vstack([np.linalg.solve(np.eye(m) + lam*D.T.dot(D), Y[j,:].reshape(m,1)).reshape(1,m) for j in range(n)])
99 | 
100 |     N_hat = Y-X_smooth
101 | 
102 |     return N_hat, X_smooth
103 | 
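# --- Illustrative aside (not part of utils.py) --------------------------------
# approximate_noise() smooths each row of Y by solving the penalized least
# squares problem  min_x ||x - y||^2 + lam * ||(D.D) x||^2, whose normal
# equations give  x = (I + lam * (D.D)^T (D.D))^{-1} y.  D is the discrete
# second difference, so D.dot(D) penalizes the fourth difference, a rough
# smoothing-spline-like filter.  The residual Y - X_smooth is the initial noise
# estimate N_hat used to seed the trainable noise variable N.  A quick sanity
# check on a noisy sine (demo names are local, not from the repository):
import numpy as np

np.random.seed(0)
t_demo = np.linspace(0, 10, 500)
clean = np.sin(t_demo)
noisy = clean + 0.1 * np.random.randn(t_demo.size)
N_hat_demo, x_smooth_demo = approximate_noise(noisy.reshape(1, -1), lam=10)
# The smoothed signal should sit much closer to the clean one than the data do.
assert np.mean((x_smooth_demo[0] - clean) ** 2) < np.mean((noisy - clean) ** 2)
# -------------------------------------------------------------------------------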
104 | def get_network_variables(n, n_hidden, size_hidden, N_hat):
105 | 
106 |     layer_sizes = [n] + [size_hidden for _ in range(n_hidden)] + [n]
107 |     num_layers = len(layer_sizes)
108 | 
109 |     weights = []
110 |     biases = []
111 | 
112 |     for j in range(1,num_layers):
113 |         weights.append(tf.get_variable("W"+str(j), [layer_sizes[j],layer_sizes[j-1]], \
114 |                        initializer = tf.contrib.layers.xavier_initializer(seed = 1)))
115 |         biases.append(tf.get_variable("b"+str(j), [layer_sizes[j],1], initializer = tf.zeros_initializer()))
116 | 
117 |     N = tf.get_variable("N", initializer = N_hat.astype('float32'))
118 | 
119 |     return (weights, biases, N)
120 | 
121 | def create_computational_graph(n, N_hat, net_params, num_dt = 10, method = 'RK4', gamma = 1e-5, beta = 1e-8, weight_decay = 'exp', decay_const = 0.9):
122 | 
123 |     assert(n == N_hat.shape[0])
124 |     m = N_hat.shape[1]
125 | 
126 |     ###########################################################################
127 |     #
128 |     # Placeholders for initial condition
129 |     #
130 |     ###########################################################################
131 |     Y_0 = tf.placeholder(tf.float32, [n,None], name = "Y_0") # noisy measurements of state
132 |     T_0 = tf.placeholder(tf.float32, [1,None], name = "T_0") # time
133 | 
134 |     ###########################################################################
135 |     #
136 |     # Placeholders for true forward and backward predictions
137 |     #
138 |     ###########################################################################
139 |     true_forward_Y = []
140 |     true_backward_Y = []
141 | 
142 |     for j in range(num_dt):
143 |         true_forward_Y.append(tf.placeholder(tf.float32, [n,None], name = "Y"+str(j+1)+"_true"))
144 |         true_backward_Y.append(tf.placeholder(tf.float32, [n,None], name = "Yn"+str(j+1)+"_true"))
145 | 
146 |     h = tf.placeholder(tf.float32, [1,1], name = "h") # timestep
147 | 
148 |     ###########################################################################
149 |     #
150 |     # Forward and backward predictions of true state
151 |     #
152 |     ###########################################################################
153 | 
154 |     (weights, biases, N) = net_params
155 |     X_0 = tf.subtract(Y_0, tf.slice(N, [0,num_dt],[n,m-2*num_dt])) # estimate of true state
156 | 
157 |     pred_forward_X = [RK_timestepper(X_0, T_0, simple_net, h, weights, biases, method = method)]
158 |     pred_backward_X = [RK_timestepper(X_0, T_0, simple_net, h, weights, biases, method = method, direction = 'B')]
159 | 
160 |     for j in range(1,num_dt):
161 |         pred_forward_X.append(RK_timestepper(pred_forward_X[-1], T_0, simple_net, h, weights, biases, method = method))
162 |         pred_backward_X.append(RK_timestepper(pred_backward_X[-1], T_0, simple_net, h, weights, biases,\
163 |                                method = method, direction = 'B'))
164 | 
165 |     ###########################################################################
166 |     #
167 |     # Forward and backward predictions of measured (noisy) state
168 |     #
169 |     ###########################################################################
170 | 
171 |     pred_forward_Y = [pred_forward_X[j] + tf.slice(N, [0,num_dt+1+j],[n,m-2*num_dt]) for j in range(num_dt)]
172 |     pred_backward_Y = [pred_backward_X[j] + tf.slice(N, [0,num_dt-1-j],[n,m-2*num_dt]) for j in range(num_dt)]
173 | 
174 |     ###########################################################################
175 |     #
176 |     # Set up cost function
177 |     #
178 |     ###########################################################################
179 | 
180 |     if weight_decay == 'linear': output_weights = [(1+j)**-1 for j in range(num_dt)] # linearly decreasing importance
181 |     else: output_weights = [decay_const**j for j in range(num_dt)] # exponentially decreasing importance
182 | 
183 |     forward_fidelity = tf.reduce_sum([w*tf.losses.mean_squared_error(true,pred) \
184 |                        for (w,true,pred) in zip(output_weights,true_forward_Y,pred_forward_Y)])
185 | 
186 |     backward_fidelity = tf.reduce_sum([w*tf.losses.mean_squared_error(true,pred) \
187 |                        for (w,true,pred) in zip(output_weights,true_backward_Y,pred_backward_Y)])
188 | 
189 |     fidelity = tf.add(forward_fidelity, backward_fidelity)
190 | 
191 |     # Regularizer for NN weights
192 |     weights_regularizer = tf.reduce_mean([tf.nn.l2_loss(W) for W in weights])
193 | 
194 |     # Regularizer for explicit noise term
195 |     noise_regularizer = tf.nn.l2_loss(N)
196 | 
197 |     # Weighted sum of individual cost functions
198 |     cost = tf.reduce_sum(fidelity + beta*weights_regularizer + gamma*noise_regularizer)
199 | 
200 |     # L-BFGS-B optimizer via scipy
201 |     optimizer = tf.contrib.opt.ScipyOptimizerInterface(cost, options={'maxiter': 50000,
202 |                                                                       'maxfun': 50000,
203 |                                                                       'ftol': 1e-15,
204 |                                                                       'gtol' : 1e-11,
205 |                                                                       'eps' : 1e-12,
206 |                                                                       'maxls' : 100})
207 | 
208 |     placeholders = {'Y_0': Y_0,
209 |                     'T_0': T_0,
210 |                     'true_forward_Y': true_forward_Y,
211 |                     'true_backward_Y': true_backward_Y,
212 |                     'h': h}
213 | 
214 |     return optimizer, placeholders
215 | 
216 | 
--------------------------------------------------------------------------------
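utils_var_step.py below repeats the same pipeline for non-uniformly sampled data (presumably what the "variable timesteps" notebook uses). Two things change: approximate_noise takes the time vector T and builds its second-difference operator from the actual spacings, and the scalar timestep placeholder h becomes a (1, m-1) placeholder H holding every sampling interval T[1:] - T[:-1], which the graph slices per prediction step. A full, hedged usage sketch for this version is given after the end of the file.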
/utils_var_step.py:
--------------------------------------------------------------------------------
1 | import numpy as np
2 | import tensorflow as tf
3 | from numpy.fft import fft, ifft, fftfreq
4 | 
5 | def RK_timestepper(x,t,f,h,weights,biases,direction='F',method = 'RK4'):
6 |     """
7 |     Explicit Runge-Kutta time integrator. Assumes no time dependence in f
8 |     """
9 | 
10 |     if method == 'RK4_38':
11 |         b = [1/8,3/8,3/8,1/8]
12 |         A = [[],[1/3],[-1/3, 1],[1,-1,1]]
13 | 
14 |     elif method == 'Euler':
15 |         b = [1]
16 |         A = [[]]
17 | 
18 |     elif method == 'Midpoint':
19 |         b = [0,1]
20 |         A = [[],[1/2]]
21 | 
22 |     elif method == 'Heun':
23 |         b = [1/2,1/2]
24 |         A = [[],[1]]
25 | 
26 |     elif method == 'Ralston':
27 |         b = [1/4,3/4]
28 |         A = [[],[2/3]]
29 | 
30 |     elif method == 'RK3':
31 |         b = [1/6,2/3,1/6]
32 |         A = [[],[1/2],[-1,2]]
33 | 
34 |     else:
35 |         b = [1/6,1/3,1/3,1/6]
36 |         A = [[],[1/2],[0, 1/2],[0,0,1]]
37 | 
38 |     steps = len(b)
39 | 
40 |     if direction == 'F':
41 |         K = [f(x, weights, biases)]
42 |         for i in range(1,steps):
43 |             K.append(f(tf.add_n([x]+[h*A[i][j]*K[j] for j in range(i) if A[i][j] != 0]), weights, biases))
44 |     else:
45 |         K = [-f(x, weights, biases)]
46 |         for i in range(1,steps):
47 |             K.append(-f(tf.add_n([x]+[h*A[i][j]*K[j] for j in range(i) if A[i][j] != 0]), weights, biases))
48 | 
49 |     return tf.add_n([x]+[h*b[j]*K[j] for j in range(steps)])
50 | 
51 | def dense_layer(x, W, b, last = False):
52 |     x = tf.matmul(W,x)
53 |     x = tf.add(x,b)
54 | 
55 |     if last: return x
56 |     else: return tf.nn.elu(x)
57 | 
58 | def simple_net(x, weights, biases):
59 | 
60 |     layers = [x]
61 | 
62 |     for l in range(len(weights)-1):
63 |         layers.append(dense_layer(layers[l], weights[l], biases[l]))
64 | 
65 |     out = dense_layer(layers[-1], weights[-1], biases[-1], last = True)
66 | 
67 |     return out
68 | 
69 | def approximate_noise(Y, T, lam = 10):
70 | 
71 |     n,m = Y.shape
72 |     dt = T[1:] - T[:-1]
73 | 
74 |     D = np.zeros((m-2,m))
75 | 
76 |     for i in range(m-2):
77 |         D[i,i] = 2 / (dt[i]*(dt[i]+dt[i+1]))
78 |         D[i,i+1] = -2/(dt[i]*(dt[i]+dt[i+1])) - 2/(dt[i+1]*(dt[i]+dt[i+1]))
79 |         D[i,i+2] = 2 / (dt[i+1]*(dt[i]+dt[i+1]))
80 | 
81 |     X_smooth = np.vstack([np.linalg.solve(np.eye(m) + lam*D.T.dot(D), Y[j,:].reshape(m,1)).reshape(1,m) for j in range(n)])
82 | 
83 |     N_hat = Y-X_smooth
84 | 
85 |     return N_hat, X_smooth
86 | 
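# --- Illustrative aside (not part of utils_var_step.py) -----------------------
# The rows of D above are the standard three-point second-derivative weights on
# a non-uniform grid: for interior node i+1 with spacings dt[i], dt[i+1],
#   f'' ~= 2 f_i / (dt_i (dt_i + dt_{i+1})) - 2 f_{i+1} / (dt_i dt_{i+1})
#          + 2 f_{i+2} / (dt_{i+1} (dt_i + dt_{i+1})),
# where the two-term middle coefficient in the code simplifies to
# -2/(dt_i * dt_{i+1}).  The stencil is exact for quadratics, which gives a
# quick check (demo names are local, not from the repository):
import numpy as np

np.random.seed(0)
T_demo = np.cumsum(0.05 + 0.05 * np.random.rand(50))     # irregular sample times
dt_demo = T_demo[1:] - T_demo[:-1]
D_demo = np.zeros((T_demo.size - 2, T_demo.size))
for i in range(T_demo.size - 2):
    D_demo[i, i] = 2 / (dt_demo[i] * (dt_demo[i] + dt_demo[i + 1]))
    D_demo[i, i + 1] = -2 / (dt_demo[i] * dt_demo[i + 1])
    D_demo[i, i + 2] = 2 / (dt_demo[i + 1] * (dt_demo[i] + dt_demo[i + 1]))
quadratic = 3 * T_demo**2 - 2 * T_demo + 1
assert np.allclose(D_demo.dot(quadratic), 6.0)            # second derivative of 3t^2 is 6
# -------------------------------------------------------------------------------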
87 | def get_network_variables(n, n_hidden, size_hidden, N_hat):
88 | 
89 |     layer_sizes = [n] + [size_hidden for _ in range(n_hidden)] + [n]
90 |     num_layers = len(layer_sizes)
91 | 
92 |     weights = []
93 |     biases = []
94 | 
95 |     for j in range(1,num_layers):
96 |         weights.append(tf.get_variable("W"+str(j), [layer_sizes[j],layer_sizes[j-1]], \
97 |                        initializer = tf.contrib.layers.xavier_initializer(seed = 1)))
98 |         biases.append(tf.get_variable("b"+str(j), [layer_sizes[j],1], initializer = tf.zeros_initializer()))
99 | 
100 |     N = tf.get_variable("N", initializer = N_hat.astype('float32'))
101 | 
102 |     return (weights, biases, N)
103 | 
104 | def create_computational_graph(n, N_hat, net_params, num_dt = 10, method = 'RK4', gamma = 1e-5, beta = 1e-8, weight_decay = 'exp', decay_const = 0.9):
105 | 
106 |     assert(n == N_hat.shape[0])
107 |     m = N_hat.shape[1]
108 | 
109 |     ###########################################################################
110 |     #
111 |     # Placeholders for initial condition
112 |     #
113 |     ###########################################################################
114 |     Y_0 = tf.placeholder(tf.float32, [n,None], name = "Y_0") # noisy measurements of state
115 |     T_0 = tf.placeholder(tf.float32, [1,None], name = "T_0") # time
116 | 
117 |     ###########################################################################
118 |     #
119 |     # Placeholders for true forward and backward predictions
120 |     #
121 |     ###########################################################################
122 |     true_forward_Y = []
123 |     true_backward_Y = []
124 | 
125 |     for j in range(num_dt):
126 |         true_forward_Y.append(tf.placeholder(tf.float32, [n,None], name = "Y"+str(j+1)+"_true"))
127 |         true_backward_Y.append(tf.placeholder(tf.float32, [n,None], name = "Yn"+str(j+1)+"_true"))
128 | 
129 |     H = tf.placeholder(tf.float32, [1,m-1], name = "H") # timestep
130 | 
131 |     ###########################################################################
132 |     #
133 |     # Forward and backward predictions of true state
134 |     #
135 |     ###########################################################################
136 | 
137 |     (weights, biases, N) = net_params
138 |     X_0 = tf.subtract(Y_0, tf.slice(N, [0,num_dt],[n,m-2*num_dt])) # estimate of true state
139 | 
140 |     pred_forward_X = [RK_timestepper(X_0, T_0, simple_net, tf.slice(H, [0,num_dt],[1,m-2*num_dt]), \
141 |                       weights, biases, method = method)]
142 |     pred_backward_X = [RK_timestepper(X_0, T_0, simple_net, tf.slice(H, [0,num_dt-1],[1,m-2*num_dt]), \
143 |                        weights, biases, method = method, direction = 'B')]
144 | 
145 |     for j in range(1,num_dt):
146 |         pred_forward_X.append(RK_timestepper(pred_forward_X[-1], T_0, simple_net, tf.slice(H, [0,num_dt+j],[1,m-2*num_dt]), \
147 |                               weights, biases, method = method))
148 |         pred_backward_X.append(RK_timestepper(pred_backward_X[-1], T_0, simple_net, tf.slice(H, [0,num_dt-1-j],[1,m-2*num_dt]), \
149 |                                weights, biases, method = method, direction = 'B'))
150 | 
151 |     ###########################################################################
152 |     #
153 |     # Forward and backward predictions of measured (noisy) state
154 |     #
155 |     ###########################################################################
156 | 
157 |     pred_forward_Y = [pred_forward_X[j] + tf.slice(N, [0,num_dt+1+j],[n,m-2*num_dt]) for j in range(num_dt)]
158 |     pred_backward_Y = [pred_backward_X[j] + tf.slice(N, [0,num_dt-1-j],[n,m-2*num_dt]) for j in range(num_dt)]
159 | 
160 |     ###########################################################################
161 |     #
162 |     # Set up cost function
163 |     #
164 |     ###########################################################################
165 | 
166 |     if weight_decay == 'linear': output_weights = [(1+j)**-1 for j in range(num_dt)] # linearly decreasing importance
167 |     else: output_weights = [decay_const**j for j in range(num_dt)] # exponentially decreasing importance
168 | 
169 |     forward_fidelity = tf.reduce_sum([w*tf.losses.mean_squared_error(true,pred) \
170 |                        for (w,true,pred) in zip(output_weights,true_forward_Y,pred_forward_Y)])
171 | 
172 |     backward_fidelity = tf.reduce_sum([w*tf.losses.mean_squared_error(true,pred) \
173 |                        for (w,true,pred) in zip(output_weights,true_backward_Y,pred_backward_Y)])
174 | 
175 |     fidelity = tf.add(forward_fidelity, backward_fidelity)
176 | 
177 |     # Regularizer for NN weights
178 |     weights_regularizer = tf.reduce_mean([tf.nn.l2_loss(W) for W in weights])
179 | 
180 |     # Regularizer for explicit noise term
181 |     noise_regularizer = tf.nn.l2_loss(N)
182 | 
183 |     # Weighted sum of individual cost functions
184 |     cost = tf.reduce_sum(fidelity + beta*weights_regularizer + gamma*noise_regularizer)
185 | 
186 |     # L-BFGS-B optimizer via scipy
187 |     optimizer = tf.contrib.opt.ScipyOptimizerInterface(cost, options={'maxiter': 50000,
188 |                                                                       'maxfun': 50000,
189 |                                                                       'ftol': 1e-15,
190 |                                                                       'gtol' : 1e-11,
191 |                                                                       'eps' : 1e-12,
192 |                                                                       'maxls' : 100})
193 | 
194 |     placeholders = {'Y_0': Y_0,
195 |                     'T_0': T_0,
196 |                     'true_forward_Y': true_forward_Y,
197 |                     'true_backward_Y': true_backward_Y,
198 |                     'H': H}
199 | 
200 |     return optimizer, placeholders
201 | 
202 | 
--------------------------------------------------------------------------------
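For completeness, a hedged sketch of feeding the variable-step graph above. It mirrors the fixed-step sketch given after the README, except that every sampling interval is passed through the placeholder H and the noise pre-estimate needs the time vector. The data, network size, and lam value are illustrative, not taken from the notebooks.

import numpy as np
import tensorflow as tf
import utils_var_step

# Illustrative non-uniformly sampled data for a damped oscillator
np.random.seed(0)
T = np.cumsum(0.01 + 0.02 * np.random.rand(500))          # irregular sample times
X_true = np.vstack([np.exp(-0.1 * T) * np.cos(T),
                    -np.exp(-0.1 * T) * np.sin(T)])
Y = X_true + 0.01 * np.std(X_true) * np.random.randn(*X_true.shape)
n, m = Y.shape
num_dt = 10

# Here D approximates a true second derivative in time units, so the smoothing
# strength is data-dependent; lam = 1e-4 is an illustrative choice for this grid.
N_hat, X_smooth = utils_var_step.approximate_noise(Y, T, lam=1e-4)
net_params = utils_var_step.get_network_variables(n, n_hidden=2, size_hidden=64, N_hat=N_hat)
optimizer, ph = utils_var_step.create_computational_graph(n, N_hat, net_params,
                                                          num_dt=num_dt, method='RK4')

feed_dict = {ph['Y_0']: Y[:, num_dt:m - num_dt],
             ph['T_0']: T.reshape(1, -1)[:, num_dt:m - num_dt],
             ph['H']: (T[1:] - T[:-1]).reshape(1, -1)}     # all m-1 intervals
for j in range(num_dt):
    feed_dict[ph['true_forward_Y'][j]] = Y[:, num_dt + 1 + j:m - num_dt + 1 + j]
    feed_dict[ph['true_backward_Y'][j]] = Y[:, num_dt - 1 - j:m - num_dt - 1 - j]

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    optimizer.minimize(sess, feed_dict=feed_dict)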