├── Example Butterfly.ipynb
├── Example Call Option.ipynb
├── Example Path-dependent option.ipynb
├── Example SP500-Asian.ipynb
├── Example SP500-Asian_BlackScholesHedge.ipynb
├── Example SP500-Asian_SeveralWindows.ipynb
├── Example-PriceBounds.ipynb
├── FiniteDifferences.py
├── Functions.py
├── Functions_NonRobust.py
├── LICENSE
└── README.md

/FiniteDifferences.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Created on Fri Oct 1 18:10:59 2021

@author: Julian Sester
"""

import numpy as np
from tqdm import tqdm

def linear_pde(a_0, a_1, b_0, b_1, gamma, payoff,
               start_value_from=0,
               start_value_to=1,
               Nr_grid_s=10,
               Nr_grid_t=100,
               time_from=0,
               time_to=1):
    # Initialize the space and time grids
    s = np.linspace(start_value_from, start_value_to, Nr_grid_s)
    Delta_s = s[1] - s[0]
    t = np.linspace(time_from, time_to, Nr_grid_t)
    Delta_t = t[1] - t[0]
    # Stability condition of the explicit scheme
    if Delta_s**2/max([a_0 + a_1*val*(val > 0) for val in s]) < Delta_t:
        print("WARNING: Stability condition is violated.\nChoose more time iterations!")
    u = np.zeros((len(t), len(s)))

    # Terminal condition at maturity T
    u[-1, :] = [payoff(val) for val in s]
    # Boundary conditions at the borders of the space grid
    u[:, 0] = [payoff(s[0])]*len(t)
    u[:, -1] = [payoff(s[-1])]*len(t)
    # Explicit finite-difference scheme, iterating backwards in time
    for i in tqdm(range(len(t)-1, 0, -1)):
        # One-sided differences at the left and right space boundaries
        u[i-1, 0] = u[i, 0] + (b_0 + b_1*s[0])*(Delta_t/Delta_s)*(u[i, 1] - u[i, 0])\
            + 0.5*((a_0 + a_1*(s[0]*(s[0] > 0)))**(2*gamma))*(Delta_t/Delta_s**2)*(u[i, 2] - 2*u[i, 1] + u[i, 0])
        u[i-1, -1] = u[i, -1] + (b_0 + b_1*s[-1])*(Delta_t/Delta_s)*(u[i, -1] - u[i, -2])\
            + 0.5*((a_0 + a_1*(s[-1]*(s[-1] > 0)))**(2*gamma))*(Delta_t/Delta_s**2)*(u[i, -1] - 2*u[i, -2] + u[i, -3])
        # Central differences in the interior of the space grid
        for j in range(1, len(s)-1):
            u[i-1, j] = u[i, j] + (b_0 + b_1*s[j])*(Delta_t/(2*Delta_s))*(u[i, j+1] - u[i, j-1])\
                + 0.5*((a_0 + a_1*(s[j]*(s[j] > 0)))**(2*gamma))*(Delta_t/Delta_s**2)*(u[i, j+1] - 2*u[i, j] + u[i, j-1])
    return u[0, :], u

def nonlinear_pde(a_0_lower_bound, a_0_upper_bound, a_1_lower_bound, a_1_upper_bound,
                  b_0_lower_bound, b_0_upper_bound, b_1_lower_bound, b_1_upper_bound,
                  gamma_lower_bound, gamma_upper_bound,
                  payoff,
                  start_value_from=0,
                  start_value_to=1,
                  Nr_grid_s=20,
                  Nr_grid_t=40,
                  time_from=0,
                  time_to=1,
                  minimize=True):
    # Initialize the space and time grids
    s = np.linspace(start_value_from, start_value_to, Nr_grid_s)
    Delta_s = s[1] - s[0]
    t = np.linspace(time_from, time_to, Nr_grid_t)
    Delta_t = t[1] - t[0]
    # Stability condition of the explicit scheme
    if Delta_s**2/max([a_0_upper_bound + a_1_upper_bound*val*(val > 0) for val in s]) < Delta_t:
        print("WARNING: Stability condition is violated.\nChoose more time iterations!")
    u = np.zeros((len(t), len(s)))

    # The generator G evaluates drift and diffusion at the corner points of the
    # parameter intervals and takes the minimum (lower price bound) or maximum
    # (upper price bound) over these candidates.
    def G(x, p, q, minimize):
        possible_b = [(b_0_lower_bound + b_1_lower_bound*x)*p,
                      (b_0_upper_bound + b_1_lower_bound*x)*p,
                      (b_0_lower_bound + b_1_upper_bound*x)*p,
                      (b_0_upper_bound + b_1_upper_bound*x)*p]
        possible_a = [(0.5*(a_0_lower_bound + a_1_lower_bound*x*(x > 0))**(2*gamma_lower_bound))*q,
                      (0.5*(a_0_upper_bound + a_1_lower_bound*x*(x > 0))**(2*gamma_lower_bound))*q,
                      (0.5*(a_0_lower_bound + a_1_upper_bound*x*(x > 0))**(2*gamma_lower_bound))*q,
                      (0.5*(a_0_upper_bound + a_1_upper_bound*x*(x > 0))**(2*gamma_lower_bound))*q,
                      (0.5*(a_0_lower_bound + a_1_lower_bound*x*(x > 0))**(2*gamma_upper_bound))*q,
                      (0.5*(a_0_upper_bound + a_1_lower_bound*x*(x > 0))**(2*gamma_upper_bound))*q,
                      (0.5*(a_0_lower_bound + a_1_upper_bound*x*(x > 0))**(2*gamma_upper_bound))*q,
                      (0.5*(a_0_upper_bound + a_1_upper_bound*x*(x > 0))**(2*gamma_upper_bound))*q]
        if minimize:
            return np.min(possible_b) + np.min(possible_a)
        else:
            return np.max(possible_b) + np.max(possible_a)

    # Terminal condition at maturity T
    u[-1, :] = [payoff(val) for val in s]
    # Boundary conditions at the borders of the space grid
    u[:, 0] = [payoff(s[0])]*len(t)
    u[:, -1] = [payoff(s[-1])]*len(t)
    # Iteration over the time steps, backwards in time
    for i in tqdm(range(len(t)-1, 0, -1)):
        # The one-sided boundary updates below are deliberately disabled;
        # the boundary values stay fixed at the payoff instead.
        # Left boundary:
        # G_evaluated = G(s[0],
        #                 (1/Delta_s)*(u[i, 1] - u[i, 0]),
        #                 (1/Delta_s**2)*(u[i, 2] - 2*u[i, 1] + u[i, 0]),
        #                 minimize)
        # u[i-1, 0] = u[i, 0] + G_evaluated*Delta_t
        # Right boundary:
        # G_evaluated = G(s[-1],
        #                 (1/Delta_s)*(u[i, -1] - u[i, -2]),
        #                 (1/Delta_s**2)*(u[i, -1] - 2*u[i, -2] + u[i, -3]),
        #                 minimize)
        # u[i-1, -1] = u[i, -1] + G_evaluated*Delta_t
        # Iteration over the inner values of the space grid
        for j in range(1, len(s)-1):
            G_evaluated = G(s[j],
                            (1/(2*Delta_s))*(u[i, j+1] - u[i, j-1]),
                            (1/Delta_s**2)*(u[i, j+1] - 2*u[i, j] + u[i, j-1]),
                            minimize)
            u[i-1, j] = u[i, j] + G_evaluated*Delta_t
    return u[0, :], u
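
# Added usage sketch (not part of the original file): computing a robust lower
# price bound for a call payoff with the nonlinear solver. All parameter bounds,
# the strike, and the grid sizes below are illustrative assumptions.
if __name__ == "__main__":
    call = lambda x: max(x - 0.5, 0.0)  # call payoff with (assumed) strike 0.5
    lower_bound, u = nonlinear_pde(0.05, 0.1,   # bounds for a_0
                                   0.0, 0.1,    # bounds for a_1
                                   0.0, 0.05,   # bounds for b_0
                                   0.0, 0.05,   # bounds for b_1
                                   0.5, 1.0,    # bounds for gamma
                                   call,
                                   Nr_grid_s=20, Nr_grid_t=400,
                                   minimize=True)  # minimize=False yields the upper bound
    print(lower_bound)  # value function at time 0 across the space grid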
--------------------------------------------------------------------------------
/Functions.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 26 11:20:37 2021

@author: Julian
"""
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from scipy.stats import norm
from tqdm import tqdm

def generate_path(a_0,     # Interval for parameter a_0
                  a_1,     # Interval for parameter a_1
                  b_0,     # Interval for parameter b_0
                  b_1,     # Interval for parameter b_1
                  gamma,   # Interval for gamma
                  x_0,     # Initial value
                  T,       # Maturity (in years)
                  n,       # Nr of trading days
                  seed=0):
    # Time difference between trading days:
    dt = T/n
    # Create the increments of a Brownian motion;
    # set the random seed for the Brownian motion, if desired
    if seed != 0:
        np.random.seed(seed)
    dW = np.sqrt(dt) * np.random.randn(n)
    # Apply the Euler-Maruyama scheme;
    # use real randomness for the parameters!
    np.random.seed()
    # Initial value
    X = [x_0]
    for i in range(n):
        # Draw the parameters uniformly from their intervals in each step
        a_0_sample = a_0[0] + (a_0[1]-a_0[0])*np.random.uniform()
        a_1_sample = a_1[0] + (a_1[1]-a_1[0])*np.random.uniform()
        b_0_sample = b_0[0] + (b_0[1]-b_0[0])*np.random.uniform()
        b_1_sample = b_1[0] + (b_1[1]-b_1[0])*np.random.uniform()
        gamma_sample = gamma[0] + (gamma[1]-gamma[0])*np.random.uniform()
        # Compute the discretized value
        X += [X[-1] + (b_0_sample + b_1_sample*X[-1])*dt
              + (a_0_sample + a_1_sample*np.max([X[-1], 0]))**gamma_sample*dW[i]]
    return X


# The corresponding batch generator
def generate_batch_of_paths(a_0,     # Interval for parameter a_0
                            a_1,     # Interval for parameter a_1
                            b_0,     # Interval for parameter b_0
                            b_1,     # Interval for parameter b_1
                            gamma,   # Interval for gamma
                            x_0,     # Initial value
                            T,       # Maturity (in years)
                            n,       # Nr of trading days
                            batch_size=256,
                            scaling_factor=1.):
    while True:
        batch = tf.reshape([generate_path(a_0, a_1, b_0, b_1, gamma, x_0, T, n) for i in range(batch_size)],
                           [batch_size, n+1])
        yield batch/scaling_factor
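
# Added usage sketch (not part of the original file): drawing one batch of paths
# from the generator; the parameter intervals below are illustrative assumptions.
if __name__ == "__main__":
    gen = generate_batch_of_paths(a_0=[0.05, 0.1], a_1=[0.0, 0.1],
                                  b_0=[0.0, 0.05], b_1=[0.0, 0.05],
                                  gamma=[0.5, 1.0],
                                  x_0=1.0, T=1.0, n=20, batch_size=8)
    batch = next(gen)  # tf.Tensor of shape (8, 21): 8 paths with n+1 time points each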

def optimal_hedge(derivative,  # Function describing the payoff of the derivative to hedge
                  a_0,     # Interval for parameter a_0
                  a_1,     # Interval for parameter a_1
                  b_0,     # Interval for parameter b_0
                  b_1,     # Interval for parameter b_1
                  gamma,   # Interval for gamma
                  x_0,     # Initial value
                  T,       # Maturity (in years)
                  n,       # Nr of trading days
                  depth=2,         # Depth of the neural network (nr of hidden layers)
                  nr_neurons=15,   # Nr of neurons per layer
                  EPOCHS=1000,     # Total number of epochs
                  l_r=0.0001,      # Learning rate of the Adam optimizer
                  BATCH_SIZE=256,  # Batch size for sampling the paths
                  hedge="hedge",
                  scaling_factor=1.,
                  path_dependent=False):
    # Initialize the price variable alpha with the empirical mean payoff of a first batch
    first_path = next(generate_batch_of_paths(a_0, a_1, b_0, b_1, gamma, x_0, T, n, BATCH_SIZE, scaling_factor))
    Initial_value = tf.reduce_mean(tf.map_fn(derivative, first_path))
    # List of trading days
    t_k = np.linspace(0, T, n+1)
    alpha = tf.Variable([Initial_value], trainable=True, dtype="float32")

    # Define the neural network that represents the hedging strategy
    def build_model(depth, nr_neurons):
        if path_dependent:
            x = keras.Input(shape=(1,), name="x")
            t = keras.Input(shape=(1,), name="t")
            max_x = keras.Input(shape=(1,), name="max_x")  # running maximum of the path
            fully_connected_Input = layers.concatenate([x, t, max_x])
        else:
            x = keras.Input(shape=(1,), name="x")
            t = keras.Input(shape=(1,), name="t")
            fully_connected_Input = layers.concatenate([x, t])
        # Create the NN
        values_all = layers.Dense(nr_neurons, activation="relu")(fully_connected_Input)
        # Create deep layers
        for i in range(depth):
            values_all = layers.Dense(nr_neurons, activation="relu")(values_all)
        # Output layer
        value_out = layers.Dense(1)(values_all)
        if path_dependent:
            model = keras.Model(inputs=[x, t, max_x], outputs=[value_out])
        else:
            model = keras.Model(inputs=[x, t], outputs=[value_out])
        return model

    # Define the risk measure
    # def rho(x):  # Input as a list of entries; entropic risk measure with lambda = 1
    #     return tf.math.log(tf.reduce_mean(tf.math.exp(-x)))

    if hedge == "hedge":
        def rho(x):
            return tf.reduce_mean(tf.math.square(x))
    if hedge == "super-hedge":
        def rho(x):
            return tf.reduce_mean(tf.math.square(x)) + tf.reduce_mean(tf.math.square(tf.nn.relu(-x)))
    if hedge == "sub-hedge":
        def rho(x):
            return tf.reduce_mean(tf.math.square(x)) + tf.reduce_mean(tf.math.square(tf.nn.relu(x)))
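    # Note (added commentary, not part of the original file): all three variants
    # share the mean-squared hedging error. "super-hedge" adds a one-sided penalty
    # on negative P&L, pushing the terminal wealth above the payoff, while
    # "sub-hedge" penalizes positive P&L, pushing the terminal wealth below the
    # payoff; "hedge" is the plain quadratic criterion.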

    # Define the loss function: risk of the hedging error alpha + (Delta . dS) - payoff
    def loss(model, batch):
        path_diff = batch[:, 1:] - batch[:, :-1]  # increments of the price paths
        if path_dependent:
            hedge_evaluated = [model([tf.reshape(batch[:, i], (BATCH_SIZE, 1)),
                                      tf.reshape(np.repeat(t_k[i], BATCH_SIZE), (BATCH_SIZE, 1)),
                                      tf.reshape(tf.reduce_max(batch[:, :(i+1)], 1), (BATCH_SIZE, 1))]) for i in range(n)]
        else:
            hedge_evaluated = [model([tf.reshape(batch[:, i], (BATCH_SIZE, 1)),
                                      tf.reshape(np.repeat(t_k[i], BATCH_SIZE), (BATCH_SIZE, 1))]) for i in range(n)]
        # Gains of the self-financing trading strategy
        delta_S = tf.reduce_sum(tf.math.multiply(path_diff, tf.transpose(tf.reshape(hedge_evaluated, (n, BATCH_SIZE)))), 1)
        derivative_on_batch = tf.map_fn(derivative, batch)
        return rho(alpha + delta_S - derivative_on_batch)

    # Gradient of the loss w.r.t. the network weights and the price alpha
    def grad(model, batch):
        with tf.GradientTape() as tape:
            loss_value = loss(model, batch)
        return loss_value, tape.gradient(loss_value, model.trainable_variables + [alpha])

    # Gradient w.r.t. alpha only (for the optional separate alpha optimizer below)
    def grad_alpha(model, batch):
        with tf.GradientTape() as tape:
            loss_value = loss(model, batch)
        return tape.gradient(loss_value, [alpha])

    # Create optimizer and model
    optimizer = tf.keras.optimizers.Adam(learning_rate=l_r, beta_1=0.9, beta_2=0.999)
    optimizer_alpha = tf.keras.optimizers.SGD(learning_rate=10*l_r)
    model = build_model(depth, nr_neurons)
    losses = []

    # Training loop
    for epoch in tqdm(range(int(EPOCHS))):
        batch = next(generate_batch_of_paths(a_0, a_1, b_0, b_1, gamma, x_0, T, n, BATCH_SIZE, scaling_factor))
        loss_value, grads = grad(model, batch)
        # Alternative: update alpha separately with SGD
        # grads_a = grad_alpha(model, batch)
        # optimizer_alpha.apply_gradients(zip(grads_a, [alpha]))
        optimizer.apply_gradients(zip(grads, model.trainable_variables + [alpha]))
        losses.append(loss_value.numpy()*scaling_factor)
        if epoch % 10 == 0 and epoch > 0:
            print("Iteration: {}, Price of Hedge: {}, Loss: {}".format(epoch, alpha.numpy()[0]*scaling_factor, losses[-1]))
    return np.mean(alpha.numpy()[0]), model

def BS_Delta(sigma, r, S_0, T, K):
    # Black-Scholes delta of a European call option
    d_1 = (np.log(S_0/K) + (r + 0.5*sigma**2)*T)/(sigma*np.sqrt(T))
    return norm.cdf(d_1)

def BS_Price(sigma, r, S_0, T, K, Call_Put):
    # Black-Scholes price of a European call or put option
    d_1 = (np.log(S_0/K) + (r + 0.5*sigma**2)*T)/(sigma*np.sqrt(T))
    d_2 = d_1 - sigma*np.sqrt(T)
    if Call_Put == "Call":
        return S_0*norm.cdf(d_1) - K*np.exp(-r*T)*norm.cdf(d_2)
    elif Call_Put == "Put":
        return K*np.exp(-r*T)*norm.cdf(-d_2) - S_0*norm.cdf(-d_1)
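
# Added usage sketch (not part of the original file): a quick sanity check of
# BS_Price via put-call parity, C - P = S_0 - K*exp(-r*T); the parameters below
# are illustrative assumptions.
if __name__ == "__main__":
    C = BS_Price(sigma=0.2, r=0.01, S_0=100., T=1., K=95., Call_Put="Call")
    P = BS_Price(sigma=0.2, r=0.01, S_0=100., T=1., K=95., Call_Put="Put")
    print(C - P, 100. - 95.*np.exp(-0.01))  # both sides are approximately 5.95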
--------------------------------------------------------------------------------
/Functions_NonRobust.py:
--------------------------------------------------------------------------------
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 26 11:20:37 2021

@author: Julian
"""
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from scipy.stats import norm
from tqdm import tqdm

def generate_path_non_robust(a_0,    # Parameter a_0
                             a_1,    # Parameter a_1
                             b_0,    # Parameter b_0
                             b_1,    # Parameter b_1
                             gamma,  # Parameter gamma
                             x_0,    # Initial value
                             T,      # Maturity (in years)
                             n,      # Nr of trading days
                             seed=0):
    # Time difference between trading days:
    dt = T/n
    # Create the increments of a Brownian motion;
    # set the random seed for the Brownian motion, if desired
    if seed != 0:
        np.random.seed(seed)
    dW = np.sqrt(dt) * np.random.randn(n)
    # Apply the Euler-Maruyama scheme
    # (the seed reset below is a leftover from the robust version, where the
    # parameters are drawn at random in each step)
    np.random.seed()
    # Initial value
    X = [x_0]
    for i in range(n):
        # Compute the discretized value
        X += [X[-1] + (b_0 + b_1*X[-1])*dt + (a_0 + a_1*np.max([X[-1], 0]))**gamma*dW[i]]
    return X


# The corresponding batch generator
def generate_batch_of_paths_non_robust(a_0,    # Parameter a_0
                                       a_1,    # Parameter a_1
                                       b_0,    # Parameter b_0
                                       b_1,    # Parameter b_1
                                       gamma,  # Parameter gamma
                                       x_0,    # Initial value
                                       T,      # Maturity (in years)
                                       n,      # Nr of trading days
                                       batch_size=256,
                                       scaling_factor=1.):
    while True:
        batch = tf.reshape([generate_path_non_robust(a_0, a_1, b_0, b_1, gamma, x_0, T, n) for i in range(batch_size)],
                           [batch_size, n+1])
        yield batch/scaling_factor


def optimal_hedge_non_robust(derivative,  # Function describing the payoff of the derivative to hedge
                             a_0,     # Parameter a_0
                             a_1,     # Parameter a_1
                             b_0,     # Parameter b_0
                             b_1,     # Parameter b_1
                             gamma,   # Parameter gamma
                             x_0,     # Initial value
                             T,       # Maturity (in years)
                             n,       # Nr of trading days
                             depth=2,         # Depth of the neural network (nr of hidden layers)
                             nr_neurons=15,   # Nr of neurons per layer
                             EPOCHS=1000,     # Total number of epochs
                             l_r=0.0001,      # Learning rate of the Adam optimizer
                             BATCH_SIZE=256,  # Batch size for sampling the paths
                             hedge="hedge",
                             scaling_factor=1):
    # Initialize the price variable alpha with the empirical mean payoff of a first batch
    first_path = next(generate_batch_of_paths_non_robust(a_0, a_1, b_0, b_1, gamma, x_0, T, n, BATCH_SIZE, scaling_factor))
    Initial_value = tf.reduce_mean(tf.map_fn(derivative, first_path))
    # List of trading days
    t_k = np.linspace(0, T, n+1)
    alpha = tf.Variable([Initial_value], trainable=True, dtype="float32")

    # Define the neural network that represents the hedging strategy
    def build_model(depth, nr_neurons):
        x = keras.Input(shape=(1,), name="x")
        t = keras.Input(shape=(1,), name="t")
        fully_connected_Input = layers.concatenate([x, t])
        # Create the NN
        values_all = layers.Dense(nr_neurons, activation="relu")(fully_connected_Input)
        # Create deep layers
        for i in range(depth):
            values_all = layers.Dense(nr_neurons, activation="relu")(values_all)
        # Output layer
        value_out = layers.Dense(1)(values_all)
        model = keras.Model(inputs=[x, t], outputs=[value_out])
        return model

    # Define the risk measure
    # def rho(x):  # Input as a list of entries; entropic risk measure with lambda = 1
    #     return tf.math.log(tf.reduce_mean(tf.math.exp(-x)))

    if hedge == "hedge":
        def rho(x):
            return tf.reduce_mean(tf.math.square(x))
    if hedge == "super-hedge":
        def rho(x):
            return tf.reduce_mean(tf.math.square(x)) + tf.reduce_mean(tf.math.square(tf.nn.relu(-x)))
    if hedge == "sub-hedge":
        def rho(x):
            return tf.reduce_mean(tf.math.square(x)) + tf.reduce_mean(tf.math.square(tf.nn.relu(x)))

    # Define the loss function: risk of the hedging error alpha + (Delta . dS) - payoff
    def loss(model, batch):
        path_diff = batch[:, 1:] - batch[:, :-1]  # increments of the price paths
        hedge_evaluated = [model([tf.reshape(batch[:, i], (BATCH_SIZE, 1)),
                                  tf.reshape(np.repeat(t_k[i], BATCH_SIZE), (BATCH_SIZE, 1))]) for i in range(n)]
        # Gains of the self-financing trading strategy
        delta_S = tf.reduce_sum(tf.math.multiply(path_diff, tf.transpose(tf.reshape(hedge_evaluated, (n, BATCH_SIZE)))), 1)
        derivative_on_batch = tf.map_fn(derivative, batch)
        return rho(alpha + delta_S - derivative_on_batch)

    # Gradient of the loss w.r.t. the network weights and the price alpha
    def grad(model, batch):
        with tf.GradientTape() as tape:
            loss_value = loss(model, batch)
        return loss_value, tape.gradient(loss_value, model.trainable_variables + [alpha])

    # Gradient w.r.t. alpha only (for the optional separate alpha optimizer below)
    def grad_alpha(model, batch):
        with tf.GradientTape() as tape:
            loss_value = loss(model, batch)
        return tape.gradient(loss_value, [alpha])

    # Create optimizer and model
    optimizer = tf.keras.optimizers.Adam(learning_rate=l_r, beta_1=0.9, beta_2=0.999)
    optimizer_alpha = tf.keras.optimizers.SGD(learning_rate=10*l_r)
    model = build_model(depth, nr_neurons)
    losses = []

    # Training loop
    for epoch in tqdm(range(int(EPOCHS))):
        batch = next(generate_batch_of_paths_non_robust(a_0, a_1, b_0, b_1, gamma, x_0, T, n, BATCH_SIZE, scaling_factor))
        loss_value, grads = grad(model, batch)
        # Alternative: update alpha separately with SGD
        # grads_a = grad_alpha(model, batch)
        # optimizer_alpha.apply_gradients(zip(grads_a, [alpha]))
        optimizer.apply_gradients(zip(grads, model.trainable_variables + [alpha]))
        losses.append(loss_value.numpy()*scaling_factor)
        if epoch % 10 == 0 and epoch > 0:
            print("Iteration: {}, Price of Hedge: {}, Loss: {}".format(epoch, alpha.numpy()[0]*scaling_factor, losses[-1]))
    return np.mean(alpha.numpy()[0]), model
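
# Added usage sketch (not part of the original file): training a non-robust hedge
# for a call option under fixed model parameters; all values below are
# illustrative assumptions.
if __name__ == "__main__":
    call_payoff = lambda path: tf.nn.relu(path[-1] - 1.0)  # strike 1.0, assumed
    price, hedge_model = optimal_hedge_non_robust(call_payoff,
                                                  a_0=0.1, a_1=0.0,
                                                  b_0=0.0, b_1=0.0,
                                                  gamma=1.0,
                                                  x_0=1.0, T=1.0, n=20,
                                                  EPOCHS=100)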
--------------------------------------------------------------------------------
/LICENSE:
--------------------------------------------------------------------------------
MIT License

Copyright (c) 2021 juliansester

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
# Code for "Robust deep hedging"

## Eva Lütkebohmert, Thorsten Schmidt, Julian Sester

# Abstract

We study pricing and hedging under parameter uncertainty for a class of Markov
processes which we call generalized affine processes and which includes the
Black-Scholes model as well as the constant elasticity of variance (CEV) model
as special cases. Based on a general dynamic programming principle, we are able
to link the associated nonlinear expectation to a variational form of the
Kolmogorov equation, which opens the door for fast numerical pricing in the
robust framework. The main novelty of the paper is that we propose a deep
hedging approach which efficiently solves the hedging problem under parameter
uncertainty. We numerically evaluate this method on simulated and real data and
show that the robust deep hedging outperforms existing hedging approaches, in
particular in highly volatile periods.

# Preprint

The preprint can be found [here](https://arxiv.org/abs/2106.10024).

# Content

The examples from the paper are provided as separate Jupyter notebooks, each named after the example it covers. These are:
- An [Example](https://github.com/juliansester/nga/blob/main/Example%20Butterfly.ipynb) covering butterfly options.
- An [Example](https://github.com/juliansester/nga/blob/main/Example%20Call%20Option.ipynb) covering call options.
- An [Example](https://github.com/juliansester/nga/blob/main/Example%20Path-dependent%20option.ipynb) covering path-dependent options.
- An [Example](https://github.com/juliansester/nga/blob/main/Example%20SP500-Asian.ipynb) covering Asian options on S&P 500 data.
- An [Example](https://github.com/juliansester/nga/blob/main/Example%20SP500-Asian_BlackScholesHedge.ipynb) covering Asian options on S&P 500 data, hedged with a Black-Scholes model.
- An [Example](https://github.com/juliansester/nga/blob/main/Example%20SP500-Asian_SeveralWindows.ipynb) covering Asian options on S&P 500 data over several time windows.
- An [Example](https://github.com/juliansester/nga/blob/main/Example-PriceBounds.ipynb) comparing prices of the robust deep hedging approach with prices of the finite-differences approach.

- The file [Functions.py](https://github.com/juliansester/nga/blob/main/Functions.py) contains the Python code used to train the robust hedging strategies.
- The file [Functions_NonRobust.py](https://github.com/juliansester/nga/blob/main/Functions_NonRobust.py) contains the Python code used to train the non-robust hedging strategies.
- The file [FiniteDifferences.py](https://github.com/juliansester/nga/blob/main/FiniteDifferences.py) contains the Python code used to compute prices with the finite-differences method.
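
A minimal quickstart sketch (the payoff, parameter intervals, and training settings below are illustrative assumptions, not the settings used in the paper):

```python
import tensorflow as tf
from Functions import optimal_hedge

# Call payoff on the terminal value of a path (strike 1.0, assumed)
call = lambda path: tf.nn.relu(path[-1] - 1.0)

# Robust deep hedge and price under illustrative parameter intervals
price, hedge_model = optimal_hedge(call,
                                   a_0=[0.05, 0.1], a_1=[0.0, 0.1],
                                   b_0=[0.0, 0.05], b_1=[0.0, 0.05],
                                   gamma=[0.5, 1.0],
                                   x_0=1.0, T=1.0, n=20, EPOCHS=100)
```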

## Data

Note that the data for the S&P 500 examples cannot be provided for legal reasons.

# License

MIT License

Copyright (c) 2021 Julian Sester

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------