├── saved_results └── init.txt ├── datasets ├── svhn └── mnist ├── saved_init └── init.txt ├── generate_mnist_training_validation.py ├── dependencies ├── exp_kern.py └── scipydirect_for_bo_bos.py ├── run_bo_bos.py ├── README.md ├── bos_function.py ├── helper_funcs.py ├── bayesian_optimization.py ├── objective_functions.py └── analyze_results.ipynb /saved_results/init.txt: -------------------------------------------------------------------------------- 1 | This folder saves the results. 2 | -------------------------------------------------------------------------------- /datasets/svhn: -------------------------------------------------------------------------------- 1 | Please download the SVHN dataset from http://ufldl.stanford.edu/housenumbers/ and place in this folder 2 | -------------------------------------------------------------------------------- /datasets/mnist: -------------------------------------------------------------------------------- 1 | Please download "mnist-original.mat" from https://www.kaggle.com/avnishnish/mnist-original and place in this folder 2 | -------------------------------------------------------------------------------- /saved_init/init.txt: -------------------------------------------------------------------------------- 1 | This folder saves the random initializations used by GP-UCB, such that BO-BOS can use the same set of random initializations as those used by GP-UCB. 2 | -------------------------------------------------------------------------------- /generate_mnist_training_validation.py: -------------------------------------------------------------------------------- 1 | # This script is used to generate the training set/validation set split for the MNIST dataset 2 | 3 | import scipy.io as sio 4 | import numpy as np 5 | from sklearn.model_selection import train_test_split 6 | from sklearn import preprocessing 7 | import pickle 8 | 9 | mnist = sio.loadmat('datasets/mnist-original.mat') 10 | 11 | X = mnist['data'].T 12 | Y = preprocessing.OneHotEncoder().fit_transform(mnist['label'].T).toarray() 13 | 14 | X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, stratify=Y, random_state=0) 15 | scaler = preprocessing.MinMaxScaler().fit(X_train) 16 | X_train = scaler.transform(X_train) 17 | X_test = scaler.transform(X_test) 18 | 19 | dataset = {"X_train":X_train, "Y_train":Y_train, "X_test":X_test, "Y_test":Y_test} 20 | 21 | pickle.dump(dataset, open("datasets/mnist_dataset.p", "wb")) 22 | -------------------------------------------------------------------------------- /dependencies/exp_kern.py: -------------------------------------------------------------------------------- 1 | from .kern import Kern 2 | import numpy as np 3 | from GPy.core.parameterization.param import Param 4 | 5 | class ExpKernel(Kern): 6 | def __init__(self,input_dim, alp=1.0, bet=1.0, active_dims=None): 7 | super(ExpKernel, self).__init__(input_dim, active_dims, 'exp kernel') 8 | assert input_dim == 1, "For this kernel we assume input_dim=1" 9 | self.alp = Param("alp", alp) 10 | self.bet = Param("bet", bet) 11 | self.link_parameters(self.alp, self.bet) 12 | 13 | def K(self,X,X2): 14 | if X2 is None: X2 = X 15 | return (self.bet ** self.alp) / (X + X2.T + self.bet) ** self.alp 16 | 17 | def Kdiag(self,X): 18 | return np.squeeze((self.bet ** self.alp) / (X + X + self.bet) ** self.alp) 19 | 20 | def parameters_changed(self): 21 | pass 22 | 23 | def update_gradients_full(self, dL_dK, X, X2): 24 | if X2 is None: X2 = X 25 | 26 | denom = (X + X2.T + self.bet) ** self.alp 27 | d_alp_num = np.log(self.bet) * (self.bet**self.alp) * denom - np.log(X + X2.T + self.bet) * denom * (self.bet**self.alp) 28 | d_alp_den = denom ** 2 29 | d_bet_num = self.alp * (self.bet**(self.alp-1)) * denom - self.alp * ((X + X2.T + self.bet)**(self.alp-1)) * (self.bet**self.alp) 30 | d_bet_den = denom ** 2 31 | 32 | self.alp.gradient = np.sum((d_alp_num/d_alp_den) * dL_dK) 33 | self.bet.gradient = np.sum((d_bet_num/d_bet_den) * dL_dK) 34 | 35 | def update_gradients_diag(self, dL_dKdiag, X): 36 | pass 37 | def gradients_X(self,dL_dK,X,X2): 38 | pass 39 | def gradients_X_diag(self,dL_dKdiag,X): 40 | pass 41 | -------------------------------------------------------------------------------- /run_bo_bos.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | 3 | ### import the BO package 4 | from bayesian_optimization import BayesianOptimization 5 | 6 | ### import the objetive function 7 | from objective_functions import objective_function_LR_MNIST as objective_function 8 | # from objective_functions import objective_function_CNN_CIFAR_10 as objective_function 9 | # from objective_functions import objective_function_CNN_SVHN as objective_function 10 | 11 | np.random.seed(0) 12 | 13 | iterations_list = np.arange(1, 11) 14 | 15 | for run_iter in iterations_list: 16 | ''' 17 | The input arguments to "BayesianOptimization" are explained in the script "bayesian_optimization.py"; 18 | In particular, set "no_BOS=True" if we want to run standard GP-UCB, and "no_BOS=False" if we want to run the BO-BOS algorithm; 19 | When running the "maximize" function, the intermediate results are saved after every BO iteration, under the file name log_file; the content of the log file is explained in the "analyze_results" ipython notebook script. 20 | ''' 21 | 22 | # run without BOS 23 | BO_no_BOS = BayesianOptimization(f=objective_function, 24 | dim = 3, gp_opt_schedule=10, \ 25 | no_BOS=True, use_init=None, \ 26 | log_file="saved_results/bos_mnist_no_stop_" + str(run_iter) + ".p", save_init=True, \ 27 | save_init_file="mnist_5_" + str(run_iter) + ".p", \ 28 | parameter_names=["batch_size", "C", "learning_rate"]) 29 | # "parameter_names" are dummy variables whose correspondance in the display is not guaranteed 30 | BO_no_BOS.maximize(n_iter=50, init_points=3, kappa=2, use_fixed_kappa=False, kappa_scale=0.2, acq='ucb') 31 | 32 | # run with BOS, using the same initializations as above 33 | BO_BOS = BayesianOptimization(f=objective_function, 34 | dim = 3, gp_opt_schedule=10, no_BOS=False, use_init="mnist_5_" + str(run_iter) + ".p", \ 35 | log_file="saved_results/bos_mnist_with_stop_" + str(run_iter) + ".p", save_init=False, \ 36 | save_init_file=None, \ 37 | add_interm_fid=[0, 9, 19, 29, 39], parameter_names=["batch_size", "C", "learning_rate"]) 38 | BO_BOS.maximize(n_iter=70, init_points=3, kappa=2, use_fixed_kappa=False, kappa_scale=0.2, acq='ucb') 39 | 40 | -------------------------------------------------------------------------------- /README.md: -------------------------------------------------------------------------------- 1 | # Bayesian-Optimization-Meets-Bayesian-Optimal-Stopping 2 | Code for the following paper: 3 | 4 | Zhongxiang Dai, Haibin Yu, Kian Hsiang Low and Patrick Jaillet. "Bayesian Optimization 5 | Meets Bayesian Optimal Stopping." In International Conference on Machine Learning (ICML), 6 | Long Beach, CA, Jun 9-15, 2019. 7 | 8 | 9 | 10 | Description of the scripts: 11 | * bayesian_optimization.py: the BO algorithm; implements both standard GP-UCB and BO-BOS 12 | * helper_funcs.py: some helper functions (e.g. acquisition functions) for the BO algorithm 13 | * bos_function.py: contains the Bayesian optimal stopping algorithm 14 | * objective_functions.py: contains several objective functions for hyper-parameter tuning 15 | * run_bo_bos.py: the wrapper script which calls the BO-BOS algorithm 16 | * analyze_results.ipynb: an ipython notebook script analyzing the results obtained by running the "run_bo_bos.py" script (assuming "objective_function_LR_MNIST" is used as the objective function) 17 | * generate_mnist_training_validation.py: generate the training set/validation set split for the MNIST dataset 18 | 19 | 20 | Description of the directories: 21 | * datasets: contains the datasets used for hyper-parameter tuning 22 | MNIST: please download the "mnist-original.zip" from "https://www.kaggle.com/avnishnish/mnist-original", and unzip to folder "datasets/"; 23 | then, run "generate_mnist_training_validation.py", which will generate the training set/validation set split. 24 | SVHN: please download the files "train.tar.gz" and "test.tar.gz" from "http://ufldl.stanford.edu/housenumbers/", 25 | and put then in the folder "datasets/" 26 | CIFAR-10: this dataset will be automatically downloaded by the keras package 27 | * dependencies: contains some dependency scripts, which are explained in more detail below 28 | * saved_init: contains the initializations used by the BO/BO-BOS algorithm; since we would like to use the same initializations for both GP-UCB and BO-BOS 29 | * saved_results: contains the results of the BO/BO-BOS algorithm; the results are saved/updated after every iteration 30 | 31 | 32 | key dependencies (excluding commonly used packages such as scipy, numpy, tensorflow, keras, etc.) 33 | * GPy 34 | * install GPy 35 | * add the line "from .src.exp_kern import ExpKernel" to "PYTHON_PATH/lib/python3.5/site-packages/GPy/kern/\_\_init\_\_.py" 36 | * add the line "from .exp_kern import ExpKernel" to "PYTHON_PATH/lib/python3.5/site-packages/GPy/kern/src/\_\_init\_\_.py" 37 | * place the script "exp_kern.py" in the "dependencies" folder to the folder "PYTHON_PATH/lib/python3.5/site-packages/GPy/kern/src/" 38 | * scipydirect: this package uses the DIRECT method to optimize the acquisition function 39 | * install scipydirect with "pip install scipydirect" 40 | * replace the content of the script "PYTHON_PATH/lib/python3.5/site-packages/scipydirect/\_\_init\_\_.py" with the content of the script "scipydirect_for_bo_bos.py" in the "dependencies" folder; this step is required since we modified the interface of the scipydirect minimize function 41 | -------------------------------------------------------------------------------- /bos_function.py: -------------------------------------------------------------------------------- 1 | ''' 2 | This script implements the Bayesian Optimal Stopping (BOS) function, which outputs the optimal decision rules 3 | ''' 4 | 5 | from __future__ import print_function 6 | import numpy as np 7 | import matplotlib.pyplot as plt 8 | import GPy 9 | from tqdm import tqdm 10 | 11 | def run_BOS(init_curve, incumbent, training_epochs, bo_iteration): 12 | ''' 13 | init_curve: initial learning curves used to generate forward simulations 14 | incumbent: currently optimal validation error 15 | training_epochs: the maximum number of epochs to train (N) 16 | bo_itetation: iteration of BO, starting from 0 (after initialization) 17 | ''' 18 | 19 | grid_size = 100 20 | sample_number = 200 21 | 22 | # this number can be reduced to improve the time efficiency, at the potential expense of less accurate approximation 23 | fs_sample_number = 100000 24 | 25 | ###### define the cost parameters, including K_2, c, and the K_1 sequence 26 | # K1_init, K2, C, gamma = 100, 99, 1, 1.0 # fixed K1 27 | K1_init, K2, C, gamma = 100, 99, 1, 0.99 # small K1 28 | # K1_init, K2, C, gamma = 100, 99, 1, 0.95 # normal K1 29 | # K1_init, K2, C, gamma = 100, 99, 1, 0.89 # large K1 89 30 | 31 | K1 = K1_init / (gamma ** bo_iteration) 32 | 33 | T = training_epochs - len(init_curve) 34 | lc_curr_opt = 1 - incumbent 35 | 36 | ######## Below generates forward simulation samples ######### 37 | # Use the initial samples from a learning curve to initialize the prior distribution 38 | initial_sample_len = len(init_curve) 39 | lc_x = np.arange(1, initial_sample_len+1).reshape(-1, 1) 40 | init_curve = np.array(init_curve).reshape(-1, 1) 41 | alpha_init, beta_init = initial_sample_len, np.sum(init_curve) 42 | 43 | k_exp = GPy.kern.src.ExpKernel(input_dim=1, active_dims=[0]) 44 | m_gpy = GPy.models.GPRegression(lc_x, init_curve, k_exp) 45 | m_gpy.likelihood.variance.fix(1e-3) # fix the noise, to produce more diverse and realistic forward simulation samples 46 | m_gpy.optimize(messages=False) 47 | 48 | xx = np.arange(1, initial_sample_len+T+1).reshape(-1, 1) 49 | post_samples = m_gpy.posterior_samples_f(xx, full_cov=True, size=fs_sample_number) 50 | post_samples = np.squeeze(post_samples) 51 | samples_data = post_samples.T[:, initial_sample_len:] 52 | print("samples_data: ", samples_data.shape) 53 | 54 | # remove those sampled curves that exceed the range [0, 1] 55 | ind = np.all(samples_data<1, axis=1) 56 | samples_data = samples_data[ind] 57 | ind = np.all(samples_data>0, axis=1) 58 | samples_data = samples_data[ind] 59 | 60 | ######### Below we run backward induction to get Bayes optimal decision ########## 61 | # calculate St from sample trajectories 62 | St = [] 63 | for s in tqdm(samples_data): 64 | St.append(np.cumsum(s) / (np.arange(len(s)) + 1)) 65 | St = np.array(St) 66 | 67 | grid_St = np.linspace(0, 1, grid_size) 68 | 69 | state = [] 70 | for i in range(len(grid_St) - 1): 71 | state.append((grid_St[i] + grid_St[i + 1]) / 2) 72 | state = np.array(state) 73 | 74 | losses = np.zeros((T, grid_size - 1, 3)) 75 | print("Calculating termination losses...") 76 | 77 | all_Pr_z_star = np.zeros((T, grid_size - 1)) 78 | for step in tqdm((np.arange(T) + 1)): 79 | data_t = St[:, step - 1] 80 | Pr_z_star_samples = np.zeros(grid_size - 1) 81 | Pr_z_star_accum = np.zeros(grid_size - 1) 82 | 83 | for i in range(len(data_t)): 84 | error_last_step = samples_data[i, -1] 85 | 86 | val = data_t[i] 87 | ind_left = np.max(np.nonzero(val > grid_St)[0]) 88 | if error_last_step > lc_curr_opt: 89 | Pr_z_star_accum[ind_left] += 1.0 90 | Pr_z_star_samples[ind_left] += 1 91 | 92 | # for each grid/cell, calculate the average over all next-step continuation losses 93 | for i in range(grid_size - 1): 94 | if Pr_z_star_samples[i] != 0: 95 | losses[step - 1, i, 2] 96 | Pr_z_star = Pr_z_star_accum[i] / Pr_z_star_samples[i] 97 | all_Pr_z_star[step - 1, i] = Pr_z_star 98 | 99 | loss_d = K2 * Pr_z_star + C * step 100 | loss_d_star = K1 * (1 - Pr_z_star) + C * step 101 | losses[step - 1, i, 0] = loss_d 102 | losses[step - 1, i, 1] = loss_d_star 103 | else: 104 | # this says that if the current cell is not visited by any forward simulation samples, we should always choose to continue, which is a convervative approach 105 | all_Pr_z_star[step - 1, i] = 100 106 | losses[step - 1, i, 0] = 1e4 107 | losses[step - 1, i, 1] = 1e4 108 | 109 | print("Calculating continuation losses...") 110 | step = T - 1 111 | while step > 0: 112 | print("Running step {0}".format(step)) 113 | 114 | data_t = St[:, step - 1] 115 | grid_samples = np.zeros(grid_size - 1) 116 | grid_losses_cont = np.zeros(grid_size - 1) 117 | 118 | # go through all forward simulation samples; for each sample, find the index it ends up with in the next step, and find the corresponding optimal loss 119 | for i in range(len(data_t)): 120 | val = data_t[i] 121 | 122 | St_next = St[i, step] # the statistic at the next step 123 | St_next_ind_left = np.max(np.nonzero(St_next > grid_St)[0]) 124 | 125 | if step == T - 1: 126 | loss_continue = np.min(losses[step + 1 - 1, St_next_ind_left, :2]) 127 | else: 128 | loss_continue = np.min(losses[step + 1 - 1, St_next_ind_left, :]) 129 | 130 | ind_left = np.max(np.nonzero(val > grid_St)[0]) 131 | grid_losses_cont[ind_left] += loss_continue 132 | grid_samples[ind_left] += 1 133 | 134 | # for each grid/cell, calculate the average over all next-step continuation losses 135 | for i in range(len(grid_samples)): 136 | if grid_samples[i] > 30: 137 | losses[step - 1, i, 2] = grid_losses_cont[i] / grid_samples[i] 138 | else: 139 | # this says that if the current cell is visited by no more than 30 forward simulation samples, we should always choose to continue, which is taking a convervative approach 140 | losses[step - 1, i, 2] = 0 141 | 142 | step = step - 1 143 | 144 | # Below we extract the Bayes optimal decisions according to the losses calculated above 145 | # 0: decision d_0 146 | # 1: decision d_2 147 | # 2: decision d_1 148 | actions = np.zeros((T, len(state))) 149 | for s_ind in range(len(state)): 150 | actions[-1, s_ind] = np.argmin(losses[-1, s_ind, :2]) + 1 151 | for step in range(T-1): 152 | for s_ind in range(len(state)): 153 | if losses[step, s_ind, 2] != 0: 154 | actions[step, s_ind] = np.argmin(losses[step, s_ind, :]) + 1 155 | else: 156 | actions[step, s_ind] = 0 157 | print("Done") 158 | 159 | # return the obtained decision rules, as well as the space of summary statistics 160 | return actions, grid_St 161 | -------------------------------------------------------------------------------- /helper_funcs.py: -------------------------------------------------------------------------------- 1 | #from __future__ import print_function 2 | #from __future__ import division 3 | import numpy as np 4 | from datetime import datetime 5 | from scipy.stats import norm 6 | from scipy.optimize import minimize 7 | import matplotlib.pyplot as plt 8 | from tqdm import tqdm 9 | 10 | from scipydirect import minimize as mini_direct 11 | 12 | # np.random.seed(0) 13 | 14 | ## leave this flag to always TRUE, since we assume we always use the DIRECT optimizer 15 | USE_DIRECT_OPTIMIZER = True 16 | 17 | def acq_max(ac, gp, y_max, bounds, iteration): 18 | """ 19 | """ 20 | print("[Running the direct optimizer]") 21 | bound_list = [] 22 | for b in bounds: 23 | bound_list.append(tuple(b)) 24 | 25 | res = mini_direct(ac, bound_list, para_dict={"gp":gp, "y_max":y_max, "iteration":iteration}) 26 | x_max = res["x"] 27 | 28 | return np.clip(x_max, bounds[:, 0], bounds[:, 1]) 29 | 30 | class UtilityFunction(object): 31 | """ 32 | An object to compute the acquisition functions. 33 | """ 34 | 35 | def __init__(self, kind, kappa, use_fixed_kappa, kappa_scale, xi): 36 | """ 37 | If UCB is to be used, a constant kappa is needed. 38 | """ 39 | self.kappa = kappa 40 | self.use_fixed_kappa = use_fixed_kappa 41 | self.kappa_scale = kappa_scale 42 | 43 | self.xi = xi 44 | 45 | if kind not in ['ucb', 'ei', 'poi']: 46 | err = "The utility function " \ 47 | "{} has not been implemented, " \ 48 | "please choose one of ucb, ei, or poi.".format(kind) 49 | raise NotImplementedError(err) 50 | else: 51 | self.kind = kind 52 | 53 | # This function is defined to work with the DIRECT optimizer 54 | def utility(self, x, para_dict): 55 | gp, y_max, iteration = para_dict["gp"], para_dict["y_max"], para_dict["iteration"] 56 | 57 | if self.kind == 'ucb': 58 | return self._ucb(x, gp, self.kappa, self.use_fixed_kappa, self.kappa_scale, iteration) 59 | if self.kind == 'ei': 60 | return self._ei(x, gp, y_max, self.xi) 61 | if self.kind == 'poi': 62 | return self._poi(x, gp, y_max, self.xi) 63 | 64 | @staticmethod 65 | def _ucb(x, gp, kappa, use_fixed_kappa, kappa_scale, iteration): 66 | if USE_DIRECT_OPTIMIZER: 67 | x = x.reshape(1, -1) 68 | 69 | x = np.concatenate((x, np.ones((x.shape[0], 1))), axis=1) 70 | 71 | mean, var = gp.predict(x) 72 | std = np.sqrt(var) 73 | 74 | d = x.shape[1] 75 | 76 | if USE_DIRECT_OPTIMIZER: 77 | optimizer_flag = -1 78 | else: 79 | optimizer_flag = 1 80 | 81 | if use_fixed_kappa: 82 | return optimizer_flag * (mean + kappa * std) # beta_t value taken from the high-dimensional BO paper 83 | else: 84 | return optimizer_flag * (mean + (kappa_scale * d * np.log(2 * iteration)) * std) 85 | # beta_t value taken from the high-dimensional BO paper 86 | 87 | @staticmethod 88 | def _ei(x, gp, y_max, xi): 89 | x = np.concatenate((x, np.ones((x.shape[0], 1))), axis=1) 90 | mean, var = gp.predict(x) 91 | std = np.sqrt(var) 92 | 93 | z = (mean - y_max - xi)/std 94 | return (mean - y_max - xi) * norm.cdf(z) + std * norm.pdf(z) 95 | 96 | @staticmethod 97 | def _poi(x, gp, y_max, xi): 98 | mean, std = gp.predict(x, return_std=True) 99 | z = (mean - y_max - xi)/std 100 | return norm.cdf(z) 101 | 102 | 103 | def unique_rows(a): 104 | """ 105 | A functions to trim repeated rows that may appear when optimizing. 106 | This is necessary to avoid the sklearn GP object from breaking 107 | 108 | :param a: array to trim repeated rows from 109 | 110 | :return: mask of unique rows 111 | """ 112 | 113 | # Sort array and kep track of where things should go back to 114 | order = np.lexsort(a.T) 115 | reorder = np.argsort(order) 116 | 117 | a = a[order] 118 | diff = np.diff(a, axis=0) 119 | ui = np.ones(len(a), 'bool') 120 | ui[1:] = (diff != 0).any(axis=1) 121 | 122 | return ui[reorder] 123 | 124 | 125 | class BColours(object): 126 | BLUE = '\033[94m' 127 | CYAN = '\033[36m' 128 | GREEN = '\033[32m' 129 | MAGENTA = '\033[35m' 130 | RED = '\033[31m' 131 | ENDC = '\033[0m' 132 | 133 | 134 | class PrintLog(object): 135 | 136 | def __init__(self, params): 137 | 138 | self.ymax = None 139 | self.xmax = None 140 | self.params = params 141 | self.ite = 1 142 | 143 | self.start_time = datetime.now() 144 | self.last_round = datetime.now() 145 | 146 | # sizes of parameters name and all 147 | self.sizes = [max(len(ps), 7) for ps in params] 148 | 149 | # Sorted indexes to access parameters 150 | self.sorti = sorted(range(len(self.params)), 151 | key=self.params.__getitem__) 152 | 153 | def reset_timer(self): 154 | self.start_time = datetime.now() 155 | self.last_round = datetime.now() 156 | 157 | def print_header(self, initialization=True): 158 | 159 | if initialization: 160 | print("{}Initialization{}".format(BColours.RED, 161 | BColours.ENDC)) 162 | else: 163 | print("{}Bayesian Optimization{}".format(BColours.RED, 164 | BColours.ENDC)) 165 | 166 | print(BColours.BLUE + "-" * (29 + sum([s + 5 for s in self.sizes])) + 167 | BColours.ENDC) 168 | 169 | print("{0:>{1}}".format("Step", 5), end=" | ") 170 | print("{0:>{1}}".format("Time", 6), end=" | ") 171 | print("{0:>{1}}".format("Value", 10), end=" | ") 172 | 173 | for index in self.sorti: 174 | print("{0:>{1}}".format(self.params[index], 175 | self.sizes[index] + 2), 176 | end=" | ") 177 | print('') 178 | 179 | def print_step(self, x, y, warning=False): 180 | 181 | print("{:>5d}".format(self.ite), end=" | ") 182 | 183 | m, s = divmod((datetime.now() - self.last_round).total_seconds(), 60) 184 | print("{:>02d}m{:>02d}s".format(int(m), int(s)), end=" | ") 185 | 186 | if self.ymax is None or self.ymax < y: 187 | self.ymax = y 188 | self.xmax = x 189 | print("{0}{2: >10.5f}{1}".format(BColours.MAGENTA, 190 | BColours.ENDC, 191 | y), 192 | end=" | ") 193 | 194 | for index in self.sorti: 195 | print("{0}{2: >{3}.{4}f}{1}".format( 196 | BColours.GREEN, BColours.ENDC, 197 | x[index], 198 | self.sizes[index] + 2, 199 | min(self.sizes[index] - 3, 6 - 2) 200 | ), 201 | end=" | ") 202 | else: 203 | print("{: >10.5f}".format(y), end=" | ") 204 | for index in self.sorti: 205 | print("{0: >{1}.{2}f}".format(x[index], 206 | self.sizes[index] + 2, 207 | min(self.sizes[index] - 3, 6 - 2)), 208 | end=" | ") 209 | 210 | if warning: 211 | print("{}Warning: Test point chose at " 212 | "random due to repeated sample.{}".format(BColours.RED, 213 | BColours.ENDC)) 214 | 215 | print() 216 | 217 | self.last_round = datetime.now() 218 | self.ite += 1 219 | 220 | def print_summary(self): 221 | pass 222 | -------------------------------------------------------------------------------- /dependencies/scipydirect_for_bo_bos.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | scipydirect - A python wrapper to the DIRECT algorithm. 4 | ======================================================= 5 | 6 | DIRECT is a method to solve global bound constraint optimization problems and 7 | was originally developed by D. R. Jones, C. D. Perttunen and B. E. Stuckmann. 8 | It is designed to find **global** solutions of mathematical optimization problems of the from 9 | 10 | .. math:: 11 | 12 | \min_ {x \in R^n} f(x) 13 | 14 | subject to 15 | 16 | .. math:: 17 | 18 | x_L \leq x \leq x_U 19 | 20 | Where :math:`x` are the optimization variables (with upper and lower 21 | bounds), :math:`f(x)` is the objective function. 22 | 23 | The DIRECT package uses the Fortran implementation of DIRECT written by 24 | Joerg.M.Gablonsky, DIRECT Version 2.0.4. More information on the DIRECT 25 | algorithm can be found in Gablonsky's `thesis `_. 26 | 27 | .. codeauthor:: Andreas Mayer , Amit Aides 28 | """ 29 | 30 | from __future__ import print_function 31 | import numpy as np 32 | try: 33 | from .direct import direct 34 | except ImportError: 35 | print('Fortran code not compiled, module not functional') 36 | direct = None 37 | 38 | 39 | __version_info__ = ('1', '1') 40 | __version__ = '.'.join(__version_info__) 41 | 42 | ERROR_MESSAGES = ( 43 | 'u[i] < l[i] for some i', 44 | 'maxf is too large', 45 | 'Initialization failed', 46 | 'There was an error in the creation of the sample points', 47 | 'An error occured while the function was sampled', 48 | 'Maximum number of levels has been reached.', 49 | ) 50 | 51 | SUCCESS_MESSAGES = ( 52 | 'Number of function evaluations done is larger then maxf', 53 | 'Number of iterations is equal to maxT', 54 | 'The best function value found is within fglper of the (known) global optimum', 55 | 'The volume of the hyperrectangle with best function value found is below volper', 56 | 'The volume of the hyperrectangle with best function value found is smaller then volper' 57 | ) 58 | 59 | # Class for returning the result of an optimization algorithm (copied from 60 | # scipy.optimize) 61 | class OptimizeResult(dict): 62 | """ Represents the optimization result. 63 | 64 | Attributes 65 | ---------- 66 | x : ndarray 67 | The solution of the optimization. 68 | success : bool 69 | Whether or not the optimizer exited successfully. 70 | status : int 71 | Termination status of the optimizer. Its value depends on the 72 | underlying solver. Refer to `message` for details. 73 | message : str 74 | Description of the cause of the termination. 75 | fun, jac, hess, hess_inv : ndarray 76 | Values of objective function, Jacobian, Hessian or its inverse (if 77 | available). The Hessians may be approximations, see the documentation 78 | of the function in question. 79 | nfev, njev, nhev : int 80 | Number of evaluations of the objective functions and of its 81 | Jacobian and Hessian. 82 | nit : int 83 | Number of iterations performed by the optimizer. 84 | maxcv : float 85 | The maximum constraint violation. 86 | 87 | Notes 88 | ----- 89 | There may be additional attributes not listed above depending of the 90 | specific solver. Since this class is essentially a subclass of dict 91 | with attribute accessors, one can see which attributes are available 92 | using the `keys()` method. 93 | """ 94 | def __getattr__(self, name): 95 | try: 96 | return self[name] 97 | except KeyError: 98 | raise AttributeError(name) 99 | 100 | __setattr__ = dict.__setitem__ 101 | __delattr__ = dict.__delitem__ 102 | 103 | def __repr__(self): 104 | if self.keys(): 105 | m = max(map(len, list(self.keys()))) + 1 106 | return '\n'.join([k.rjust(m) + ': ' + repr(v) 107 | for k, v in self.items()]) 108 | else: 109 | return self.__class__.__name__ + "()" 110 | 111 | def minimize(func, bounds=None, para_dict={}, nvar=None, args=(), disp=False, 112 | eps=1e-4, 113 | maxf=20000, 114 | maxT=6000, 115 | algmethod=0, 116 | fglobal=-1e100, 117 | fglper=0.01, 118 | volper=-1.0, 119 | sigmaper=-1.0, 120 | **kwargs 121 | ): 122 | """ 123 | Solve an optimization problem using the DIRECT (Dividing Rectangles) algorithm. 124 | It can be used to solve general nonlinear programming problems of the form: 125 | 126 | .. math:: 127 | 128 | \min_ {x \in R^n} f(x) 129 | 130 | subject to 131 | 132 | .. math:: 133 | 134 | x_L \leq x \leq x_U 135 | 136 | Where :math:`x` are the optimization variables (with upper and lower 137 | bounds), :math:`f(x)` is the objective function. 138 | 139 | Parameters 140 | ---------- 141 | func : objective function 142 | called as func(x, *args); does not need to be defined everywhere, 143 | raise an Exception where function is not defined 144 | 145 | bounds : array-like 146 | ``(min, max)`` pairs for each element in ``x``, defining 147 | the bounds on that parameter. 148 | 149 | nvar: integer 150 | Dimensionality of x (only needed if `bounds` is not defined) 151 | 152 | eps : float 153 | Ensures sufficient decrease in function value when a new potentially 154 | optimal interval is chosen. 155 | 156 | maxf : integer 157 | Approximate upper bound on objective function evaluations. 158 | 159 | .. note:: 160 | 161 | Maximal allowed value is 90000 see documentation of Fortran library. 162 | 163 | maxT : integer 164 | Maximum number of iterations. 165 | 166 | .. note:: 167 | 168 | Maximal allowed value is 6000 see documentation of Fortran library. 169 | 170 | algmethod : integer 171 | Whether to use the original or modified DIRECT algorithm. Possible values: 172 | 173 | * ``algmethod=0`` - use the original DIRECT algorithm 174 | * ``algmethod=1`` - use the modified DIRECT-l algorithm 175 | 176 | fglobal : float 177 | Function value of the global optimum. If this value is not known set this 178 | to a very large negative value. 179 | 180 | fglper : float 181 | Terminate the optimization when the percent error satisfies: 182 | 183 | .. math:: 184 | 185 | 100*(f_{min} - f_{global})/\max(1, |f_{global}|) \leq f_{glper} 186 | 187 | volper : float 188 | Terminate the optimization once the volume of a hyperrectangle is less 189 | than volper percent of the original hyperrectangel. 190 | 191 | sigmaper : float 192 | Terminate the optimization once the measure of the hyperrectangle is less 193 | than sigmaper. 194 | 195 | Returns 196 | ------- 197 | res : OptimizeResult 198 | The optimization result represented as a ``OptimizeResult`` object. 199 | Important attributes are: ``x`` the solution array, ``success`` a 200 | Boolean flag indicating if the optimizer exited successfully and 201 | ``message`` which describes the cause of the termination. See 202 | `OptimizeResult` for a description of other attributes. 203 | 204 | """ 205 | 206 | if bounds is None: 207 | l = np.zeros(nvar, dtype=np.float64) 208 | u = np.ones(nvar, dtype=np.float64) 209 | else: 210 | bounds = np.asarray(bounds) 211 | l = bounds[:, 0] 212 | u = bounds[:, 1] 213 | 214 | def _objective_wrap(x, iidata, ddata, cdata, n, iisize, idsize, icsize): 215 | """ 216 | To simplify the python objective we use a wrapper objective that complies 217 | with the required fortran objective. 218 | 219 | We return function value and a flag indicating whether function is defined. 220 | If function is not defined return np.nan 221 | """ 222 | try: 223 | # return func(x, *args), 0 224 | return func(x, para_dict), 0 225 | except: 226 | return np.nan, 1 227 | 228 | # 229 | # Dummy values so that the python wrapper will comply with the required 230 | # signature of the fortran library. 231 | # 232 | iidata = np.ones(0, dtype=np.int32) 233 | ddata = np.ones(0, dtype=np.float64) 234 | cdata = np.ones([0, 40], dtype=np.uint8) 235 | 236 | # 237 | # Call the DIRECT algorithm 238 | # 239 | x, fun, ierror = direct( 240 | _objective_wrap, 241 | eps, 242 | maxf, 243 | maxT, 244 | l, 245 | u, 246 | algmethod, 247 | 'dummylogfile', 248 | fglobal, 249 | fglper, 250 | volper, 251 | sigmaper, 252 | iidata, 253 | ddata, 254 | cdata, 255 | disp 256 | ) 257 | 258 | return OptimizeResult(x=x,fun=fun, status=ierror, success=ierror>0, 259 | message=SUCCESS_MESSAGES[ierror-1] if ierror>0 else ERROR_MESSAGES[abs(ierror)-1]) 260 | -------------------------------------------------------------------------------- /bayesian_optimization.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | 3 | import numpy as np 4 | import GPy 5 | from helper_funcs import UtilityFunction, unique_rows, PrintLog, acq_max 6 | import pickle 7 | from scipy.stats import expon, norm, gamma, truncexpon 8 | from tqdm import tqdm 9 | import itertools 10 | import matplotlib.pyplot as plt 11 | from sklearn.preprocessing import MinMaxScaler, StandardScaler 12 | import time 13 | 14 | init_path = "saved_init/" 15 | 16 | class BayesianOptimization(object): 17 | def __init__(self, f, gp_opt_schedule, no_BOS=True, \ 18 | use_init=False, log_file=None, save_init=False, save_init_file=None, \ 19 | max_running_hours=6, add_interm_fid=[], N=50, N_init_epochs=8, dim=3, parameter_names=[], verbose=1): 20 | """ 21 | gp_opt_schedule: we update the GP hyper-parameters via maximum likelihood every "gp_opt_schedule" iterations 22 | no_BOS: whether we run Bayesian Optimal Stopping 23 | use_init: whether to use existing initializations 24 | log_file: the log file in which the results are saved 25 | save_init: Boolean; whether to save the initialization 26 | save_init_file: the file name under which to save the initializations; only used if save_init==True 27 | max_running_hours: the maximum number of hours to run the script 28 | add_interm_fid: the intermedium results to be used in updating the GP surrogate function 29 | N: the maximum number of epochs 30 | N_init_epochs: the initial number of epochs to use in the BOS algorithm 31 | dim: the number of hyper-parameters 32 | verbose: verbosity 33 | """ 34 | 35 | self.use_init = use_init 36 | self.max_running_hours = max_running_hours 37 | 38 | self.add_interm_fid = add_interm_fid 39 | 40 | self.N = N 41 | self.N_init_epochs = N_init_epochs 42 | 43 | self.log_file = log_file 44 | 45 | self.no_BOS = no_BOS 46 | 47 | self.curr_max_X = None 48 | self.curr_opt_lc = None 49 | self.incumbent = None 50 | 51 | self.action_regions = None 52 | self.grid_St = None 53 | 54 | self.keys = parameter_names 55 | 56 | # Find number of parameters 57 | self.dim = dim 58 | 59 | # assume all hyper-parameters are bounded in the range [0.0, 1.0] 60 | bounds = np.zeros((self.dim, 2)) 61 | bounds[:, 1] = 1.0 62 | self.bounds = bounds 63 | 64 | # The function to be optimized 65 | self.f = f 66 | 67 | # Initialization flag 68 | self.initialized = False 69 | 70 | # Initialization lists --- stores starting points before process begins 71 | self.init_points = [] 72 | self.x_init = [] 73 | self.y_init = [] 74 | 75 | # Numpy array place holders 76 | self.X = None 77 | self.Y = None 78 | 79 | self.time_started = 0 80 | 81 | # Counter of iterations 82 | self.i = 0 83 | 84 | # Utility Function placeholder 85 | self.util = None 86 | 87 | # PrintLog object 88 | self.plog = PrintLog(self.keys) 89 | 90 | self.save_init = save_init 91 | self.save_init_file = save_init_file 92 | 93 | self.gp_opt_schedule = gp_opt_schedule 94 | 95 | self.res = {} 96 | self.res['all'] = {'params':[], 'init':[], 'eval_times':[], 'BOS_times':[], 'epoch_values':[], 'time_started':0} 97 | 98 | self.verbose = verbose 99 | 100 | def init(self, init_points): 101 | """ 102 | randomly sample "init_points" points as the initializations 103 | """ 104 | 105 | # Generate random points 106 | self.time_started = time.time() 107 | self.res['all']['time_started'] = self.time_started 108 | 109 | l = [np.random.uniform(x[0], x[1], size=init_points) 110 | for x in self.bounds] 111 | 112 | self.init_points = l 113 | 114 | # Create empty list to store the new values of the function 115 | y_init = [] 116 | 117 | fid_inits = [] 118 | for x in self.init_points: 119 | curr_y, fid, BOS_time, epoch_values, time_func_eval = self.f(x, no_stop=True, bo_iteration=0, \ 120 | N=self.N, N_init_epochs=self.N_init_epochs) 121 | self.res['all']['eval_times'].append(time_func_eval) 122 | self.res['all']['BOS_times'].append(BOS_time) 123 | self.res['all']['epoch_values'].append(epoch_values) 124 | 125 | y_init.append(curr_y) # we need to negate the error because the program assumes maximization 126 | fid_inits.append(1.0) 127 | 128 | if self.verbose: 129 | self.plog.print_step(x, y_init[-1]) 130 | 131 | self.X = np.concatenate((np.asarray(self.init_points), np.array(fid_inits).reshape(-1, 1)), axis=1) 132 | self.Y = np.asarray(y_init) 133 | 134 | self.incumbent = np.max(y_init) 135 | self.initialized = True 136 | 137 | init = {"X":self.X, "Y":self.Y, "init_epoch_values":self.res['all']['epoch_values'],\ 138 | "init_eval_times":self.res['all']['eval_times'], \ 139 | "init_time_started":self.res['all']['time_started']} 140 | self.res['all']['init'] = init 141 | 142 | if self.save_init: 143 | pickle.dump(init, open(init_path + self.save_init_file, "wb")) 144 | 145 | 146 | def maximize(self, 147 | init_points=5, 148 | n_iter=25, 149 | acq='ucb', 150 | kappa=2.576, 151 | use_fixed_kappa=True, 152 | kappa_scale=0.2, 153 | xi=0.0,): 154 | """ 155 | Main optimization method. 156 | 157 | Parameters 158 | ---------- 159 | :param init_points: 160 | Number of randomly chosen points to sample the 161 | target function before fitting the gp. 162 | 163 | :param n_iter: 164 | Total number of times the process is to repeated. Note that 165 | currently this methods does not have stopping criteria (due to a 166 | number of reasons), therefore the total number of points to be 167 | sampled must be specified. 168 | 169 | :param acq: 170 | Acquisition function to be used, defaults to Upper Confidence Bound. 171 | 172 | Returns 173 | ------- 174 | :return: Nothing 175 | """ 176 | # Reset timer 177 | self.plog.reset_timer() 178 | 179 | # Set acquisition function 180 | self.util = UtilityFunction(kind=acq, kappa=kappa, use_fixed_kappa=use_fixed_kappa, kappa_scale=kappa_scale, xi=xi) 181 | 182 | # Initialize x, y and find current y_max 183 | if not self.initialized: 184 | if self.verbose: 185 | self.plog.print_header() 186 | 187 | # if we would like to use existing initializations 188 | if self.use_init != None: 189 | init = pickle.load(open(init_path + self.use_init, "rb")) 190 | 191 | print("[loaded init: {0}]".format(init["X"])) 192 | 193 | self.X, self.Y = init["X"], init["Y"] 194 | 195 | self.res['all']['eval_times'] = init["init_eval_times"] 196 | self.res['all']['epoch_values'] = init["init_epoch_values"] 197 | 198 | self.time_started = time.time() - (init["init_eval_times"][-1][-1] - init["init_time_started"]) 199 | self.res['all']['time_started'] = self.time_started 200 | 201 | # if we use BOS, we add the intermedium results as inputs to the GP surrogate function 202 | if not self.no_BOS: 203 | N = self.N 204 | y_init = [] 205 | y_add_all = [] 206 | for i in range(len(self.X)): 207 | epoch_values = self.res['all']['epoch_values'][i] 208 | curr_y = self.Y[i] 209 | 210 | y_init.append(curr_y) 211 | 212 | #### optionally add some intermediate results to the GP model 213 | if self.add_interm_fid != []: 214 | y_add = list(np.array(epoch_values)[self.add_interm_fid]) 215 | y_add_all += y_add 216 | 217 | x_add = [] 218 | for fid in self.add_interm_fid: 219 | x_add.append(list(self.X[i, :-1]) + [fid / float(N)]) 220 | x_add = np.array(x_add) 221 | self.X = np.concatenate((self.X, x_add), axis=0) 222 | y_init += y_add_all 223 | self.Y = np.asarray(y_init) 224 | 225 | self.incumbent = np.max(self.Y) 226 | self.initialized = True 227 | self.res['all']['init'] = init 228 | 229 | print("Using pre-existing initializations with {0} points".format(len(self.Y))) 230 | else: 231 | self.init(init_points) 232 | 233 | y_max = self.Y.max() 234 | 235 | # Find unique rows of X to avoid GP from breaking 236 | ur = unique_rows(self.X) 237 | 238 | self.gp = GPy.models.GPRegression(self.X[ur], self.Y[ur].reshape(-1, 1), \ 239 | GPy.kern.Matern52(input_dim=self.X.shape[1], variance=1.0, lengthscale=1.0)) 240 | 241 | self.gp.optimize_restarts(num_restarts = 10, messages=False) 242 | self.gp_params = self.gp.parameters 243 | print("---Optimized hyper: ", self.gp) 244 | 245 | x_max = acq_max(ac=self.util.utility, 246 | gp=self.gp, 247 | y_max=y_max, 248 | bounds=self.bounds, 249 | iteration=1) 250 | 251 | 252 | #### save the posterior stds to be used for the second criteria for early stopping 253 | N = self.N 254 | num_init_curve=self.N_init_epochs 255 | all_fids = np.linspace(0, 1, N+1)[1:] 256 | all_stds = [] 257 | for fid in all_fids: 258 | x_fid = np.append(x_max, fid).reshape(1, -1) 259 | mean, var = self.gp.predict(x_fid) 260 | std = np.sqrt(var)[0][0] 261 | all_stds.append(std) 262 | 263 | # Print new header 264 | if self.verbose: 265 | self.plog.print_header(initialization=False) 266 | for i in range(n_iter): 267 | current_time = time.time() 268 | if current_time - self.time_started > self.max_running_hours * 3600: 269 | break 270 | 271 | # Test if x_max is repeated, if it is, draw another one at random 272 | # If it is repeated, print a warning 273 | pwarning = False 274 | if np.any(np.all(self.X[:, :-1] - x_max == 0, axis=1)): 275 | print("X repeated: ", x_max) 276 | x_max = np.random.uniform(self.bounds[:, 0], 277 | self.bounds[:, 1], 278 | size=self.bounds.shape[0]) 279 | pwarning = True 280 | 281 | curr_y, curr_fid, BOS_time, epoch_values, time_func_eval = self.f(x_max, no_stop=self.no_BOS, \ 282 | incumbent=y_max, bo_iteration=i, stds=all_stds, N=self.N, N_init_epochs=self.N_init_epochs) 283 | self.res['all']['eval_times'].append(time_func_eval) 284 | self.res['all']['BOS_times'].append(BOS_time) 285 | self.res['all']['epoch_values'].append(epoch_values) 286 | 287 | self.Y = np.append(self.Y, curr_y) # the negative sign converts minimization to maximization problem 288 | self.X = np.vstack((self.X, np.append(x_max, curr_fid).reshape(1, -1))) 289 | 290 | ##### optionally add some results from intermediate epochs to the input of GP 291 | if (not self.no_BOS) and (self.add_interm_fid != []): 292 | N = self.N 293 | # y_add = np.array(epoch_values)[self.add_interm_fid] 294 | # self.Y = np.append(self.Y, y_add) 295 | for fid in self.add_interm_fid: 296 | if fid/float(N) < curr_fid: 297 | x_add = np.append(self.X[-1, :-1], fid / float(N)).reshape(1, -1) 298 | self.X = np.vstack((self.X, x_add)) 299 | 300 | y_add = epoch_values[fid] 301 | self.Y = np.append(self.Y, y_add) 302 | 303 | print("y_max: ", y_max) 304 | # Update maximum value to search for next probe point. 305 | if self.Y[-1] > y_max: 306 | y_max = self.Y[-1] 307 | self.incumbent = self.Y[-1] 308 | 309 | # # Updating the GP. 310 | ur = unique_rows(self.X) 311 | 312 | self.gp.set_XY(X=self.X[ur], Y=self.Y[ur].reshape(-1, 1)) 313 | 314 | if i >= self.gp_opt_schedule and i % self.gp_opt_schedule == 0: 315 | self.gp.optimize_restarts(num_restarts = 10, messages=False) 316 | self.gp_params = self.gp.parameters 317 | 318 | print("---Optimized hyper: ", self.gp) 319 | 320 | x_max = acq_max(ac=self.util.utility, 321 | gp=self.gp, 322 | y_max=y_max, 323 | bounds=self.bounds, 324 | iteration=i+2) 325 | 326 | 327 | #### save the posterior stds to be used for the second criteria for early stopping 328 | N = self.N 329 | num_init_curve=self.N_init_epochs 330 | all_fids = np.linspace(0, 1, N+1)[1:] 331 | all_stds = [] 332 | for fid in all_fids: 333 | x_fid = np.append(x_max, fid).reshape(1, -1) 334 | mean, var = self.gp.predict(x_fid) 335 | std = np.sqrt(var)[0][0] 336 | all_stds.append(std) 337 | 338 | # Print stuff 339 | if self.verbose: 340 | self.plog.print_step(x_max, self.Y[-1], warning=pwarning) 341 | 342 | # Keep track of total number of iterations 343 | self.i += 1 344 | 345 | self.curr_max_X = self.X[self.Y.argmax()] 346 | 347 | x_max_param = self.X[self.Y.argmax(), :-1] 348 | 349 | self.res['all']['params'].append(dict(zip(self.keys, self.X[-1]))) 350 | 351 | if self.log_file is not None: 352 | pickle.dump(self.res, open(self.log_file, "wb")) 353 | 354 | -------------------------------------------------------------------------------- /objective_functions.py: -------------------------------------------------------------------------------- 1 | ''' 2 | This script contains several objective functions implemented for the BO-BOS algorithm, including 3 | (1) Logistic regression trained on the MNIST dataset 4 | (2) Convolutional neural network (CNN) trained on the CIFAR-10 dataset 5 | (3) Convolutional neural network (CNN) trained on the SVHN dataset 6 | ''' 7 | 8 | from __future__ import print_function 9 | import tensorflow as tf 10 | import scipy.io as sio 11 | import numpy as np 12 | from sklearn.model_selection import train_test_split 13 | from sklearn import preprocessing, linear_model 14 | from sklearn.datasets import make_classification 15 | from sklearn.metrics import accuracy_score 16 | import matplotlib.pyplot as plt 17 | from bayesian_optimization import BayesianOptimization 18 | import pickle 19 | import GPy 20 | from tqdm import tqdm 21 | from sklearn.preprocessing import MinMaxScaler 22 | import time 23 | import keras 24 | from keras.datasets import cifar10 25 | from keras.preprocessing.image import ImageDataGenerator 26 | from keras.models import Sequential 27 | from keras.layers import Dense, Dropout, Activation, Flatten 28 | from keras.layers import Conv2D, MaxPooling2D 29 | from keras import regularizers 30 | 31 | from bos_function import run_BOS 32 | 33 | import os 34 | os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" 35 | os.environ["CUDA_VISIBLE_DEVICES"] = '0' 36 | os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' 37 | config = tf.ConfigProto() 38 | config.gpu_options.allow_growth = True 39 | 40 | mnist_path = "datasets/mnist/" 41 | svhn_path = "datasets/svhn/" 42 | 43 | kappa = 2.0 # the \kappa parameter to be used in the second criteria for early stopping 44 | 45 | 46 | def objective_function_LR_MNIST(param, no_stop=False, incumbent=None, bo_iteration=0, stds=[], N=50, N_init_epochs=8): 47 | ''' 48 | param: parameters 49 | no_stop: if TRUE, then the function evaluation never early-stops 50 | incumbent: the currently found maximum value 51 | bo_iteration: the BO iteration 52 | stds: the standard deviations corresponding to different input number of epochs; used in the second criteria for early stopping 53 | N: the maximum number of epochs 54 | N_init_epochs: the number of initial epochs used in BOS 55 | ''' 56 | 57 | training_epochs = N 58 | num_init_curve=N_init_epochs 59 | time_BOS = -1 # the time spent in solving the BOS problem, just for reference 60 | 61 | #### load the MNIST dataset 62 | loaded_data = pickle.load(open(mnist_path + "mnist_dataset.p", "rb")) 63 | X_train = loaded_data["X_train"] 64 | X_test = loaded_data["X_test"] 65 | Y_train = loaded_data["Y_train"] 66 | Y_test = loaded_data["Y_test"] 67 | n_ft, n_classes = X_train.shape[1], Y_train.shape[1] 68 | 69 | # transform the input to the real range of the hyper-parameters, to be used for model training 70 | parameter_range = [[20, 500], [1e-6, 1.0], [1e-3, 0.10]] 71 | batch_size_ = param[0] 72 | batch_size = int(batch_size_ * (parameter_range[0][1] - parameter_range[0][0]) + parameter_range[0][0]) 73 | C_ = param[1] 74 | C = C_ * (parameter_range[1][1] - parameter_range[1][0]) + parameter_range[1][0] 75 | learning_rate_ = param[2] 76 | learning_rate = learning_rate_ * (parameter_range[2][1] - parameter_range[2][0]) + parameter_range[2][0] 77 | 78 | print("[Evaluating parameters: batch size={0}/C={1}/lr={2}]".format(batch_size, C, learning_rate)) 79 | 80 | 81 | ### The tensorflow model of logistic regression is built below 82 | 83 | # tf Graph Input 84 | x = tf.placeholder(tf.float32, [None, n_ft]) # mnist data image of shape 28*28=784 85 | y = tf.placeholder(tf.float32, [None, n_classes]) # 0-9 digits recognition => 10 classes 86 | 87 | # Set model weights 88 | W = tf.Variable(tf.zeros([n_ft, n_classes])) 89 | b = tf.Variable(tf.zeros([n_classes])) 90 | 91 | # Construct model 92 | pred = tf.nn.softmax(tf.matmul(x, W) + b) # Softmax 93 | 94 | regularizers = tf.nn.l2_loss(W) 95 | 96 | # Minimize error using cross entropy 97 | cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1) + C * regularizers) 98 | # Gradient Descent 99 | optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost) 100 | 101 | # Test model 102 | correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1)) 103 | # Calculate accuracy 104 | accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32)) 105 | neg_log_loss = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1)) 106 | 107 | # Initialize the variables (i.e. assign their default value) 108 | init = tf.global_variables_initializer() 109 | 110 | val_epochs = [] 111 | time_func_eval = [] 112 | with tf.Session(config=config) as sess: 113 | # Run the initializer 114 | sess.run(init) 115 | # iteration over the number of epochs 116 | for epoch in tqdm(range(training_epochs)): 117 | avg_cost = 0.0 118 | total_batch = int(X_train.shape[0] / batch_size) 119 | 120 | # Loop over all batches for SGD 121 | for i in range(total_batch): 122 | batch_xs, batch_ys = X_train[(i*batch_size):((i+1)*batch_size), :], Y_train[(i*batch_size):((i+1)*batch_size), :] 123 | _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs, y: batch_ys}) 124 | avg_cost += c / total_batch 125 | 126 | # calculate validation loss 127 | # val_log_loss = neg_log_loss.eval({x:X_test, y:Y_test}) 128 | val_acc = accuracy.eval({x:X_test, y:Y_test}) 129 | val_epochs.append(val_acc) 130 | 131 | time_func_eval.append(time.time()) 132 | 133 | # run BOS after observing "num_init_curve" initial number of training epochs 134 | if (epoch+1 == num_init_curve) and (not no_stop): 135 | print("initial learning errors: ", 1 - np.array(val_epochs)) 136 | time_start = time.time() 137 | action_regions, grid_St = run_BOS(1 - np.array(val_epochs), incumbent, training_epochs, bo_iteration) 138 | time_BOS = time.time() - time_start 139 | 140 | # start using the decision rules obtained from BOS 141 | if (epoch >= num_init_curve) and (not no_stop): 142 | state = np.sum(1 - np.array(val_epochs[num_init_curve:])) / (epoch - num_init_curve + 1) 143 | ind_state = np.max(np.nonzero(state > grid_St)[0]) 144 | action_to_take = action_regions[epoch - num_init_curve, ind_state] 145 | 146 | # condition 1: if action_to_take == 2, then the optimal decision is to stop the current training 147 | if action_to_take == 2: 148 | # condition 2: the second criteria used in the BO-BOS algorithm 149 | if (kappa * stds[epoch] >= stds[-1]) or (stds == []): 150 | break 151 | 152 | return val_epochs[-1], (epoch + 1) / training_epochs, time_BOS, val_epochs, time_func_eval 153 | 154 | 155 | def objective_function_CNN_CIFAR_10(param, no_stop=False, incumbent=None, bo_iteration=0, stds=[], N=50, N_init_epochs=8): 156 | ''' 157 | param: parameters 158 | no_stop: if TRUE, then the function evaluation never early-stops 159 | incumbent: the currently found maximum value 160 | bo_iteration: the BO iteration 161 | stds: the standard deviations corresponding to different input number of epochs; used in the second criteria for early stopping 162 | N: the maximum number of epochs 163 | N_init_epochs: the number of initial epochs used in BOS 164 | ''' 165 | 166 | data_augmentation = True 167 | 168 | training_epochs = N 169 | num_init_curve=N_init_epochs 170 | time_BOS = -1 # the time spent in solving the BOS problem, just for reference 171 | 172 | #### load the CIFAR-10 dataset 173 | num_classes = 10 174 | (x_train, y_train), (x_test, y_test) = cifar10.load_data() 175 | x_train = x_train.astype('float32') 176 | x_test = x_test.astype('float32') 177 | x_train /= 255 178 | x_test /= 255 179 | # Convert class vectors to binary class matrices. 180 | y_train = keras.utils.to_categorical(y_train, num_classes) 181 | y_test = keras.utils.to_categorical(y_test, num_classes) 182 | 183 | 184 | # transform the input to the real range of the hyper-parameters, to be used for model training 185 | parameter_range = [[32, 512], [1e-7, 0.1], [1e-7, 1e-3], [1e-7, 1e-3], [128, 256], [256, 512]] 186 | batch_size_ = param[0] 187 | batch_size = int(batch_size_ * (parameter_range[0][1] - parameter_range[0][0]) + parameter_range[0][0]) 188 | learning_rate_ = param[1] 189 | learning_rate = learning_rate_ * (parameter_range[1][1] - parameter_range[1][0]) + parameter_range[1][0] 190 | learning_rate_decay_ = param[2] 191 | learning_rate_decay = learning_rate_decay_ * (parameter_range[2][1] - parameter_range[2][0]) + parameter_range[2][0] 192 | l2_regular_ = param[3] 193 | l2_regular = l2_regular_ * (parameter_range[3][1] - parameter_range[3][0]) + parameter_range[3][0] 194 | conv_filters_ = param[4] 195 | conv_filters = int(conv_filters_ * (parameter_range[4][1] - parameter_range[4][0]) + parameter_range[4][0]) 196 | dense_units_ = param[5] 197 | dense_units = int(dense_units_ * (parameter_range[5][1] - parameter_range[5][0]) + parameter_range[5][0]) 198 | 199 | print("[parameters: batch_size: {0}/lr: {1}/lr_decay: {2}/l2: {3}/conv_filters: {4}/dense_unit: {5}]".format(\ 200 | batch_size, learning_rate, learning_rate_decay, l2_regular, conv_filters, dense_units)) 201 | 202 | num_conv_layers = 3 203 | dropout_rate = 0.0 204 | kernel_size = 5 205 | pool_size = 3 206 | 207 | 208 | # build the CNN model using Keras 209 | model = Sequential() 210 | model.add(Conv2D(conv_filters, (kernel_size, kernel_size), padding='same', 211 | input_shape=x_train.shape[1:], kernel_regularizer=regularizers.l2(l2_regular))) 212 | model.add(Activation('relu')) 213 | model.add(MaxPooling2D(pool_size=(pool_size, pool_size))) 214 | model.add(Dropout(dropout_rate)) 215 | 216 | model.add(Conv2D(conv_filters, (kernel_size, kernel_size), padding='same', kernel_regularizer=regularizers.l2(l2_regular))) 217 | model.add(Activation('relu')) 218 | model.add(MaxPooling2D(pool_size=(pool_size, pool_size))) 219 | model.add(Dropout(dropout_rate)) 220 | 221 | if num_conv_layers >= 3: 222 | model.add(Conv2D(conv_filters, (kernel_size, kernel_size), padding='same', kernel_regularizer=regularizers.l2(l2_regular))) 223 | model.add(Activation('relu')) 224 | model.add(MaxPooling2D(pool_size=(pool_size, pool_size))) 225 | model.add(Dropout(dropout_rate)) 226 | 227 | model.add(Flatten()) 228 | model.add(Dense(dense_units, kernel_regularizer=regularizers.l2(l2_regular))) 229 | model.add(Activation('relu')) 230 | model.add(Dropout(dropout_rate)) 231 | model.add(Dense(num_classes)) 232 | model.add(Activation('softmax')) 233 | 234 | opt = keras.optimizers.rmsprop(lr=learning_rate, decay=learning_rate_decay) 235 | 236 | model.compile(loss='categorical_crossentropy', 237 | optimizer=opt, 238 | metrics=['accuracy']) 239 | 240 | time_start = time.time() 241 | 242 | val_epochs = [] 243 | time_func_eval = [] 244 | for epoch in range(training_epochs): 245 | model.fit(x_train, y_train, 246 | batch_size=batch_size, 247 | epochs=1, 248 | validation_data=(x_test, y_test), 249 | shuffle=True, verbose=0) 250 | scores = model.evaluate(x_test, y_test, verbose=0) 251 | val_epochs.append(scores[1]) 252 | time_func_eval.append(time.time()) 253 | 254 | # run BOS after observing "num_init_curve" initial number of training epochs 255 | if (epoch+1 == num_init_curve) and (not no_stop): 256 | print("initial learning errors: ", 1 - np.array(val_epochs)) 257 | time_start = time.time() 258 | action_regions, grid_St = run_BOS(1 - np.array(val_epochs), incumbent, training_epochs, bo_iteration) 259 | time_BOS = time.time() - time_start 260 | 261 | # start using the decision rules obtained from BOS 262 | if (epoch >= num_init_curve) and (not no_stop): 263 | state = np.sum(1 - np.array(val_epochs[num_init_curve:])) / (epoch - num_init_curve + 1) 264 | ind_state = np.max(np.nonzero(state > grid_St)[0]) 265 | action_to_take = action_regions[epoch - num_init_curve, ind_state] 266 | 267 | # condition 1: if action_to_take == 2, then the optimal decision is to stop the current training 268 | if action_to_take == 2: 269 | # condition 2: the second criteria used in the BO-BOS algorithm 270 | if (kappa * stds[epoch] >= stds[-1]) or (stds == []): 271 | break 272 | 273 | return val_epochs[-1], (epoch + 1) / training_epochs, time_BOS, val_epochs, time_func_eval 274 | 275 | 276 | def objective_function_CNN_SVHN(param, no_stop=False, incumbent=None, bo_iteration=0, stds=[], N=50, N_init_epochs=8): 277 | ''' 278 | param: parameters 279 | no_stop: if TRUE, then the function evaluation never early-stops 280 | incumbent: the currently found maximum value 281 | bo_iteration: the BO iteration 282 | stds: the standard deviations corresponding to different input number of epochs; used in the second criteria for early stopping 283 | N: the maximum number of epochs 284 | N_init_epochs: the number of initial epochs used in BOS 285 | ''' 286 | 287 | data_augmentation = True 288 | 289 | training_epochs = N 290 | num_init_curve=N_init_epochs 291 | time_BOS = -1 # the time spent in solving the BOS problem, just for reference 292 | 293 | # load the svhn dataset 294 | train_data = loadmat(svhn_path + "train_32x32.mat") 295 | test_data = loadmat(svhn_path + "test_32x32.mat") 296 | y_train = keras.utils.to_categorical(train_data['y'][:,0])[:,1:] 297 | y_test = keras.utils.to_categorical(test_data['y'][:,0])[:,1:] 298 | x_train = np.zeros((73257, 32, 32, 3)) 299 | for i in range(len(x_train)): 300 | x_train[i] = train_data['X'].T[i].T.astype('float32')/255 301 | x_test = np.zeros((26032, 32, 32, 3)) 302 | for i in range(len(x_test)): 303 | x_test[i] = test_data['X'].T[i].T.astype('float32')/255 304 | 305 | 306 | # transform the input to the real range of the hyper-parameters, to be used for model training 307 | parameter_range = [[32, 512], [1e-7, 0.1], [1e-7, 1e-3], [1e-7, 1e-3], [128, 256], [256, 512]] 308 | batch_size_ = param[0] 309 | batch_size = int(batch_size_ * (parameter_range[0][1] - parameter_range[0][0]) + parameter_range[0][0]) 310 | learning_rate_ = param[1] 311 | learning_rate = learning_rate_ * (parameter_range[1][1] - parameter_range[1][0]) + parameter_range[1][0] 312 | learning_rate_decay_ = param[2] 313 | learning_rate_decay = learning_rate_decay_ * (parameter_range[2][1] - parameter_range[2][0]) + parameter_range[2][0] 314 | l2_regular_ = param[3] 315 | l2_regular = l2_regular_ * (parameter_range[3][1] - parameter_range[3][0]) + parameter_range[3][0] 316 | conv_filters_ = param[4] 317 | conv_filters = int(conv_filters_ * (parameter_range[4][1] - parameter_range[4][0]) + parameter_range[4][0]) 318 | dense_units_ = param[5] 319 | dense_units = int(dense_units_ * (parameter_range[5][1] - parameter_range[5][0]) + parameter_range[5][0]) 320 | 321 | print("[parameters: batch_size: {0}/lr: {1}/lr_decay: {2}/l2: {3}/conv_filters: {4}/dense_unit: {5}]".format(\ 322 | batch_size, learning_rate, learning_rate_decay, l2_regular, conv_filters, dense_units)) 323 | 324 | num_conv_layers = 3 325 | dropout_rate = 0.0 326 | kernel_size = 5 327 | pool_size = 3 328 | 329 | 330 | # build the CNN model using Keras 331 | model = Sequential() 332 | model.add(Conv2D(conv_filters, (kernel_size, kernel_size), padding='same', 333 | input_shape=x_train.shape[1:], kernel_regularizer=regularizers.l2(l2_regular))) 334 | model.add(Activation('relu')) 335 | model.add(MaxPooling2D(pool_size=(pool_size, pool_size))) 336 | model.add(Dropout(dropout_rate)) 337 | 338 | model.add(Conv2D(conv_filters, (kernel_size, kernel_size), padding='same', kernel_regularizer=regularizers.l2(l2_regular))) 339 | model.add(Activation('relu')) 340 | model.add(MaxPooling2D(pool_size=(pool_size, pool_size))) 341 | model.add(Dropout(dropout_rate)) 342 | 343 | if num_conv_layers >= 3: 344 | model.add(Conv2D(conv_filters, (kernel_size, kernel_size), padding='same', kernel_regularizer=regularizers.l2(l2_regular))) 345 | model.add(Activation('relu')) 346 | model.add(MaxPooling2D(pool_size=(pool_size, pool_size))) 347 | model.add(Dropout(dropout_rate)) 348 | 349 | model.add(Flatten()) 350 | model.add(Dense(dense_units, kernel_regularizer=regularizers.l2(l2_regular))) 351 | model.add(Activation('relu')) 352 | model.add(Dropout(dropout_rate)) 353 | model.add(Dense(num_classes)) 354 | model.add(Activation('softmax')) 355 | 356 | opt = keras.optimizers.rmsprop(lr=learning_rate, decay=learning_rate_decay) 357 | 358 | model.compile(loss='categorical_crossentropy', 359 | optimizer=opt, 360 | metrics=['accuracy']) 361 | 362 | time_start = time.time() 363 | 364 | val_epochs = [] 365 | time_func_eval = [] 366 | for epoch in range(training_epochs): 367 | model.fit(x_train, y_train, 368 | batch_size=batch_size, 369 | epochs=1, 370 | validation_data=(x_test, y_test), 371 | shuffle=True, verbose=0) 372 | scores = model.evaluate(x_test, y_test, verbose=0) 373 | val_epochs.append(scores[1]) 374 | time_func_eval.append(time.time()) 375 | 376 | # run BOS after observing "num_init_curve" initial number of training epochs 377 | if (epoch+1 == num_init_curve) and (not no_stop): 378 | print("initial learning errors: ", 1 - np.array(val_epochs)) 379 | time_start = time.time() 380 | action_regions, grid_St = run_BOS(1 - np.array(val_epochs), incumbent, training_epochs, bo_iteration) 381 | time_BOS = time.time() - time_start 382 | 383 | # start using the decision rules obtained from BOS 384 | if (epoch >= num_init_curve) and (not no_stop): 385 | state = np.sum(1 - np.array(val_epochs[num_init_curve:])) / (epoch - num_init_curve + 1) 386 | ind_state = np.max(np.nonzero(state > grid_St)[0]) 387 | action_to_take = action_regions[epoch - num_init_curve, ind_state] 388 | 389 | # condition 1: if action_to_take == 2, then the optimal decision is to stop the current training 390 | if action_to_take == 2: 391 | # condition 2: the second criteria used in the BO-BOS algorithm 392 | if (kappa * stds[epoch] >= stds[-1]) or (stds == []): 393 | break 394 | 395 | return val_epochs[-1], (epoch + 1) / training_epochs, time_BOS, val_epochs, time_func_eval 396 | 397 | 398 | -------------------------------------------------------------------------------- /analyze_results.ipynb: -------------------------------------------------------------------------------- 1 | { 2 | "cells": [ 3 | { 4 | "cell_type": "code", 5 | "execution_count": 17, 6 | "metadata": { 7 | "ExecuteTime": { 8 | "end_time": "2019-05-04T11:02:02.360135Z", 9 | "start_time": "2019-05-04T11:02:02.356421Z" 10 | } 11 | }, 12 | "outputs": [], 13 | "source": [ 14 | "import numpy as np\n", 15 | "import pickle\n", 16 | "import matplotlib.pyplot as plt\n", 17 | "\n" 18 | ] 19 | }, 20 | { 21 | "cell_type": "markdown", 22 | "metadata": { 23 | "ExecuteTime": { 24 | "end_time": "2018-12-30T05:55:25.479560Z", 25 | "start_time": "2018-12-30T05:55:25.475750Z" 26 | }, 27 | "heading_collapsed": true 28 | }, 29 | "source": [ 30 | "# analyze individual runs" 31 | ] 32 | }, 33 | { 34 | "cell_type": "code", 35 | "execution_count": 5, 36 | "metadata": { 37 | "ExecuteTime": { 38 | "end_time": "2019-05-02T07:07:46.120452Z", 39 | "start_time": "2019-05-02T07:07:46.104896Z" 40 | }, 41 | "hidden": true, 42 | "scrolled": true 43 | }, 44 | "outputs": [ 45 | { 46 | "name": "stdout", 47 | "output_type": "stream", 48 | "text": [ 49 | "2650\n", 50 | "2822\n" 51 | ] 52 | } 53 | ], 54 | "source": [ 55 | "\n", 56 | "run_iter = 2\n", 57 | "\n", 58 | "bos_no_stop = pickle.load(open(\"saved_results/bos_mnist_no_stop_\" + str(run_iter) + \".p\", \"rb\"))\n", 59 | "bos_with_stop = pickle.load(open(\"saved_results/bos_mnist_with_stop_\" + str(run_iter) + \".p\", \"rb\"))\n", 60 | "\n", 61 | "epoch_values = bos_no_stop[\"all\"][\"epoch_values\"]\n", 62 | "conc_epoch_values = [e for epoch in epoch_values for e in epoch]\n", 63 | "conc_epoch_values_cum = 1.0 - np.maximum.accumulate(conc_epoch_values)\n", 64 | "print(len(conc_epoch_values_cum))\n", 65 | "\n", 66 | "epoch_values = bos_with_stop[\"all\"][\"epoch_values\"]\n", 67 | "conc_epoch_values = [e for epoch in epoch_values for e in epoch]\n", 68 | "conc_epoch_values_cum_bos = 1.0 - np.maximum.accumulate(conc_epoch_values)\n", 69 | "print(len(conc_epoch_values_cum_bos))\n" 70 | ] 71 | }, 72 | { 73 | "cell_type": "code", 74 | "execution_count": null, 75 | "metadata": { 76 | "hidden": true 77 | }, 78 | "outputs": [], 79 | "source": [] 80 | }, 81 | { 82 | "cell_type": "code", 83 | "execution_count": 6, 84 | "metadata": { 85 | "ExecuteTime": { 86 | "end_time": "2019-05-02T07:07:52.308301Z", 87 | "start_time": "2019-05-02T07:07:52.057051Z" 88 | }, 89 | "hidden": true, 90 | "scrolled": false 91 | }, 92 | "outputs": [ 93 | { 94 | "data": { 95 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAgIAAAGKCAYAAAB+TinkAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4xLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvDW2N/gAAIABJREFUeJzt3Xm4HHWZ9//3fXKSnJCFkJVASIKRJQRZNBr2RYGoyMADKkoAYXTgBz8VxlF0GAXkYVBQGJdBhEEEEQFBgSA6OCiRLQhByWAAoyCRGLYACTmBkO37/FGdeDg5S59Od/X2fl1XX92nqrrq7qLD+Zy7vlUVKSUkSVJzaql2AZIkqXoMApIkNTGDgCRJTcwgIElSEzMISJLUxAwCkiQ1MYOAJElNzCAgaYOIuCoiUkT8tIt5hxfmrSn8fEDh54UR0dZp2Tsj4qpO672zw88tEfHZiPhDRKyIiKURMS8izivMn11Yd0+PSRXaDVJTMQhI6uyvwAciYmyn6ScDC7tYfgxweh+3cRbwb8BXgV2AvYHzgcGF+UcC4zo8AD7ZadozfdympC4YBCR19ifgAeCE9RMiYgJwMPD9Lpb/BvCFiBjVh20cAXwvpfTDlNKTKaX5KaUbUkr/DJBSejml9Nz6R+E9yzpOSymtLeXDSXozg4CkrlwOfCIiovDzJ4Bf0XVH4DLgOeDsPqz/WWD/iNh6k6qUtMkMApK6chMwAjggIvoB/0gWDrqyBvg8cHJEbFfk+v8ZGAQ8ExF/jIirI2JmRLRuauGS+sYgIGkjKaWVwDXAPwGHAq3AbT0sfyswB7igyPU/AbwNeAfwn8AA4ArggYgYtEnFS+oT07ek7lwO/A7YBvh+Smn1348UdOmzwG8jYp9iVp6yW5/+vvD4duF99wAfBq7elMIlFc+OgKQupZQeAx4iG9F/RRHLPwRcD3y9xE0+XngeU+L7JZXAjoCknswA2lJKLxe5/JnAE8A64MfdLRQRPwHuLzwWA1sDXwRWA7dvSsGS+saOgKRupZRe60MIIKX0NPBtsoGAPflv4L3AT4EFwI3AKmD/QidCUk4iO0wnSZKakR0BSZKamEFAkqQmZhCQJKmJGQQkSWpiBgFJkppY01xHYNSoUWnSpEnVLkOSpFw8/PDDS1JKo3tbrmmCwKRJk5g7d261y5AkKRcR0dXdQjfioQFJkpqYQUCSpCZmEJAkqYkZBCRJamJNM1hQkpSv1atXs2jRIlauXFntUhpWW1sb48ePp3///iWvwyAgSaqIRYsWMXToUCZNmkREVLuchpNS4qWXXmLRokVsu+22Ja/HQwOSpIpYuXIlI0eONARUSEQwcuTITe64GAQkSRVjCKiscuxfg4AkSU3MICBJUhMzCEiSmtb111/P9OnTGTx4MGPGjGH69Ol85zvfIaXECSecwIABAxgyZAgjRozg4IMP5oknnuhyPbNnz2b8+PEbTT/ggAO44oorNvy8YMECPvShDzFq1Cg233xzdtllFy6++GLWrl3L008/TUQwZMgQhgwZwtixYzn11FNZvXp1xT4/GARK8scfz+ORb8yudhmSpE1w0UUXcdppp/G5z32O5557jueff57vfve73HfffaxatQqAM844g/b2dhYtWsSYMWM44YQTSt7ek08+yfTp09lmm2149NFHWbZsGTfeeCNz585l+fLlG5ZbunQp7e3tPProo8yZM4dLLrlkUz9qjzx9sAQvfuHrTHjmPjj9qWqXIkkqwbJlyzjrrLP4wQ9+wFFHHbVh+u67786111670fKbbbYZxxxzDEcffXTJ2zz77LPZa6+9uPjiizdM22GHHfjRj34EZAGgozFjxnDwwQfz2GOPlbzNYhgEShEtBKnaVUhSXTn9dHjkkcpuY7fd4Bvf6H25OXPm8MYbb3D44YcXtd729nauvfZadt9995Jru/POO/nKV75S9PKLFy/mjjvu4LTTTit5m8Xw0EApIoi0rtpVSJJKtGTJEkaNGkVr69//Ht5rr70YPnw4gwYN4u677wbg61//OsOHD+etb30r7e3tXHXVVSVv86WXXmLcuHG9Ljdq1CiGDx/O1ltvzeDBg/ngBz9Y8jaLYUegBKnFjoAk9VUxf6nnZeTIkSxZsoQ1a9ZsCAP3338/AOPHj2fduuyPvc9+9rOcd955G71/yJAhG14/9thjtLa2djmob/Xq1Rsu/zty5EieffbZXmtbsmQJra2tvP7665x11lnMmDGDOXPm9P1DFsmOQEmCFjsCklS39txzTwYOHMitt95a0vvb29s3PCZMmMCECRNYsmQJ7e3tG5ZJKbFw4UImTpwIwEEHHcRPfvKTorcxaNAgTjjhBB544AGWLFlSUp3FMAiUILW0gB0BSapbw4cP5+yzz+bUU0/lpptuYvny5axbt45HHnmEFStW9Hl9EyZMYPr06Xz+85+nvb2dN954g6997Wv079+fPfbYA4Avf/nL3H///RvOUgD485//zLHHHrvRQEGAN954g2uuuYYtt9ySkSNHbtoH7oFBoBQRtGBHQJLq2RlnnMHFF1/MhRdeyNixYxk7diwnn3wyF1xwAXvttVef13fDDTfwwgsv8Na3vpWtt96aX/3qV9x+++20tbUBMHnyZObMmcPTTz/N1KlT2XzzzTnqqKOYNm0aQ4cO3bCe4cOHb7iOwJw5c5g1a1ZFL9UcKTXHX7bTpk1Lc+fOLcu67p5yMjsuuJUxa58ry/okqRE9/vjjTJkypdplNLzu9nNEPJxSmtbb++0IlKKlhWiSACVJamwGgVJEEB4akCQ1AINACTx9UJLUKAwCJXGwoCSpMRgESmFHQJLUIAwCpfASw5KkBmEQKIFjBCRJjcIgUAovKCRJahAGgZKEHQFJUkMwCJTCQwOSVNcmTZrEoEGDGDJkCFtssQWHHnoozzzzzIb5999/P+9+97sZOnQom2++OYcddhiPPfbYJq2z2PWef/75bLvttgwZMoTx48dz9NFHl++Dd8EgUAoPDUhS3bvttttob2/n2WefZezYsXzqU58CYM6cORxyyCEcfvjhLF68mL/85S/suuuu7L333jz11FMlrbPY9V599dVcc8013HnnnbS3tzN37lze8573VG4nYBAojR0BSWoYbW1tfPCDH9zwl/kZZ5zB8ccfz2mnncbQoUMZMWIE5513HnvssQfnnHNOSessdr0PPfQQM2bMYPLkyQBsueWWnHTSSWX9vJ21VnTtjcqOgCT13emnwyOPVHYbu+0G3/hGn97y2muvccMNN7DHHnvw2muvcf/993PuuedutNyHP/xhzjzzzD6vc/3Pxax3jz324NOf/jRbb701Bx54ILvvvjv9+vXr0+fpK4NAKewISFLdO+KII2htbWXFihWMHj2aO+64g5dffpl169Yxbty4jZYfN24cS5Ys6fM6gaLXe+yxxxIRfP/73+ecc86hra2NM844g89//vNl+MRdyy0IRMQI4HvAIcAS4F9TSj/qYrkDgbOAtwOvpJQmdZq/G/BtYBdgOXBZSun/Vrb6jYq0IyBJfdXHv9Qr7ZZbbuGggw5i7dq13Hrrrey///48+uijtLS08Oyzz7Ljjju+aflnn32WUaNGAfC+972Pe+65B4DLLruMmTNndrvOxx57jC222KKo9QLMnDmTmTNnsnr1am655RZmzpzJbrvtxowZMyqyH/IcI3AJsAoYC8wELo2IqV0stwK4EvhcN+v5EXA3MALYHzg1Iv6h/OX2oKWFFjsCktQQ+vXrx5FHHkm/fv2YM2cOe+65JzfeeONGy/34xz/eMHDvF7/4Be3t7bS3t28IAd2t895772Xw4MFFrbej/v3786EPfYhddtmFP/zhD2X4pF3LpSMQEYOBo4CdU0rtwL0RMQs4DvhCx2VTSg8CD0bEQd2sbhJwbUppLfBkRNwLTAVmVar+jUQAkNYloiVy26wkqfxSSsyaNYtXXnmFKVOm8NWvfpUZM2aw4447cuKJJ7JmzRouuugi5syZw0MPPVTSOoGi1nvVVVcxevRo9ttvPwYPHswdd9zB/PnzmT59esU+f16HBrYH1qSUFnSYNo/sL/q++gZwfER8CXgLsCdwYVcLRsRJwEkAEyZMKGFT3TAISFLdO+yww+jXrx8RwcSJE7n66quZOjVrVN9xxx188Ytf5Mwzz6SlpYV9992Xe++9l+22267kde6zzz69rnfYsGGcf/75HHvssaxdu5aJEydy6aWXss8++1RsP+QVBIYAr3aatgwYWsK6fgb8APgs0A84N6XUZURLKV0OXA4wbdq08vXyW7IjKmmdhwckqR49/fTTPc7fZ599mD17dlnXWcx6jzzySI488sg+bXdT5TVGoB0Y1mnaMLLBfkUrDDj8b+BcoA3YBpgREaeWo8g+FALAujUOGJQk1be8gsACoDUiOvZUdgXm93E9bwHWppR+kFJak1JaBFwPvL9MdRbHjoAkqUHkEgRSSiuAnwLnRsTgiNgbOBy4pvOyEdESEW1A/+zHaIuIAYXZCwrTjikstyVwNPC/eXyODkUCdgQkSfUvz9MHTwUGAS8A1wGnpJTmR8S+EdHeYbn9gNeBnwMTCq9/CZBSehU4Evhn4BXgEeAPwHl5fQjAjoAkqWHkdkGhlNLLwBFdTL+HbDDh+p9nA90OxU8p/Rp4ZwVKLJ4dAUkqSkqJCM+uqpSUNv0PUm86VIoOpw9KkrrW1tbGSy+9VJZfVtpYSomXXnqJtra2TVqP9xooReHQgB0BSere+PHjWbRoES+++GK1S2lYbW1tjB8/fpPWYRAoRYsdAUnqTf/+/dl2222rXYZ64aGBEkShI4DtLklSnTMIlMLBgpKkBmEQKIWnD0qSGoRBoBR2BCRJDcIgUIp+dgQkSY3BIFCC9RfHSGvtCEiS6ptBoBReUEiS1CAMAqVwsKAkqUEYBErR4qEBSVJjMAiUIOwISJIahEGgFJ4+KElqEAaBEkQ/LzEsSWoMBoFS2BGQJDUIg0ApvKCQJKlBGARK4AWFJEmNwiBQCi8oJElqEAaBEkT/VgDWrlxd5UokSdo0BoES9BvcBsDq9jeqXIkkSZvGIFCCfpsNBGDNCoOAJKm+GQRKsD4IrH3NICBJqm8GgRK0Ds6CwLIHHqtyJZIkbRqDQAnG7bcdAJNvuqDKlUiStGlaq11APRo1ZTRPDtiRyaueYO6/30G/tv5lWe/AgbDjjhvucvz3iXvsAf36lWUbkiR1ZBAo0Qv//7lM/o8PM+2L7638xq67Dj7ykcpvR5LUdAwCJZp+4VHMf9tvWdO+sizre+YZuPBrcP75sM/ehYlr18K73w2PORZBklQZBoEStbS2MPXEd5Vtfet+D/d8DZZMAfbrMGP4cHjllbJtR5KkjhwsWCNauruz8RZbwNKludcjSWoOBoEaUbh9Aes638fIjoAkqYIMAjXCjoAkqRoMAjXCjoAkqRoMAjXCjoAkqRoMAjWix46AQUCSVCEGgRrRY0fgtddg1arca5IkNT6DQI3osSMAdgUkSRVhEKgRPXYEwAGDkqSKMAjUCDsCkqRqMAjUiPUdgY2CgB0BSVIFGQRqxPqOwEaHBuwISJIqKLcgEBEjIuLmiFgREQsj4phuljswIu6KiGUR8XQ3y5wWEX8prOvxiNi+osXnwI6AJKka8uwIXAKsAsYCM4FLI2JqF8utAK4EPtfVSiLiE8DHgUOBIcAHgCWVKDhPdgQkSdWQy22II2IwcBSwc0qpHbg3ImYBxwFf6LhsSulB4MGIOKiL9bQAZwMnpJQeK0x+sqLF56TbjkBbW/awIyBJqoC8OgLbA2tSSgs6TJsHdNUR6Mn4wmPniHimcHjgy4WAsJGIOCki5kbE3BdffLG0ynPSbUcAvLqgJKli8goCQ4BXO01bBgzt43rGF54PAd4GHAh8lOxQwUZSSpenlKallKaNHj26j5vKV7cdAfDGQ5KkiskrCLQDwzpNGwYs7+N6Xi88X5hSWppSehq4DHj/ppVXfd1eUAi88ZAkqWLyCgILgNaI2K7DtF2B+X1czx/JBhx2/HXZ1a/OutPtBYXAjoAkqWJyCQIppRXAT4FzI2JwROwNHA5c03nZiGiJiDagf/ZjtEXEgMJ6XgNuAM6IiKERMR44CfhZHp+jkuwISJKqIc/TB08FBgEvANcBp6SU5kfEvhHR3mG5/cgOAfwcmFB4/csO8z9JdqhhMTAH+BHZ6YZ1zY6AJKkacjl9ECCl9DJwRBfT7yEbTLj+59lA9LCeV4GPVKDEqiqqI5DS3xODJEll4CWGa0SvHYF162B5X8dWSpLUM4NAjei1IwCOE5AklZ1BoEb02hEAxwlIksrOIFAj7AhIkqrBIFAj7AhIkqrBIFAj7AhIkqrBIFAjiuoIGAQkSWVmEKgRPXYEhhbuzfRq5/s2SZK0aQwCNaLHjkCL/5kkSZXhb5gasT4IdNkRkCSpQgwCNaLHjoAkSRViEKghLS29dARsF0iSyswgUEMi7AhIkvJlEKgh3XYEvOOgJKlCDAI1xI6AJClvBoEa0tJiEJAk5csgUEMiHCwoScqXQaCGdNsRcIyAJKlCDAI1pNeOgCRJZWYQqCGOEZAk5c0gUEN6vaCQJEllZhCoIb2ePmhKkCSVmUGghtgRkCTlzSBQQ7ygkCQpbyUFgYgYFBEDy11Ms7MjIEnKW1FBICK+HhHvKrw+FHgZeCUiDqtkcc3GjoAkKW/FdgRmAn8ovD4LOBb4B+D8ShTVrLwNsSQpb61FLrdZSum1iBgJvCWl9BOAiJhYudKaT48dAa8uKEmqgGKDwIKImAm8FfgfgIgYBbxeqcKakWMEJEl5KzYInAp8E1gFfLwwbQbwy0oU1awcIyBJyltRQSCl9BCwV6dp1wLXVqKoZuUYAUlS3oo9a+DAiNi28HpcRFwdEd+PiC0rW15zcYyAJClvxZ418B1gbeH1RUB/YB1weSWKalaOEZAk5a3YMQJbp5T+GhGtZGMDJpKNF1hcscqakGMEJEl5KzYIvBoRY4GdgcdSSu0RMYCsM6AysSMgScpbsUHg28BDwADg9MK0vYEnKlFUs/Lug5KkvBV71sAFEXEzsDal9GRh8t+AT1SssibUY0fAwYKSpAootiMA8BSwV+GeA38D7k8pralMWc3JMQKSpLwVFQQiYkfgNmAQ8AywDbAyIg5LKT1ewfqaimMEJEl568vpg5cD26SU9kwpjQe+W5iuMrEjIEnKW7FBYDfg4pTe9PfqNwrTVSZeWVCSlLdig8BiYP9O0/alD9cRiIgREXFzRKyIiIURcUw3yx0YEXdFxLKIeLqH9e0fESkiziu2hlrnlQUlSXkrdrDgmcCsiPgZsJDsgkKHAsf2YVuXkF2EaCxZJ+H2iJiXUprfabkVwJXAdYXtbiQi+pPdBOm3fdh+zWtp8dCAJClfRXUEUkqzgLcDfwCGFp7fkVK6tZj3R8Rg4CjgSyml9pTSvcAs4LgutvVgSukasrMUuvMvZHc+bKjrGETY/Zck5avo0wdTSguAUtvw2wNrCutYbx4bH27oVURMBP6RLJj8Zy/LngScBDBhwoS+bip3vXYETAmSpDLrNghExDVAr795UkrHF7GdIcCrnaYtI+su9NW3KHQWopfj5imlyyncGGnatGk1/1vUCwpJkvLWU0fgz2XcTjswrNO0YcDyvqwkIg4DhqaUbihXYbXE0wclSXnrNgiklL5cxu0sAFojYruU0p8K03YFOg8U7M17gGkR8Vzh582BtRHxtpTS4WWqtWq8oJAkKW/Fnj64SVJKK4CfAudGxOCI2Bs4HLim87IR0RIRbWR3NoyIaCvc6RDgS2TjDXYrPGYB/wWcmMPHqDg7ApKkvOUSBApOJbtE8QtkpwaeklKaHxH7RkR7h+X2A14Hfg5MKLz+JUBKaXlK6bn1j8K8FSmll3P8HBXjBYUkSXnry02HNknhl/URXUy/h2ww4fqfZwNFjYxLKZ1QpvJqghcUkiTlLc+OgHrhGAFJUt6K7ghExCFkx+WHdJyeUjqr3EU1qwhYu7baVUiSmkmxtyH+T+DDwF3Aax1m+fdrGbW0wJo11a5CktRMiu0IHAPsmlJ6ppLFNLtezxrwuIEkqcyKHSOwBFhayULklQUlSfkrtiNwEXBtRHwFeL7jjJRSTzcHUh94HQFJUt6KDQKXFp4/0Gl6AvqVr5zm5lkDkqS8FRUEUkqeZpgDxwhIkvLWpwsKRcQEYGtgkQMHy88xApKkvBX1l35EjIuI35DdkfCnwJMRcXdEbFXR6pqMYwQkSXkrtuV/KTAP2CKlNA7YAvg98N1KFdaMHCMgScpbsYcG9gHGpZRWQ3Y3wYg4A/hbxSprQnYEJEl5K7Yj8AqwU6dpO+C1BcrKuw9KkvJWbEfgQuDOiPgesBCYCJwIfKlShTUj7z4oScpbsacP/ldEPEl2qeFdgMXAMSmlX1WyuGbjGAFJUt6KPn0wpfRr4NcVrKXpOUZAkpS3boNARPxbSunfC6/P7W45b0NcPnYEJEl566kjML7D620qXYi8sqAkKX/dBoGU0ikdXp+YTznNraXFwYKSpHwVe2XBl7uZ/kJ5y2luHhqQJOWt2OsI9O88ISL6450Hy8rBgpKkvPV41kBE3EN2q+G2iLi70+zxwP2VKqwZeUEhSVLeejt98AoggHcC3+swPQHP4+mEZeUFhSRJeesxCKSUrgaIiAdSSk/kU1LzcoyAJClvxV5Z8ImIGAu8CxhF1iVYP+/KCtXWdBwjIEnKW1FBICKOAH4I/AmYCswHdgbuBQwCZWJHQJKUt2LPGjgPODGltDuwovB8EvBwxSprQl5QSJKUt2KDwISU0o2dpl0NHF/meppajx0BBwtKkiqg2CDwQmGMAMDTEbEnMBmvI1BWjhGQJOWt2CDwX8A+hdf/AdwFzAO+U4mimpVjBCRJeSv2rIELOrz+QUTMBganlB6vVGHNyI6AJClvRQWBzlJKfy13IfLKgpKk/HUbBCLiGbIrCPYopTShrBU1Ma8sKEnKW08dgWM7vH4n8DHgW8BCYCLwSeAHlSut+ThGQJKUt26DQErpN+tfR8QlwIyU0t86TPsF8N/ARRWtsIk4RkCSlLdizxrYCmjvNK0d2Lq85TQ3xwhIkvJWbBCYBcyKiIMjYkpEHALcXJiuMnGMgCQpb8UGgf8PmAN8F/hd4fm3hekqE8cISJLyVux1BFYCXyg8VCGOEZAk5a2n0wf3SyndXXj97u6WSyn9uhKFNSM7ApKkvPXUEfgO2a2GAb7XzTIJeEtZK2pi3n1QkpS3bscIpJR27vB6224eRYeAiBgRETdHxIqIWBgRx3Sz3IERcVdELIuIpzvNGxMR10XE4sL8+yJierE11DrvPihJyluxgwXL4RJgFTAWmAlcGhFTu1huBXAl8Lku5g0BHgLeAYwguxXy7RExpCIV5ywiCwL+4S9JyksulxiOiMHAUcDOKaV24N6ImAUcR6cBiCmlB4EHI+KgLrb1FHBxh0mXR8TXgR2Ah3uro9a1FGJZSjYAJEn5KPYSw5tqe2BNSmlBh2nzgP03ZaURsRswAPhzN/NPAk4CmDCh9m+J0DEISJKUh6IuMVwGQ4BXO01bBgwtdYURMQy4BvhySmlZV8uklC4HLgeYNm1azf96XR8E1q6Ffv26WMCEIEkqs6JvQ1z463tfYBSwoXGdUjqriLe3A8M6TRsGLC92+51qGQTcBjyQUvpKKeuoRf37Z8+rV8OAAZ1meqxAklQBRQ0WLLTY7wPeDXweeBvwL8Bbi9zOAqA1IrbrMG1XYH7xpW6oZSBwC7AIOLmv769lAwdmz6tWVbcOSVLzKPasgTOA96aU/g/weuH5g8DqYt6cUloB/BQ4NyIGR8TewOFkrf03iYiWiGgD+mc/RltEDCjM6w/cBLwOfCyl1FDX4VvfBXjjjerWIUlqHsUGgTEppXsKr9dFREtK6RfAYX3Y1qnAIOAF4DrglJTS/IjYNyI63tlwP7Jf9D8HJhRe/7Iwby/gA8AhwNKIaC889u1DHTVrfUfAICBJykuxYwQWRcSklNLTZG3+wyNiCdl1AYqSUnoZOKKL6feQDSZc//NsOoxB6LTsb7qb1wh6DQIOFpQklVmxQeBCYArwNHAuWXt+APDpypTVnHocI+BgQUlSBRR798GrOrz+RURsAQwoXBxIZeIYAUlS3oo9a+AbEfHO9T+nlFYZAsrPMQKSpLwVO1gwgFsj4k8R8eWI2KGSRTWrXk8fdIyAJKnMigoCKaXTgPFkI/+3AR6IiIcj4jOVLK7Z9NgRcIyAJKkCir77YEppXUrpf1JK/wjsDLwEfK1ilTUhDw1IkvJWdBAoXAjo2Ii4newUwjXAxypWWRNysKAkKW9FnTUQETcC7wN+R3YxoI+llJZUsrBm5CWGJUl5K/Y6Ag8B/5JS+msli2l2XlBIkpS3Yq8jcGGlC5GDBSVJ+St6jIAqzzECkqS8GQRqyKBB2fPKldWtQ5LUPAwCNWTgwOwIwIoV1a5EktQsDAI1JAIGD4bXXutmAQcLSpLKzCBQYzbbrJsg4GBBSVIFGARqzODBHhqQJOXHIFBjuu0ISJJUAQaBGrPZZj10BBwjIEkqM4NAjdl8c1i6tIsZjhGQJFWAQaDGjBkDL7xQ7SokSc3CIFBjxo6F55+vdhWSpGZhEKgxY8ZkYwQ8c0CSlAeDQI0ZMyZ77vLwgIMFJUllZhCoMWPHZs8bBQEHC0qSKsAgUGPWB4G//a26dUiSmoNBoMbstBP07w8PPljtSiRJzcAgUGM22wze/na4775qVyJJagYGgRq0zz5w771w552dZjhYUJJUZgaBGnTccTBgALzvffDyy4WJDhaUJFWAQaAG7bor/PjHsGYNfOQjsHZttSuSJDUqg0CNOvxwOPRQ+J//gdZWWLmy2hVJkhpRa7ULUPeuvx7OPBO+/W1Yugyen5e486JqV1X7Djoo66pIknpnEKhhQ4bAt76VnUWQTgweeAA++0C1q6p9BxwAd91V7SokqT4YBOrACSfAui/Aie+HY75Z7Wpq2z/9EzxgWJKkohkE6kRLwID+MGBotSupbZMnw003ZQMs+/WrdjWSVPscLKiGMmZMFgLS5u82AAARaElEQVSWLq12JZJUHwwC9cQLCvVq5Mjs+aWXqluHJNULg0C98IJCRTEISFLfGATUUAwCktQ3BgE1FIOAJPWNQUANZdSo7PnFF6tbhyTVi9yCQESMiIibI2JFRCyMiGO6We7AiLgrIpZFxNNdzJ9UmP9aRDwREQdVvPha4WDBXg0bBoMGwbPPVrsSSaoPeXYELgFWAWOBmcClETG1i+VWAFcCn+tmPdcBvwdGAv8G3BQRo8tfbo1xsGBRImCrrQwCklSsXIJARAwGjgK+lFJqTyndC8wCjuu8bErpwZTSNcBTXaxne+DtwNkppddTSj8BHi2sWwKyILB4cbWrkKT6kFdHYHtgTUppQYdp84CuOgI9mQo8lVJaXsx6IuKkiJgbEXNf9KBx0xg3ziAgScXKKwgMAV7tNG0Z0NcL5g4pvK+o9aSULk8pTUspTRs9ugGOHjhGoCjrOwLuLknqXV5BoB0Y1mnaMGB5F8vmsZ764xiBom21FaxYAcsb/1shSZssryCwAGiNiO06TNsVmN/H9cwH3hIRHTsApaxHDWyrrbJnDw9IUu9yCQIppRXAT4FzI2JwROwNHA5c03nZiGiJiDagf/ZjtEXEgMJ6FgCPAGcXpv8fYBfgJ3l8DtWH9UHgb3+rbh2SVA/yPH3wVGAQ8ALZKYCnpJTmR8S+EdHeYbn9gNeBnwMTCq9/2WH+R4BpwCvAV4EPppQcCagNdt45e37wwerWIUn1oDWvDaWUXgaO6GL6PWSDANf/PBvo9oB4Sulp4ICyF1gPHP1WlNGjszMH/vSnalciSbXPSwzXCwcL9sn48R4akKRiGATUkLbe2iAgScUwCKghTZoETz0F69ZVuxJJqm0GATWkKVPg9dfhiSeqXYkk1TaDQD1xsGDR3vWu7PmRR6pbhyTVOoNAvXCwYJ9MmJA9v/BCdeuQpFpnEFBDGj4c+vUD7zUlST0zCKghtbRk1xJ4/PFqVyJJtc0gUE8cI9AnRx0FN98M870ThSR1yyBQLxwj0Gef+lT2fN111a1DkmqZQUANa/Jk2GUX+N3vql2JJNUug4Aa2tvfDg8/7FEVSeqOQUAN7R3vyE4hXLy42pVIUm0yCNQT/6zts7e/PXt++OHq1iFJtSq32xBrEzlYsCS77pqdSnjeeXDLLV0vM2ECbLFFvnWp/rS1wXHHwWabVbsSqbwMAmpogwfDRz8Kd98Nzz238fyXX4YVK/KvS/Vp+HA4+uhqVyGVl0FADe+HP+x+XkqwdGl+tag+vfgi7LADvPRStSuRys8goKYW4WEB9W794YBXXqluHVIlOFiwnjhYUKqKgQNh0CC7R2pMBoF64WBBqaqGD7cjoMZkEJCkIowd2/WAU6neGQQkqQjjxsGzz1a7Cqn8DAL1xDECUtWMGOGhATUmg4AkFWHEiOy6E1KjMQjUCwcLSlU1YgQsWwZr11a7Eqm8DAKSVISRI7NnLyqkRmMQkKQibLll9vz889WtQyo3g0A9cbCgVDXrg4BnDqjRGATqRWurByelKlofBLyWgBqNQaBetLbC6tXVrkJqWuPGZc92BNRoDAL1on9/WLOm2lVITWvIkOy21nYE1GgMAvXCjoBUdRMmwJNPVrsKqbwMAvXCjoBUddOmwYMPOm5XjcUgUC9aWw0CUpXtsUd2+uBf/1rtSqTyMQjUCw8NSFU3fXr2/MAD1a1DKieDQL3w0IBUdbvsAptvDrfeWu1KpPIxCNQLOwJS1fXvD4ccAvffX+1KpPIxCNQLOwJSTXjXu2DhQliypNqVSOVhEKgXdgSkmjBlSvb80EPVrUMqF4NAvfCsAakm7LRT9nzOOVUtQyobg0C98NCAVBO23RZOOSW7nsA991S7GmnT5RYEImJERNwcESsiYmFEHNPNchERF0TES4XHBRERHea/OyJ+FxGvRsRTEXFSXp+hqjw0INWMr341u8rgv/5rtSuRNl2eHYFLgFXAWGAmcGlETO1iuZOAI4BdgV2Aw4CTASKiP3AzcBmwOXA0cHFE7Frx6qvNjoBUM4YNg1NPhfvu85LDqn+teWwkIgYDRwE7p5TagXsjYhZwHPCFTot/DLgopbSo8N6LgH8CvguMAIYB16SUEvBQRDwO7ATMy+OzVI0dAammzJwJX/gCHHxw9lrFa22FE06AiROrXYkgpyAAbA+sSSkt6DBtHrB/F8tO5c2/1OcVppFSej4irgNOjIjvAu8CJgL3drXRwmGDkwAmTJiwqZ+hugYMgFWrql2FpILx4+Gww+BnP4Pzz692NfVl3Tr43e+8MFOtyCsIDAFe7TRtGTC0m2WXdVpuSEREoQtwHXAF8M3C/FNSSs90tdGU0uXA5QDTpk2r79uEDBkC7e3Z3U7+PmRCUhXNmlXtCurTpz8N3/42fOYz8O//DoMGVbui5pbXGIF2spZ+R8OA5UUsOwxoTymliNgRuB44HhhA1ik4IyIOLX/JNWbo0GyMwBtvVLsSSdokn/oUTJ0K//EfcMst1a5GeQWBBUBrRGzXYdquwPwulp1fmNfVcjsDC1JKd6SU1qWU/gjcDryvAjXXlqGF5snyrrKTJNWP7baDu+/OXj//fHVrUU5BIKW0AvgpcG5EDI6IvYHDgWu6WPwHwGciYuuI2Ar4F+CqwrzfA9sVTiGMiJgMfAD434p/iGozCEhqIJtvnj2/8kp161C+pw+eCgwCXiA7zn9KSml+ROwbEe0dlrsMuA14FPgD2V/8lwGklJ4E/hH4FtmYg98APyEbM9DY1geBJ56obh2SVAb9+sHw4fDyy9WuRJGNv2t806ZNS3Pnzq12GaWbPx923jk7qHb66dWuRpI22XbbwYsvwqJF2XholVdEPJxSmtbbcl5iuF7stBMMHAiLF1e7EkkqixkzYNkyuOmmalfS3AwC9SICttgCLrus2pVIUllcdFH2fOON1a2j2RkE6sm228Krr2Z9NEmqcwMHwlZbwa9/7ZnR1WQQqCdnnpk9e8szSQ3iox+FlSvh5JOrXUnzcrBgPVmx4u8jambPhv27ukKzJNWP1auzK6hDdgOnt7yluvU0kmIHC+Z1iWGVw+DB2cG044/Prs05d66XG5ZU1/r3h9tvh0MPhcmTs7s6HnQQjBlT/m1ttVV2hFVvZkegHn3yk3DJJdkphTvtVO1qJGmTXXZZdt+BZ7q8c0z5TJwILSUcFB86FObV2T1ui+0IGATq0bx5sNtu2evHHoMpU6pbjySVyXPPwaOPln+9zzwDv/lNdt+2UgwaVH8nbRkEOmmoIJASfPnL2WPvvbMht+sPskmShGMEGlsEnHNOFnGvvDK7vsAuu1S7KknSpthvP7jggtw3axCoZ1dcAXvtBTffnA29lSTVr802q8pmDQL1LAI+/vHsIUlSCbygkCRJTcwgIElSEzMISJLUxAwCkiQ1MYOAJElNzCAgSVITMwhIktTEDAKSJDUxg4AkSU3MICBJUhMzCEiS1MQMApIkNTGDgCRJTSxSStWuIRcR8SKwsIyrHAUsKeP6Go37p2fun565f3rm/umZ+yczMaU0ureFmiYIlFtEzE0pTat2HbXK/dMz90/P3D89c//0zP3TNx4akCSpiRkEJElqYgaB0l1e7QJqnPunZ+6fnrl/eub+6Zn7pw8cIyBJUhOzIyBJUhMzCEiS1MQMAn0UESMi4uaIWBERCyPimGrXlLeImB0RKyOivfD4Y4d5xxT2y4qIuCUiRnSY13D7LiI+GRFzI+KNiLiq07z3RMQTEfFaRNwVERM7zBsYEVdGxKsR8VxEfKbY99aT7vZPREyKiNThO9QeEV/qML9Z9s/AiPhe4d/D8oh4JCLe12F+U3+Heto/fofKKKXkow8P4DrgBmAIsA+wDJha7bpy3gezgU90MX0qsBzYr7B/fgRc38j7DjgSOAK4FLiqw/RRhc/3IaAN+BrwQIf5XwHuAbYApgDPAe8t5r319Ohh/0wCEtDazfuaZf8MBs4p7I8W4AOFf0OT/A71un/8DpVrP1e7gHp6FL6Uq4DtO0y7BvhqtWvLeT/MpusgcD7wow4/Ty7sr6GNvu+A8zr9ojsJuL/Td+d1YMfCz4uBQzrM/78UQlNv763HRxf7p7f/iTfV/un02f8XOMrvUK/7x+9QmR4eGuib7YE1KaUFHabNI/tLuNl8JSKWRMR9EXFAYdpUsv0BQErpSQq//Gm+fdd5X6wAngSmRsQWwLiO83nzvuj2vRWuuRoWRsSiiPh+RIwCaOb9ExFjyf6tzMfv0EY67Z/1/A5tIoNA3wwBXu00bRnZX7zN5PPAW4Ctyc7XvS0iJpPtn2Wdll2/f5pt3/W2L+g0v+O+6Om9jWIJ8E5gIvAOss92bWFeU+6fiOhPtg+uTik9gd+hN+li//gdKpPWahdQZ9qBYZ2mDSM7ZtU0Ukq/7fDj1RHxUeD99Lx/1vUwrxH1tC/aO/y8stO83t7bEFJK7cDcwo/PR8QngWcjYihNuH8iooXsUNkq4JOFyX6HCrraP36HyseOQN8sAFojYrsO03blzW2qZpSAINsPu66fGBFvAQaS7bdm23ed98VgsjET81NKrwDPdpzPm/dFt++tcM3VtP7KZi3Ntn8iIoDvAWOBo1JKqwuz/A7R4/7prGm/Q5us2oMU6u0BXE82+n0wsDcNMPK9j59/ODCDbKRtKzATWEF23G4qWft/38L++SFvPmug4fZdYR+0kY1QvqbDfhld+HxHFaZdwJtHfH8V+A3ZiOYdyf6ntX5Ec4/vradHD/tnOrAD2R8jI8nOJrmr2fZP4fN8F3gAGNJput+hnveP36Fy7eNqF1BvD2AEcEvhl99fgWOqXVPOn3808BBZC21p4R/owR3mH1PYLyuAW4ERjbzvyE5tSp0e5xTmHQQ8QTYaeTYwqcP7BgJXkgWn54HPdFpvt++tp0d3+wf4KPCXwnfhWeAHwJZNuH8mFvbJSrJ29frHTL9DPe8fv0Ple3ivAUmSmphjBCRJamIGAUmSmphBQJKkJmYQkCSpiRkEJElqYgYBSZKamEFAUtV0uKe8lzuXqsQgIElSEzMISJLUxAwCkt4kIraKiJ9ExIsR8ZeI+HRh+jkRcVNE3BARyyPidxHR8cYtUyJidkQsjYj5EfEPHeYNioiLImJhRCyLiHsjYlCHzc6MiL9GxJKI+LcO73tXRMyNiFcj4vmIuDiXnSA1EYOApA0Kt3u9DZgHbA28Bzg9ImYUFjkcuJHsvhE/Am6JiP6Fe8XfBvwSGAN8Crg2InYovO/rZPeM36vw3jPIbk293j5kN5B5D3BWREwpTP8m8M2U0jCyu8P9uOwfWmpy3mtA0gYRMR24MaU0ocO0fyW7u+RCsru37VGY3gL8DfhwYdEbga1SSusK868D/gicS3ZjmD1SSvM6bW8S2Y1jtkkpLSpMexC4OKV0fUTcDdwFfDultKQiH1pqcnYEJHU0Ediq0N5fGhFLgTPJ7gUP8Mz6BQu/8BcBWxUez6wPAQULyboKo8hu9fpkD9t9rsPr14AhhdcfJwshT0TEQxHxgZI/maQuGQQkdfQM8JeU0vAOj6EppfcX5m+zfsFCR2A8sLjw2KYwbb0JZB2DJWS3kZ3c12JSSn9KKX2U7HDDBcBNETG4lA8mqWsGAUkdPQgsj4jPFwb49YuInSPinYX574iIIwvn/Z8OvAE8APyW7C/5MwpjBg4ADgOuL3QJrgQuLgxE7BcRe0bEwN6KiYhjI2J0YR1LC5PX9fQeSX1jEJC0QUppLfABYDeyY/dLgCuAzQuL3AocDbwCHAccmVJanVJaRfaL/32F93wHOD6l9EThfZ8FHgUeAl4m++u+mP//vBeYHxHtZAMHP5JSen1TP6ekv3OwoKSiRMQ5wFtTSsdWuxZJ5WNHQJKkJmYQkCSpiXloQJKkJmZHQJKkJmYQkCSpiRkEJElqYgYBSZKamEFAkqQmZhCQJKmJ/T/tOI6gnqFb9QAAAABJRU5ErkJggg==\n", 96 | "text/plain": [ 97 | "
" 98 | ] 99 | }, 100 | "metadata": { 101 | "needs_background": "light" 102 | }, 103 | "output_type": "display_data" 104 | } 105 | ], 106 | "source": [ 107 | "\n", 108 | "plt.rc('font', family='sans-serif', size=12)\n", 109 | "plt.figure(figsize=(8, 6))\n", 110 | "plt.plot(conc_epoch_values_cum, color='b')\n", 111 | "plt.plot(conc_epoch_values_cum_bos, color='r')\n", 112 | "plt.title(\"MNIST\")\n", 113 | "plt.xlabel(\"epochs\")\n", 114 | "plt.ylabel(\"validation loss\")\n", 115 | "plt.legend((\"GP-UCB\", \"BO-BOS\"))\n", 116 | "axes = plt.gca()\n", 117 | "# axes.set_ylim([0.07, 0.1])\n", 118 | "plt.show()\n", 119 | "\n" 120 | ] 121 | }, 122 | { 123 | "cell_type": "markdown", 124 | "metadata": { 125 | "ExecuteTime": { 126 | "end_time": "2018-12-30T05:52:42.848234Z", 127 | "start_time": "2018-12-30T05:52:42.844403Z" 128 | } 129 | }, 130 | "source": [ 131 | "# analyze runs" 132 | ] 133 | }, 134 | { 135 | "cell_type": "markdown", 136 | "metadata": {}, 137 | "source": [ 138 | "\n", 139 | "Each log file (represented by \"result\" below) is a dictionary, with the following key elements:

\n", 140 | "**results[\"all\"][\"epoch_values\"]**: this is a list, whose length is equal to the total number of BO iterations; each element of the list is another list corresponding to a particular iteration, which contains the validation accuracy after each training epoch in this particular iteration.

\n", 141 | "**results[\"all\"][\"eval_times\"]**: this item has exactly the same shape as \"epoch values\", with each element representing the time at which the corresponding epoch value is obtained.

\n", 142 | "**results[\"all\"][\"time_started\"]**: the starting time of the script, which is to be subtracted from each element of the \"eval_times\" to calculate the run time of the script.

\n", 143 | "**results[\"all\"][\"params\"]**: this is a list whose length is equal to the number of BO iterations; each element of the list represents the the value of the hyper-parameters evaluated in the corresponding BO iteration.

\n" 144 | ] 145 | }, 146 | { 147 | "cell_type": "code", 148 | "execution_count": 35, 149 | "metadata": { 150 | "ExecuteTime": { 151 | "end_time": "2019-05-05T15:35:24.948048Z", 152 | "start_time": "2019-05-05T15:35:24.891465Z" 153 | } 154 | }, 155 | "outputs": [], 156 | "source": [ 157 | "\n", 158 | "min_len = 1e4 # get the minimum length among all runs, for visualization\n", 159 | "\n", 160 | "epoch_values_no_stop = []\n", 161 | "epoch_values_with_stop = []\n", 162 | "for run_iter in np.arange(1, 11):\n", 163 | " # load runs without BOS\n", 164 | " bos_no_stop = pickle.load(open(\"saved_results/bos_mnist_no_stop_\" + str(run_iter) + \".p\", \"rb\"))\n", 165 | " epoch_values = bos_no_stop[\"all\"][\"epoch_values\"]\n", 166 | " conc_epoch_values = [e for epoch in epoch_values for e in epoch]\n", 167 | " conc_epoch_values_cum = 1.0 - np.maximum.accumulate(conc_epoch_values)\n", 168 | "\n", 169 | " if len(conc_epoch_values_cum) < min_len:\n", 170 | " min_len = len(conc_epoch_values_cum)\n", 171 | " epoch_values_no_stop.append(conc_epoch_values_cum)\n", 172 | "\n", 173 | " \n", 174 | " # load runs with BOS\n", 175 | " bos_with_stop = pickle.load(open(\"saved_results/bos_mnist_with_stop_\" + str(run_iter) + \".p\", \"rb\"))\n", 176 | " epoch_values = bos_with_stop[\"all\"][\"epoch_values\"]\n", 177 | " conc_epoch_values = [e for epoch in epoch_values for e in epoch]\n", 178 | " conc_epoch_values_cum_bos = 1.0 - np.maximum.accumulate(conc_epoch_values)\n", 179 | " \n", 180 | " if len(conc_epoch_values_cum_bos) < min_len:\n", 181 | " min_len = len(conc_epoch_values_cum_bos)\n", 182 | " epoch_values_with_stop.append(conc_epoch_values_cum_bos)\n", 183 | " \n" 184 | ] 185 | }, 186 | { 187 | "cell_type": "code", 188 | "execution_count": null, 189 | "metadata": {}, 190 | "outputs": [], 191 | "source": [] 192 | }, 193 | { 194 | "cell_type": "code", 195 | "execution_count": 39, 196 | "metadata": { 197 | "ExecuteTime": { 198 | "end_time": "2019-05-05T15:58:31.465146Z", 199 | "start_time": "2019-05-05T15:58:31.171083Z" 200 | }, 201 | "scrolled": false 202 | }, 203 | "outputs": [ 204 | { 205 | "data": { 206 | "image/png": "iVBORw0KGgoAAAANSUhEUgAAAgoAAAGKCAYAAABtmWkQAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDMuMC4xLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvDW2N/gAAIABJREFUeJzs3Xl8VNX5x/HPk4Q9QJRVgoALiqKANRb33VJrqXv9KS5YW1xaa9UW+7N1qVqtWmi1P6tQF5C6VWsVa5VWKyqCVdyKINK6g4gG2QIKhDy/P84NDMNMMklmz/f9es1rZu49997nziQzz5xz7jnm7oiIiIgkUpLrAERERCR/KVEQERGRpJQoiIiISFJKFERERCQpJQoiIiKSlBIFERERSUqJguQlM6sys8Vm1inXseQDMxttZrW5jqM1M7ODzczNrG+Oju9mdmoTt5luZrene99mNiDaZv+WxNcc2TpO3DFPNrOXzcyyedx8oUShiJjZpOif6OEE646O1tXGLKv/4PvAzNrHlX/KzCbF7fupmOclZvZjM3vTzFab2XIze8PMronWT4/23dBtQAOn8xvgBndf3ewXRHLOzH5rZv8yszXJEh0za2NmN0SJ4RdmNsPM9sx2rAVgG+ChJm5zHHBR/ZP4/+sW7jsT+9gow7E21f1AR2BUlo+bF5QoFJ8PgW+aWa+45WcDHyTZpifwoyYe53LgZ8CvgCHAfsC1QH0NwHGEf+j6G8AP4pZ9lGjHZrYXsBcwqYkxZYSZtcl1DPnMzNo2sLoUuBf4fQNlbgTOIvyN7gW8CzxlZr2bGEdRv0/u/om7f9nEbT5395WZ2Hcm9pFPx4k7pgN30PTPyeLg7roVyY3wxfoU8BxwSczyfsB64AqgNmb5wYAD1wHLge4x654CJsXvO+b568CvmxCbA6emWPa3wN/jlm0F/JGQCH0BvA1cDFi0/ghgA9A3bruTgDVAl+h5r+hcPgNWAS8AByZ4TY4CZgBfAuc2dvxo2xJCsvQZUEP4FfKj2Nc8JtYXov0sAu4CusXt52rg02g/DwAXxu8nwevWGZgQHX8tMBv4Wsz6F4CJCbZ7C7gm5vn/RO/vl8D7wHigU8z66YQPzauBxcAnKbynoxPFD3SJjjMmZlkp8AlwZQP7a8n7NInw9z2GkDyvBKYCveKOcT6wMPr7mQacHh2zb0yZbwCvRK/3p4SEqFOCY9Xvqwa4HWgDnBMdfxkwEWjblP+h6Pl5wBTC3/JC4H/jtpkO3B4Ti8fdDk6y7wuiv4Ga6L24H9gmZv2AaJv9E8UHXJngWE70mQJsBzwMfBy9vnOA0+Jet1Rj3SaKb3n0nk8HqhL8rRxB+GxcA8wDjox7rS4lJKlrCf9D04AOMeu3i/YzKNXPvWK55TwA3dL4Zm76UDoV+A+bvkSvAp4k7sM65h9oADAf+F3MusYShSeAl4HKFGNrSqLwGjFfXNGy3sBPga9E/7CnRh9iZ0brSwgflJfEbfc34N7ocYfoA+LPQBWwI6FWZC2wS9xrMh8YGR2rb2PHj7a9KFp2GjAwev553Gt+aPRBdX5UZi/gGeDZmPfrAmA1cAawEzA2+hBsLFF4kPDFPgLYBbgJWFf/wUb4YlwGtIvZ5qvR+e4UPR8dlTkN2B44EPg3MCVmm+mEL6bbgF2B3VN4Tzf724tZfkh0/H5xy6fE/r0l2K4l79MkYAVwH7AbsA/wXtw5Hg3URu/hToQajyXEJAqEmrRaQjPZIOBIQoIyJe5YK4HJ0XsykpDUPAHcHS07ivAFd25T/oei50uA7wE7AN+Plh0W917VJwpdCV+UD0SvU2+i5CTBvi8ADo9ew32AmcCzMesH0HCiUB5zjN7Rea8HzojW706oYRwaxX5+9Foe0pRYAQP+RUhq9o/2+wDhb7h73N/KG8DXCf93d0Xvy1ZRmeOi5yMJP6yGEZL8DnHvwZLG3qdivOU8AN3S+GZuShTaA0sJH8KlhC/Q40ieKPQlfDCuAwZG6xpLFAYBbwJ1hF9tkwntd2VJYmtKorA8lX9GwhfhP2Ke/wp4M+Z5r+jDZ0T0fHT0WpTF7eefwG/jXpPTmnH8RcDVcWXuj3vNpwO/iivTLzrmsOj5QuCXcWUeooFEgZD0OPCNuOWvAndGjysIX0gnxqz/P2BWzPP3gXPi9nFgtO+tYs5hAVDShL/N0YniB06J9t02bvmNwNwG9teS92kS4dd/bMJ0CbA45vkM4J64/fyazROFKcBLcWWOjv4n+scdq21MmceB6rjjPwo81Mh5JEoUbo4r8xZwXdzf2+0xzzf7v0627wTr94jKVEbPB9BAohC37baEmqcbGjm/R4E/NCVW4LDo+a4x69tFx7s87m/luJgyvaJl9Z8NF0Z/020aifFV4MZU/+6L5aY+CkXIQ/vdFMIvjaOAMuCxRrZ5FJgFXJ/iMeYTsvc9CV82bQlVqi+aWYdmBx90IPzq2ijqPPlTM3vdzKrNrIZQdds/pthkYLCZfSV6PorwIV3fCXMvwi+T5WZWU38DDiD8yoj1UlOOb2ZdgT7Ai3H7mRX3fC/gR3HHnxetG2hmXYBKwi+4WDNo2K7R/XNxy58DBgO4+3JCFftpUcxtCM0Md0fPe0TnMz4uvieife0Ys99X3L2ukZiyoUnvU4z57r425vnHhC+PervS+HswmC1f72cJv3J3jVn2lruvi3n+CfB23PE/IfQVaqrX457Hn0ezRB2dp5nZR2a2ik3nHv86NrafcsJnzyxCMla/vKOZ/crM5prZ59H79I2m7p/wHix19/r/IaLX9V/Rulivx5RZQmiqrH+t/kRoDvog6rh9mpl1TnC8LwmfT61KWa4DkIyZSMh+twXucvf1KVzZ82PgX7GXPDXEQ4r9WnT7XbTd88C3CV/azfUZsHXcsouB/yVk/q8Rqr4vJCRC9fG8ZWazCW3Jr0b3f3T3DVGREsIvrmMTHHNN3PP4qy0aPX59GI2cWwkhGZuSYN0nZL6D8d3AX6KkYD9CFfH9MbFBqHZ+JsG2C2Mep+tqlMXRfW9CtX29XjHrGtLc92ld3HMnfMFnwvoEx0q0rDnvfaLzaNHfkJn1IzTZTSE0W1YTah2fIvwgSHU/JYTmnfWEGoDY/40bCbUvFxFqJFcD4whNDpkS/1pB9Fq5+yIzG0SohT0UuAy43syGu3tsp+utCZ9PrYpqFIpUlGG/TPgyaPQ66miblwlfGr9u5mHfiu6b88so1qts+WvgQOBJd7/T3V9z9/+yZS0AhATl5KhWYSjRr+XIbEK7+0p3/2/c7eNGYmrw+O6+gvBrbp+47faOez4bGJzg+P919xoPPdQXAfvGbbdfI/HNjYkzPu43Y55PI/Sb+B9CIvVXd18WncMSwpUoOyeJLxM9zes7Ao6oXxB9wRxO47UoiaT6d9KYeTT+Hsxly9f7IMKX9Vzy0zpCc2RD9iL8av6Ru7/g7m/TvFqKXxP+B0e6e3wifiChaedP7v4GoRPhTs2IdS7Qzcw21uCYWTtgOJv/3TfK3de6+5PuPpZQW9oROCZmvx0J/SlmN2W/xUA1CsVtBNDe3T9vwjaXEjqI1RGq4xIysz8TqmZnEr4gK4GfE349PN7cgCN/I9RuxHobOM3MDiF8kZ5O+DBYFlfuPkIv/TuAV9099sPiHsKvy8fN7GeENslehF8Qb7n7Iw3ElMrxxwG/MLP5hCrxo4CvsXktw+XA381sPCGJWUX4IjsR+IG7fxHt5+poPy8C3yJ8cSbl7u+Y2YPA782s/lLYcwmd9U6JKVdrZvdG63YATojb1c+AO8xsGaHNeD2hw92R7n52QzEkYmY7Emot+kXPh0WrNiZGZnYbcK2ZLSZ0KvwJ4YtqQlOPR+p/J40ZBzxoZi8R/h73J2qyiXEj8KqZ/SaKdQDwO8IX4Ifkp/eAQ8xsB0KHzhXuHl+78R/C3+zFZnYP4cv+8qYcxMxGE67I+Fb0vP5S1y+ipPpt4Ojoc6SGULPQh9BZsCmx/pPwv3avmX0/KncZoZ/WrU2I9yzCD+eXCH2kDiNcRTQvpth+hKT22VT3WyxUo1DE3H1NE5ME3P19woddY+1wTxJ6ED9M+MJ9kPAL4KDY9sJmugfoaWaxv+iuJvyD1vel2Aq4OUH8SwmJyjA2r02o77txEOEXwV1R3A8Tev4nG2OiKcf/LaG/xk2Eau+9CV84G3+Ju/szhMRkCKGZ5t+EXvOr2FQdfVO0798Q2lX3IVQBN+a7hBqDPxJ6eO8HfDPqTxKrvgf+Cjb1P6iPbwqh6eibhA/NlwmXui1K4fiJ3E54LX5B+HVY31RVFVPmJ4T343ZCDcNA4Ah3T6XpIV5KfyeNcfe/EJoxxhLeo1HEtLFHZf5N+CI8kPB6TyH87Z3TjLizZRyhKeENQhX6FjVV0XmdTxjXYh4haW/q+AEHEzoVTiM0IdXfborWX0j4n3sGeJrw9xU/iFIqsTrhV/98wmv/MqEZ6wh3r25CvMuAMwmdP98iJC5j3P3pmDKnEpLAmibstyjUX44lklfM7DJgT3c/ptHCeczM7gSGurtGGhQpUGa2LSFhHObujf2oKDpqepB8dSPwEzPr5AUyjLOZ9SF0lHyG0KN6JKHq+we5jEtEWmwA8L3WmCRAFpsezGxrM/uLhXkBPjCzU5KUO8TMnjGzFWb2foL1A6L1a8xsvpkdHrf+QjP7xMxWmtmdUccWKTDu/qW7X10oSUJkA6GvwQxC9frphPEgbstpVCLSIu7+vLtne36JvJG1pgczu4+QmJxFaD9+HNjX3efGlfsqsDOhjfxSdx8Qt34Woe3xZ4Trbu8gDBL0mZmNILRLH0roYPcX4EV3/2kGT01ERKRoZSVRsDBV8DJgN3dfEC2bAixK9iUe1RTcHpsomNlOhDHBu7v7qmjZ84QOJrdFvbnfd/dLo3WHReuaNLmMiIiIBNlqetiJMHzrgphlb7DltfKNGQy8W58kJNjP4Oh57LpeZtaticcRERERsteZsZww4UasFYTrVJu6nxUJ9lOZZH39486EuQ82MrMxhEly6NSp056DBg1qYij5a/0r/+bLdl3pvFtTR0MVEZHW4pVXXql29x6NlctWolBDmE42VhfCtePp3E/8+vrHWxzH3ScShjmmqqrKZ88unsG2PimrZP623+Dg2X/IdSgiIpKnzCylqziy1fSwACgzs9ihVIfS9GFO5wLbx03WEbufudHz2HVLokF4WhWNjiEiIumQlUQhusTtYeAqM+tkZvsRJgTZYmKcaPa39oSZvMzM2ptZ22g/Cwgj1V0RLT+WMMLdn6PN7wbOMrNdzayCMKTwpAyfXl4yDaQlIiJpkM0hnM8jXPL4KWE8/nPdfa6ZHRBNMVrvQOALwvjq/aLHf49Z/z+E4V+XAb8CTnD3zwDc/UngBsKANx8Shgi9IpMnlY88Y5PgiYhIa5O1kRmjOQe2GI7X3Z8ndEKsfz6dBqZ7jeYiOLiB9eMJkwK1WoaaHkREJD00hLOIiGRdXV0d1dXVLF++nA0bNuQ6nKJUWlpKRUUF3bt3p6Sk+Q0IShSKlfooiEgeW7hwIWbGgAEDaNOmDWZqMk0nd2f9+vUsWbKEhQsX0q9fv2bvS9NMFyE3U9uDiOS11atXU1lZSdu2bZUkZICZ0bZtWyorK1m9umVT5ihREBGRnGhJdbikJh2vsd4lERERSUqJQrFSHwUREUkDJQpFSOMoiIi03P3338/w4cPp1KkTPXv2ZPjw4fz+97/H3Rk9ejRt27alvLycrbfemiOOOIL58+cn3M/06dPp27fvFssPPvhgbr/99o3PFyxYwIknnkj37t3p2rUrQ4YMYfz48WzYsIH3338fM6O8vJzy8nJ69erFeeedx/r16zN2/vWUKBQhpQkiIi0zbtw4LrjgAn7yk5/wySefsGTJEm677TZeeOEF1q1bB8DYsWOpqalh4cKF9OzZk9GjRzf7eO+88w7Dhw9n2223Zc6cOaxYsYIHH3yQ2bNns2rVpumKli9fTk1NDXPmzGHWrFnccsstLT3VRunyyCKlhgcRkeZZsWIFl19+OXfffTfHH3/8xuV77LEH99xzzxblO3bsyCmnnMJJJ53U7GNeccUV7Lvvvowfv2m8wJ133pl7770XCAlCrJ49e3LEEUcwb968Zh8zVUoUipX6KIhIAfnRj+D11zN7jGHD4Le/bbzcrFmzWLt2LUcffXRK+62pqeGee+5hjz32aHZsTz31FNddd13K5T/++GOmTZvGBRdc0OxjpkpND0XIdU2yiEizVVdX0717d8rKNv2W3nfffamoqKBDhw4899xzAPz617+moqKCHXfckZqaGiZNmtTsYy5dupRtttmm0XLdu3enoqKCyspKOnXqxAknnNDsY6ZKNQrFShUKIlJAUvmlny3dunWjurqa2trajcnCzJkzAejbty91dXUA/PjHP+aaa67ZYvvy8o3TFzFv3jzKysoSdjpcv349bdq02XjMxYsXNxpbdXU1ZWVlfPHFF1x++eWMGDGCWbNmNf0km0A1CiIiIjH22Wcf2rVrx6OPPtqs7Wtqajbe+vXrR79+/aiurqamZtNEye7OBx98QP/+/QE4/PDD+fOf/5zyMTp06MDo0aN58cUXqa6ublacqVKiULRUpSAi0hwVFRVcccUVnHfeeTz00EOsWrWKuro6Xn/99WYNh9yvXz+GDx/OJZdcQk1NDWvXruXGG2+kTZs27L333gD84he/YObMmRuvsgD473//y6mnnrpFR0aAtWvXMmXKFHr37k23bt1adsKNUKJQhBzN9SAi0hJjx45l/Pjx3HDDDfTq1YtevXpx9tlnc/3117Pvvvs2eX8PPPAAn376KTvuuCOVlZU8/fTTPP7447Rv3x6AHXbYgVmzZvH+++8zePBgunbtyvHHH09VVRWdO3feuJ+KioqN4yjMmjWLqVOnZnyuDHP1jqeqqspnz56d6zDSZlHbAbzd+2AO/XBSrkMREUnorbfeYpdddsl1GK1CstfazF5x96rGtleNQhEyVKEgIiLpoUShSJlqikREJA2UKBQhjaMgIiLpokShSKk+QURE0kGJgoiIiCSlRKFIqY+CiIikgxKFIuSYmh5ERCQtlCgUIXVlFBGRdFGiUKxUpSAiImmgRKEYGShTEBFpvgEDBtChQwfKy8vZaqutOOqoo/joo482rp85cyaHHnoonTt3pmvXrowcOZJ58+a1aJ+p7vfaa69lu+22o7y8nL59+3LSSSel78QTUKJQhDTXg4hIyz322GPU1NSwePFievXqxfnnnw/ArFmz+NrXvsbRRx/Nxx9/zHvvvcfQoUPZb7/9ePfdd5u1z1T3O3nyZKZMmcJTTz1FTU0Ns2fP5rDDDsvci4ASBRERkQa1b9+eE044YeMv+7Fjx3L66adzwQUX0LlzZ7beemuuueYa9t57b6688spm7TPV/b788suMGDGCHXbYAYDevXszZsyYtJ5vvLKM7l1yRhUKIlJQfvQjeP31zB5j2DD47W+bvNmaNWt44IEH2HvvvVmzZg0zZ87kqquu2qLct7/9bS699NIm77P+eSr73XvvvfnhD39IZWUlhxxyCHvssQelpaVNPqemUKJQpDSOgohIyxxzzDGUlZWxevVqevTowbRp0/j888+pq6tjm2222aL8NttsQ3V1dZP3CaS831NPPRUz46677uLKK6+kffv2jB07lksuuSQNZ5yYEoWipAskRaTANOOXfqY98sgjHH744WzYsIFHH32Ugw46iDlz5lBSUsLixYsZNGjQZuUXL15M9+7dATjyyCN5/vnnAZgwYQKjRo1Kus958+ax1VZbpbRfgFGjRjFq1CjWr1/PI488wqhRoxg2bBgjRozIyOugPgpFSvUJIiLpUVpaynHHHUdpaSmzZs1in3324cEHH9yi3J/+9KeNHQufeOIJampqqKmp2ZgkJNvnjBkz6NSpU0r7jdWmTRtOPPFEhgwZwptvvpmGM01MNQrFSBUKIiJp4+5MnTqVZcuWscsuu/CrX/2KESNGMGjQIM4880xqa2sZN24cs2bN4uWXX27WPoGU9jtp0iR69OjBgQceSKdOnZg2bRpz585l+PDhGTt/JQpFSn0URERaZuTIkZSWlmJm9O/fn8mTJzN48GAApk2bxs9//nMuvfRSSkpKOOCAA5gxYwYDBw5s9j7333//RvfbpUsXrr32Wk499VQ2bNhA//79ufXWW9l///0z9jqY6wuFqqoqnz17dq7DSJuPOgzk7a5f5fBP7sl1KCIiCb311lsbf0lLZiV7rc3sFXevamx79VEQERGRpJQoFCGN4CwiIumiRKFIaaJpERFJByUKRchNaYKIiKSHEoVipUxBRPJcXV1drkMoeul4jZUoiIhI1nXq1IlFixaxbt06dPVd+rk769atY9GiRXTq1KlF+9I4CkVL/3gikr/69u1LdXU1H3zwAbW1tbkOpyiVlZXRtWvXzYZ/btZ+0hSP5BVTniAiea2kpISePXvSs2fPXIcijVDTQzHSEM4iIpImShREREQkqawlCma2tZn9xcxWm9kHZnZKknJmZteb2dLodr2ZWcz6kWb2ppnVmNlMM9s1Zt1oM9sQrau/HZyF08s/6hwkIiJpkM0ahVuAdUAvYBRwq5kNTlBuDHAMMBQYAowEzgYws4HAPcA5QAXwGDDVzGL7Wsxy9/KY2/QMnU/ecrU9iIhImmQlUTCzTsDxwGXuXuPuM4CpwGkJip8BjHP3he6+CBgHjI7WjQCed/cZ7l4LXA9UAgdl+hwKidIEERFJl2zVKOwE1Lr7gphlbwCJahQGR+uSlbO4xwbsFrNsDzOrNrMFZnZZXG3Dpg3NxpjZbDOb/dlnnzXlXAqCGh5ERCQdspUolAMr45atADonKbsirlx51E/hKeAgMzvYzNoClwJtgY5R2ecISUNPQg3GycBPEgXk7hPdvcrdq3r06NG8s8pjpj4KIiKSBtlKFGqALnHLugCrUijbBajxYD6haeL/gMVAd2AesBDA3d919/fcvc7d5wBXASek9UwKgJsaH0REJD2ylSgsAMqizoj1hgJzE5SdG61LWM7dH3L33dy9G3AFMAB4OclxnVbaZK/6BBERSYesJAruvhp4GLjKzDqZ2X7A0cCUBMXvBi4ys0oz6wNcDEyqX2lme5pZqZn1ACYCU6OaBszsSDPrFT0eBFwGPJrBU8tfyhRERCQNsnl55HlAB+BT4D7gXHefa2YHmFlNTLkJhMse5wBvAo9Hy+rdBCwH3gaWAd+LWXcY8G8zWw38jZCcXJuZ08lvmmhaRETSIWtzPbj754TxEeKXP0/owFj/3IGx0S3RfvZv4Bg/Bn7c4mALntIEERFJDw3hXIxaZa8MERHJBCUKxUpVCiIikgZKFIqUGh9ERCQdlCgUJaUJIiKSHkoUipUyBRERSQMlCsVInRlFRCRNlCgULVUpiIhIyylRKEKuKgUREUkTJQpFSGmCiIikixKFYqWWBxERSQMlCkVLmYKIiLScEoUi5GbKE0REJC2UKIiIiEhSShREREQkKSUKRShc9aC2BxERaTklCkXITRdIiohIeihRKFaqUBARkTRQolCklCeIiEg6KFEoUppoWkRE0kGJQlHSOAoiIpIeShSKkfoyiohImihRKFqqUhARkZZTolCMNISziIikiRKFIuVKFEREJA2UKBQhQxUKIiKSHkoUipGhKgUREUkLJQrFSH0UREQkTZQoFClVKIiISDooUShCqlAQEZF0UaJQrFSlICIiaaBEoRiZYUoUREQkDZQoFCMrUY2CiIikhRKFIuQlJZjX5ToMEREpAkoUipEZhhIFERFpOSUKxUhNDyIikiZKFIqQl5RQoqYHERFJAyUKxchK1PQgIiJpoUShGJlRQp1aH0REpMWUKBQhLynBcDZsyHUkIiJS6JQoFKOSEkqoo06tDyIi0kJKFIpR1PSgGgUREWkpJQrFKGp6UI2CiIi0lBKFYmQlqlEQEZG0UKJQjEpMfRRERCQtspYomNnWZvYXM1ttZh+Y2SlJypmZXW9mS6Pb9WZmMetHmtmbZlZjZjPNbNe47S80s0/MbKWZ3Wlm7TJ9bnnHdNWDiIikRzZrFG4B1gG9gFHArWY2OEG5McAxwFBgCDASOBvAzAYC9wDnABXAY8BUMyuL1o8AfgocBvQHtgd+kblTylO66kFERNIkK4mCmXUCjgcuc/cad58BTAVOS1D8DGCcuy9090XAOGB0tG4E8Ly7z3D3WuB6oBI4KGbbO9x9rrsvA66O2bb1KFEfBRERSY9s1SjsBNS6+4KYZW8AiWoUBkfrkpWzuMcG7NbAtr3MrFv8QcxsjJnNNrPZn332WconUhDUR0FERNIkW4lCObAybtkKoHOSsiviypVH/RSeAg4ys4PNrC1wKdAW6NjAtiQ6jrtPdPcqd6/q0aNHU88nv6mPgoiIpEm2EoUaoEvcsi7AqhTKdgFqPJhPaF74P2Ax0B2YByxsYFuSHKd4laqPgoiIpEe2EoUFQFnUGbHeUGBugrJzo3UJy7n7Q+6+m7t3A64ABgAvN7DtEndf2uIzKCQamVFERNIkK4mCu68GHgauMrNOZrYfcDQwJUHxu4GLzKzSzPoAFwOT6lea2Z5mVmpmPYCJwNSopqF+27PMbFczqwB+Hrtta2EamVFERNIkm5dHngd0AD4F7gPOdfe5ZnaAmdXElJtAuOxxDvAm8Hi0rN5NwHLgbWAZ8L36Fe7+JHAD8AzwIfABodahddFVDyIikiZl2TqQu39OGB8hfvnzhE6I9c8dGBvdEu1n/0aOMx4Y36JgC1101cMG1SiIiEgLaQjnYlSiqx5ERCQ9Gk0UzKzEzA6NLkeUAmDRVQ+1tbmORERECl2jiYK71wGPuvu6LMQjaVASJQrr9I6JiEgLpdr08JyZ7Z3RSCRtSkpDH4W1a3MdiYiIFLpUOzN+ADxhZo8CHwFev8LdL89EYNJ8JW1CHwUlCiIi0lKpJgodgEeix31jlnuCspJjJWUlqlEQEZG0SClRcPczMx2IpE+pmh5ERCRNUh5HIRp++WTCtM6LgPvc/T+ZCkyaT00PIiKSLil1ZjSzkcArwCDgc2BnYLaZfStEpbU3AAAgAElEQVSDsUkzlarpQURE0iTVGoVrgaPd/Zn6BWZ2MGEWx6kZiEtaoKRMTQ8iIpIeqV4e2Rd4Pm7ZDDbv2Ch5orRMTQ8iIpIeqSYKrxNmcYx1UbRc8kxpGzU9iIhIeqTa9HAeMNXMLiCMo7AtsAYYmanApPnqEwWNzCgiIi2VaqLwNrALsDfQB/gY+Je7r89UYNJ89SMzfvFFriMREZFC12iiYGalQA1Q4e4zMh+StJSVhj4Kq1blOhIRESl0qUwKtQFYAHTLfDiSFiUllFqdEgUREWmxVJse7gH+amY3AQvZfK6Hf2YiMGkBC00PShRERKSlUk0Uzo3ur4xb7sD2aYtG0qNETQ8iIpIeqSYKO0ZNEFIISkpUoyAiImnRaB+F+s6MZtYuC/FIOqjpQURE0kSdGYuRmh5ERCRN1JmxGJWUUOJ11NTkOhARESl06sxYjEpCRdGqlQ5YbmMREZGCllKi4O7bZToQSSMLycH6dXWsX19KmzY5jkdERApWg30UzKx3I+v3TG84khZRjYI6NIqISEs11plxQewTM/tP3Ppn0huOpEVZqCgqo1aJgoiItEhjiUJ8A3f3RtZLPojaGtqwnuXLcxyLiIgUtMYSBW/ic8kHMYnC55/nOBYRESlojY6jIAVIiYKIiKRJY1c9dDSz52Ked455bkCHzIQlLRKTKCxdmuNYRESkoDWWKJwV9/yOuOe3pzEWSRfVKIiISJo0mCi4++RsBSJpFCUK5W1VoyAiIi2jPgrFKEoUelSoRkFERFpGiUIxihKFbl2UKIiISMsoUShGMYmCmh5ERKQllCgUoyhR2KqzahRERKRlUpoUyszaAqOBYUB57Dp3Pz39YUmL1CcKndaz9O0cxyIiIgUt1WmmJwNDgceAJZkLR9Iipumhuho2bIDS0hzHJCIiBSnVROHrwHburpkDCkGUKGzTfT21tbBoEfTrl+OYRESkIKXaR+FDoF0mA5E0qm96KF8PoA6NIiLSbKnWKNwNPGpmNxHX9ODu/0x7VNIy9QMutQuJgmaQFBGR5ko1UfhBdH9t3HIHtk9fOJIW7ULlT+c2XwKwbFkugxERkUKWUqLg7ttlOhBJo65dASj3lYASBRERab5UaxQwszJgX6ASWAjMcvfaTAUmLVCfKKwPbQ4aS0FERJorpc6MZjYIeAu4F/ghcB8w38x2SfVAZra1mf3FzFab2QdmdkqScmZm15vZ0uh2vZlZzPpDzexVM1tpZu+a2ZiYdQebWZ2Z1cTczkg1xqLRvj20a0fbNcvp2BEWL851QCIiUqhSverh98BEYFt338fd+wK3RctTdQuwDugFjAJuNbPBCcqNAY4hjNswBBgJnA1gZm2AvwATgK7AScB4Mxsas/3H7l4ec2udM2BWVGArV1BZGS6PFBERaY5UE4VhwHh395hlv42WN8rMOgHHA5e5e427zwCmAqclKH4GMM7dF7r7ImAcYVRIgK2BLsAUD14m1HTsmuJ5tB4VFbB8OX36wMcf5zoYEREpVKkmCh8DB8UtOyBanoqdgFp3XxCz7A0gUY3C4GjdFuXcfQmh2eNMMys1s32A/sCMmPI9zWyJmb1nZr+JkpTWJ0oUVKMgIiItkWpnxkuBqWb2V+ADwpfzUcCpKW5fDqyMW7YC6Jyk7Iq4cuVmZlGNxn3A7cBN0fpz3f2j6PF8Qi3H/CjGycB4oqaLWFHfhjEA/Ypx2MKuXWHFCvoMCTUK7rCpp4eIiEhqUqpRcPepwFeANwlf7m8Ce7r7oykep4bQZBCrC7AqhbJdgBp396hT5f3A6UBbQk3DWDM7KorzE3ef5+517v4eMJbQ5JHonCa6e5W7V/Xo0SPF0yggMTUKa9fqygcREWmelC+PjJoNrmnmcRYAZWY20N3/Ey0bCsxNUHZutO6lBOV2Axa4+7To+dtm9jhwJPB4orBprVNpx/RRgFCr0K1bbkMSEZHCkzRRMLOJ7j4mejyF8KW7hVSmmXb31Wb2MHCVmX2X0DxwNGFchnh3AxeZ2d+iY14M/C5a9xow0MwOBZ4hjAr5TeCGKM5DgHcJc1P0BX4FpFrrUVwqKmDFCrbdNjx9/33YffecRiQiIgWooV/b78U8/i/wTpJbqs4DOgCfEvoZnOvuc83sADOriSk3gTCd9RxCE8fj0TLc/R3gO8DNhD4PzwJ/JvRZANgDmAmsju7nEMZ9aH26doUvv2SX7cIwzvPm5TgeEREpSElrFNz9upinE9z9k/gyZtY71QO5++eE8RHilz9P6MBY/9wJfQvGJtnPn4A/JVk3ntB5USoqwp2toLKyPXMTNfKIiIg0ItX2+wVJlut3ar6KEgWWL2foUHjlldyGIyIihSnVRGGLC+vMrAtQl95wJG2i+R5YsYL99gtND0uX5jYkEREpPA0mCmb2kZl9CHQwsw9jb8Bi4JGsRClNF1OjcMAB4eELL+QuHBERKUyNXR55KqE24W9sPtyyA0vc/e1MBSYt1LNnuF+8mL1OgrZtYcYM+Na3chuWiIgUlgYTBXd/FsDMurv7muyEJGkxYEAYivGdd2jfHvbYA156qdGtRERENpPSgEvuvsbMhhHmd+hOTJ8Fd788Q7FJS7RpA717b5zoYdAg+Mc/chyTiIgUnJQ6M0bzIrwAHApcAuxOGAhpx8yFJi0WMyPUzjuH0Rmrq3Mck4iIFJRUr3oYC3zd3Y8FvojuTwDWZywyabmYROHII8OiP/whh/GIiEjBSTVR6BkNjARQZ2Yl7v4EMDJDcUk6VFbCRx9BXR3DhoWOjL/8JcyZk+vARESkUKSaKCw0swHR4wXA0WZ2ALAuE0FJmuy5J6xYAW+Hi1Nuugnat4fzzw/TTouIiDQm1UThBmCX6PFVwB+BfwK/yERQkia77hru//tfIFwIceWV8OyzMHVqzqISEZECklKi4O6ToqYGovutgK3c/dZMBictNHBguI8SBYAxY6C0FK6+OkcxiYhIQWlomumGkohaoDbqq6BhnPNVt26w1VawYNNUHW3bwt57wydbTPElIiKypcaSgfUp3CSfDR0Kr7662aIhQ0LXBRERkcY0lChsB2wf3c4HngW+Tuir8HXgGeAHmQ5QWmjPPeGNN6BuU8VP164hUVCHRhERaUzSpgd3/6D+sZldBFS5+/Jo0QIzmw3MBtRPIZ/tvDOsXQsffhh6MwJdusD69fDll9ChQ27DExGR/JbqVQ9dgY5xyzpGyyWf7bxzuJ87d+OimBmoRUREGpRqojAZeMrMxpjZkdGQztOi5ZLPvvKVcJnDiy9uXKREQUREUpXSpFCEIZz/C5wE9AEWA/8HaEDgfFdeDrvvDrNnb1xUnyisXJmjmEREpGCkOntkHXBbdJNC06PHZtUH9YnC8uVJyouIiEQaGkfhNHefEj3+TrJy7n5nJgKTNGrffrNpI7fdNty/+26O4hERkYLRUI3CycCU6PFpSco4oEQh37VvHy5xiPTvH2oVXn89hzGJiEhBaOjyyG/EPD4kO+FIRrRrFy6RjJjBsGFw221wxRXQu3cOYxMRkbyW9KoHMytJ5ZbNYKWZ4moUAM4+O9yffDI88AC8+WYO4hIRkbzXUNNDLaFpIRmL1pemNSJJvwSJwsknwyuvwG9+A9Onh2V77gk9e0KbNlBWBr16wbXXQkVF9kMWEZH80FCisF3WopDMimt6qPfrX8PPfx7mjPrd7+Cdd+Czz6C2NtwefRQ++gj+8Ac1T4iItFYpDeEsBS5BjUK9igr46ldhypQt111wAdx8M+y6K9x3H+yzTxj+WUREWo+U+xiY2bfMbJyZTTazu+tvmQxO0qR9e9iwIVQTNMH48fD446EZ4utfh+23D8uWLIEvvthsnikRESlSKSUKZnYFMCEqfyKwFBgBaMieQtCuXbhP0PzQkNJS+MY34LXX4O67oU8fuPji0AzRsSN07gyLFmUgXhERyRup1ih8BzjC3S8E1kX3I4EBmQpM0qh9+3CfpPmhMZWVcNppYbbqv/4VfvtbGD0a1qwJzRPN3K2IiBSAVOd6qHD3+gvo1plZG3d/ycwOylRgkkYtTBTqmcFRR216/uWXcP/9MHUqXHopHHssDB3aokOIiEieSTVReMfMBrv7XOBN4FwzWwYsy1xokjbNbHpozOTJoSPkHXfAL34Rbg8+CFVVTdtPSUkYVtosreGJiEgapJoo/BzoFj3+KXAvUA6cl4mgJM3SVKMQr21buPBC+NGP4D//CR0eTzyxefv6+tfhkUc25TQiIpIfGkwUzKzE3evc/W/1y9z9JWDHjEcm6ZOhRKGeGey0Ezz7LDz9dNO3X7AArrsOvvWtkCx06JD+GEVEpHkaq1FYZGZTgLtj+ihIoclQ00O8bbcNnRybo337MO/EGWfAvfeGSzJFRCT3Gvs4Pgc4FXjZzN4CJgP3uvtnGY9M0ifDNQrpcPnloWbi8svDiJCDBoWJqxpTVgb/+7+wo+q4REQyosFEwd0fBR41swrgJMJ00zeY2TRC0jDV3ddnPkxpkQJIFAB+9jMYPBj++U945hl47rnGt3n/fejbN3SkFBGR9EupgtfdlxMGXJpgZtsTahl+Ey3rnrnwJC2y1PTQUiUlcNxx4ZaqHXeEt97KXEwiIq1dk6aJNrO2QBUwHOgFzMlEUJJmBVKj0Bxf+Qq8+GKuoxARKV6pDuG8v5lNBJYA1wAvAju5+yGZDE7SpIgTheHDwwyX1dW5jkREpDg1mCiY2ZVm9l/gsWjRN919J3e/WrNLFpACaXpojvoOj6+8kts4RESKVWM1CsMJgy1t4+5j3P2FLMQk6VbkNQplZTB9eq4jEREpTg0mCu5+pLvf7+7F9w3TmhRxolBeHoaRfuqpXEciIlKcmtSZUQpUETc9QBg2evZs+Pvfcx2JiEjxUaLQGpSUQJs2RVmjAHDuubD99nDWWbByZa6jEREpLllLFMxsazP7i5mtNrMPzOyUJOXMzK43s6XR7XqzTfMKmtmhZvaqma00s3fNbEzc9qdE+19tZo+Y2daZPreC0L49fPFFrqPIiHbt4M47YeFCGDs219GIiBSXbNYo3AKsI4y/MAq41cwGJyg3BjgGGAoMAUYCZwOYWRvgL4SBnroSRoscb2ZDo/WDo3WnRcdZA/w+c6dUQLbaCj7/PNdRZMxBB4UZKCdMgHPOgTVrch2RiEhxyEqiYGadgOOBy9y9xt1nAFMJX+jxzgDGuftCd18EjANGR+u2BroAUzx4GXgL2DVaPwp4zN2fc/ca4DLgODPrnKlzKxg9ehT9YANTpoRmiAkT4LvfDc0Qq1eHpOGLL0LLy9q1sH491NbChg1QVwfuuY5cRCR/ZatGYSeg1t0XxCx7A0hUozA4WrdFOXdfAtwHnGlmpWa2D9AfmJFoW3d/h1CLsVP8QcxsjJnNNrPZn33WCua46tEDivw8u3eH3/8efvlLuO8+6No1XBXRqRN07Bimr27fHtq2DV02ysqgtDR04TAL96WlYXmbNrD77iGhEBFpzbI1mW85EN/NbAWQ6Jd+ebQutly5mZm7OyFRuB24KVp/rrt/lGTbpMdx94nARICqqqri/03ZvXurmRTh0kvDQEzz5oXagvpbfe1BQ8vqnz/xBLz2GixYALvu2vgxRUSKVbYShRpCk0GsLsCqFMp2AWrc3c1sEHA/cBzwD2Ag8Fcz+9jdH2/icVqXbbaBTz4J34Kb+oYWrW98I9ya65BD4Igjir4SRkSkUdlqelgAlJnZwJhlQ4G5CcrOjdYlKrcbsMDdp7l7nbu/DTwOHJlo22imy3bR8Vu3Pn1CA/2yZbmOpCD06xfu77wzt3GIiORaVhIFd18NPAxcZWadzGw/4GhgSoLidwMXmVmlmfUBLgYmReteAwZGl0iame0AfBP4d7T+HmCkmR0QdaC8CnjY3VWj0KdPuF+4MLdxFIiddoIDDwxNEHV1uY5GRCR3snl55HlAB+BTQj+Dc919bvSlXhNTbgJhEqo5wJuEGoMJsLFz4neAmwl9Hp4F/kzos4C7zwXOISQMnxL6JpyX8TMrBPUN7a+/nts4Csg554Smh5dfznUkIiK5Y65rw6iqqvLZs2fnOozMqqsLYymccgrcemuuoykIy5ZB797hUstbbsl1NCIi6WVmr7h7VWPlNIRza1FSAvvsA48/rmv+UrTVVjBihGamFJHWTYlCa3LmmfDRR/CPf+Q6koIxcCC8954GZRKR1kuJQmty7LHQrRvcfHOuIykY220XRnVcsiTXkYiI5IYShdakbVv4znfgySfhP//JdTQFYfvtw/177+U2DhGRXFGi0Nr88Ifh/uyzYd263MZSALbbLty/+25u4xARyRUlCq1N375w8snwzDNwwAFFO/V0uvTvH+4//DC3cYiI5Eq2hnCWfHLvvWE0oXPPhT32CKMLde8ehnYuK4M994T/+R/oEj8aduvTsSNUVMDHH+c6EhGR3FCi0Fqdc06Yg3nq1NAA/9prYXl1NUycCL/5Ddx1F+y9d27jzAOVlbBoUa6jEBHJDSUKrdnFF4dbrA0bQvJw5plh3IX994eTTgrTMbZrt6lcv37Qq1d2482RPn2UKIhI66VEQTZXWhouozzwwJAszJgB55+fuOx224XkYZtt4GtfC/X09QYMCFMwtmkT9llSEm4FOHNlZWWYslpEpDVSoiCJdesWahYA3n8f5s8PtQ0Qpqt++eXQdFFTAy++GDpHpsJsU+IweDDceCMcdlhGTiFdKivDKW/YEEIXEWlNlChI4wYMCLdYZ5216XFdHSxfvun5l1+G0R+XLAnfrnV1W96vXBk6VR5+eGjeePDBMLFCHqqsDGF/+mmoPBERaU2UKEjLlZTA1ltvvuyMMxrf7pe/hLFjYcKE8A3cufOmdT/4AVx7bXrjbKbKynC/aJESBRFpfZQoSO506QK33RZqJx54INQ0QKhdmDkzt7HF6NMn3C9aBFWNzrMmIlJclChI7u21V7jVe+stWLo0d/HEqa9R0FgKItIaaWRGyT8dO+bViJE9e4ZOjLpEUkRaIyUKkn86doQ1a3IdxUalpaFvghIFEWmNlChI/smzRAE06JKItF5KFCT/5GGiUFkJCxfmOgoRkexToiD5Jw8ThWHDwphTixfnOhIRkexSoiD5p0MHqK2F9etzHclGRx0F7vDcc7mOREQku5QoSP6pnzMij2oVhgyB9u3hX//KdSQiItmlREHyTx4mCm3awFe+okRBRFofJQqSf+oThTwaSwFg+HB49dW8ahEREck4JQqSf/KwRgFCovDllzBnTq4jERHJHiUKkn/Ky8P9qlW5jSPO8OHhftas3MYhIpJNShQk/3TrFu6rq3MbR5z+/WHQIBg/fvNZtUVEipkSBck/9bMw3XJLbuOIYwYTJ8L778MNN+Q6GhGR7FCiIPmnTx/4/vdh2jR4+ulcR7OZAw6AESPCrNjuuY5GRCTzlChIfrrxRth+ezj2WHjssVxHs5mRI+Hdd8Ns2CIixc5cP4uoqqry2bNn5zoMiTdvHgwdGkZpnDgRvve9XEcEhGGct90Wdt4ZRo8OTRKJbiUlydfV37761XCKIiLZZmavuHtVo+WUKChRyGuvvBIuN9iwAQYOhIqK8A09eXL4Js6RU0+Fe+5p+X523RXmzm35fkREmkqJQhMoUchzy5fDFVfARx/BCy/Ap5/Cm2/C4ME5C8k9DPPgnvhWV9f48htvDP0116yBtm1zdioi0kqlmiiUZSMYkRapqICbbgqPn3sODjoIPvkkp4mCGXTq1LJ9HHQQ3Hwz3H47nHdeeuISEUk3dWaUwtKrV7i/777cxpEGxxwDhx4KP/sZrFuX62hERBJToiCFZaed4MgjQx+FefNyHU2LlJSEq0CXL8+7CztERDZSoiCFxSwkCeXlcNRRBX+N4te/Dp07w0MP5ToSEZHElChI4enRAx58MAyROGwYfPZZriNqto4dwxUU99+vqx9EJD8pUZDCdPjh8Ic/hMb9Z57JdTQtUj88xEEHwRNP5DYWEZF4ShSkcJ16KpSWFvy8z3vsEYaL6NABvvENuPRSeO21XEclIhLo8kgpXO3bh8GX/v3vXEfSYl/5Smh6+OY34brrwu2QQ8JzCDNXHntsTseYEpFWSomCFLYhQ+DFF3MdRVp06RKGifjwQ7jwQnj44c1bVQYPDqdaXp67GEWk9dHvEylsQ4aETo0rVuQ6krTp1w/+/GdYvTqc1tKl8O1vhxqHzp1hu+3CaNa77AK77Rb6NjzyiGazFJHMUI2CFLYhQ8L9v/8d5oAuIh07bnr8wANw0knw17+GaS9qa8Nt9eowG/dzz4WJqkaPhvPPDxeGiIikQ9YSBTPbGrgD+BpQDfyvu9+boJwBvwK+Gy26Hfipu7uZHQDE9wvvBJzg7n82s9HRMb6IWf9Nd5+eznORPLLXXuH+xReLLlGId9xx4RZv+fIwFPTkyXDNNfDUU3DRRYn3YZbZ5dk4Rq6W51tMZWUwYEC4hzCkeJcuibcXaYmsTQplZvcRmjrOAoYBjwP7uvvcuHJnAxcBhwEO/AO42d1vS7DPg4HHgN7uvjpKFL7r7vs3JTZNClXgdtwx3J58MteR5Nytt2reiNaqbVt47z3o0yfXkUihyKtJocysE3A8sJu71wAzzGwqcBrw07jiZwDj3H1htO044HvAFolCVPYhd1+dseAl/51xBlx+OUyfDgcfnOtocurcc8Noj6sT/Eck+02QruXZOEauludjTEuWwLJl4fFHH8EvfwmvvqpEQdIvW00POwG17r4gZtkbwEEJyg6O1sWW22KawCj5OAEYGbdqDzOrBj4HpgDXuXttgu3HAGMA+vXrl/qZSP65+OIw+NLpp8P8+Zs37rdC222X6wgk21asCInCa69tuqRWJF2yddVDObAybtkKoHOSsiviypVHfRdiHUfo6/BszLLngN2AnoQajJOBnyQKyN0nunuVu1f1UM+vwtaxI0yaFH5WnX22uv9Lq9O1K+y+O8yaletIpBhlK1GoAeK72XQBVqVQtgtQ41t2pjgDuDt2ubu/6+7vuXudu88BriLUOkixO/RQ2Hdf+OMf4fjj4fPPcx2RSFYNGwZvvNF4OZGmylaisAAoM7OBMcuGAommwZkbrUtazsy2BQ4G7m7kuA400IdZisqzz4YhDR9/HPbbD955J9cRiWTNV78KH38cOjSKpFNWEoWos+HDwFVm1snM9gOOJvQhiHc3cJGZVZpZH+BiYFJcmdOAme6+2TeBmR1pZr2ix4OAy4BH03oykr/KyuCnPw2DDsyfD2PH5joikaw55JBwP316TsOQIpTNkRnPAzoAnwL3Aee6+1wzO8DMamLKTSBc8jgHeJNwGeWEuH2dDkxOcIzDgH+b2Wrgb4Tk5Nq0noXkv2OOga99Dd59N9eRiGTNrrtC795wzz3qpiPplbVxFPKZxlEoQt/5Dtx1F6xapckRpNW4+Wa44AK4//4wkqdIQ1IdR0FzPUhxOuqocH+tKpSk9Tj33HD1w/e/H0bsFEkHJQpSnI4/Hk45BcaPh7//PdfRiGRFmzZw551hIrGjjoKamsa3EWmMEgUpXr/5TRj8fsSIMCCTSCtQVRVqFmbNCrOLzp+f64ik0KmPAuqjUNQWLAifnKtWhX4Lhx4aZtipv5WUJH6c6PnOO8MOO+T6jERSMmMGnHBCqF3Yd99Nf8b16h/H36e6LFvlO3YME17lq0GDQr+QhiYTy1ep9lFQooAShaK3aFGoVZibaNiOJth+e43NIAVlzhy45BJYs2bzKyHqH8ffp7osm+VXroS1axOfX66tXh2ad445BkaNColZIVGi0ARKFFoBd3j/faithbq68Lz+lsrzSZPC1IwvvbRpamsRadU2bAhTzNx7b3i+ZAn07JnbmJpCiUITKFGQRn30UWh6GDIEZs4MzRIiIsDTT8Phh8NVV8HPf144zRC6PFIknbbdFn7/e/jXv2DMmPBTQkSEMHx2x45htvspicYbLnBKFERSdcYZcPLJcMcdMHRoaKAUkVavc+dNA8GecQacf35xfTyo6QE1PUgT1NaGMRqmToVttoGjjw4Xr7dtG2odOnfevHt5Ux43Z5uhQ2HAgIyftog0bvZs+Pa3N03MNXQo9OoF7dunZ//33pveK0BSbXooS98hRVqBsjJ49FH49a9D58aHH4Z162DFitwNsH/++eGqjqZoTiNqU7cplmM0Zxsdo1Ueowp4549hYq6XXoLqalj6YfKWSm/i5MZ166qgU/a/tlWjgGoUJA02bAiXYdZfKQFNe9ycbZYtC2NDfPBB5s9PRHJv+XLo2jVtu1ONgkg2lZZCv37ZP+6778Ibb4QmkVQ158dBU7cplmM0ZxsdQ8fI1DYdOzZ9mzRQoiBSyEpKYI89ch2FiBQxXfUgIiIiSSlREBERkaSUKIiIiEhSShREREQkKSUKIiIikpQSBREREUlKiYKIiIgkpURBREREklKiICIiIkkpURAREZGklCiIiIhIUkoUREREJCklCiIiIpKUEgURERFJSomCiIiIJKVEQURERJJSoiAiIiJJKVEQERGRpJQoiIiISFJKFERERCQpJQoiIiKSlBIFERERSUqJgoiIiCSlREFERESSUqIgIiIiSSlREBERkaSUKIiIiEhSShREREQkKSUKIiIikpQSBREREUkqa4mCmW1tZn8xs9Vm9oGZnZKknJnZ9Wa2NLpdb2YWrTvAzGribm5mx8dsf6GZfWJmK83sTjNrl61zFBERKTbZrFG4BVgH9AJGAbea2eAE5cYAxwBDgSHASOBsAHd/3t3L62/AN4Ea4EkAMxsB/BQ4DOgPbA/8IpMnJSIiUsyykiiYWSfgeOAyd69x9xnAVOC0BMXPAMa5+0J3XwSMA0Yn2fUZwEPuvjrm+R3uPtfdlwFXN7CtiIiINCJbNQo7AbXuviBm2RtAohqFwdG6BstFyccJwN/WS54AAAfOSURBVORGtu1lZt2aGbeIiEirVpal45QDK+OWrQA6Jym7Iq5cuZmZu3vM8uOAauDZRrYlOs7S2IOY2RhCMwdAjZm9ncJ5pKp7FJu0jF7HltNr2HJ6DVtOr2HLZeI17J9KoWwlCjVAl7hlXYBVKZTtAtTEJQkQmhnujlueaFsSHcfdJwITGw+96cxstrtXZWLfrYlex5bTa9hyeg1bTq9hy+XyNcxW08MCoMzMBsYsGwrMTVB2brQuaTkz2xY4GLg7hW2XuPtSREREpMmykihEnQ0fBq4ys05mth9wNDAlQfG7gYvMrNLM+gAXA5PiypwGzHT3dxJse5aZ7WpmFcDPE2wrIiIiKcrm5ZHnAR2AT4H7gHPdfW792Agx5SYAjwFzgDeBx6NlsU5n806MALj7k8ANwDPAh8AHwBVpPo9UZKRJoxXS69hyeg1bTq9hy+k1bLmcvYa2ZdO/iIiISKAhnEVERCQpJQoiIiKSlBKFNEp1PovWzsymm9mXMfN1vB2z7pTotVttZo+Y2dYx61rt62tmPzCz2Wa21swmxa07zMzmm9kaM3vGzPrHrGsXzXmyMpoD5aJUty02yV5DMxsQzRkTO4fMZTHr9RpGotfijuj/b5WZvW5mR8as199iIxp6DfP2b9HddUvTjdBJ8wHCwE/7EwZ8GpzruPLtBkwHvptg+WDCmBcHRq/hvcD9en0dwgBjxwC3ApNilnePXocTgfbAjcCLMeuvA54HtoL/b+/eQqyq4jiOf39mKaikZhdnsptWiuBLSQ8lFNmFKKjMB/VFKutFCoQMekky0l4iyELohllREZEJERVdICMte4iEibI0ywtManhFq38Pa03uOZ59Zk5zxnOY8/vAgrP3ds/Z++eamTV7r33+TAN2Azf3Z9+h1mpkeBEQwPCS/ZzhiSxGActyZsNI9XYO5GX3xYFn2JJ9semhDZWW//OPAZcV1q0FVjb72FqtUT5QeAJ4vbA8OWc6xvn+d86PV/ySu4/0qHDP8ijgCDA1L+8EbixsX04efPW171BtVTLs64ezM6yd53ekWj7uiwPPsCX7om89NE499SwMVkjqlrRB0rV5Xa9aHZE+J+MYKVvnW11lZoeArcB0SeOAiZTXTindd5CPuVVtl/SbpJclTQBwhrVJOpf0vbkF98X/pSLDHi3VFz1QaJx66lm0u4dJJcA7Sc8Gr5c0mZNrdcCJDJ1vdX1lBifXP+nJrNa+7aQbmEn63PsrSOf/Wt7mDEtIOp2U05qI6MJ9sW5VMmzJvniqaj20g3rqWbS1iNhYWFwjaR5wC7Uz/KfGtnZWK7ODheWjFdv62rdtRMRB4Ju8uEfSYmCXpDE4w6okDSPd+jsGLM6r3RfrUC3DVu2LvqLQOPXUs7DeAhAVtTokXQKMIGXrfKurzGwUaW7HlojYB+yivHZK6b6DfMytrudT6IY5w5NJEvAicC4wJyKO503ui/1UI8NKrdEXmz2JYyg14A3SzPxRwNW00az8OjIaC9xEmpU7HFgAHCLdo5tOur0wK2f4Kr2femjbfHNWI0mzntcW8js75zAnr3uS3jPNV5JKsY8DppJ+0PTMkq6571BrNTK8Cric9IfTWaQnaz51hqU5rga+AkZXrHdfHHiGLdkXmx7YUGrAeODd/IvvV2B+s4+p1VruzF+TLoftz98sNxS2z8/ZHQLWAeOdb0B6nCoq2rK8bTbQRZrh/BlwUWG/EcBLpAHYHmBJxdct3XeotbIMgXnAL7lf7SIVlzvPGVbN8MKc21HSpe6etsB9ceAZtmpfdK0HMzMzK+U5CmZmZlbKAwUzMzMr5YGCmZmZlfJAwczMzEp5oGBmZmalPFAwMzOzUh4omFnLkhSSpjT7OMzamQcKZtZvkrZJOiLpYKGtavZxmdngcVEoM6vXbRHxcbMPwsxODV9RMLMBk7RQ0gZJqyT9KalL0vWF7R2S3pO0V9JPkhYVtp0m6RFJWyUdkLRZ0qTCl58t6UdJ+yU9mwvqIGmKpM/z+3VLevMUnrJZ2/AVBTNrlKuAt4EJwJ3AO5Iujoi9pIJe3wMdpGI2H0naGhGfAEtIn3F/C6lK6AzgcOHr3grMJJXM3QysBz4AlgMfAtcBZwBXDvYJmrUj13ows36TtI00EPirsPoh4DjwBNAZ+YeKpE3AM6TiNNuAsRFxIG9bAUyMiIWSfgCWRsS6Ku8XwKyI+CIvvwV8GxErJb1CKqzzWET8Ngina2b41oOZ1e/2iBhbaM/n9b9H7788tpOuIHQAe3sGCYVtnfn1JGBrjffbXXh9GBidXy8FBGyStEXS3f/zfMysBg8UzKxROnvmD2QXADtzGy9pTMW23/PrHcDket8sInZHxKKI6ADuB57zo5RmjeeBgpk1yjnAA5JOlzQXmAa8HxE7gC+BFZJGSpoB3AO8mvd7AVgu6VIlMySd1debSZor6fy8uA8I4J9Gn5RZu/NkRjOr13pJfxeWPwLWARuBS4FuYA9wV0T8kf/NPGA16erCPuDRwiOWTwEjSBMTJwBdwB39OI6ZwNOSzszv92BE/DyQEzOzk3kyo5kNmKSFwL0RcU2zj8XMGsu3HszMzKyUBwpmZmZWyrcezMzMrJSvKJiZmVkpDxTMzMyslAcKZmZmVsoDBTMzMyvlgYKZmZmV8kDBzMzMSv0LiAHUSH73NNYAAAAASUVORK5CYII=\n", 207 | "text/plain": [ 208 | "
" 209 | ] 210 | }, 211 | "metadata": { 212 | "needs_background": "light" 213 | }, 214 | "output_type": "display_data" 215 | } 216 | ], 217 | "source": [ 218 | "\n", 219 | "epochs_no_stop_trunc = np.array([epochs[:min_len] for epochs in epoch_values_no_stop])\n", 220 | "epochs_no_stop_trunc_mean = np.mean(epochs_no_stop_trunc, axis=0)\n", 221 | "\n", 222 | "epochs_with_stop_trunc = np.array([epochs[:min_len] for epochs in epoch_values_with_stop])\n", 223 | "epochs_with_stop_trunc_mean = np.mean(epochs_with_stop_trunc, axis=0)\n", 224 | "\n", 225 | "plt.rc('font', family='sans-serif', size=12)\n", 226 | "plt.figure(figsize=(8, 6))\n", 227 | "plt.plot(epochs_no_stop_trunc_mean, color='b')\n", 228 | "plt.plot(epochs_with_stop_trunc_mean, color='r')\n", 229 | "plt.title(\"MNIST (averaged over 10 random initializations)\")\n", 230 | "plt.xlabel(\"Epochs\")\n", 231 | "plt.ylabel(\"Validation Error\")\n", 232 | "\n", 233 | "axes = plt.gca()\n", 234 | "# axes.set_xlim([1, 30])\n", 235 | "axes.set_ylim([0.07, 0.10])\n", 236 | "\n", 237 | "plt.legend((\"GP-UCB\", \"BO-BOS\"))\n", 238 | "plt.show()\n", 239 | "\n", 240 | "\n" 241 | ] 242 | }, 243 | { 244 | "cell_type": "code", 245 | "execution_count": null, 246 | "metadata": {}, 247 | "outputs": [], 248 | "source": [] 249 | } 250 | ], 251 | "metadata": { 252 | "kernelspec": { 253 | "display_name": "Python 3", 254 | "language": "python", 255 | "name": "python3" 256 | }, 257 | "language_info": { 258 | "codemirror_mode": { 259 | "name": "ipython", 260 | "version": 3 261 | }, 262 | "file_extension": ".py", 263 | "mimetype": "text/x-python", 264 | "name": "python", 265 | "nbconvert_exporter": "python", 266 | "pygments_lexer": "ipython3", 267 | "version": "3.5.2" 268 | }, 269 | "toc": { 270 | "base_numbering": 1, 271 | "nav_menu": {}, 272 | "number_sections": true, 273 | "sideBar": true, 274 | "skip_h1_title": false, 275 | "title_cell": "Table of Contents", 276 | "title_sidebar": "Contents", 277 | "toc_cell": false, 278 | "toc_position": {}, 279 | "toc_section_display": true, 280 | "toc_window_display": false 281 | }, 282 | "varInspector": { 283 | "cols": { 284 | "lenName": 16, 285 | "lenType": 16, 286 | "lenVar": 40 287 | }, 288 | "kernels_config": { 289 | "python": { 290 | "delete_cmd_postfix": "", 291 | "delete_cmd_prefix": "del ", 292 | "library": "var_list.py", 293 | "varRefreshCmd": "print(var_dic_list())" 294 | }, 295 | "r": { 296 | "delete_cmd_postfix": ") ", 297 | "delete_cmd_prefix": "rm(", 298 | "library": "var_list.r", 299 | "varRefreshCmd": "cat(var_dic_list()) " 300 | } 301 | }, 302 | "types_to_exclude": [ 303 | "module", 304 | "function", 305 | "builtin_function_or_method", 306 | "instance", 307 | "_Feature" 308 | ], 309 | "window_display": false 310 | } 311 | }, 312 | "nbformat": 4, 313 | "nbformat_minor": 2 314 | } 315 | --------------------------------------------------------------------------------