├── tmp
│   └── __init__.py
├── datasets
│   └── __init__.py
├── results
│   └── __init__.py
├── requirements.txt
├── README.md
├── NSL_KDD_DNN_QLearning.py
└── NSL_KDD_DNN_QLearning.ipynb

/tmp/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/datasets/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/results/__init__.py:
--------------------------------------------------------------------------------
1 | 
--------------------------------------------------------------------------------
/requirements.txt:
--------------------------------------------------------------------------------
1 | numpy>=1.10.4
2 | requests>=2.0
3 | six
4 | pyglet>=1.2.0
5 | scipy==0.17.1
6 | tensorflow==1.4.1
7 | scikit-learn==0.18.1
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # RL4AD - Reinforcement Learning for Anomaly Detection
2 | 
3 | This needs to be used in combination with the gym-network_intrusion library.
4 | 
5 | # Installation
6 | # Step 1:
7 | Install all the required Python packages listed in requirements.txt
8 | pip install -r requirements.txt
9 | 
10 | # Step 2:
11 | Install gym from OpenAI using the following steps
12 | 1. git clone https://github.com/openai/gym.git
13 | 2. cd gym
14 | 3. pip install -e .
15 | 
16 | # Step 3:
17 | Install gym-network_intrusion using the following steps
18 | 1. git clone https://github.com/harik68/gym-network_intrusion.git
19 | 2. cd gym-network_intrusion
20 | 3. pip install -e .
21 | 
22 | # Step 4:
23 | Install RL4AD using the following steps
24 | 1. git clone https://github.com/harik68/RL4AD.git
25 | 2. Copy the following file into the directory datasets
26 |    https://www.dropbox.com/s/e2n5ow6b117ub80/kdd_nsl_train_onehot_string.pkl?dl=0
27 | 3. In your code, create an instance of the gym_network_intrusion environment using the following commands
28 |    import gym
29 |    import gym_network_intrusion
30 |    env = gym.make('network-intrusion-v0')
31 | 4. Run the code NSL_KDD_DNN_QLearning.py
32 | 
33 | There are 3 directories
34 | 1. datasets - where you need to keep all the input data
35 | 2. results - where the program will output the results
36 | 3. tmp - where the program will store intermediate results such as the configuration of the DNN after training
37 | 
38 | # Running Configuration
39 | I have set the default values of the running configuration as follows
40 | n_iterations = 10 # number of training iterations
41 | n_max_steps = 100 # max steps per episode
42 | This is for testing the code. It should complete in about 10 minutes. However, this will not produce good results.
43 | 
44 | You need to edit the code and use the following configuration to get better results once you finish testing.
45 | n_iterations = 250 # number of training iterations
46 | n_max_steps = 1000 # max steps per episode
47 | 
48 | Remember to clear the tmp folder after each run if you want the DNN to learn from scratch.
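49 | 
50 | # Environment Sanity Check
51 | Before running the full training, you can check the installation with a short interaction loop. This is an illustrative sketch, not part of the pipeline: it assumes the standard gym reset/step interface and that the dataset pickle from Step 4 is already in the datasets directory. The agent's two possible actions are 0 and 1 (the two labels it can assign to a connection record).
52 |    import gym
53 |    import gym_network_intrusion
54 |    env = gym.make('network-intrusion-v0')
55 |    obs = env.reset()
56 |    for _ in range(5):
57 |        obs, reward, done, info = env.step(0)  # always take action 0
58 |        print(reward, done)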
--------------------------------------------------------------------------------
/NSL_KDD_DNN_QLearning.py:
--------------------------------------------------------------------------------
1 | # coding: utf-8
2 | """
3 | Main program implementing reinforcement learning for anomaly detection in the NSL-KDD dataset
4 | Author @hari.koduvely
5 | Date Jan 2018
6 | """
7 | import sys
8 | import os
9 | 
10 | import gym
11 | import gym_network_intrusion
12 | env = gym.make('network-intrusion-v0')
13 | 
14 | import tensorflow as tf
15 | from tensorflow.contrib.layers import fully_connected
16 | import numpy as np
17 | import pandas as pd
18 | from matplotlib import pyplot as plt
19 | import time
20 | 
21 | # 1. Specify the NN Architecture
22 | n_inputs = 226 # == env.observation_space.shape[0]
23 | n_hidden = 10
24 | n_outputs = 1 # probability of choosing action 0
25 | initializer = tf.contrib.layers.variance_scaling_initializer()
26 | 
27 | # 2. Build the NN
28 | X = tf.placeholder(tf.float32, shape=[None, n_inputs])
29 | hidden = fully_connected(X, n_hidden, activation_fn=tf.nn.elu, weights_initializer=initializer)
30 | logits = fully_connected(hidden, n_outputs, activation_fn=None, weights_initializer=initializer)
31 | outputs = tf.nn.sigmoid(logits)
32 | 
33 | # 3. Select a random action based on the estimated probabilities
34 | p_yes_and_no = tf.concat(values=[outputs, 1 - outputs], axis=1)
35 | action = tf.multinomial(tf.log(p_yes_and_no), num_samples=1)
36 | 
37 | init = tf.global_variables_initializer()
38 | 
39 | # Setting up the training of the NN using Policy Gradient
40 | y = 1.0 - tf.to_float(action) # target probability is 1 when action is 0 and 0 when action is 1
41 | learning_rate = 0.01
42 | cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=y)
43 | optimizer = tf.train.AdamOptimizer(learning_rate)
44 | grads_and_vars = optimizer.compute_gradients(cross_entropy)
45 | gradients = [grad for grad, variable in grads_and_vars]
46 | gradient_placeholders = []
47 | grads_and_vars_feed = []
48 | for grad, variable in grads_and_vars:
49 |     gradient_placeholder = tf.placeholder(tf.float32)
50 |     gradient_placeholders.append(gradient_placeholder)
51 |     grads_and_vars_feed.append((gradient_placeholder, variable))
52 | 
53 | training_op = optimizer.apply_gradients(grads_and_vars_feed)
54 | 
55 | init = tf.global_variables_initializer()
56 | saver = tf.train.Saver()
57 | 
58 | # Function to compute the total discounted rewards given the raw rewards
59 | def discount_rewards(rewards, discount_rate):
60 |     discounted_rewards = np.empty(len(rewards))
61 |     cumulative_rewards = 0
62 |     for step in reversed(range(len(rewards))):
63 |         cumulative_rewards = rewards[step] + discount_rate * cumulative_rewards
64 |         discounted_rewards[step] = cumulative_rewards
65 |     return discounted_rewards
66 | 
67 | def discount_and_normalize_rewards(all_rewards, discount_rate):
68 |     all_discounted_rewards = [discount_rewards(rewards, discount_rate) for rewards in all_rewards]
69 |     flat_rewards = np.concatenate(all_discounted_rewards)
70 |     reward_mean = flat_rewards.mean()
71 |     reward_std = flat_rewards.std()
72 |     return [(discounted_rewards - reward_mean)/reward_std for discounted_rewards in all_discounted_rewards]
73 | 
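74 | # Worked example (illustration only, not part of the original pipeline): with
75 | # discount_rate = 0.8, discount_rewards([10.0, 0.0, -50.0], 0.8) walks backwards
76 | # through the episode:
77 | #   step 2: -50.0
78 | #   step 1:   0.0 + 0.8 * (-50.0) = -40.0
79 | #   step 0:  10.0 + 0.8 * (-40.0) = -22.0
80 | # so it returns array([-22., -40., -50.]).
81 | 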
82 | """
83 | n_iterations = 250 # number of training iterations
84 | n_max_steps = 1000 # max steps per episode
85 | n_games_per_update = 10 # train the policy every 10 episodes
86 | save_iterations = 10 # save the model every 10 training iterations
87 | discount_rate = 0.95
88 | """
89 | n_iterations = 10 # number of training iterations
90 | n_max_steps = 100 # max steps per episode
91 | n_games_per_update = 10 # train the policy every 10 episodes
92 | save_iterations = 10 # save the model every 10 training iterations
93 | discount_rate = 0.95
94 | 
95 | # Executing the graph
96 | t_start = time.time()
97 | with tf.Session(config=tf.ConfigProto(log_device_placement=True)) as sess:
98 |     init.run()
99 |     for iteration in range(n_iterations):
100 |         all_rewards = []    # all sequences of raw rewards for each episode
101 |         all_gradients = []  # gradients saved at each step of each episode
102 |         for game in range(n_games_per_update):
103 |             current_rewards = []    # all raw rewards for the current episode
104 |             current_gradients = []  # all gradients from the current episode
105 |             obs = env.reset()
106 |             for step in range(n_max_steps):
107 |                 action_val, gradients_val = sess.run([action, gradients], feed_dict={X: np.array(list(obs)).reshape(1, n_inputs)})
108 |                 obs, reward, done, info = env.step(action_val[0][0])
109 |                 current_rewards.append(reward)
110 |                 current_gradients.append(gradients_val)
111 |                 if done:
112 |                     break
113 |             all_rewards.append(current_rewards)
114 |             all_gradients.append(current_gradients)
115 | 
116 |         # Perform a Policy Update after running the policy for 10 episodes
117 |         all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate)
118 |         feed_dict = {}
119 |         for var_index, grad_placeholder in enumerate(gradient_placeholders):
120 |             # Multiply the gradients by the action scores and compute the mean
121 |             mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]
122 |                                       for game_index, rewards in enumerate(all_rewards)
123 |                                       for step, reward in enumerate(rewards)], axis=0)
124 |             feed_dict[grad_placeholder] = mean_gradients
125 | 
126 |         sess.run(training_op, feed_dict=feed_dict)
127 |         if iteration % save_iterations == 0:
128 |             # save_path = saver.save(sess, '/Users/harikoduvely/Projects/RL/DataSets/NSL_KDD_CKPT/nsl_kdd_qlearning_4.ckpt')
129 |             save_path = saver.save(sess, 'tmp/nsl_kdd_qlearning_1.ckpt')
130 | 
131 | 
132 | # Evaluation of the results
133 | n_max_steps = 1000
134 | o_list = []
135 | p_list = []
136 | a_list = []
137 | y_list = []
138 | saver = tf.train.Saver()
139 | with tf.Session() as sess:
140 |     saver.restore(sess, 'tmp/nsl_kdd_qlearning_1.ckpt')
141 |     # load the NSL-KDD dataset into a Pandas DataFrame
142 |     df = pd.read_pickle('datasets/kdd_nsl_train_onehot_string.pkl')
143 |     # init.run()  # removed: re-initializing here would overwrite the restored weights
144 |     tp = 0
145 |     fp = 0
146 |     tn = 0
147 |     fn = 0
148 |     for step in range(n_max_steps):
149 |         obs = df.iloc[step]['s']
150 |         y = df.iloc[step]['y']
151 |         outputs = sess.run(tf.nn.sigmoid(logits), feed_dict={X: np.array(list(obs)).reshape(1, n_inputs)})  # probabilities, not raw logits
152 |         p_yes_and_no = tf.concat(values=[outputs, 1 - outputs], axis=1)
153 |         py = p_yes_and_no.eval()
154 |         action = tf.multinomial(tf.log(p_yes_and_no), num_samples=1)
155 |         act = action.eval()
156 |         o_list.append(obs)
157 |         p_list.append(py[0][0])
158 |         a_list.append(act[0][0])
159 |         y_list.append(y)
160 | 
161 | df_pred = pd.DataFrame({'obs':o_list, 'pred':p_list, 'action':a_list, 'y':y_list})
162 | # Saving the dataframe to a pickle file in the directory results
163 | df_pred.to_pickle('results/kdd_nsl_train_results.pkl')
164 | 
165 | df_pred = pd.read_pickle('results/kdd_nsl_train_results.pkl')
166 | 
167 | lam = 0.1 * np.arange(10)
168 | 
169 | def generate_roc(df):
170 |     lamda = 0.1 * np.arange(10)
171 |     tpl = []
172 |     fpl = []
173 |     tnl = []
174 |     fnl = []
175 |     prl = []
176 |     rel = []
177 |     frl = []
178 |     for lam in lamda:
179 |         tp = 0.0
180 |         fp = 0.0
181 |         tn = 0.0
182 |         fn = 0.0
183 |         for i in range(df.shape[0]):
184 |             p = df.iloc[i]['pred']
185 |             y = df.iloc[i]['y']
186 |             y = y.astype(int)
187 |             if y==1 and p>=lam:
188 |                 tp += 1.0
189 |             elif y==1 and p<lam:
190 |                 fn += 1.0
191 |             elif y==-1 and p>=lam:
192 |                 fp += 1.0
193 |             elif y==-1 and p<lam:
194 |                 tn += 1.0
195 |         tpl.append(tp)
196 |         fpl.append(fp)
197 |         tnl.append(tn)
198 |         fnl.append(fn)
199 |         prl.append(tp/(tp + fp))
200 |         rel.append(tp/(tp + fn))
201 |         frl.append(fp/(fp + tn))
202 |     df_lam = pd.DataFrame({'lambda': lamda, 'TP': tpl, 'FP': fpl, 'TN': tnl, 'FN': fnl, 'Precision': prl, 'Recall': rel, 'FPR': frl})
203 |     return df_lam
204 | 
205 | df_lam = generate_roc(df_pred)
206 | plt.plot(df_lam['FPR'], df_lam['Recall'])
207 | plt.show()
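208 | 
209 | # The fixed-grid threshold sweep in generate_roc (lambda = 0.0, 0.1, ..., 0.9) can be
210 | # cross-checked with scikit-learn, which is already listed in requirements.txt. This is
211 | # an illustrative sketch, not part of the original pipeline; roc_curve uses every
212 | # distinct predicted probability as a threshold instead of a fixed grid:
213 | # from sklearn.metrics import roc_curve
214 | # fpr, tpr, thresholds = roc_curve(df_pred['y'].astype(int), df_pred['pred'], pos_label=1)
215 | # plt.plot(fpr, tpr)
216 | # plt.show()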
--------------------------------------------------------------------------------
/NSL_KDD_DNN_QLearning.ipynb:
--------------------------------------------------------------------------------
1 | {
2 |  "cells": [
3 |   {
4 |    "cell_type": "code",
5 |    "execution_count": 3,
6 |    "metadata": {
7 |     "collapsed": false
8 |    },
9 |    "outputs": [
10 |     {
11 |      "name": "stderr",
12 |      "output_type": "stream",
13 |      "text": [
52 |       "\u001b[33mWARN: Environment '<...>' has deprecated methods. Compatibility code invoked.\u001b[0m\n"
53 |      ]
54 |     }
55 |    ],
56 |    "source": [
57 |     "import gym\n",
58 |     "import gym_network_intrusion\n",
59 |     "env = gym.make('network-intrusion-v0')\n",
60 |     "%reload_ext autoreload\n",
61 |     "%autoreload 2"
62 |    ]
63 |   },
64 |   {
65 |    "cell_type": "code",
66 |    "execution_count": 4,
67 |    "metadata": {
68 |     "collapsed": false
69 |    },
70 |    "outputs": [
71 |     {
72 |      "name": "stderr",
73 |      "output_type": "stream",
74 |      "text": [
75 |       "Couldn't import dot_parser, loading of dot files will not be possible.\n"
76 |      ]
77 |     }
78 |    ],
79 |    "source": [
80 |     "import tensorflow as tf\n",
81 |     "from tensorflow.contrib.layers import fully_connected\n",
82 |     "import numpy as np\n",
83 |     "import pandas as pd\n",
84 |     "from matplotlib import pyplot as plt\n",
85 |     "import time"
86 |    ]
87 |   },
88 |   {
89 |    "cell_type": "code",
90 |    "execution_count": 5,
91 |    "metadata": {
92 |     "collapsed": false
93 |    },
94 |    "outputs": [],
95 |    "source": [
96 |     "# 1. Specify the NN Architecture\n",
97 |     "n_inputs = 226 # == env.observation_space.shape[0]\n",
98 |     "n_hidden = 10\n",
99 |     "n_outputs = 1 # probability of choosing action 0\n",
100 |     "initializer = tf.contrib.layers.variance_scaling_initializer()\n",
101 |     "\n",
102 |     "# 2. Build the NN\n",
103 |     "X = tf.placeholder(tf.float32, shape=[None, n_inputs])\n",
104 |     "hidden = fully_connected(X, n_hidden, activation_fn=tf.nn.elu, weights_initializer=initializer)\n",
105 |     "logits = fully_connected(hidden, n_outputs, activation_fn=None, weights_initializer=initializer)\n",
106 |     "outputs = tf.nn.sigmoid(logits)\n",
107 |     "\n",
108 |     "# 3. Select a random action based on the estimated probabilities\n",
109 |     "p_yes_and_no = tf.concat(values=[outputs, 1 - outputs], axis=1)\n",
110 |     "action = tf.multinomial(tf.log(p_yes_and_no), num_samples=1)\n",
111 |     "\n",
112 |     "init = tf.global_variables_initializer()"
113 |    ]
114 |   },
115 |   {
116 |    "cell_type": "code",
117 |    "execution_count": 6,
118 |    "metadata": {
119 |     "collapsed": false
120 |    },
121 |    "outputs": [],
122 |    "source": [
123 |     "# Setting up the training of the NN using Policy Gradient\n",
124 |     "y = 1.0 - tf.to_float(action) # target probability is 1 when action is 0 and 0 when action is 1\n",
125 |     "learning_rate = 0.01\n",
126 |     "cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=y)\n",
127 |     "optimizer = tf.train.AdamOptimizer(learning_rate)\n",
128 |     "grads_and_vars = optimizer.compute_gradients(cross_entropy)\n",
129 |     "gradients = [grad for grad, variable in grads_and_vars]\n",
130 |     "gradient_placeholders = []\n",
131 |     "grads_and_vars_feed = []\n",
132 |     "for grad, variable in grads_and_vars:\n",
133 |     "    gradient_placeholder = tf.placeholder(tf.float32)\n",
134 |     "    gradient_placeholders.append(gradient_placeholder)\n",
135 |     "    grads_and_vars_feed.append((gradient_placeholder, variable))\n",
136 |     "\n",
137 |     "training_op = optimizer.apply_gradients(grads_and_vars_feed)\n",
138 |     "\n",
139 |     "init = tf.global_variables_initializer()\n",
140 |     "saver = tf.train.Saver()\n",
141 |     "\n",
142 |     "# Function to compute the total discounted rewards given the raw rewards\n",
143 |     "def discount_rewards(rewards, discount_rate):\n",
144 |     "    discounted_rewards = np.empty(len(rewards))\n",
145 |     "    cumulative_rewards = 0\n",
146 |     "    for step in reversed(range(len(rewards))):\n",
147 |     "        cumulative_rewards = rewards[step] + discount_rate * cumulative_rewards\n",
148 |     "        discounted_rewards[step] = cumulative_rewards\n",
149 |     "    return discounted_rewards\n",
150 |     "\n",
151 |     "def discount_and_normalize_rewards(all_rewards, discount_rate):\n",
152 |     "    all_discounted_rewards = [discount_rewards(rewards, discount_rate) for rewards in all_rewards]\n",
153 |     "    flat_rewards = np.concatenate(all_discounted_rewards)\n",
154 |     "    reward_mean = flat_rewards.mean()\n",
155 |     "    reward_std = flat_rewards.std()\n",
156 |     "    return [(discounted_rewards - reward_mean)/reward_std for discounted_rewards in all_discounted_rewards]"
157 |    ]
158 |   },
159 |   {
160 |    "cell_type": "code",
161 |    "execution_count": 7,
162 |    "metadata": {
163 |     "collapsed": false
164 |    },
165 |    "outputs": [],
166 |    "source": [
167 |     "\"\"\"\n",
168 |     "n_iterations = 250 # number of training iterations\n",
169 |     "n_max_steps = 1000 # max steps per episode\n",
170 |     "n_games_per_update = 10 # train the policy every 10 episodes\n",
171 |     "save_iterations = 10 # save the model every 10 training iterations\n",
172 |     "discount_rate = 0.95\n",
173 |     "\"\"\"\n",
174 |     "n_iterations = 10 # number of training iterations\n",
175 |     "n_max_steps = 100 # max steps per episode\n",
176 |     "n_games_per_update = 10 # train the policy every 10 episodes\n",
177 |     "save_iterations = 10 # save the model every 10 training iterations\n",
178 |     "discount_rate = 0.95"
179 |    ]
180 |   },
181 |   {
182 |    "cell_type": "code",
183 |    "execution_count": 8,
184 |    "metadata": {
185 |     "collapsed": false
186 |    },
187 |    "outputs": [
188 |     {
189 |      "name": "stdout",
190 |      "output_type": "stream",
191 |      "text": [
192 |       "Training Time: 389.334667921 Secs\n"
193 |      ]
194 |     }
195 |    ],
196 |    "source": [
197 |     "# Executing the graph\n",
198 |     "t_start = time.time()\n",
199 |     "with tf.Session() as sess:\n",
200 |     "    init.run()\n",
201 |     "    for iteration in range(n_iterations):\n",
202 |     "        all_rewards = []    # all sequences of raw rewards for each episode\n",
203 |     "        all_gradients = []  # gradients saved at each step of each episode\n",
204 |     "        for game in range(n_games_per_update):\n",
205 |     "            current_rewards = []    # all raw rewards for the current episode\n",
206 |     "            current_gradients = []  # all gradients from the current episode\n",
207 |     "            obs = env.reset()\n",
208 |     "            for step in range(n_max_steps):\n",
209 |     "                action_val, gradients_val = sess.run([action, gradients], feed_dict={X: np.array(list(obs)).reshape(1, n_inputs)})\n",
210 |     "                obs, reward, done, info = env.step(action_val[0][0])\n",
211 |     "                current_rewards.append(reward)\n",
212 |     "                current_gradients.append(gradients_val)\n",
213 |     "                if done:\n",
214 |     "                    break\n",
215 |     "            all_rewards.append(current_rewards)\n",
216 |     "            all_gradients.append(current_gradients)\n",
217 |     "\n",
218 |     "        # Perform a Policy Update after running the policy for 10 episodes\n",
219 |     "        all_rewards = discount_and_normalize_rewards(all_rewards, discount_rate)\n",
220 |     "        feed_dict = {}\n",
221 |     "        for var_index, grad_placeholder in enumerate(gradient_placeholders):\n",
222 |     "            # Multiply the gradients by the action scores and compute the mean\n",
223 |     "            mean_gradients = np.mean([reward * all_gradients[game_index][step][var_index]\n",
224 |     "                                      for game_index, rewards in enumerate(all_rewards)\n",
225 |     "                                      for step, reward in enumerate(rewards)], axis=0)\n",
226 |     "            feed_dict[grad_placeholder] = mean_gradients\n",
227 |     "\n",
228 |     "        sess.run(training_op, feed_dict=feed_dict)\n",
229 |     "        if iteration % save_iterations == 0:\n",
230 |     "            # save_path = saver.save(sess, '/Users/harikoduvely/Projects/RL/DataSets/NSL_KDD_CKPT/nsl_kdd_qlearning_4.ckpt')\n",
231 |     "            save_path = saver.save(sess, 'datasets/nsl_kdd_qlearning_1.ckpt')\n",
232 |     "\n",
233 |     "t_end = time.time()\n",
234 |     "print(\"Training Time: \" + str(t_end - t_start) + \" Secs\")"
235 |    ]
236 |   },
237 |   {
238 |    "cell_type": "code",
239 |    "execution_count": 10,
240 |    "metadata": {
241 |     "collapsed": false,
242 |     "scrolled": false
243 |    },
244 |    "outputs": [
245 |     {
246 |      "name": "stdout",
247 |      "output_type": "stream",
248 |      "text": [
249 |       "INFO:tensorflow:Restoring parameters from datasets/nsl_kdd_qlearning_1.ckpt\n"
250 |      ]
251 |     }
252 |    ],
253 |    "source": [
254 |     "# Evaluation of the results\n",
255 |     "n_max_steps = 1000\n",
256 |     "o_list = []\n",
257 |     "p_list = []\n",
258 |     "a_list = []\n",
259 |     "y_list = []\n",
260 |     "saver = tf.train.Saver()\n",
261 |     "with tf.Session() as sess:\n",
262 |     "    saver.restore(sess,'datasets/nsl_kdd_qlearning_1.ckpt')\n",
263 |     "    # load the NSL-KDD dataset into a Pandas DataFrame\n",
264 |     "    df = pd.read_pickle('datasets/kdd_nsl_train_onehot_string.pkl')\n",
265 |     "    # init.run()  # removed: re-initializing here would overwrite the restored weights\n",
266 |     "    tp = 0\n",
267 |     "    fp = 0\n",
268 |     "    tn = 0\n",
269 |     "    fn = 0\n",
270 |     "    for step in range(n_max_steps):\n",
271 |     "        obs = df.iloc[step]['s']\n",
272 |     "        y = df.iloc[step]['y']\n",
273 |     "        outputs = sess.run(tf.nn.sigmoid(logits), feed_dict={X: np.array(list(obs)).reshape(1, n_inputs)})  # probabilities, not raw logits\n",
274 |     "        p_yes_and_no = tf.concat(values=[outputs, 1 - outputs], axis=1)\n",
275 |     "        py = p_yes_and_no.eval()\n",
276 |     "        action = tf.multinomial(tf.log(p_yes_and_no), num_samples=1)\n",
277 |     "        act = action.eval()\n",
278 |     "        o_list.append(obs)\n",
279 |     "        p_list.append(py[0][0])\n",
280 |     "        a_list.append(act[0][0])\n",
281 |     "        y_list.append(y)\n",
282 |     "\n",
283 |     "    df_pred = pd.DataFrame({'obs':o_list, 'pred':p_list, 'action':a_list, 'y':y_list})\n",
284 |     "    # Saving the dataframe to a pickle file in the directory datasets\n",
285 |     "    df_pred.to_pickle('datasets/kdd_nsl_train_results.pkl')"
286 |    ]
287 |   },
288 |   {
289 |    "cell_type": "code",
290 |    "execution_count": 12,
291 |    "metadata": {
292 |     "collapsed": false
293 |    },
294 |    "outputs": [],
295 |    "source": [
296 |     "df_pred = pd.read_pickle('datasets/kdd_nsl_train_results.pkl')"
297 |    ]
298 |   },
299 |   {
300 |    "cell_type": "code",
301 |    "execution_count": 13,
302 |    "metadata": {
303 |     "collapsed": false
304 |    },
305 |    "outputs": [],
306 |    "source": [
307 |     "lam = 0.1 * np.arange(10)"
308 |    ]
309 |   },
310 |   {
311 |    "cell_type": "code",
312 |    "execution_count": 14,
313 |    "metadata": {
314 |     "collapsed": true
315 |    },
316 |    "outputs": [],
317 |    "source": [
318 |     "def generate_roc(df):\n",
319 |     "    lamda = 0.1 * np.arange(10)\n",
320 |     "    tpl = []\n",
321 |     "    fpl = []\n",
322 |     "    tnl = []\n",
323 |     "    fnl = []\n",
324 |     "    prl = []\n",
325 |     "    rel = []\n",
326 |     "    frl = []\n",
327 |     "    for lam in lamda:\n",
328 |     "        tp = 0.0\n",
329 |     "        fp = 0.0\n",
330 |     "        tn = 0.0\n",
331 |     "        fn = 0.0\n",
332 |     "        for i in range(df.shape[0]):\n",
333 |     "            p = df.iloc[i]['pred']\n",
334 |     "            y = df.iloc[i]['y']\n",
335 |     "            y = y.astype(int)\n",
336 |     "            if y==1 and p>=lam:\n",
337 |     "                tp += 1.0\n",
338 |     "            elif y==1 and p<lam:\n",
339 |     "                fn += 1.0\n",
340 |     "            elif y==-1 and p>=lam:\n",
341 |     "                fp += 1.0\n",
342 |     "            elif y==-1 and p<lam:\n",
343 |     "                tn += 1.0\n",
344 |     "        tpl.append(tp)\n",
345 |     "        fpl.append(fp)\n",
346 |     "        tnl.append(tn)\n",
347 |     "        fnl.append(fn)\n",
348 |     "        prl.append(tp/(tp + fp))\n",
349 |     "        rel.append(tp/(tp + fn))\n",
350 |     "        frl.append(fp/(fp + tn))\n",
351 |     "    df_lam = pd.DataFrame({'lambda': lamda, 'TP': tpl, 'FP': fpl, 'TN': tnl, 'FN': fnl, 'Precision': prl, 'Recall': rel, 'FPR': frl})\n",
352 |     "    return df_lam"
353 |    ]
354 |   },
355 |   {
356 |    "cell_type": "code",
357 |    "execution_count": 15,
358 |    "metadata": {
359 |     "collapsed": true
360 |    },
361 |    "outputs": [],
362 |    "source": [
363 |     "df_lam = generate_roc(df_pred)"
364 |    ]
365 |   },
380 |   {
381 |    "cell_type": "code",
382 |    "execution_count": 17,
383 |    "metadata": {
384 |     "collapsed": false
385 |    },
386 |    "outputs": [
387 |     {
388 |      "data": {
389 |       "text/plain": [
390 |        "[<matplotlib.lines.Line2D at 0x...>]"
391 |       ]
392 |      },
393 |      "execution_count": 17,
394 |      "metadata": {},
395 |      "output_type": "execute_result"
396 |     }
397 |    ],
398 |    "source": [
399 |     "plt.plot(df_lam['FPR'],df_lam['Recall'])"
400 |    ]
401 |   },
402 |   {
403 |    "cell_type": "code",
404 |    "execution_count": 18,
405 |    "metadata": {
406 |     "collapsed": false
407 |    },
408 |    "outputs": [
409 |     {
410 |      "data": {
411 |       "image/png": "<base64-encoded PNG of the ROC curve (Recall vs FPR) omitted>",
"iVBORw0KGgoAAAANSUhEUgAAAYoAAAD8CAYAAABpcuN4AAAABHNCSVQICAgIfAhkiAAAAAlwSFlz\nAAALEgAACxIB0t1+/AAAHxFJREFUeJzt3Xl4VPW9x/H3V5BNkAgERBYBCZtoFSOi1dZdwCXY3t6L\n7a1ry4NVb629tSiIdWtdeu2t1UqxpWoX6XIFo0UR0WqrIgSrkLCGyJKwRYGwE0K+94859kljmJkk\nMzmzfF7Pkydnzvn9hu+PwHxy5sx8x9wdERGRwzki7AJERCS1KShERCQqBYWIiESloBARkagUFCIi\nEpWCQkREolJQiIhIVAoKERGJSkEhIiJRtQ67gMbo1q2b9+vXL+wyRETSyuLFiz9299ymzk+roOjX\nrx9FRUVhlyEiklbMbF1z5uupJxERiUpBISIiUSkoREQkKgWFiIhEpaAQEZGoFBQiIhKVgkJERKJS\nUIiIpLD9Bw/xg8IStuzcH1oNCgoRkRT2xBulPP3OWtZs3R1aDQoKEZEUVbp1N9PeXMOVp/birIHd\nQqtDQSEikoLcnbtmF9P+yFbcOXZoqLXEFRRmNtrMVppZqZlNauC4mdljwfElZjaizrEZZrbVzIrr\nzXnEzFYE42eZWU7zlyMikhlmf1DBu2WfcPvoIeR2ahtqLTGDwsxaAU8AY4BhwFVmNqzesDFAXvA1\nAXiyzrGngdEN3PU8YLi7nwysAu5obPEiIpmoau9B7n9pOaf0yeGrI/uGXU5cZxQjgVJ3L3P3amAm\nUFBvTAHwrEcsAHLMrCeAu78FbKt/p+7+qrvXBDcXAL2buggRkUzy8NwVbN9bzQNXDueIIyzscuIK\nil7Ahjq3y4N9jR0TzfXAyw0dMLMJZlZkZkWVlZWNuEsRkfTz/vrt/H7heq49qz8nHtc57HKAFLiY\nbWaTgRrgdw0dd/fp7p7v7vm5uU3+3A0RkZRXc6iWybOK6dGpHbddPCjscv4png8uqgD61LndO9jX\n2DGfYWbXApcBF7i7x1GLiEjGeubddSzftJMnvzaCjm1T53Pl4jmjWATkmVl/M2sDjAcK640pBK4O\nXv00Cqhy903R7tTMRgO3A1e4+94m1C4ikjE2Ve3j0VdXcu7gXEYPPzbscv5FzKAILjjfDMwFlgN/\ndPcSM5toZhODYXOAMqAUeAr41qfzzew54F1gsJmVm9kNwaHHgU7APDP7wMymJWpRIiLp5t4Xl1FT\n69x7xXDMwr+AXVdc5zbuPodIGNTdN63OtgM3HWbuVYfZPzD+MkVEMtcbK7bycvFmvnfJYPp27RB2\nOZ8R+sVsEZFstq/6EFMLizkh9yi+ec6AsMtpUOpcLRERyUKPv7GaDdv28dw3R9GmdWr+7p6aVYmI\nZIHSrbuY/lYZXxrRizNP6Bp2OYeloBARCYG7M3lWMR3atA696V8sCgoRkRA8/34F7320je+PHkK3\njuE2/YtFQSEi0sJ27K3mh3OWc2rfHMaf3if2hJDpYraISAt76JWV7Nh3kN+MOyklmv7FojMKEZEW\ntHjddp5buJ7rzurHsOOODrucuCgoRERaSKTp31J6dm7HrRelTtO/WBQUIiIt5Ol31rJi8y7uvnxY\nSjX9i0VBISLSAjbu2Mej81Zx/pDuXHJiajX9i0VBISLSAu59cRm17txzxYkp1/QvFgWFiEiSzV++\nhVdKNnPL+Xn06ZJ6Tf9iUVCIiCTRvupD3F1YwsDuHVO26V8s6XM1RUQkDf3s9dWUb9/HHyakbtO/\nWNKzahGRNLBqS6Tp35dH9OaMAanb9C8WBYWISBK4O1NmF3NU29bcOXZI2OU0i4JCRCQJ/u/9ChZ+\ntI07xgyha4o3/YtFQSEikmDb90Sa/o3om8O/56d+079YFBQiIgn20CsrqNp3kAeuTI+mf7EoKERE\nEmjxum3MXLSBG87uz9Ce6dH0LxYFhYhIghw8VMvkWcUc17kd374gL+xyEiauoDCz0Wa20sxKzWxS\nA8fNzB4Lji8xsxF1js0ws61mVlxvThczm2dmq4PvxzR/OSIi4fn12x9Fmv5dcSJHpVHTv1hiBoWZ\ntQKeAMYAw4CrzGxYvWFjgLzgawLwZJ1jTwOjG7jrScB8d88D5ge3RUTSUsWOffzva6u5cGh3Lh7W\nI+xyEiqeM4qRQKm7l7l7NTATKKg3pgB41iMWADlm1hPA3d8CtjVwvwXAM8H2M8C4pixARCQV3FNY\nQq07d1+efk3/YoknKHoBG+rcLg/2NXZMfT3cfVOwvRnIrAgWkazx2rItvLpsC9++YFBaNv2LJSUu\nZru7A97QMTObYGZFZlZUWVnZwpWJiES3t7qGuwtLGNSjI984p3/Y5SRFPEFRAdR9x0jvYF9jx9S3\n5dOnp4LvWxsa5O7T3T3f3fNzc3PjKFdEpOU8Nr+Uih37uH/cSRzZKiV+9064eFa1CMgzs/5m1gYY\nDxTWG1MIXB28+mkUUFXnaaXDKQSuCbavAV5oRN0iIqFbuXkXv/xbGV85rTcj+3cJu5ykiRkU7l4D\n3AzMBZYDf3T3EjObaGYTg2FzgDKgFHgK+Nan883sOeBdYLCZlZvZDcGhB4GLzGw1cGFwW0QkLdTW\nOlNmL6Vju9bcMXZo2OUkVVwv9HX3OUTCoO6+aXW2HbjpMHOvOsz+T4AL4q5URCSF/Pn9chat3c5D\nXz6JLke1CbucpMrMJ9RERJJo+55qfjRnOfnHH8NXTkv/pn+xKChERBrpwZdXsGt/DfdfOTwjmv7F\noqAQEWmERWu38YeiSNO/IcdmRtO/WBQUIiJxOniolimziumV055vX5g5Tf9iyZyuVSIiSTbj7x+x\ncssunro6nw5tsufhU2cUIiJxKN++N2j614OLMqzpXywKChGROPygcFnk+xX1m2dnPgWFiEgMr5Zs\n5rXlW7j1wjx6H5N5Tf9iUVCIiESx50ANPygsYXCPTlx/dmY2/Ysle67GiIg0wWPzV7Oxaj9/uurU\njG36F0t2rlpEJA4rNu/kV3//iP/I78Pp/TK36V8sCgoRkQbU1jpTZhXTqV1rJo0ZEnY5oVJQiIg0\n4E+LN1C0bjt3jB3KMRne9C8WBYWISD3b9lTzo5dXMLJfF/5tRO+wywmdgkJEpJ4fzVnO7ixq+heL\ngkJEpI6FH23jT4vL+cY5AxjUo1PY5aQEBYWISKC6ppYps5fSK6c9/3XBwLDLSRl6H4WISOBXf/+I\nVVt288ssa/oXi84oRESADdv28tP5q7h4WA8uzLKmf7EoKEQk67k7Pygs4Qgz7r7ixLDLSTkKChHJ\neq8u28L8FVu59cI8euW0D7uclKOgEJGs9mnTvyHHduK6z2dn079YdLVGRLLaT+evZlPVfh7/avY2\n/Yslrr8VMxttZivNrNTMJjVw3MzsseD4EjMbEWuumZ1iZgvM7AMzKzKzkYlZkohIfJZvijT9G396\nH047Pnub/sUSMyjMrBXwBDAGGAZcZWb1P+JpDJAXfE0A
noxj7sPAPe5+CjA1uC0i0iJqa50ps4vp\n3P5Ivj86u5v+xRLPGcVIoNTdy9y9GpgJFNQbUwA86xELgBwz6xljrgNHB9udgY3NXIuISNz+WLSB\nxeu2c6ea/sUUzzWKXsCGOrfLgTPiGNMrxtxbgblm9mMigXVW/GWLiDTdJ7sPRJr+9e/Cl0f0Cruc\nlBfmlZsbge+4ex/gO8CvGhpkZhOCaxhFlZWVLVqgiGSmH728gj0Hanhg3HDM1PQvlniCogLoU+d2\n72BfPGOizb0GeD7Y/hORp6k+w92nu3u+u+fn5ubGUa6IyOEtKPuEPy8uZ8IXBpCnpn9xiScoFgF5\nZtbfzNoA44HCemMKgauDVz+NAqrcfVOMuRuBLwbb5wOrm7kWEZGoIk3/iul9THtuOT8v7HLSRsxr\nFO5eY2Y3A3OBVsAMdy8xs4nB8WnAHGAsUArsBa6LNje4628CPzWz1sB+Iq+WEhFJml/+vYzSrbuZ\ncW0+7du0CructGHuHnYNccvPz/eioqKwyxCRNLRh214u+smbnDuoO9O+flrY5bQoM1vs7vlNna+3\nIYpIxnN37g6a/k29vP7bwCQWBYWIZLy5JVt4fcVWbrtoEMep6V+jKShEJKPtPlDDPS+WMLTn0Vx7\nVr+wy0lLCgoRyWj/O28Vm6r2c/+44bRW078m0d+aiGSsZRt38ut31nLVyL6cdvwxYZeTthQUIpKR\namudybOXktP+SL4/enDY5aQ1BYWIZKSZizbwj/U7uHPsUHI6qOlfcygoRCTjfLz7AA+9soIz+nfh\nS2r612wKChHJOD+cs5y91TU8cKWa/iWCgkJEMsq7az7h+fcrmPCFAQzsrqZ/iaCgEJGMEWn6t5Q+\nXdpz83lq+pco8XxwkYhIWnjqb2WsqdzDr689XU3/EkhnFCKSEdZ/spfH5q9mzPBjOW9I97DLySgK\nChFJe+7O1MJiWh+hpn/JoKAQkbT3SvFm/rqyktsuHkzPzmr6l2gKChFJa5Gmf8sY1vNorjnz+LDL\nyUi6mC0iae0n81axZdd+nvzPEWr6lyT6WxWRtFVcUcWv3/6Ir47sy6l91fQvWRQUIpKWamudKbOL\n6XJUG26/ZEjY5WQ0BYWIpKXnFq3ngw07mHzpUDp3ODLscjKagkJE0k7lrgM89PIKzhzQlXGnqOlf\nsikoRCTt/HDOcvYdPMR949T0ryUoKEQkrbyz5mNm/aOCiV88gYHdO4ZdTlaIKyjMbLSZrTSzUjOb\n1MBxM7PHguNLzGxEPHPN7BYzW2FmJWb2cPOXIyKZ7EDNIabMLqZvlw7cdN7AsMvJGjHfR2FmrYAn\ngIuAcmCRmRW6+7I6w8YAecHXGcCTwBnR5prZeUAB8Dl3P2Bmas4iIlFNf7OMsso9PH3d6bQ7Uk3/\nWko8ZxQjgVJ3L3P3amAmkQf4ugqAZz1iAZBjZj1jzL0ReNDdDwC4+9YErEdEMtS6T/bw+BulXHpS\nT84drN8rW1I8QdEL2FDndnmwL54x0eYOAs4xs/fM7E0zO70xhYtI9nB3pr5QwpGtjuCuy9T0r6WF\neTG7NdAFGAV8D/ijNfDyBTObYGZFZlZUWVnZ0jWKSAqYs3Qzb66q5LaLBnFs53Zhl5N14gmKCqBP\nndu9g33xjIk2txx4Pni6aiFQC3Sr/4e7+3R3z3f3/Nzc3DjKFZFMsmv/Qe59qYQTjzuaq9X0LxTx\nBMUiIM/M+ptZG2A8UFhvTCFwdfDqp1FAlbtvijF3NnAegJkNAtoAHzd7RSKSUR6dt4qtuw7wwJUn\nqelfSGK+6snda8zsZmAu0AqY4e4lZjYxOD4NmAOMBUqBvcB10eYGdz0DmGFmxUA1cI27e0JXJyJp\nrbiiimfeWcvXzujLKX1ywi4na1k6PTbn5+d7UVFR2GWISAs4VOt86edvU7FjH/O/ey6d26ufU1OZ\n2WJ3z2/qfJ3HiUhK+v3C9XxYXsVdlw1TSIRMQSEiKWfrrv08/MoKPj+wK1d87riwy8l6CgoRSTk/\n/MtyDhys5d4CNf1LBQoKEUkpb5d+zOwPNjLx3BM4IVdN/1KBgkJEUsaBmkPcNbuY47t24FvnnhB2\nORKI+fJYEZGW8os3yyj7eA/PXD9STf9SiM4oRCQlrP040vTvspN78sVB6sKQShQUIhI6d+euF4pp\no6Z/KUlBISKh+8vSTfxt9cf898WD6HG0mv6lGgWFiIRq5/6D3PviMk7q1Zmvn9kv7HKkAbqYLSKh\nevTVVVTuPsAvr8mn1RF6z0Qq0hmFiIRmaXkVz767lq+POp6Te6vpX6pSUIhIKA7VOpNnL6Vrx7b8\n9yWDwy5HolBQiEgofvfeOpaUVzHl0qEc3U5N/1KZgkJEWtzWXft55JWVnD2wm5r+pQEFhYi0uPtf\nWs6BQ7XcN05N/9KBgkJEWtTfVldS+OFGbvziCfTvdlTY5UgcFBQi0mL2HzzE1BdK6Ne1Azeq6V/a\n0PsoRKTFTHtzDR99vIff3KCmf+lEZxQi0iI++ngPP39jDZd/7jjOyVPTv3SioBCRpHN3pr5QTNvW\nR3DXpUPDLkcaSUEhIkn34pJI07/vjR5MdzX9SzsKChFJqp37D3LfS8s4uXdnvnbG8WGXI00QV1CY\n2WgzW2lmpWY2qYHjZmaPBceXmNmIRsz9rpm5mXVr3lJEJBX9z9yVfLL7AA+MO0lN/9JUzKAws1bA\nE8AYYBhwlZnV/2SRMUBe8DUBeDKeuWbWB7gYWN/slYhIyllSvoNnF6zj6jP7cVLvzmGXI00UzxnF\nSKDU3cvcvRqYCRTUG1MAPOsRC4AcM+sZx9yfALcD3tyFiEhqOVTrTJ5VTLeObbnt4kFhlyPNEE9Q\n9AI21LldHuyLZ8xh55pZAVDh7h82smYRSQO/XbCOpRVVTL1smJr+pblQ3nBnZh2AO4k87RRr7AQi\nT2fRt2/fJFcmIomwZed+Hpm7knPyunHZyT3DLkeaKZ4zigqgT53bvYN98Yw53P4TgP7Ah2a2Ntj/\nvpkdW/8Pd/fp7p7v7vm5uXqTjkg6uO+lZVQfquW+AjX9ywTxBMUiIM/M+ptZG2A8UFhvTCFwdfDq\np1FAlbtvOtxcd1/q7t3dvZ+79yPylNQId9+cqIWJSDjeWlXJS0s2cdO5A+mnpn8ZIeZTT+5eY2Y3\nA3OBVsAMdy8xs4nB8WnAHGAsUArsBa6LNjcpKxGR0O0/eIi7XihmQLejmHjugLDLkQSJ6xqFu88h\nEgZ1902rs+3ATfHObWBMv3jqEJHU9vO/rmHdJ3v53TfOoG1rNf3LFHpntogkRFnlbqb9dQ0FpxzH\n5wfq/bOZREEhIs3m7tz1QjFtjzyCyWr6l3EUFCLSbIUfbuTt0k+4/ZLBdO+kpn+ZRkEhIs1Ste8g\n9720nM/17sxX1fQvI+kT7kSkWX48dyXb9hzg6etOV9O/DKUzChFpsg827OC370Wa/g3vpaZ/mUpB\nISJNUnOolsm
zlpLbsS3fVdO/jKagEJEm+c2CdZRs3Mndl59IJzX9y2gKChFptC079/M/r67iC4Ny\nGXvSZ1q0SYZRUIhIo937z6Z/J6rpXxZQUIhIo/x15Vb+smQTt5w3kOO7qulfNlBQiEjc9h88xNQX\nShiQexQTvqimf9lC76MQkbj9/I1S1m/by+/V9C+r6IxCROJSunU3T765hitP7cVZavqXVRQUIhKT\nu3PX7GLaH9mKO8eq6V+2UVCISEwvfLCRd8s+4fbRQ8jt1DbscqSFKShEJKqqvQe5/y/LOKVPDl8d\n2TfsciQEupgtIlE9PHcF2/ZU8/R1IzlCTf+yks4oROSw/rF+O79fuJ5rz+qvpn9ZTEEhIg2KNP0r\npkendtympn9ZTUEhIg165t11LNu0k6mXD6NjWz1Lnc0UFCLyGZur9vPoqys5d3AuY4ar6V+2U1CI\nyGfc+1IJNbXOvVcMV9M/iS8ozGy0ma00s1Izm9TAcTOzx4LjS8xsRKy5ZvaIma0Ixs8ys5zELElE\nmuONlVuZs3Qzt5w/kL5dO4RdjqSAmEFhZq2AJ4AxwDDgKjMbVm/YGCAv+JoAPBnH3HnAcHc/GVgF\n3NHs1YhIs0Sa/hVzQu5RfPMLavonEfGcUYwESt29zN2rgZlAQb0xBcCzHrEAyDGzntHmuvur7l4T\nzF8A9E7AekSkGR5/vZQN2/Zx/7iT1PRP/imeoOgFbKhzuzzYF8+YeOYCXA+8HEctIpIkpVt38Yu3\n1vClU3tx5gldwy5HUkjoF7PNbDJQA/zuMMcnmFmRmRVVVla2bHEiWcLdmfJp079L1fRP/lU8QVEB\n9Klzu3ewL54xUeea2bXAZcDX3N0b+sPdfbq757t7fm5ubhzlikhjzfpHBQvKtjFpzFC6dVTTP/lX\n8QTFIiDPzPqbWRtgPFBYb0whcHXw6qdRQJW7b4o218xGA7cDV7j73gStR0Qaacfeah74y3JO7ZvD\n+NP7xJ4gWSfm2y3dvcbMbgbmAq2AGe5eYmYTg+PTgDnAWKAU2AtcF21ucNePA22BecHrtBe4+8RE\nLk5EYnt47kp27DvIb8adpKZ/0qC43pfv7nOIhEHdfdPqbDtwU7xzg/0DG1WpiCTc4nXb+f176/nG\n2f0ZdtzRYZcjKSr0i9kiEo6aQ7VMmV3MsUe349aL1PRPDk+dvkSy1NPvrGX5pp1M+88RavonUemM\nQiQLbdyxj0fnreL8Id255EQ1/ZPoFBQiWejeF5dR6849V5yopn8Sk4JCJMu8vmILr5Rs5pbz8+jT\nRU3/JDYFhUgW2Vd9iKkvlDCwe0e+eY6a/kl8dAVLJIv87PXVlG/fx8wJo2jTWr8nSnz0L0UkS6ze\nsoun/lbGl0f0ZtQANf2T+CkoRLKAuzN5djEd2rTmzrFDwi5H0oyCQiQL/N/7FSz8aBuTxgyhq5r+\nSSMpKEQy3I691fxwznJG9M3hP/LV9E8aT0EhkuEeemUFVfsO8sCVavonTaOgEMlgi9dt47mFG7j+\n8/0Y2lNN/6RpFBQiGergoVomzyqmZ+d23Hqhmv5J0+l9FCIZ6um317Ji8y5+8fXTOEpN/6QZdEYh\nkoE27tjHT15bxQVDunPxsB5hlyNpTkEhkoHuebGEWnd+oKZ/kgAKCpEM89qyLcwt2cK3Lxikpn+S\nEAoKkQyyt7qGuwtLyOvekRvO7h92OZIhdIVLJIP87PVSKnbs4w9q+icJpH9JIhli1ZZdPPVWGV85\nrTdnqOmfJJCCQiQDuDtTZhXTsV1r7hg7NOxyJMMoKEQywJ8Xl7Nw7TbuGDOELke1CbscyTBxBYWZ\njTazlWZWamaTGjhuZvZYcHyJmY2INdfMupjZPDNbHXw/JjFLEsku2/dEmv7lH38MXzlNTf8k8WIG\nhZm1Ap4AxgDDgKvMbFi9YWOAvOBrAvBkHHMnAfPdPQ+YH9wWkUZ68OUV7Npfw/1XDlfTP0mKeM4o\nRgKl7l7m7tXATKCg3pgC4FmPWADkmFnPGHMLgGeC7WeAcc1ci0jWKVq7jT8UbeCGs/sz5Fg1/ZPk\niOflsb2ADXVulwNnxDGmV4y5Pdx9U7C9GUhan4GfzV9N4Ycbk3X3IqHZsnM/vXLa8+0L88IuRTJY\nSryPwt3dzLyhY2Y2gcjTWfTt27dJ95/bqS15PTo2vUCRFDX42E7ccHZ/OrRJif/KkqHi+ddVAdS9\nQtY72BfPmCOjzN1iZj3dfVPwNNXWhv5wd58OTAfIz89vMExiGT+yL+NHNi1kRESyXTzXKBYBeWbW\n38zaAOOBwnpjCoGrg1c/jQKqgqeVos0tBK4Jtq8BXmjmWkREJAlinlG4e42Z3QzMBVoBM9y9xMwm\nBsenAXOAsUApsBe4Ltrc4K4fBP5oZjcA64B/T+jKREQkIcy9Sc/mhCI/P9+LiorCLkNEJK2Y2WJ3\nz2/qfL0zW0REolJQiIhIVAoKERGJSkEhIiJRKShERCSqtHrVk5lVEnkpbVN0Az5OYDnpRGvPTlp7\ndmpo7ce7e25T7zCtgqI5zKyoOS8PS2dau9aebbT2xK5dTz2JiEhUCgoREYkqm4JietgFhEhrz05a\ne3ZK+Nqz5hqFiIg0TTadUYiISBOkZVCY2WgzW2lmpWb2mc/aDtqdPxYcX2JmI2LNNbMuZjbPzFYH\n349pqfU0RpLW/oiZrQjGzzKznJZaT2MkY+11jn/XzNzMuiV7HU2RrLWb2S3Bz77EzB5uibU0VpL+\nzZ9iZgvM7AMzKzKzkS21nsZo5tpnmNlWMyuuN6fxj3XunlZfRNqVrwEGAG2AD4Fh9caMBV4GDBgF\nvBdrLvAwMCnYngQ8FPZaW3DtFwOtg+2HsmntwfE+RFrhrwO6hb3WFvy5nwe8BrQNbncPe60tuPZX\ngTF15v817LUmcu3BsS8AI4DienMa/ViXjmcUI4FSdy9z92pgJlBQb0wB8KxHLAByLPIpetHmFgDP\nBNvPAOOSvZAmSMra3f1Vd68J5i8g8kmEqSZZP3eAnwC3A6l6wS5Za78ReNDdDwC4e4OfMhmyZK3d\ngaOD7c7AxmQvpAmas3bc/S1gWwP32+jHunQMil7Ahjq3y4N98YyJNreHRz6VD2Az0CNRBSdQstZe\n1/VEfkNJNUlZu5kVABXu/mGiC06gZP3cBwHnmNl7ZvammZ2e0KoTI1lrvxV4xMw2AD8G7khgzYnS\nnLVH0+jHunQMiqTzyDlZqv52mTRmNhmoAX4Xdi0twcw6AHcCU8OuJSStgS5EnrL4HpFPnLRwS2ox\nNwLfcfc+wHeAX4VcTyjifaxLx6CoIPKc8qd6B/viGRNt7pZPT9mC76l4Gp6stWNm1wKXAV8L/vGk\nmmSs/QSgP/Chma0N9r9vZscmtPLmS9bPvRx4PnjaYiFQS6RPUCpJ1tqvAZ4Ptv9E5GmeVNOctUfT\n+Me6sC/YNPaLyG9BZUT+g396gefEemMu5V8v8CyMNRd4hH+9wPNw2Gtt
412 |       "text/plain": [
413 |        "<matplotlib.figure.Figure at 0x...>"
414 |       ]
415 |      },
416 |      "metadata": {},
417 |      "output_type": "display_data"
418 |     }
419 |    ],
420 |    "source": [
421 |     "plt.show()"
422 |    ]
423 |   },
424 |   {
425 |    "cell_type": "code",
426 |    "execution_count": null,
427 |    "metadata": {
428 |     "collapsed": true
429 |    },
430 |    "outputs": [],
431 |    "source": []
432 |   }
433 |  ],
434 |  "metadata": {
435 |   "kernelspec": {
436 |    "display_name": "Python 2",
437 |    "language": "python",
438 |    "name": "python2"
439 |   },
440 |   "language_info": {
441 |    "codemirror_mode": {
442 |     "name": "ipython",
443 |     "version": 2
444 |    },
445 |    "file_extension": ".py",
446 |    "mimetype": "text/x-python",
447 |    "name": "python",
448 |    "nbconvert_exporter": "python",
449 |    "pygments_lexer": "ipython2",
450 |    "version": "2.7.13"
451 |   }
452 |  },
453 |  "nbformat": 4,
454 |  "nbformat_minor": 0
455 | }
456 | 
--------------------------------------------------------------------------------