├── Report.pdf ├── Data ├── SpotCurve_5Y.csv ├── cds.csv ├── SwaptionVolMatrix_5Y.csv ├── SpotCurve.csv └── SwaptionVolMatrix.csv ├── .idea └── vcs.xml ├── LICENSE ├── BlackScholesSolver.py ├── .gitignore ├── CDSBootstrapping.py ├── Validation.py ├── main.py ├── CVASwap.py ├── Bootstrapping.py ├── Volatility.py └── LMM.py /Report.pdf: -------------------------------------------------------------------------------- https://raw.githubusercontent.com/CianODuffy/LiborMarketModel/HEAD/Report.pdf -------------------------------------------------------------------------------- /Data/SpotCurve_5Y.csv: -------------------------------------------------------------------------------- 1 | term,yield 2 | 0.5,0.017535 3 | 1,0.018992 4 | 2,0.02078 5 | 3,0.02169 6 | 4,0.022105 7 | 5,0.022437 8 | 6,0.022765 9 | -------------------------------------------------------------------------------- /Data/cds.csv: -------------------------------------------------------------------------------- 1 | cds 2 | 0.0012435 3 | 0.001946 4 | 0.00224175 5 | 0.0025375 6 | 0.0029375 7 | 0.0033375 8 | 0.0037875 9 | 0.0042375 10 | 0.0047293 11 | 0.0052211 12 | -------------------------------------------------------------------------------- /Data/SwaptionVolMatrix_5Y.csv: -------------------------------------------------------------------------------- 1 | expiry,1,2,3,4,5 2 | 0.5,0.1624,0.1973,0.2254,0.2264,0.2276 3 | 1,0.1935,0.2224,0.2388,0.2454, 4 | 2,0.2307,0.2503,0.2583,, 5 | 3,0.2654,0.2768,,, 6 | 4,0.2828,,,, 7 | -------------------------------------------------------------------------------- /.idea/vcs.xml: -------------------------------------------------------------------------------- 1 | 2 | 3 | 4 | 5 | 6 | -------------------------------------------------------------------------------- /Data/SpotCurve.csv: -------------------------------------------------------------------------------- 1 | term,yield 2 | 0.5,0.017535 3 | 1,0.018992 4 | 2,0.02078 5 | 3,0.02169 6 | 4,0.022105 7 | 5,0.022437 8 | 6,0.022765 9 | 7,0.023106 10 | 8,0.023412 11 | 9,0.0237 12 | 10,0.02398 13 | 11,0.024238 14 | 12,0.024445 15 | 13,0.024618 16 | 14,0.02469 17 | 15,0.024897 18 | 16,0.025035 19 | 17,0.02511 20 | 18,0.02522 21 | 19,0.025295 22 | 20,0.025345 23 | 25,0.025439 24 | 30,0.025423 25 | 35,0.02529 26 | 40,0.02523 27 | 50,0.024901 28 | -------------------------------------------------------------------------------- /Data/SwaptionVolMatrix.csv: -------------------------------------------------------------------------------- 1 | expiry,1,2,3,4,5,6,7,8,9,10,15,20 2 | 1,0.1935,0.2224,0.2388,0.2454,0.2454,0.2445,0.2438,0.243,0.2424,0.2412,0.2319,0.2264 3 | 2,0.2307,0.2503,0.2583,0.2593,0.2601,0.2586,0.2569,0.2558,0.2544,0.2527,0.2385,0.2326 4 | 3,0.2654,0.2768,0.274,0.2711,0.2689,0.2659,0.2635,0.261,0.259,0.257,0.2422,0.2336 5 | 4,0.2828,0.2845,0.2794,0.2754,0.2712,0.2684,0.2653,0.2631,0.2614,0.2594,0.2438,0.2345 6 | 5,0.2844,0.2827,0.2794,0.2758,0.2729,0.269,0.2657,0.2632,0.261,0.2586,0.2432,0.2329 7 | 7,0.2728,0.2723,0.2691,0.2652,0.2622,0.2593,0.2567,0.2543,0.2521,0.2498,0.2363,0.2254 8 | 10,0.2564,0.2514,0.25,0.2484,0.2467,0.2447,0.2428,0.2408,0.2388,0.2365,0.2204,0.2131 9 | 15,0.2311,0.2239,0.2203,0.2171,0.2163,0.2162,0.2163,0.2163,0.2164,0.216,0.2054,0.2002 10 | 20,0.2111,0.2037,0.2009,0.1991,0.1997,0.1998,0.1998,0.1998,0.1997,0.1992,0.1902,0.1895 11 | -------------------------------------------------------------------------------- /LICENSE: -------------------------------------------------------------------------------- 1 | MIT License 2 | 3 | Copyright (c) 2019 Cian O'Duffy 4 | 5 | Permission is hereby granted, free of charge, to any person obtaining a copy 6 | of this software and associated documentation files (the "Software"), to deal 7 | in the Software without restriction, including without limitation the rights 8 | to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 9 | copies of the Software, and to permit persons to whom the Software is 10 | furnished to do so, subject to the following conditions: 11 | 12 | The above copyright notice and this permission notice shall be included in all 13 | copies or substantial portions of the Software. 14 | 15 | THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 16 | IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 17 | FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE 18 | AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER 19 | LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, 20 | OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE 21 | SOFTWARE. 22 | -------------------------------------------------------------------------------- /BlackScholesSolver.py: -------------------------------------------------------------------------------- 1 | from scipy.optimize import minimize_scalar 2 | import numpy as np 3 | 4 | # Inverts black swaption formula to determine volatility from price 5 | class BlackScholesSolver(): 6 | def __init__(self, volatility): 7 | self.volatility = volatility 8 | 9 | def set_parameters(self, start, swap_length, price): 10 | self.start = start 11 | self.swap_length = swap_length 12 | self.price = price 13 | self.strike = self.volatility.bootstrapping.get_forward_swap_rates(self.start, 14 | self.swap_length) 15 | 16 | def objective_function_payer(self, implied_volatility): 17 | price = self.volatility.get_swaption_price_t0_payer(self.start, 18 | self.swap_length, self.strike, implied_volatility) 19 | return np.power(price - self.price, 2) 20 | 21 | def solve_and_get_implied_volatility_payer(self): 22 | result = minimize_scalar(self.objective_function_payer) 23 | return result.x 24 | 25 | def objective_function_receiver(self, implied_volatility): 26 | price = self.volatility.get_swaption_price_t0_receiver(self.start, 27 | self.swap_length, self.strike, implied_volatility) 28 | return np.power(price - self.price, 2) 29 | 30 | def solve_and_get_implied_volatility_receiver(self): 31 | result = minimize_scalar(self.objective_function_receiver) 32 | return result.x -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | *.egg-info/ 24 | .installed.cfg 25 | *.egg 26 | MANIFEST 27 | 28 | # PyInstaller 29 | # Usually these files are written by a python script from a template 30 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 31 | *.manifest 32 | *.spec 33 | 34 | # Installer logs 35 | pip-log.txt 36 | pip-delete-this-directory.txt 37 | 38 | # Unit test / coverage reports 39 | htmlcov/ 40 | .tox/ 41 | .coverage 42 | .coverage.* 43 | .cache 44 | nosetests.xml 45 | coverage.xml 46 | *.cover 47 | .hypothesis/ 48 | .pytest_cache/ 49 | 50 | # Translations 51 | *.mo 52 | *.pot 53 | 54 | # Django stuff: 55 | *.log 56 | local_settings.py 57 | db.sqlite3 58 | 59 | # Flask stuff: 60 | instance/ 61 | .webassets-cache 62 | 63 | # Scrapy stuff: 64 | .scrapy 65 | 66 | # Sphinx documentation 67 | docs/_build/ 68 | 69 | # PyBuilder 70 | target/ 71 | 72 | # Jupyter Notebook 73 | .ipynb_checkpoints 74 | 75 | # pyenv 76 | .python-version 77 | 78 | # celery beat schedule file 79 | celerybeat-schedule 80 | 81 | # SageMath parsed files 82 | *.sage.py 83 | 84 | # Environments 85 | .env 86 | .venv 87 | env/ 88 | venv/ 89 | ENV/ 90 | env.bak/ 91 | venv.bak/ 92 | 93 | # Spyder project settings 94 | .spyderproject 95 | .spyproject 96 | 97 | # Rope project settings 98 | .ropeproject 99 | 100 | # mkdocs documentation 101 | /site 102 | 103 | # mypy 104 | .mypy_cache/ 105 | -------------------------------------------------------------------------------- /CDSBootstrapping.py: -------------------------------------------------------------------------------- 1 | import pandas as pd 2 | import numpy as np 3 | 4 | # bootstraps probabilites of survival from CDS spread given a discount curve 5 | class CDSBootstrapping(): 6 | def __init__(self, R, term_increment): 7 | path = 'Data/cds.csv' 8 | self.name = 'cds' 9 | self.imported_cds_spreads = pd.read_csv(path) 10 | self.R = R 11 | self.LGD = 1 - R 12 | self.term_increment = term_increment 13 | self.length = int(self.imported_cds_spreads.shape[0]) 14 | self.set_interpolated_cds() 15 | 16 | def set_interpolated_cds(self): 17 | self.interpolated_cds = np.zeros(self.length) 18 | for i in range(self.length): 19 | self.interpolated_cds[i] = self.imported_cds_spreads.at[i, self.name] 20 | 21 | def get_probability_of_survival(self, ois_discount_curve, multiplier): 22 | probability_of_survival = np.zeros(self.length + 1) 23 | probability_of_survival[0] = 1 24 | 25 | probability_of_survival[1] = self.LGD/(self.LGD + self.term_increment*self.interpolated_cds[0]*multiplier) 26 | 27 | for i in range(2, self.length+1): 28 | main_term = probability_of_survival[i-1]\ 29 | *self.LGD/(self.LGD + self.term_increment*self.interpolated_cds[i-1]*multiplier) 30 | sum = 0 31 | for j in range(1, i): 32 | sum += ois_discount_curve[j]*(self.LGD*probability_of_survival[j-1] - 33 | (self.LGD + self.term_increment*self.interpolated_cds[i-1] 34 | *multiplier)*probability_of_survival[j]) 35 | bottom = ois_discount_curve[i]*(self.LGD + self.term_increment*self.interpolated_cds[i-1]*multiplier) 36 | probability_of_survival[i] = main_term + sum/bottom 37 | return probability_of_survival 38 | 39 | -------------------------------------------------------------------------------- /Validation.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import LMM as lmm 3 | import matplotlib.pyplot as plt 4 | 5 | # validates the LMM forward rate simulations using martingale tests and other 6 | # tests 7 | class Validation(): 8 | def __init__(self): 9 | swaption_vol_cva_dataset_path = 'Data/SwaptionVolMatrix_5Y.csv' 10 | swap_curve_cva_dataset_path = 'Data/SpotCurve_5Y.csv' 11 | self.lmm = lmm.LMM(swaption_vol_cva_dataset_path, swap_curve_cva_dataset_path) 12 | self.calculate_martingale_ratios_for_CVA_dataset_bonds_at_expiry() 13 | 14 | swaption_vol_extended_dataset_path = 'Data/SwaptionVolMatrix.csv' 15 | swap_curve_extended_dataset_path = 'Data/SpotCurve.csv' 16 | self.lmm = lmm.LMM(swaption_vol_extended_dataset_path, swap_curve_extended_dataset_path) 17 | self.calculate_10_year_ZC_martingale_test() 18 | 19 | self.calculate_zero_coupon_bond_projections() 20 | # uncomment to do diffusion check 21 | # self.check_diffusion_has_zero_mean() 22 | 23 | ##[terms,time, sim] 24 | self.forward_sims = self.lmm.forward_sims 25 | self.number_of_terms = self.lmm.number_of_terms 26 | self.time_increment = self.lmm.time_increment 27 | self.bootstrapping = self.lmm.bootstrapping 28 | self.number_of_sims = self.lmm.number_of_sims 29 | 30 | def set_martingale_differences_for_zero_coupon_bond(self): 31 | self.martingale_differences = np.ones((self.number_of_terms, 3)) 32 | # loop through zero coupon bonds 33 | for i in range(1, self.number_of_terms+1): 34 | bond_pv = self.get_expectation_of_zero_coupon_bond(i) 35 | t0_bond_pv = self.bootstrapping.zero_coupon_prices[i] 36 | self.martingale_differences[i - 1,0] = bond_pv 37 | self.martingale_differences[i - 1, 1] = t0_bond_pv 38 | self.martingale_differences[i - 1, 2] = bond_pv / t0_bond_pv - 1 39 | np.savetxt('martingale_test.csv', self.martingale_differences, delimiter=',') 40 | 41 | def calculate_martingale_ratios_for_CVA_dataset_bonds_at_expiry(self): 42 | numeraire_index = 10 43 | self.lmm.run_projection(numeraire_index, numeraire_index) 44 | bonds = np.zeros(numeraire_index) 45 | ratio = np.zeros(numeraire_index) 46 | difference = np.zeros(numeraire_index) 47 | 48 | for i in range(1, numeraire_index): 49 | numeraire_value = self.lmm.DF[numeraire_index, i,:] 50 | t0_value = self.lmm.DF[numeraire_index,0,0] 51 | bonds[i] = np.mean(1/numeraire_value)*t0_value 52 | difference[i] = bonds[i] - self.lmm.DF[i,0,0] 53 | ratio[i] = bonds[i]/self.lmm.DF[i,0,0] 54 | np.savetxt('martingale_ratio_at_bond_expiry_CVA_dataset.csv', ratio, delimiter=',') 55 | 56 | def calculate_zero_coupon_bond_projections(self): 57 | numeraire_index = 40 58 | start_bond = 20 59 | self.lmm.volatility.mc_adjustment_factor = 1 60 | self.lmm.volatility.a = 0.01368861 61 | self.lmm.volatility.b = 0.07921976 62 | self.lmm.volatility.c = 0.33920146 63 | self.lmm.volatility.d = 0.08416935 64 | self.lmm.volatility.instantiate_arrays() 65 | self.lmm.run_projection(numeraire_index, numeraire_index) 66 | bonds = np.zeros((numeraire_index - start_bond, numeraire_index)) 67 | ratio = np.zeros((numeraire_index - start_bond, numeraire_index)) 68 | 69 | for i in range(start_bond, numeraire_index): 70 | for j in range(i+1): 71 | numeraire_value = self.lmm.DF[numeraire_index, j, :] 72 | t0_numeraire_value = self.lmm.DF[numeraire_index, 0, 0] 73 | t0_ratio = self.lmm.DF[i, 0, 0]/t0_numeraire_value 74 | bonds[i-start_bond,j] = np.mean(self.lmm.DF[i, j,:] / numeraire_value) * t0_numeraire_value 75 | ratio[i-start_bond,j] = (np.mean(self.lmm.DF[i, j,:] / numeraire_value))/t0_ratio 76 | np.savetxt('matingale_test_ratio_projections.csv', ratio, delimiter=',') 77 | 78 | def calculate_10_year_ZC_martingale_test(self): 79 | numeraire_index = 40 80 | start_bond = 20 81 | self.lmm.volatility.mc_adjustment_factor = 1 82 | self.lmm.volatility.a = 0.01368861 83 | self.lmm.volatility.b = 0.07921976 84 | self.lmm.volatility.c = 0.33920146 85 | self.lmm.volatility.d = 0.08416935 86 | self.lmm.volatility.instantiate_arrays() 87 | self.lmm.run_projection(numeraire_index, numeraire_index) 88 | # self.lmm.run_projection_predictor_corrector(numeraire_index, numeraire_index) 89 | 90 | bonds = np.zeros((numeraire_index - start_bond, numeraire_index)) 91 | ratio = np.zeros((4, numeraire_index)) 92 | for j in range(start_bond + 1): 93 | numeraire_value = self.lmm.DF[numeraire_index, j, :] 94 | t0_numeraire_value = self.lmm.DF[numeraire_index, 0, 0] 95 | t0_ratio = self.lmm.DF[start_bond, 0, 0] / t0_numeraire_value 96 | bonds[start_bond - start_bond, j] = np.mean(self.lmm.DF[start_bond, j, :] / numeraire_value) * t0_numeraire_value 97 | ratio[0, j] = np.percentile(self.lmm.DF[start_bond, j, :] / numeraire_value, 5) / t0_ratio 98 | ratio[1, j] = (np.mean(self.lmm.DF[start_bond, j, :] / numeraire_value)) / t0_ratio 99 | ratio[2, j] = np.percentile(self.lmm.DF[start_bond, j, :] / numeraire_value, 50) / t0_ratio 100 | ratio[3, j] = np.percentile(self.lmm.DF[start_bond, j, :] / numeraire_value, 95) / t0_ratio 101 | np.savetxt('10_year_ZC_martingale_test.csv', ratio, delimiter=',') 102 | 103 | def get_expectation_of_zero_coupon_bond(self, zero_coupon_index): 104 | forward_rate_index = zero_coupon_index - 1 105 | product = 1/(np.ones(self.number_of_sims) + self.time_increment*self.forward_sims[0,0,:]) 106 | 107 | for i in range(1, forward_rate_index+1): 108 | product = product/(np.ones(self.number_of_sims) + self.time_increment*self.forward_sims[i,i,:]) 109 | output = np.mean(product) 110 | return output 111 | 112 | def check_diffusion_has_zero_mean(self): 113 | number_of_tests = 40 114 | mean = np.zeros((number_of_tests,12)) 115 | 116 | for i in range(number_of_tests): 117 | diffusion = self.lmm.get_diffusion() 118 | mean[i,:] = np.mean(diffusion, axis=1) 119 | mean_of_mean = np.mean(mean) 120 | 121 | 122 | -------------------------------------------------------------------------------- /main.py: -------------------------------------------------------------------------------- 1 | import LMM as lmm 2 | import numpy as np 3 | from mpl_toolkits.mplot3d import Axes3D 4 | import matplotlib.pyplot as plt 5 | from matplotlib import cm 6 | from matplotlib.ticker import LinearLocator, FormatStrFormatter 7 | import Bootstrapping as boot 8 | import CVASwap as cva 9 | import Validation as val 10 | 11 | #THE PURPOSE OF THIS SCRIPT IS TO CALL THE OTHER METHODS AND PRINT RESULTS 12 | #run main to run the other scripts. 13 | # modules are run for 10,000 simulations as a compromise between run-time 14 | # and acheiving good results for the monte-carlo optimisation 15 | # in the project 50,000 simulations was used. 16 | 17 | swaption_vol_cva_dataset_path = 'Data/SwaptionVolMatrix_5Y.csv' 18 | swap_curve_cva_dataset_path = 'Data/SpotCurve_5Y.csv' 19 | swaption_vol_extended_dataset_path = 'Data/SwaptionVolMatrix.csv' 20 | swap_curve_extended_dataset_path = 'Data/SpotCurve.csv' 21 | 22 | # run cva calculation 23 | cva_object = cva.CVASwap() 24 | 25 | # Martingale testing and zero mean diffusion methods in here 26 | validate = val.Validation() 27 | 28 | 29 | # Carry out analytical calibration for extended dataset 30 | libor_fit_parameters_extended = lmm.LMM(swaption_vol_extended_dataset_path, swap_curve_extended_dataset_path) 31 | libor_fit_parameters_extended.volatility.mc_adjustment_factor = 1 32 | libor_fit_parameters_extended.volatility.a = 0.5 33 | libor_fit_parameters_extended.volatility.b = 0.5 34 | libor_fit_parameters_extended.volatility.c = 0.5 35 | libor_fit_parameters_extended.volatility.d = 0.5 36 | libor_fit_parameters_extended.volatility.fit_parameters() 37 | s = 'Extended dataset analytical calibration - a = ' + str(libor_fit_parameters_extended.volatility.a) \ 38 | + '\n Extended dataset analytical calibration - b = ' + str(libor_fit_parameters_extended.volatility.b) \ 39 | + '\n Extended dataset analytical calibration - c = ' + str(libor_fit_parameters_extended.volatility.c) \ 40 | + '\n Extended dataset analytical calibration - d = ' + str(libor_fit_parameters_extended.volatility.d) 41 | print(s) 42 | 43 | 44 | # calculate swaption vols and prices for cva dataset using factor reduction 45 | libor_cva_dataset_factor_reduction = lmm.LMM(swaption_vol_cva_dataset_path, swap_curve_cva_dataset_path) 46 | libor_cva_dataset_factor_reduction.use_factor_reduction = True 47 | libor_cva_dataset_factor_reduction.volatility.use_factor_reduction = True 48 | libor_cva_dataset_factor_reduction.volatility.instantiate_arrays() 49 | libor_cva_dataset_factor_reduction.set_swaption_prices_for_atm_calibration() 50 | libor_cva_dataset_factor_reduction.set_implied_volatilities_from_prices() 51 | 52 | 53 | # fit mc adjustment factor 54 | libor_cva_dataset_mc_adjustment = lmm.LMM(swaption_vol_cva_dataset_path, swap_curve_cva_dataset_path) 55 | libor_cva_dataset_mc_adjustment.fit_adjustment_factor() 56 | k = libor_cva_dataset_mc_adjustment.volatility.mc_adjustment_factor 57 | s = 'k(monte carlo) calculated as = ' + str(k) 58 | print(s) 59 | 60 | 61 | # calculate swaption vols and prices for cva dataset 62 | libor_cva_dataset = lmm.LMM(swaption_vol_cva_dataset_path, swap_curve_cva_dataset_path) 63 | libor_cva_dataset.set_swaption_prices_for_atm_calibration() 64 | libor_cva_dataset.set_implied_volatilities_from_prices() 65 | 66 | # calculate swaption vols and prices for extended dataset 67 | libor_extended_dataset = lmm.LMM(swaption_vol_extended_dataset_path, swap_curve_extended_dataset_path) 68 | libor_extended_dataset.volatility.mc_adjustment_factor = 1 69 | libor_extended_dataset.volatility.a = 0.01368861 70 | libor_extended_dataset.volatility.b = 0.07921976 71 | libor_extended_dataset.volatility.c = 0.33920146 72 | libor_extended_dataset.volatility.d = 0.08416935 73 | libor_extended_dataset.volatility.instantiate_arrays() 74 | libor_extended_dataset.set_swaption_prices_for_atm_calibration() 75 | libor_extended_dataset.set_implied_volatilities_from_prices() 76 | 77 | # run extended dataset and produce charts 78 | libor_extended_dataset_produce_charts = lmm.LMM(swaption_vol_extended_dataset_path, swap_curve_extended_dataset_path) 79 | libor_extended_dataset_produce_charts.volatility.mc_adjustment_factor = 1 80 | libor_extended_dataset_produce_charts.number_of_sims = 100 81 | libor_extended_dataset_produce_charts.volatility.a = 0.01368861 82 | libor_extended_dataset_produce_charts.volatility.b = 0.07921976 83 | libor_extended_dataset_produce_charts.volatility.c = 0.33920146 84 | libor_extended_dataset_produce_charts.volatility.d = 0.08416935 85 | libor_extended_dataset_produce_charts.volatility.instantiate_arrays() 86 | numeraire_index = 40 87 | libor_extended_dataset_produce_charts.run_projection(numeraire_index, numeraire_index) 88 | raw_sims = libor_extended_dataset_produce_charts.forward_sims 89 | forward_sims = np.delete(raw_sims, (numeraire_index), axis=1) 90 | mean = np.mean(forward_sims) 91 | median = np.median(forward_sims) 92 | upper_quartile = np.percentile(forward_sims, 75) 93 | max = np.max(forward_sims) 94 | numeraire_rate_sims = forward_sims[numeraire_index-1,:,:] 95 | cash_rate = np.zeros((numeraire_index, libor_extended_dataset_produce_charts.number_of_sims)) 96 | 97 | for i in range(numeraire_index): 98 | cash_rate[i,:] = forward_sims[i,i,:] 99 | 100 | min_cash = np.min(cash_rate) 101 | lower_quartile_cash = np.percentile(cash_rate, 25) 102 | min_numeraire = np.min(numeraire_rate_sims) 103 | lower_numeraire = np.percentile(numeraire_rate_sims, 25) 104 | 105 | 106 | plt.plot(numeraire_rate_sims) 107 | plt.xticks([ 108 | 0, 109 | 1 , 110 | 2 , 111 | 3 , 112 | 4 , 113 | 5 , 114 | 6 , 115 | 7 , 116 | 8 , 117 | 9 , 118 | 10 , 119 | 11 , 120 | 12 , 121 | 13 , 122 | 14 , 123 | 15 , 124 | 16 , 125 | 17 , 126 | 18 , 127 | 19 , 128 | 20 , 129 | 21 , 130 | 22 , 131 | 23 , 132 | 24 , 133 | 25 , 134 | 26 , 135 | 27 , 136 | 28 , 137 | 29 , 138 | 30 , 139 | 31 , 140 | 32 , 141 | 33 , 142 | 34 , 143 | 35 , 144 | 36 , 145 | 37 , 146 | 38 , 147 | 39], 148 | [ 149 | '0.0', 150 | '0.5', 151 | '1.0', 152 | '1.5', 153 | '2.0', 154 | '2.5', 155 | '3.0', 156 | '3.5', 157 | '4.0', 158 | '4.5', 159 | '5.0', 160 | '5.5', 161 | '6.0', 162 | '6.5', 163 | '7.0', 164 | '7.5', 165 | '8.0', 166 | '8.5', 167 | '9.0', 168 | '9.5', 169 | '10.0', 170 | '10.5', 171 | '11.0', 172 | '11.5', 173 | '12.0', 174 | '12.5', 175 | '13.0', 176 | '13.5', 177 | '14.0', 178 | '14.5', 179 | '15.0', 180 | '15.5', 181 | '16.0', 182 | '16.5', 183 | '17.0', 184 | '17.5', 185 | '18.0', 186 | '18.5', 187 | '19.0', 188 | '19.5']) 189 | plt.show() 190 | 191 | plt.plot(cash_rate) 192 | plt.xticks([ 193 | 0, 194 | 1 , 195 | 2 , 196 | 3 , 197 | 4 , 198 | 5 , 199 | 6 , 200 | 7 , 201 | 8 , 202 | 9 , 203 | 10 , 204 | 11 , 205 | 12 , 206 | 13 , 207 | 14 , 208 | 15 , 209 | 16 , 210 | 17 , 211 | 18 , 212 | 19 , 213 | 20 , 214 | 21 , 215 | 22 , 216 | 23 , 217 | 24 , 218 | 25 , 219 | 26 , 220 | 27 , 221 | 28 , 222 | 29 , 223 | 30 , 224 | 31 , 225 | 32 , 226 | 33 , 227 | 34 , 228 | 35 , 229 | 36 , 230 | 37 , 231 | 38 , 232 | 39], 233 | [ 234 | '0.0', 235 | '0.5', 236 | '1.0', 237 | '1.5', 238 | '2.0', 239 | '2.5', 240 | '3.0', 241 | '3.5', 242 | '4.0', 243 | '4.5', 244 | '5.0', 245 | '5.5', 246 | '6.0', 247 | '6.5', 248 | '7.0', 249 | '7.5', 250 | '8.0', 251 | '8.5', 252 | '9.0', 253 | '9.5', 254 | '10.0', 255 | '10.5', 256 | '11.0', 257 | '11.5', 258 | '12.0', 259 | '12.5', 260 | '13.0', 261 | '13.5', 262 | '14.0', 263 | '14.5', 264 | '15.0', 265 | '15.5', 266 | '16.0', 267 | '16.5', 268 | '17.0', 269 | '17.5', 270 | '18.0', 271 | '18.5', 272 | '19.0', 273 | '19.5']) 274 | plt.show() 275 | 276 | 277 | 278 | 279 | 280 | 281 | -------------------------------------------------------------------------------- /CVASwap.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import Volatility as vol 3 | import pandas as pd 4 | import Bootstrapping as boot 5 | import copy as copy 6 | import BlackScholesSolver as bss 7 | from LMM import * 8 | import matplotlib.pyplot as plt 9 | import CDSBootstrapping as cds_boot 10 | 11 | # 1. calculates LOIS adjusted OIS simulations 12 | # 2. calculates CVA for 5 year IRS with Goldman Sachs as at 31/12/17 13 | # 3. Conducts sensitivity analysis to LOIS and LOIS-CDS correlation 14 | class CVASwap(): 15 | def __init__(self): 16 | self.recovery_rate = 0.4 17 | swaption_vol_cva_dataset_path = 'Data/SwaptionVolMatrix_5Y.csv' 18 | swap_curve_cva_dataset_path = 'Data/SpotCurve_5Y.csv' 19 | path = 'ois.csv' 20 | self.lmm = LMM(swaption_vol_cva_dataset_path, swap_curve_cva_dataset_path) 21 | self.time_increment = self.lmm.time_increment 22 | self.bootstrapping = self.lmm.bootstrapping 23 | self.lois = 0.00282385038573462 24 | self.working_lois = self.lois 25 | self.numeraire_index = 10 26 | self.set_ois_discount_curve(self.lois) 27 | self.cds_bootstrapping = cds_boot.CDSBootstrapping(self.recovery_rate, 28 | self.time_increment) 29 | self.lmm.run_projection(self.numeraire_index, self.numeraire_index) 30 | ##[terms,time, sim] 31 | self.forward_sims = self.lmm.forward_sims 32 | self.number_of_sims = self.lmm.number_of_sims 33 | self.notional = 1 34 | self.LGD = self.notional*(1- self.recovery_rate) 35 | self.swap_term = 5 36 | self.set_ois_discount_sims() 37 | self.calculate_CVA_for_5_year_swap() 38 | self.calculate_martingale_ratios_for_CVA_dataset_bonds_at_expiry_ois() 39 | # uncomment this to run sensiivity analysis 40 | # self.run_sensitivity_analysis() 41 | 42 | def set_ois_discount_curve(self, lois): 43 | libor_forward_rates = self.lmm.starting_forward_curve 44 | self.ois_discount_factors = np.ones(self.numeraire_index + 1) 45 | 46 | for i in range(1, self.numeraire_index + 1): 47 | self.ois_discount_factors[i] = self.ois_discount_factors[i-1]/(1 + self.time_increment*(libor_forward_rates[i-1] - lois)) 48 | 49 | def set_ois_discount_sims(self): 50 | lois_sims = self.working_lois * np.ones(self.number_of_sims) 51 | self.ois_DF = np.ones((self.numeraire_index + 1, 52 | self.numeraire_index + 1, 53 | self.number_of_sims)) 54 | for n in range(self.numeraire_index + 1): 55 | for i in range(n + 1, self.numeraire_index + 1): 56 | df_prod = np.ones(self.number_of_sims) 57 | for k in range(n, i): 58 | df_prod = df_prod / \ 59 | (np.ones(self.number_of_sims) + self.time_increment * (self.forward_sims[k, n, :] - lois_sims)) 60 | self.ois_DF[i, n, :] = df_prod 61 | 62 | def get_pv_of_swap_alternate(self, swap_length_steps, strike, time_index): 63 | swap_rate_libor = self.lmm.get_forward_swap_rate(time_index, swap_length_steps) 64 | annuity = np.zeros(self.number_of_sims) 65 | 66 | for i in range(time_index + 1, self.numeraire_index+1): 67 | annuity += self.ois_DF[i,time_index,:] 68 | output = (swap_rate_libor - strike)*self.time_increment*annuity 69 | return output 70 | 71 | def calculate_exposure(self, swap_pv): 72 | output = np.maximum(swap_pv, np.zeros(self.number_of_sims)) 73 | return output 74 | 75 | def run_sensitivity_analysis(self): 76 | swap_rate = float(self.bootstrapping.imported_swap_curve.iloc[self.swap_term][self.bootstrapping.yield_name]) 77 | swap_length_steps = int(self.swap_term / self.time_increment) 78 | correlation_increments = 8 79 | lois_increments = 62 80 | cva_matrix = np.zeros((correlation_increments, lois_increments)) 81 | lois_values = np.zeros(lois_increments) 82 | correlations = np.zeros(correlation_increments) 83 | cds_multipliers = np.zeros(lois_increments) 84 | 85 | for i in range(correlation_increments): 86 | correlation = 0.2 + 0.2*i 87 | correlations[i] = correlation 88 | for j in range(lois_increments): 89 | lois_multiplier = 0.1 + j*0.1 90 | lois_values[j] = self.lois*lois_multiplier 91 | self.working_lois = self.lois*lois_multiplier 92 | self.set_ois_discount_curve(self.working_lois) 93 | self.set_ois_discount_sims() 94 | cds_multiplier = correlation*lois_multiplier 95 | cds_multipliers[j] = cds_multiplier 96 | cva_matrix[i,j] = self.calculate_cva(swap_length_steps, swap_rate, cds_multiplier) 97 | np.savetxt('cva_matrix.csv', cva_matrix, delimiter=',') 98 | np.savetxt('lois_values.csv', lois_values, delimiter=',') 99 | 100 | def calculate_CVA_for_5_year_swap(self): 101 | swap_rate = float(self.bootstrapping.imported_swap_curve.iloc[self.swap_term][self.bootstrapping.yield_name]) 102 | swap_length_steps = int(self.swap_term / self.time_increment) 103 | cds_multiplier = 1 104 | cva = self.calculate_cva(swap_length_steps, swap_rate, cds_multiplier) 105 | 106 | exposure_stats = self.get_stats(self.exposure, swap_length_steps) 107 | mtm_stats = self.get_stats(self.swap_mtm, swap_length_steps) 108 | discounted_exposure_stats = self.get_stats(self.discounted_exposures, swap_length_steps) 109 | 110 | np.savetxt('exposure_stats.csv', exposure_stats, delimiter=',') 111 | np.savetxt('mtm_stats.csv', mtm_stats, delimiter=',') 112 | np.savetxt('discounted_exposure_stats.csv', discounted_exposure_stats, delimiter=',') 113 | 114 | def calculate_martingale_ratios_for_CVA_dataset_bonds_at_expiry_ois(self): 115 | bonds = np.zeros(self.numeraire_index) 116 | ratio = np.zeros(self.numeraire_index) 117 | difference = np.zeros(self.numeraire_index) 118 | 119 | for i in range(1, self.numeraire_index): 120 | numeraire_value = self.ois_DF[self.numeraire_index, i, :] 121 | t0_value = self.ois_DF[self.numeraire_index, 0, 0] 122 | bonds[i] = np.mean(1 / numeraire_value) * t0_value 123 | difference[i] = bonds[i] - self.ois_DF[i, 0, 0] 124 | ratio[i] = bonds[i] / self.ois_DF[i, 0, 0] 125 | np.savetxt('martingale_ratio_at_bond_expiry_CVA_dataset_ois_adjustment.csv', ratio, delimiter=',') 126 | 127 | def get_stats(self, sims, swap_length_steps): 128 | output = np.zeros((swap_length_steps + 1, 4)) 129 | output[:, 0] = np.mean(sims, axis=1) 130 | output[:, 1] = np.median(sims, axis=1) 131 | # no non-int percentiles in numpy so average 97 and 98th percentiles to get 97.5th percentile 132 | output[:, 2] = (np.percentile(sims, 97, axis=1) + np.percentile(sims, 98, axis=1)) / 2 133 | output[:, 3] = np.percentile(sims, 99, axis=1) 134 | return output 135 | 136 | 137 | def calculate_exposures(self, swap_length_steps, swap_rate): 138 | self.exposure = np.zeros((swap_length_steps + 1, self.number_of_sims)) 139 | self.swap_mtm = np.zeros((swap_length_steps + 1, self.number_of_sims)) 140 | 141 | for t in range(swap_length_steps): 142 | swap_pv = self.get_pv_of_swap_alternate(swap_length_steps, swap_rate,t) 143 | self.swap_mtm[t] = swap_pv 144 | swap_exposure = self.calculate_exposure(swap_pv) 145 | self.exposure[t] = swap_exposure 146 | return self.exposure 147 | 148 | def get_discounted_exposures(self, exposures, swap_length_steps): 149 | self.discounted_exposures = np.zeros((swap_length_steps + 1, self.number_of_sims)) 150 | 151 | for i in range(swap_length_steps+1): 152 | self.discounted_exposures[i,:] = self.ois_discount_factors[self.numeraire_index]\ 153 | *exposures[i]/self.ois_DF[self.numeraire_index,i,:] 154 | return self.discounted_exposures 155 | 156 | 157 | def calculate_cva(self, swap_length_steps, swap_rate, cds_multiplier): 158 | exposures = self.calculate_exposures(swap_length_steps, swap_rate) 159 | discounted_exposures = self.get_discounted_exposures(exposures, swap_length_steps) 160 | probability_of_survival = self.cds_bootstrapping.get_probability_of_survival(self.ois_discount_factors, cds_multiplier) 161 | # np.savetxt('probability_of_survival.csv', probability_of_survival, delimiter=',') 162 | sum = np.zeros(self.number_of_sims) 163 | for i in range(1, swap_length_steps): 164 | marginal_probability_of_default = probability_of_survival[i-1] \ 165 | - probability_of_survival[i] 166 | intepolated_exposure = (discounted_exposures[i - 1] + discounted_exposures[i]) / 2 167 | sum += np.maximum(0, marginal_probability_of_default)*intepolated_exposure 168 | self.CVA_stochastic = sum*self.LGD 169 | self.CVA_mean = np.mean(self.CVA_stochastic) 170 | CVA_99 = np.percentile(self.CVA_stochastic, 99) 171 | CVA_ninety_seventh = (np.percentile(self.CVA_stochastic, 97) + np.percentile(self.CVA_stochastic, 98)) / 2 172 | CVA_seventy_fifth = np.percentile(self.CVA_stochastic, 75) 173 | CVA_twenty_fifth = np.percentile(self.CVA_stochastic, 25) 174 | s = 'The mean CVA value of a 5 year USD libor interest rate swap as at 31/12/17 is ' + str(self.CVA_mean) + ' for a notional of 1.' 175 | print(s) 176 | s = 'The 99th percentile CVA value of a 5 year USD libor interest rate swap as at 31/12/17 is ' + str( 177 | CVA_99) + ' for a notional of 1.' 178 | print(s) 179 | return self.CVA_mean 180 | 181 | 182 | 183 | -------------------------------------------------------------------------------- /Bootstrapping.py: -------------------------------------------------------------------------------- 1 | import math as math 2 | import numpy as np 3 | import pandas as pd 4 | import matplotlib.pyplot as plt 5 | 6 | 7 | # 1. Imports swap curve and interpolates using linear interpolation, svensson fitting and cubic spline 8 | # 2. Using linear interpolation bootstraps zero coupon bond values from swap rates 9 | # 3. Generates Z matrix for use in Rebonato method 10 | # 4. Calculates forward swap rates from forward rates for use in swaption pricing. 11 | class Bootstrapping(): 12 | def __init__(self, swap_curve_path): 13 | # path = 'SpotCurve cut down.csv' 14 | path = swap_curve_path 15 | self.term_name = 'term' 16 | self.yield_name = 'yield' 17 | self.term_increment = 0.5 18 | self.notional = 1.0 19 | self.min_term = 0.5 20 | 21 | self.imported_swap_curve = pd.read_csv(path) 22 | self.max_term = int(max(self.imported_swap_curve[self.term_name])) 23 | self.number_of_terms = int((self.max_term) / self.term_increment) 24 | self.set_linear_interpolated_swap_rates_vector() 25 | # self.svensson = sven.svensson() 26 | # self.svensson.fit_parameters(self.imported_swap_curve) 27 | # self.swap_curve_function = \ 28 | # np.poly1d(np.polyfit(self.imported_swap_curve[self.term_name], 29 | # self.imported_swap_curve[self.yield_name], 3)) 30 | 31 | # xp = np.linspace(0.5, 50, 100) 32 | # plt.plot(self.imported_swap_curve[self.term_name], self.imported_swap_curve[self.yield_name], 33 | # '.', xp, self.swap_curve_function(xp), '-', xp, self.swap_curve_function(xp), '--') 34 | # plt.ylim(-2, 2) 35 | # plt.show() 36 | 37 | # xp = np.linspace(0.5, 50, 100) 38 | # plt.plot(self.imported_swap_curve[self.term_name], self.imported_swap_curve[self.yield_name], 39 | # '.', xp, self.svensson.get_value(xp), '-', xp, self.svensson.get_value(xp), '--') 40 | # plt.ylim(-2, 2) 41 | # plt.show() 42 | self.set_zero_coupon_prices_from_swap_rates() 43 | # xp = np.linspace(0.25, 29.75, 119) 44 | # plt.plot(xp, self.zero_coupon_prices, 45 | # '.', xp, self.swap_curve_function(xp), '-', xp, self.swap_curve_function(xp), '--') 46 | # plt.ylim(-2, 2) 47 | # plt.show() 48 | self.set_forward_curve() 49 | 50 | # Ai in Jaekal notes. t = 0. start_index = i 51 | def get_t0_floating_leg_value(self, start_index, end_index): 52 | sum = 0 53 | for j in range(start_index, end_index): 54 | sum += self.zero_coupon_prices[j+1]*self.forward_curve[j]*self.term_increment*self.notional 55 | return sum 56 | 57 | # Bi in Jaekal notes. t = 0 58 | def get_t0_fixed_leg_value(self, start_index, end_index): 59 | sum = 0 60 | for j in range(start_index, end_index): 61 | sum += self.zero_coupon_prices[j+1]*self.term_increment*self.notional 62 | return sum 63 | 64 | def convert_index_to_term_forward(self, i): 65 | return i*self.term_increment 66 | 67 | def set_Z_matrix(self, number_of_indexes): 68 | self.Z_matrix = np.zeros((number_of_indexes, number_of_indexes, number_of_indexes)) 69 | 70 | for start_index in range(0, number_of_indexes): 71 | for end_index in range(start_index + 1, number_of_indexes): 72 | for k in range(start_index, end_index): 73 | self.Z_matrix[start_index, end_index, k] = self.get_Z(start_index, end_index, k) 74 | 75 | def get_Z(self, start_index, end_index, k): 76 | if k < start_index: 77 | return 0 78 | if k == start_index: 79 | return 1 80 | 81 | const_weights_approx = self.notional*self.zero_coupon_prices[k+1]\ 82 | *self.forward_curve[k]*self.term_increment\ 83 | /self.get_t0_floating_leg_value(start_index, end_index) 84 | top = (self.get_t0_floating_leg_value(start_index, end_index) 85 | *self.get_t0_fixed_leg_value(k, end_index) - self.get_t0_floating_leg_value(k, end_index) 86 | *self.get_t0_fixed_leg_value(start_index, end_index))\ 87 | *self.forward_curve[k]*self.term_increment 88 | bottom = self.get_t0_floating_leg_value(start_index, end_index)*(1 + self.forward_curve[k]*self.term_increment) 89 | shape_correction = top/bottom 90 | return const_weights_approx + shape_correction 91 | 92 | def set_linear_interpolated_swap_rates_vector(self): 93 | self.interpolated_swap_rates = np.zeros(self.number_of_terms) 94 | previous_swap_length_steps = 0 95 | previous_swap_length = 0 96 | previous_swap_rate = 0 97 | count = 0 98 | 99 | for row_index, row in self.imported_swap_curve.iterrows(): 100 | swap_length = row[self.term_name] 101 | swap_length_steps = int((swap_length-self.min_term)/ self.term_increment) 102 | swap_rate = float(row[self.yield_name]) 103 | 104 | if row_index == 0: 105 | self.interpolated_swap_rates[count] = swap_rate 106 | previous_swap_length_steps = swap_length_steps 107 | previous_swap_length = swap_length 108 | previous_swap_rate = swap_rate 109 | count += 1 110 | else: 111 | distance = swap_length_steps - previous_swap_length_steps 112 | 113 | for i in range(1, distance): 114 | interpolate_length = previous_swap_length + self.term_increment*i 115 | interpolate_swap_rate = (interpolate_length - previous_swap_length)*swap_rate/(swap_length - previous_swap_length)\ 116 | + (swap_length - interpolate_length)*previous_swap_rate/(swap_length - previous_swap_length) 117 | self.interpolated_swap_rates[count] = interpolate_swap_rate 118 | count += 1 119 | self.interpolated_swap_rates[count] = swap_rate 120 | previous_swap_rate = swap_rate 121 | previous_swap_length = swap_length 122 | previous_swap_length_steps = swap_length_steps 123 | count += 1 124 | # np.savetxt('linear_interpolated_swap_values.csv', self.interpolated_swap_rates, delimiter=',') 125 | 126 | def get_forward_swap_rates(self, start, swap_length): 127 | start_index = int(start/self.term_increment) 128 | end_index = int((start + swap_length)/self.term_increment) 129 | top = self.zero_coupon_prices[start_index] - self.zero_coupon_prices[end_index] 130 | bottom = 0 131 | 132 | for i in range(start_index + 1, end_index + 1): 133 | bottom += self.term_increment * self.zero_coupon_prices[i] 134 | return top/bottom 135 | 136 | # start index = alpha, end_index = beta 137 | def get_forward_swap_rates_from_forward_rates(self, start_index, end_index, forward_rates): 138 | number_of_sims = len(forward_rates[0,:]) 139 | 140 | product = 1/(1+ self.term_increment*forward_rates[start_index, :]) 141 | for i in range(start_index + 1, end_index): 142 | product = product/(1 + self.term_increment*forward_rates[i, :]) 143 | floating_leg = np.ones(number_of_sims) - product 144 | 145 | bottom = 0 146 | 147 | for i in range(start_index, end_index): 148 | product = 1/(1+ self.term_increment*forward_rates[start_index, :]) 149 | 150 | for j in range(start_index + 1, i+1): 151 | product = product/(1 + self.term_increment*forward_rates[j, :]) 152 | 153 | bottom += self.term_increment*product 154 | return floating_leg/bottom 155 | 156 | # forward_rates [term, sim] 157 | def get_discount_factors_from_forward_rates(self, start_index, end_index, forward_rates, lois): 158 | number_of_sims = len(forward_rates[0, :]) 159 | lois_vector = np.ones(number_of_sims)*lois 160 | 161 | product = 1/(1 + self.term_increment*(forward_rates[start_index, :] - lois_vector)) 162 | for i in range(start_index + 1, end_index): 163 | product/(1 + self.term_increment*(forward_rates[i, :] - lois_vector)) 164 | return product 165 | 166 | def set_ois_adjusted_discount_factors(self, lois): 167 | self.ois_discount_factors = np.zeros(self.number_of_terms + 1) 168 | self.ois_discount_factors[0] = 1 169 | self.ois_discount_factors[1] = 1 / (1 + self.term_increment * (self.forward_curve[0] - lois)) 170 | 171 | for i in range(2, self.number_of_terms + 1): 172 | self.ois_discount_factors[i] = self.ois_discount_factors[i-1]/ (1 + self.term_increment * (self.forward_curve[i-1] - lois)) 173 | 174 | # zero_coupon_prices[0] = 1 175 | def set_zero_coupon_prices_from_swap_rates(self): 176 | self.zero_coupon_prices = np.zeros(self.number_of_terms + 1) 177 | 178 | self.zero_coupon_prices[0] = 1 179 | self.zero_coupon_prices[1] = 1/(1 + self.term_increment*self.interpolated_swap_rates[0]) 180 | 181 | for i in range(2, self.number_of_terms + 1): 182 | rN = self.interpolated_swap_rates[i - 1] 183 | sum = 0 184 | for k in range(1, i): 185 | sum += self.zero_coupon_prices[k] 186 | self.zero_coupon_prices[i] = (1 - rN*sum*self.term_increment)/(1+ rN*self.term_increment) 187 | 188 | def get_pv_of_swap_t0(self, swap_length_steps, swap_rate, forward_sims): 189 | fixed_leg = 0 190 | floating_leg = 0 191 | 192 | for i in range(swap_length_steps): 193 | zero_coupon_bond = 1 / (1 + self.term_increment * (forward_sims[0, :])) 194 | 195 | for j in range(1, i + 1): 196 | zero_coupon_bond = zero_coupon_bond / ( 197 | 1 + self.term_increment * (forward_sims[j, :])) 198 | fixed_leg += zero_coupon_bond * swap_rate * self.term_increment 199 | floating_leg += zero_coupon_bond * forward_sims[i, :] * self.term_increment 200 | return floating_leg - fixed_leg 201 | 202 | 203 | # forward_curve[0] = t = 0, 0.5 term spot rate 204 | def set_forward_curve(self): 205 | self.forward_curve = np.zeros(self.number_of_terms) 206 | self.forward_curve[0] = self.interpolated_swap_rates[0] 207 | 208 | for i in range(1, self.number_of_terms): 209 | self.forward_curve[i] = (self.zero_coupon_prices[i]/self.zero_coupon_prices[i+1] - 1)/self.term_increment 210 | 211 | -------------------------------------------------------------------------------- /Volatility.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import scipy.optimize as op 3 | from scipy.optimize import least_squares 4 | from math import * 5 | import matplotlib.pyplot as plt 6 | import pandas as pd 7 | from scipy.stats import norm 8 | import copy 9 | 10 | # 1. implements Rebonato method for calibration to swaption implied volatilities 11 | # 2. generates correlation and diffusion matrices for use in the LMM module 12 | # 3. Calculates swaption price from swaption volatility using Black formula for swaptions 13 | class Volatility(): 14 | def __init__(self, number_of_factors, bootstrapping, use_factor_reduction, swaption_vol_matrix_path): 15 | path = swaption_vol_matrix_path 16 | self.use_factor_reduction = use_factor_reduction 17 | self.vol_matrix = pd.read_csv(path) 18 | self.bootstrapping = bootstrapping 19 | self.term_name = 'expiry' 20 | self.terms = np.array(self.vol_matrix[self.term_name], float) 21 | self.tenors = np.array(self.vol_matrix.columns)[1:].astype(np.float) 22 | self.a = 2.81170636 23 | self.b = 0.30994865 24 | self.c = 0.08030116 25 | self.d = -2.74048478 26 | self.beta = 0.1 27 | self.number_of_factors = number_of_factors 28 | self.min_term = self.bootstrapping.min_term 29 | self.time_increment = self.min_term 30 | self.max_term = self.bootstrapping.max_term 31 | self.number_of_terms = self.bootstrapping.number_of_terms 32 | self.mc_adjustment_factor = 0.954816569302 33 | # self.mc_adjustment_factor = 1 34 | 35 | self.constant_increment_times = \ 36 | np.arange(0, self.max_term, self.time_increment) 37 | self.instantiate_arrays() 38 | 39 | def instantiate_arrays(self): 40 | self.set_vol_array() 41 | self.set_correlation_and_covariance_matrix() 42 | self.set_diffusion_matrix() 43 | 44 | def set_vol_array(self): 45 | time_array = np.zeros(self.number_of_terms) 46 | term_array = np.linspace(self.time_increment, self.max_term, num=(self.number_of_terms)) 47 | self.working_vol_array = ((self.a + self.b*(term_array - time_array))*np.exp(-self.c*(term_array - time_array)) + self.d)*self.mc_adjustment_factor 48 | self.forward_vol_matrix = np.diag(self.working_vol_array) 49 | 50 | def set_calibrated_vol_matrix(self): 51 | self.bootstrapping.set_Z_matrix(self.number_of_terms) 52 | self.calibrated_swaption_vol_matrix = copy.deepcopy(self.vol_matrix) 53 | start_time = 0.0 54 | 55 | for row_index, row in self.calibrated_swaption_vol_matrix.iterrows(): 56 | values = row.drop([self.term_name]) 57 | for column_index, v in values.items(): 58 | if False == isnan(v): 59 | swap_start_index = int(row[self.term_name] / self.time_increment) 60 | swap_end_index = int(float(column_index) / self.time_increment + swap_start_index) 61 | calculated_vol = self.get_swap_volatility(start_time, swap_start_index, swap_end_index) 62 | self.calibrated_swaption_vol_matrix.at[row_index, column_index] = calculated_vol 63 | np.savetxt('calibrated_vols.csv', self.calibrated_swaption_vol_matrix, delimiter=',') 64 | 65 | 66 | def calculate_volatility(self, time, term): 67 | output = ((self.a + self.b*(term - time))*np.exp(-self.c*(term - time)) + self.d) 68 | return output 69 | 70 | def set_parameters_swap(self, parameters): 71 | self.a = parameters[0] 72 | self.b = parameters[1] 73 | self.c = parameters[2] 74 | self.d = parameters[3] 75 | 76 | def get_parameters_swap(self): 77 | output = np.zeros(4) 78 | output[0] = self.a 79 | output[1] = self.b 80 | output[2] = self.c 81 | output[3] = self.d 82 | return output 83 | 84 | def get_swaption_price_t0_payer(self, start, swap_length, strike, swaption_volatility): 85 | d1 = (np.log(self.bootstrapping.get_forward_swap_rates(start, swap_length)/strike) + 86 | start*np.power(swaption_volatility, 2)/2)/(swaption_volatility*sqrt(start)) 87 | d2 = d1 - swaption_volatility*sqrt(start) 88 | Nd1 = norm.cdf(d1) 89 | Nd2 = norm.cdf(d2) 90 | 91 | sum = 0 92 | start_index = int((start/self.time_increment) - 1) 93 | end_index = int(((start + swap_length)/self.time_increment)-1) 94 | 95 | for i in range(start_index + 1, end_index + 1): 96 | sum += self.time_increment\ 97 | *self.bootstrapping.zero_coupon_prices[i]\ 98 | *(self.bootstrapping.get_forward_swap_rates(start, swap_length) 99 | *Nd1 - strike*Nd2) 100 | return sum 101 | 102 | def get_swaption_price_t0_receiver(self, start, swap_length, strike, swaption_volatility): 103 | d1 = (np.log(self.bootstrapping.get_forward_swap_rates(start, swap_length)/strike) + 104 | start*np.power(swaption_volatility, 2)/2)/(swaption_volatility*sqrt(start)) 105 | d2 = d1 - swaption_volatility*sqrt(start) 106 | Nd1 = norm.cdf(-d1) 107 | Nd2 = norm.cdf(-d2) 108 | 109 | sum = 0 110 | start_index = int((start/self.time_increment) - 1) 111 | end_index = int(((start + swap_length)/self.time_increment)-1) 112 | 113 | for i in range(start_index + 1, end_index + 1): 114 | sum += self.time_increment\ 115 | *self.bootstrapping.zero_coupon_prices[i]\ 116 | *(strike*Nd2 -self.bootstrapping.get_forward_swap_rates(start, swap_length)*Nd1) 117 | return sum 118 | 119 | def set_swaption_price_matrix(self): 120 | self.swaption_prices = copy.deepcopy(self.vol_matrix) 121 | 122 | for row_index, row in self.vol_matrix.iterrows(): 123 | values = row.drop([self.term_name]) 124 | for column_index, v in values.items(): 125 | if False == isnan(v): 126 | swap_length = float(column_index) 127 | start = row[self.term_name] 128 | # if swap_length < 15.0 and start < 15.0: 129 | K = self.bootstrapping.get_forward_swap_rates(start, swap_length) 130 | swaption_price_t0 = self.get_swaption_price_t0_payer(start, swap_length, K, v) 131 | self.swaption_prices.at[row_index, column_index] = swaption_price_t0 132 | 133 | def fit_parameters(self): 134 | initial_parameters = self.get_parameters_swap() 135 | self.bootstrapping.set_Z_matrix(self.number_of_terms) 136 | result = least_squares(self.objective_function_swap, initial_parameters) 137 | return result 138 | 139 | 140 | def objective_function_swap(self, parameters): 141 | num_rows = self.vol_matrix[self.term_name].count() 142 | num_columns = len(self.vol_matrix.columns) - 1 143 | number_of_iterations = int(num_rows*num_columns) 144 | sum = np.zeros(number_of_iterations) 145 | N = 0 146 | start_time = 0.0 147 | self.set_parameters_swap(parameters) 148 | 149 | for row_index, row in self.vol_matrix.iterrows(): 150 | values = row.drop([self.term_name]) 151 | for column_index, v in values.items(): 152 | if False == isnan(v): 153 | swap_start_index = int(row[self.term_name]/self.time_increment) 154 | swap_end_index = int(float(column_index)/self.time_increment + swap_start_index) 155 | calculated_vol = self.get_swap_volatility(start_time, swap_start_index, swap_end_index) 156 | sum[N] = calculated_vol - v 157 | N += 1 158 | return sum 159 | 160 | def get_swap_volatility(self, option_start_time, swap_start_index, swap_end_index): 161 | total_sum = 0 162 | for k in range(swap_start_index, swap_end_index): 163 | for l in range(swap_start_index, swap_end_index): 164 | time_k = self.constant_increment_times[k] 165 | time_l = self.constant_increment_times[l] 166 | swap_start_time = swap_start_index*self.time_increment 167 | 168 | integrated_covariance_end = self.get_integrated_covariance(swap_start_time, time_k, time_l) 169 | integrated_covariance_start = self.get_integrated_covariance(option_start_time, time_k, time_l) 170 | 171 | integrated_covariance = (integrated_covariance_end - integrated_covariance_start)\ 172 | /(swap_start_time - option_start_time) 173 | first_Z = self.bootstrapping.Z_matrix[swap_start_index, swap_end_index, k] 174 | second_Z = self.bootstrapping.Z_matrix[swap_start_index, swap_end_index, l] 175 | total_sum += first_Z*integrated_covariance*second_Z 176 | output = np.sqrt(total_sum) 177 | return output 178 | 179 | 180 | def get_correlation(self, term1, term2): 181 | return np.exp(-self.beta * abs(term1 - term2)) 182 | 183 | def set_correlation_and_covariance_matrix(self): 184 | self.correlation_matrix = np.ones((self.number_of_terms, self.number_of_terms)) 185 | 186 | for i in range(0, self.number_of_terms): 187 | for j in range(0, self.number_of_terms): 188 | self.correlation_matrix[i, j] = self.get_correlation((i+1)*self.time_increment, (j+1)*self.time_increment) 189 | self.covariance = np.matmul(self.forward_vol_matrix, np.matmul(self.correlation_matrix, self.forward_vol_matrix)) 190 | 191 | def set_diffusion_matrix(self): 192 | if self.mc_adjustment_factor == 0: 193 | self.chol_covariance = self.covariance 194 | self.working_chol_matrix = self.covariance 195 | else: 196 | self.chol_covariance = np.linalg.cholesky(self.covariance) 197 | 198 | if self.use_factor_reduction: 199 | self.working_chol_matrix = self.chol_covariance[:, :self.number_of_factors] 200 | 201 | for i in range(0, self.number_of_terms): 202 | for j in range(0, self.number_of_factors): 203 | sum = np.sum(np.power(self.chol_covariance[i,:self.number_of_factors], 2)) 204 | 205 | quotient = self.covariance[i, i] / sum 206 | self.working_chol_matrix[i, j] = self.working_chol_matrix[i, j] * np.sqrt(quotient) 207 | else: 208 | self.working_chol_matrix = self.chol_covariance 209 | 210 | 211 | def get_integrated_covariance(self, time, term_i, term_j): 212 | first_line = 4*self.a*np.power(self.c, 2)*self.d*(np.exp(self.c*(time - term_j)) 213 | + np.exp(self.c*(time - term_i))) \ 214 | + 4*np.power(self.c, 3)*np.power(self.d, 2)*time 215 | second_line = -4*self.b*self.c*self.d*np.exp(self.c*(time - term_i))*(self.c*(time - term_i) - 1) \ 216 | - 4*self.b*self.c*self.d*np.exp(self.c*(time - term_j))*(self.c*(time - term_j) - 1) 217 | third_line = np.exp(self.c*(2*time - term_i - term_j))\ 218 | *(2*np.power(self.a, 2)*np.power(self.c, 2) 219 | + 2*self.a*self.b*self.c*(1 + self.c*(term_i + term_j - 2*time)) 220 | + np.power(self.b, 2)*(1 + 2*np.power(self.c, 2)*(time - term_i)*(time - term_j) 221 | + self.c*(term_i + term_j - 2*time))) 222 | multiplier = self.get_correlation(term_i, term_j)/(4*np.power(self.c, 3)) 223 | return multiplier*(first_line + second_line + third_line) 224 | 225 | def get_correlated_volatility_cholesky_matrix(self, time): 226 | c_matrix = np.ones([self.number_of_terms,self.number_of_terms]) 227 | 228 | for i in range(0, self.number_of_terms): 229 | term1 = i*self.time_increment 230 | for j in range(0, self.number_of_terms): 231 | term2 = j*self.time_increment 232 | c_matrix[i, j] = self.get_correlation(term1, term2)\ 233 | *self.calculate_volatility(time, term1)\ 234 | *self.calculate_volatility(time, term2) 235 | a_bar_matrix = np.linalg.cholesky(c_matrix) 236 | 237 | if self.use_factor_reduction: 238 | reduced = np.zeros([self.number_of_terms, self.number_of_factors]) 239 | 240 | for i in range(0, self.number_of_terms): 241 | for j in range(0, self.number_of_factors): 242 | sum = 0 243 | 244 | for k in range(0, self.number_of_factors): 245 | sum += np.power(a_bar_matrix[i, k],2) 246 | 247 | quotient = c_matrix[i, i]/sum 248 | reduced[i, j] = a_bar_matrix[i, j] * np.sqrt(quotient) 249 | return reduced 250 | return a_bar_matrix 251 | 252 | 253 | 254 | 255 | 256 | 257 | 258 | 259 | 260 | -------------------------------------------------------------------------------- /LMM.py: -------------------------------------------------------------------------------- 1 | import numpy as np 2 | import Volatility as vol 3 | import pandas as pd 4 | import Bootstrapping as boot 5 | import copy as copy 6 | import BlackScholesSolver as bss 7 | from scipy.optimize import least_squares 8 | from math import * 9 | from scipy.optimize import minimize_scalar 10 | 11 | # 1. runs the libor market model and generates forward rate simulations 12 | # 2. produces swaption prices and implied volatilities 13 | # 3. fits an adjustment factor to adjust for drift error 14 | class LMM(): 15 | def __init__(self, swaption_vol_matrix_path, swap_curve_path): 16 | self.number_of_factors = 3 17 | self.use_factor_reduction = False 18 | self.number_of_sims = 10000 19 | self.max_projection_time = 40 20 | self.iterations = 0 21 | self.bootstrapping = boot.Bootstrapping(swap_curve_path) 22 | self.volatility = vol.Volatility(self.number_of_factors, self.bootstrapping, 23 | self.use_factor_reduction, swaption_vol_matrix_path) 24 | self.time_increment = self.bootstrapping.term_increment 25 | self.bs_solver = bss.BlackScholesSolver(self.volatility) 26 | self.number_of_terms = self.bootstrapping.number_of_terms 27 | ##[terms,time, sim] 28 | self.starting_forward_curve = self.bootstrapping.forward_curve 29 | 30 | def get_random_numbers(self): 31 | if self.use_factor_reduction: 32 | return np.random.normal(0, 1, (self.number_of_factors, self.number_of_sims)) 33 | return np.random.normal(0, 1, (self.number_of_terms, self.number_of_sims)) 34 | 35 | def set_implied_volatilities_from_prices(self): 36 | self.implied_volatilities_model_payer = copy.deepcopy(self.swaption_prices_calibration_payer) 37 | self.implied_volatilities_model_receiver = copy.deepcopy(self.swaption_prices_calibration_receiver) 38 | 39 | for row_index, row in self.swaption_prices_calibration_payer.iterrows(): 40 | start = row[self.volatility.term_name] 41 | values = row.drop([self.volatility.term_name]) 42 | for column_index, v in values.items(): 43 | if False == isnan(v): 44 | swap_length = float(column_index) 45 | self.bs_solver.set_parameters(start, swap_length, v) 46 | implied_volatility_payer = self.bs_solver.solve_and_get_implied_volatility_payer() 47 | self.implied_volatilities_model_payer.at[row_index, column_index] = implied_volatility_payer 48 | 49 | for row_index, row in self.swaption_prices_calibration_receiver.iterrows(): 50 | start = row[self.volatility.term_name] 51 | values = row.drop([self.volatility.term_name]) 52 | for column_index, v in values.items(): 53 | if False == isnan(v): 54 | swap_length = float(column_index) 55 | self.bs_solver.set_parameters(start, swap_length, v) 56 | implied_volatility_receiver = self.bs_solver.solve_and_get_implied_volatility_receiver() 57 | self.implied_volatilities_model_receiver.at[row_index, column_index] = implied_volatility_receiver 58 | 59 | np.savetxt('implied_volatility_model_payer.csv', self.implied_volatilities_model_payer, delimiter=',') 60 | np.savetxt('implied_volatility_model_receiver.csv', self.implied_volatilities_model_receiver, delimiter=',') 61 | 62 | 63 | def objective_function(self, parameters): 64 | self.iterations += 1 65 | self.volatility.set_parameters_swap(parameters) 66 | self.volatility.instantiate_arrays() 67 | self.set_swaption_prices_for_atm_calibration() 68 | sum = np.zeros(15) 69 | N=0 70 | for row_index, row in self.volatility.swaption_prices.iterrows(): 71 | values = row.drop([self.volatility.term_name]) 72 | for column_index, v in values.items(): 73 | if False == isnan(v): 74 | difference = v - self.swaption_prices_calibration_payer.at[row_index, column_index] 75 | sum[N] = difference 76 | N += 1 77 | return sum 78 | 79 | def objective_function_line_search(self, factor): 80 | self.iterations += 1 81 | self.volatility.mc_adjustment_factor = factor 82 | self.volatility.instantiate_arrays() 83 | self.set_swaption_prices_for_atm_calibration() 84 | self.set_implied_volatilities_from_prices() 85 | sum = np.zeros(15) 86 | N = 0 87 | for row_index, row in self.volatility.vol_matrix.iterrows(): 88 | values = row.drop([self.volatility.term_name]) 89 | for column_index, v in values.items(): 90 | if False == isnan(v): 91 | difference = v - self.implied_volatilities_model_payer.at[row_index, column_index] 92 | sum[N] = difference 93 | N += 1 94 | value = np.sum(np.power(sum,2)) 95 | return value 96 | 97 | def fit_adjustment_factor(self): 98 | result = minimize_scalar(self.objective_function_line_search, bounds=(0.1, 0.999), method='bounded') 99 | 100 | def set_swaption_prices_for_atm_calibration(self): 101 | self.swaption_prices_calibration_payer = copy.deepcopy(self.volatility.vol_matrix) 102 | self.swaption_prices_calibration_receiver = copy.deepcopy(self.volatility.vol_matrix) 103 | self.put_call_difference = copy.deepcopy(self.volatility.vol_matrix) 104 | 105 | for row_index, row in self.volatility.vol_matrix.iterrows(): 106 | number_of_time_steps_to_option_expiry = int(row[self.volatility.term_name] / self.time_increment) 107 | start = row[self.volatility.term_name] 108 | values = row.drop([self.volatility.term_name]) 109 | for column_index, v in values.items(): 110 | if False == isnan(v): 111 | swap_length = float(column_index) 112 | swap_length_steps = int(swap_length/self.time_increment) 113 | beta = swap_length_steps + number_of_time_steps_to_option_expiry 114 | numeraire_index = beta 115 | self.run_projection(numeraire_index, number_of_time_steps_to_option_expiry) 116 | forward_swap_rate = self.get_forward_swap_rate(number_of_time_steps_to_option_expiry, numeraire_index) 117 | strike = self.bootstrapping.get_forward_swap_rates(start, swap_length) 118 | strike_vector = np.ones(self.number_of_sims) * strike 119 | sum = np.zeros(self.number_of_sims) 120 | 121 | for i in range(number_of_time_steps_to_option_expiry + 1, numeraire_index + 1): 122 | sum += self.time_increment * self.DF[i, number_of_time_steps_to_option_expiry, :] 123 | 124 | payoff_receiver = np.maximum(strike_vector - forward_swap_rate, 0) * sum \ 125 | / self.DF[numeraire_index, number_of_time_steps_to_option_expiry, :] 126 | 127 | payoff_payer = np.maximum(forward_swap_rate - strike_vector, 0)*sum\ 128 | /self.DF[numeraire_index, number_of_time_steps_to_option_expiry,:] 129 | 130 | receiver_swaption = np.mean(payoff_receiver) * self.DF[numeraire_index, 0, 0] 131 | payer_swaption = np.mean(payoff_payer) * self.DF[numeraire_index, 0,0] 132 | 133 | 134 | self.swaption_prices_calibration_receiver.at[row_index, column_index] \ 135 | = receiver_swaption 136 | 137 | self.swaption_prices_calibration_payer.at[row_index, column_index] \ 138 | = payer_swaption 139 | 140 | np.savetxt('swap_rate_price_payer_model.csv', self.swaption_prices_calibration_payer, delimiter=',') 141 | np.savetxt('swap_rate_price_receiver_model.csv', self.swaption_prices_calibration_receiver, delimiter=',') 142 | 143 | def set_forward_sims(self, numeraire_index, number_of_projection_periods): 144 | self.forward_sims = np.zeros((numeraire_index, 145 | number_of_projection_periods+1, 146 | self.number_of_sims)) 147 | self.forward_sims[:, 0, :] = np.tile(self.starting_forward_curve[:numeraire_index], 148 | (self.number_of_sims, 1)).transpose() 149 | 150 | def get_forward_swap_rate(self, time_index, numeraire_index): 151 | sum = np.zeros(self.number_of_sims) 152 | 153 | for i in range(time_index+1, numeraire_index + 1): 154 | sum += self.time_increment*self.DF[i, time_index, :] 155 | output = (1 - (self.DF[numeraire_index, time_index,:]))/sum 156 | return output 157 | 158 | def set_discount_factors(self, numeraire_index, number_of_projection_periods): 159 | self.DF = np.ones((numeraire_index+1, 160 | number_of_projection_periods+1, 161 | self.number_of_sims)) 162 | for n in range(number_of_projection_periods+1): 163 | for i in range(n + 1, numeraire_index + 1): 164 | df_prod = np.ones(self.number_of_sims) 165 | for k in range(n, i): 166 | df_prod = df_prod / (np.ones(self.number_of_sims) + self.time_increment * self.forward_sims[k,n,:]) 167 | self.DF[i,n,:] = df_prod 168 | 169 | 170 | def run_projection(self, numeraire_index, number_of_projection_periods): 171 | self.set_forward_sims(numeraire_index, number_of_projection_periods) 172 | 173 | for n in range(number_of_projection_periods): 174 | diffusion = self.get_diffusion() 175 | 176 | for i in range(n+1, numeraire_index): 177 | summation = np.zeros(self.number_of_sims) 178 | 179 | for k in range(i + 1, numeraire_index): 180 | forward_sims = self.forward_sims[k, n, :] 181 | top = forward_sims * self.time_increment 182 | bottom = np.ones(self.number_of_sims) + forward_sims * self.time_increment 183 | quotient = top / bottom 184 | correlation = self.volatility.correlation_matrix[i, k] 185 | volatility = self.volatility.working_vol_array[k] 186 | summation += quotient * correlation * volatility 187 | 188 | drift = summation * self.volatility.working_vol_array[i] 189 | correction = np.ones(self.number_of_sims)*self.volatility.covariance[i,i]/2 190 | # as diffusion is time-homogenous it is the difference between term and time that counts 191 | step_diffusion = diffusion[i-n-1,:] 192 | step = self.forward_sims[i, n, :] * np.exp((-drift - correction) * self.time_increment + step_diffusion) 193 | self.forward_sims[i, n+1, :] = step 194 | self.set_discount_factors(numeraire_index, number_of_projection_periods) 195 | 196 | def run_projection_predictor_corrector(self, numeraire_index, number_of_projection_periods): 197 | self.set_forward_sims(numeraire_index, number_of_projection_periods) 198 | previous_drift = np.zeros((numeraire_index, self.number_of_sims)) 199 | 200 | for n in range(number_of_projection_periods): 201 | diffusion = self.get_diffusion() 202 | 203 | for i in range(n + 1, numeraire_index): 204 | summation = np.zeros(self.number_of_sims) 205 | 206 | for k in range(i + 1, numeraire_index): 207 | forward_sims = self.forward_sims[k, n, :] 208 | top = forward_sims * self.time_increment 209 | bottom = np.ones(self.number_of_sims) + forward_sims * self.time_increment 210 | quotient = top / bottom 211 | correlation = self.volatility.correlation_matrix[i, k] 212 | volatility = self.volatility.working_vol_array[k] 213 | summation += quotient * correlation * volatility 214 | 215 | constant_drift = summation * self.volatility.working_vol_array[i] 216 | 217 | if n == 0: 218 | working_drift = constant_drift 219 | else: 220 | working_drift = (previous_drift[i,:] + constant_drift)/2 221 | 222 | correction = np.ones(self.number_of_sims) * self.volatility.covariance[i, i] / 2 223 | step_diffusion = diffusion[i - n - 1, :] 224 | step = self.forward_sims[i, n, :] * np.exp((-working_drift - correction) * self.time_increment + step_diffusion) 225 | self.forward_sims[i, n + 1, :] = step 226 | previous_drift[i,:] = working_drift 227 | self.set_discount_factors(numeraire_index, number_of_projection_periods) 228 | 229 | def get_diffusion(self): 230 | random = self.get_random_numbers() 231 | vol_matrix = self.volatility.working_chol_matrix 232 | output = vol_matrix.dot(random) 233 | return output 234 | 235 | # Option expiry numeraire projection code. Not used in the main project 236 | # But used in the change of numeraire figure 12 results 237 | # Does not show same drift error as the run_projection code. 238 | # def run_projection_option_expiry_numeraire(self, numeraire_index): 239 | # self.set_forward_sims(self.number_of_terms - numeraire_index - 1, numeraire_index) 240 | # 241 | # for n in range(self.number_of_terms - numeraire_index - 1): 242 | # diffusion = self.get_diffusion() 243 | # 244 | # for i in range(numeraire_index, self.number_of_terms): 245 | # if (i > n): 246 | # summation = np.zeros(self.number_of_sims) 247 | # for k in range(i + 1, self.number_of_terms): 248 | # # forward_sims = np.exp(self.log_forward_sims[k, time_index, :]) 249 | # forward_sims = self.forward_sims[k - numeraire_index, n, :] 250 | # top = forward_sims * self.time_increment 251 | # bottom = np.ones(self.number_of_sims) + forward_sims * self.time_increment 252 | # quotient = top / bottom 253 | # correlation = self.volatility.correlation_matrix[i, k] 254 | # volatility = self.volatility.working_vol_array[k - n] 255 | # summation += quotient * correlation * volatility 256 | # 257 | # drift = summation * self.volatility.working_vol_array[i - n] 258 | # correction = np.ones(self.number_of_sims) * self.volatility.covariance[i, i] / 2 259 | # step_diffusion = diffusion[i - n - 1, :] 260 | # step = self.forward_sims[i - numeraire_index, n, :] * np.exp( 261 | # (-drift - correction) * self.time_increment + step_diffusion) 262 | # self.forward_sims[i - numeraire_index, n + 1, :] = step 263 | 264 | 265 | 266 | 267 | 268 | 269 | 270 | 271 | 272 | 273 | 274 | --------------------------------------------------------------------------------