├── .gitattributes ├── .gitignore ├── 00 Deterministic Growth-Endogeneous Grid.py ├── 01 Growth-Endogeneous Grid.py ├── 01 Growth-Howard Improvement.py ├── 01 Growth-Perturbation.py ├── 01 Growth-Value Function.py ├── 02 RBC Money-Perturbation.py ├── 02 RBC with Adjustment Costs-Perturbation.py ├── 02 RBC with Money-Perturbation.py ├── 02 RBC-Howard Improvement.py ├── 02 RBC-Perturbation - Fiscal Monetary Interaction.py ├── 02 RBC-Perturbation Government.py ├── 02 RBC-Perturbation Seperable Utility.py ├── 02 RBC-Perturbation.py ├── 02 RBC-Value Function.py ├── 03 Aiyagari-Howard Improvement.py ├── 03 Aiyagari-Value Function-Distribution.py ├── 03 Aiyagari-Value Function-Monte Carlo.py ├── 04 Bilbiie Income Risk.py ├── 04 Bilbiie Sticky Wages.py ├── 04 Bilbiie.py └── README.md /.gitattributes: -------------------------------------------------------------------------------- 1 | # Auto detect text files and perform LF normalization 2 | * text=auto 3 | -------------------------------------------------------------------------------- /.gitignore: -------------------------------------------------------------------------------- 1 | # Byte-compiled / optimized / DLL files 2 | __pycache__/ 3 | *.py[cod] 4 | *$py.class 5 | 6 | # C extensions 7 | *.so 8 | 9 | # Distribution / packaging 10 | .Python 11 | build/ 12 | develop-eggs/ 13 | dist/ 14 | downloads/ 15 | eggs/ 16 | .eggs/ 17 | lib/ 18 | lib64/ 19 | parts/ 20 | sdist/ 21 | var/ 22 | wheels/ 23 | pip-wheel-metadata/ 24 | share/python-wheels/ 25 | *.egg-info/ 26 | .installed.cfg 27 | *.egg 28 | MANIFEST 29 | 30 | # PyInstaller 31 | # Usually these files are written by a python script from a template 32 | # before PyInstaller builds the exe, so as to inject date/other infos into it. 33 | *.manifest 34 | *.spec 35 | 36 | # Installer logs 37 | pip-log.txt 38 | pip-delete-this-directory.txt 39 | 40 | # Unit test / coverage reports 41 | htmlcov/ 42 | .tox/ 43 | .nox/ 44 | .coverage 45 | .coverage.* 46 | .cache 47 | nosetests.xml 48 | coverage.xml 49 | *.cover 50 | *.py,cover 51 | .hypothesis/ 52 | .pytest_cache/ 53 | 54 | # Translations 55 | *.mo 56 | *.pot 57 | 58 | # Django stuff: 59 | *.log 60 | local_settings.py 61 | db.sqlite3 62 | db.sqlite3-journal 63 | 64 | # Flask stuff: 65 | instance/ 66 | .webassets-cache 67 | 68 | # Scrapy stuff: 69 | .scrapy 70 | 71 | # Sphinx documentation 72 | docs/_build/ 73 | 74 | # PyBuilder 75 | target/ 76 | 77 | # Jupyter Notebook 78 | .ipynb_checkpoints 79 | 80 | # IPython 81 | profile_default/ 82 | ipython_config.py 83 | 84 | # pyenv 85 | .python-version 86 | 87 | # pipenv 88 | # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. 89 | # However, in case of collaboration, if having platform-specific dependencies or dependencies 90 | # having no cross-platform support, pipenv may install dependencies that don't work, or not 91 | # install all needed dependencies. 
92 | #Pipfile.lock 93 | 94 | # celery beat schedule file 95 | celerybeat-schedule 96 | 97 | # SageMath parsed files 98 | *.sage.py 99 | 100 | # Environments 101 | .env 102 | .venv 103 | env/ 104 | venv/ 105 | ENV/ 106 | env.bak/ 107 | venv.bak/ 108 | 109 | # Spyder project settings 110 | .spyderproject 111 | .spyproject 112 | 113 | # Rope project settings 114 | .ropeproject 115 | 116 | # mkdocs documentation 117 | /site 118 | 119 | # mypy 120 | .mypy_cache/ 121 | .dmypy.json 122 | dmypy.json 123 | 124 | # Pyre type checker 125 | .pyre/ 126 | convergence.png 127 | *.png 128 | -------------------------------------------------------------------------------- /00 Deterministic Growth-Endogeneous Grid.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | 4 | This solves the stochastic growth model with value function iteration 5 | 6 | """ 7 | 8 | import time 9 | import numpy as np 10 | import matplotlib.pyplot as plt 11 | from numba import jit 12 | from scipy.optimize import fsolve 13 | from scipy.interpolate import interp1d 14 | 15 | 16 | class HH(): 17 | 18 | def __init__(self,theta = 0.4, beta = 0.99, delta = 0.019, sigma = 1, nk = 500): 19 | """Initialize the class with standard parameters""" 20 | self.theta, self.beta, self.delta = theta, beta, delta 21 | self.sigma, self.nk = sigma, nk 22 | 23 | #steady state quantities 24 | self.k_ss = ((beta*theta)/(1-beta*(1-delta)))**(1/(1-theta)) 25 | self.c_ss = self.k_ss*(1-beta*(1-delta)-beta*theta*delta)/(beta*theta) 26 | 27 | #discretizing the k grid 28 | self.kmin = 0.75*self.k_ss 29 | self.kmax = 1.25*self.k_ss 30 | self.k = np.linspace(self.kmin,self.kmax,self.nk) 31 | 32 | def utility(self, c): 33 | """Utility function, dependent on consumption c and sigma""" 34 | if self.sigma == 1: 35 | util = np.log(c) 36 | else: 37 | util = c**(1-self.sigma)/(1-self.sigma) 38 | return util 39 | 40 | def utility_prime(self, c): 41 | """Derivative of the utility function""" 42 | return c**(-self.sigma) 43 | 44 | def utility_prime_inverse(self, marginal): 45 | """Given marginal utility, gives back consumption""" 46 | return marginal**(-1/self.sigma) 47 | 48 | 49 | # Setting up stuff 50 | nk = 500 51 | hh = HH(nk = nk) 52 | nk, beta, sigma = hh.nk, hh.beta, hh.sigma 53 | k = hh.k 54 | # Cash in hand 55 | Gy = k**hh.theta + (1-hh.delta)*k 56 | Gyend = np.copy(Gy) 57 | kend = np.copy(Gy) 58 | interest = hh.theta*k**(hh.theta-1) + (1-hh.delta) 59 | # Initial guess for the policy function is all capital 60 | g = k 61 | g_new = np.copy(g) 62 | # Passing an interpolated function into the loop 63 | g_inter = interp1d(k, g) 64 | tol = 10**(-8) 65 | maxiter = 5000 66 | error = 1 67 | iter = 0 68 | test1 = (maxiter > iter) 69 | test2 = (error > tol) 70 | start1 = time.time() 71 | while test1 and test2: 72 | iter += 1 73 | for j in range(nk): 74 | g_new[j] = hh.utility_prime_inverse(beta*interest[j]*hh.utility_prime(g[j])) 75 | Gyend[j] = g_new[j] + k[j] 76 | res = lambda k: Gyend[j] - k**(hh.theta) - (1-hh.delta)*k 77 | kend[j] = fsolve(res, k[j]) 78 | 79 | g_inter = interp1d(kend,g_new,bounds_error = False, fill_value="extrapolate") 80 | error = np.linalg.norm(g_inter(k) - g) 81 | g = g_inter(k) 82 | test1 = (maxiter > iter) 83 | test2 = (error > tol) 84 | if iter % 50 == 0: 85 | print(iter, error) 86 | stop1 = time.time() 87 | print("\nEGM converged after %F seconds and %.0F iterations." 
% ((stop1-start1),iter)) 88 | 89 | 90 | # Given the optimal policy, generate a value function 91 | error = 1 92 | tol = 10**(-5) 93 | test = (error > tol) 94 | V = np.zeros(nk) 95 | V_new = np.copy(V) 96 | while test: 97 | V_new = hh.utility(g) + beta*V 98 | error = np.linalg.norm(V_new - V) 99 | V = V_new 100 | test = (error > tol) 101 | 102 | 103 | # Policy function for consumption into policy function for capital 104 | sigma = g 105 | g = hh.k**hh.theta + (1-hh.delta)*hh.k - sigma 106 | 107 | 108 | #print(toc-tic) 109 | fig, axes = plt.subplots(nrows = 2, ncols = 1, figsize=(10,5)) 110 | axes[0].plot(hh.k,V.transpose()) 111 | axes[0].set_title("Value functions") 112 | 113 | axes[1].plot(hh.k,g.transpose()) 114 | axes[1].plot(hh.k,hh.k) 115 | axes[1].set_title('Policy functions') 116 | plt.show() 117 | plt.savefig("convergence.png") 118 | 119 | 120 | # Setup the arrays 121 | T = 200 122 | g = interp1d(k,g,bounds_error = False, fill_value="extrapolate") 123 | sigma = interp1d(k,sigma,bounds_error = False, fill_value="extrapolate") 124 | K = np.zeros(T) 125 | K[0] = hh.k[int(nk/100)] 126 | 127 | 128 | # Simulating the economy period by period 129 | for t in range(1,T): 130 | K[t] = g(K[t-1]) 131 | out = K**hh.theta 132 | cons = out - g(K) + (1-hh.delta)*K 133 | cons1 = sigma(K) 134 | inv = out - cons 135 | 136 | 137 | # Plot the development of the economy 138 | t = range(T) 139 | fig, axes = plt.subplots(nrows = 2, ncols = 1, figsize=(10,5)) 140 | 141 | axes[0].plot(t, K, label = "Development of capital") 142 | axes[0].plot(t, np.ones(T)*hh.k_ss, label = "Steady state of capital") 143 | axes[0].set_title("Trajectory of capital") 144 | axes[0].set_xlabel("Period") 145 | axes[0].set_ylabel("Capital") 146 | 147 | axes[1].plot(t, out, label = "Output") 148 | axes[1].plot(t, cons, label = "Consumption") 149 | axes[1].plot(t, inv, label = "Investment") 150 | axes[1].set_title("GDP components") 151 | axes[1].set_xlabel("Period") 152 | axes[1].set_ylabel("GDP components") 153 | axes[1].legend(loc=5) 154 | plt.show() 155 | plt.savefig("simulation.png") 156 | 157 | 158 | # Calculating the steady state 159 | res = lambda k: g(k) - k 160 | steady_state = fsolve(res, hh.k_ss) 161 | print("\nThe numerical steady state is %F, while the true is %F." %(steady_state, hh.k_ss)) 162 | 163 | 164 | # Calculating the Euler equation error -------------------------------------------------------------------------------- /01 Growth-Endogeneous Grid.py: -------------------------------------------------------------------------------- 1 | 2 | """ 3 | 4 | This solves the stochastic growth model with the endogeneous grid method. 5 | 6 | The code adapts the McKay code on the endogeneous grid method to the 7 | stochastic growth model. 8 | 9 | """ 10 | 11 | import time 12 | import numpy as np 13 | import matplotlib.pyplot as plt 14 | import quantecon as qe 15 | from numba import jit 16 | from scipy.interpolate import interp2d 17 | 18 | 19 | # Supress warning 20 | import warnings 21 | warnings.filterwarnings("ignore") 22 | 23 | 24 | class HH(): 25 | """ 26 | Setups a class containing all necessary information to solve the model. 
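    It stores the model parameters, the steady-state capital stock, the capital
    grid, the Tauchen discretization of the TFP process, the implied factor
    prices R and w on the (z,k) grid, and the CRRA utility function together
    with its marginal utility and inverse marginal utility.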
27 | """ 28 | 29 | def __init__(self,theta = 0.4, beta = 0.99, delta = 0.019, sigma = 2, 30 | rho = 0.95, stdz = 0.007, nz = 21, nk = 500, m = 3): 31 | """Initialize the class with standard parameters""" 32 | self.theta, self.beta, self.delta = theta, beta, delta 33 | self.sigma, self.rho, self.stdz = sigma, rho, stdz 34 | self.nz, self.nk, self.m = nz, nk, m 35 | 36 | #steady state quantities 37 | self.k_ss = ((beta*theta)/(1-beta*(1-delta)))**(1/(1-theta)) 38 | self.c_ss = self.k_ss*(1-beta*(1-delta)-beta*theta*delta)/(beta*theta) 39 | 40 | #discretizing the k grid 41 | self.kmin = 0.75*self.k_ss 42 | self.kmax = 1.25*self.k_ss 43 | self.k = np.linspace(self.kmin,self.kmax,self.nk) 44 | 45 | # Approximations 46 | self.mc = qe.markov.approximation.tauchen(self.rho,self.stdz,0,self.m, 47 | self.nz) 48 | self.P = self.mc.P 49 | self.zs = np.exp(self.mc.state_values) 50 | 51 | # Prices 52 | capital, prod = np.meshgrid(self.k, self.zs) 53 | self.R = theta*capital**(theta-1)*prod + (1-delta) 54 | self.w = (1-theta)*capital**(theta)*prod 55 | 56 | def utility(self, c): 57 | """Utility function, dependent on consumption c and sigma""" 58 | if self.sigma == 1: 59 | util = np.log(c) 60 | else: 61 | util = c**(1-self.sigma)/(1-self.sigma) 62 | return util 63 | 64 | def uprime(self, c): 65 | """Marginal utility, dependent on consumption and sigma""" 66 | return c**(-self.sigma) 67 | 68 | def uprime_inv(self, mu): 69 | """Inverse of the marginal utility, 70 | dependent on consumption, and sigma""" 71 | return mu**(-1.0/self.sigma) 72 | 73 | 74 | # Generating a HH class: 75 | hh = HH(nz = 21, nk=500) 76 | mc, P, zs = hh.mc, hh.P, hh.zs 77 | 78 | 79 | # Extracting parameters 80 | nz, nk, beta, sigma = hh.nz, hh.nk, hh.beta, hh.sigma 81 | k, zs = hh.k, hh.zs 82 | tiledGrid = np.tile(k,(nz,1)) 83 | 84 | 85 | # Interpolation function 86 | def interp(x,y,x1): 87 | N = len(x) 88 | # Searching for the position in the grid where x1 belongs into 89 | i = np.minimum(np.maximum(np.searchsorted(x,x1,side='right'),1),N-1) 90 | # Defining the values which we use for the linear interpolation 91 | xl = x[i-1] 92 | xr = x[i] 93 | yl = y[i-1] 94 | yr = y[i] 95 | # Actual interpolation 96 | y1 = yl + (yr-yl)/(xr-xl) * (x1-xl) 97 | above = x1 > x[-1] 98 | below = x1 < x[0] 99 | # Where x1 is above the highest x, give back a modified interpolation 100 | y1 = np.where(above,y[-1] + (x1 - x[-1]) * (y[-1]-y[-2])/(x[-1]-x[-2]), y1) 101 | # Where x1 is below, give back y[0] and else the interpolated value 102 | #y1 = np.where(below,y[0],y1) 103 | y1 = np.where(below, y[0] + (x1 - x[0]) * (y[1] - y[0])/(x[1] - x[0]), y1) 104 | return y1, i 105 | 106 | 107 | # Class with prices 108 | interest = hh.R 109 | wage = hh.w 110 | 111 | 112 | # Given endogeneous assets, get back consumption 113 | def get_c(G,CurrentAssets = tiledGrid): 114 | """Function returning the vector of consumption for a given policy 115 | function G, which we interpolate onto the exogeneous grid""" 116 | return np.vstack([interest[i,:]*CurrentAssets[i]+wage[i,:]- 117 | interp(G[i],k,CurrentAssets[i])[0] for i in range(nz)]) 118 | 119 | 120 | # Function doing one iteration of the EGM algorithm 121 | def eulerBack(G,Pr,Pr_P,HH): 122 | """Function taking as input the savings rule G defined on a', prices Pr 123 | this and Pr_P next period, as well as a household class HH. 124 | 1. Extracts the consumption this period cp 125 | 2. Calculates marginal utility upcp 126 | 3. Computes the expected value and discounts it to this period upc 127 | 4. 
From FOC gets optimal consumption allocation 128 | 5. Get's the endogeneous grid in this period""" 129 | # compute next period's consumption conditional on next period's income 130 | cp = get_c(G) 131 | upcp = HH.uprime(cp) 132 | #compute E(u'(cp)) 133 | Eupcp = np.dot(HH.P,upcp) 134 | 135 | #use upc = R' * beta*Eupcp to solve for upc 136 | upc = beta*Pr_P.R*Eupcp 137 | 138 | #invert uprime to solve for c 139 | c = HH.uprime_inv(upc) 140 | 141 | #use budget constraint to find previous assets 142 | # (a' + c - y)/R = a 143 | a = (tiledGrid + c - Pr.w)/ Pr.R 144 | return a, c 145 | 146 | 147 | def SolveEGM(G,Pr,HH, tol = 1e-8): 148 | """Solves the households problem with the complete EGM algorithm.""" 149 | test = True 150 | for it in range(10000): 151 | a, c = eulerBack(G,Pr,Pr,HH) 152 | 153 | if it % 50 == 0: 154 | test = np.abs(a-G)/(np.abs(a)+np.abs(G)+tol) 155 | print("it = {0}, test = {1}".format(it,test.max())) 156 | if np.all(test < tol): 157 | break 158 | G = a 159 | return G, c 160 | 161 | 162 | #initialize policy function 163 | G = 10+0.1*tiledGrid 164 | 165 | 166 | # Running the function 167 | start = time.time() 168 | G, C = SolveEGM(G,hh,hh) 169 | stop = time.time() 170 | print("\nSolving the model with EGM took %F seconds" % (stop - start)) 171 | 172 | 173 | G = np.vstack([interp(G[i],k,tiledGrid[i])[0] for i in range(nz)]) 174 | 175 | 176 | # Function, finding the nearest neighbor 177 | @jit 178 | def find_nearest(array, value): 179 | """Function, finding the nearest element to value in the array "array" """ 180 | array = np.asarray(array) 181 | idx = (np.abs(array - value)).argmin() 182 | return array[idx] 183 | 184 | 185 | # Finding the indicator function for VFI 186 | indk = np.copy(G) 187 | for ik in range(nk): 188 | for iz in range(nz): 189 | nearestk = find_nearest(k, G[iz,ik]) 190 | indk[iz,ik] = np.where(k == nearestk)[0] 191 | 192 | 193 | # Extracting the value function to G 194 | def Value(C,indk, maxiter = 1000, tol = 10**(-8)): 195 | start = time.time() 196 | error = 1 197 | iter = 0 198 | V = hh.utility(C)/(1-hh.beta) 199 | Vnew = np.copy(V) 200 | test1 = (error > tol) 201 | test2 = (iter < maxiter) 202 | while test1 and test2: 203 | iter += 1 204 | for i in range(nz): 205 | for j in range(nk): 206 | Vnew[i,j] = hh.utility(C[i,j]) + beta*np.dot(hh.P[i,:], V[:,int(indk[i,j])]) 207 | error = np.linalg.norm(Vnew-V) 208 | V = np.copy(Vnew) 209 | test1 = (error > 10**(-8)) 210 | test2 = (iter < maxiter) 211 | stop = time.time() 212 | print("Extracting the associated value function took %F seconds." 
%(stop - start)) 213 | return V 214 | 215 | 216 | # Getting the value function 217 | V = Value(C,indk) 218 | 219 | 220 | 221 | # Plotting the solution of the households problem 222 | fig, axes = plt.subplots(nrows = 2, ncols = 1, figsize=(10,5)) 223 | axes[0].plot(hh.k,V.transpose()) 224 | axes[0].set_title("Value functions") 225 | 226 | axes[1].plot(hh.k,G.transpose()) 227 | axes[1].plot(hh.k,hh.k) 228 | axes[1].set_title('Policy functions') 229 | plt.show() 230 | 231 | 232 | # Simulate the economy 233 | T = 5000 234 | A = hh.mc.simulate(T, init = mc.state_values[int((hh.nz-1)/2)]) 235 | A = np.exp(A) 236 | K = np.zeros(T) 237 | Kind = hh.nk/2 238 | K[0] = hh.k[int(Kind)] 239 | g = interp2d(hh.k,hh.zs,G) 240 | sigma = interp2d(hh.k,hh.zs,C) 241 | 242 | 243 | # Simulating the economy period by period 244 | for t in range(1,T): 245 | K[t] = g(K[t-1],A[t-1]) 246 | out = A*K**hh.theta 247 | cons = np.copy(out) 248 | for t in range(T): 249 | cons[t] = out[t] - g(K[t],A[t]) + (1-hh.delta)*K[t] 250 | inv = out - cons 251 | 252 | 253 | # Plot the development of the economy 254 | t = range(T) 255 | fig, axes = plt.subplots(nrows = 2, ncols = 1, figsize=(10,5)) 256 | 257 | axes[0].plot(t, K) 258 | axes[0].set_title("Trajectory of capital") 259 | axes[0].set_xlabel("Period") 260 | axes[0].set_ylabel("Capital") 261 | 262 | axes[1].plot(t, out, label = "Output") 263 | axes[1].plot(t, cons, label = "Consumption") 264 | axes[1].plot(t, inv, label = "Investment") 265 | axes[1].set_title("GDP components") 266 | axes[1].set_xlabel("Period") 267 | axes[1].set_ylabel("GDP components") 268 | axes[1].legend(loc=5) 269 | plt.show() 270 | 271 | 272 | # Printing results of the simulation 273 | print("\nThe stochastic steady state is %F, with the true being %F" 274 | % (np.mean(K), hh.k_ss)) 275 | print("\nThe volatility of output, consumption and investment are %F, %F, and %F." 
276 | % (np.std(out)*100/np.mean(out),np.std(cons)*100/np.mean(cons), 277 | np.std(inv)*100/np.mean(inv))) 278 | 279 | -------------------------------------------------------------------------------- /01 Growth-Howard Improvement.py: -------------------------------------------------------------------------------- 1 | """ 2 | Spyder Editor 3 | 4 | This solves the stochastic growth model with policy function iteration 5 | 6 | """ 7 | 8 | import time 9 | import numpy as np 10 | import matplotlib.pyplot as plt 11 | import quantecon as qe 12 | from numba import jit 13 | 14 | # Supress warning 15 | import warnings 16 | warnings.filterwarnings("ignore") 17 | 18 | 19 | #parameters 20 | theta = 0.4 21 | delta = 0.019 22 | sigma = 2 23 | beta = 0.99 24 | nz = 21 25 | rho = 0.95 26 | stdz = 0.007 27 | m = 3 28 | nk = 500 29 | 30 | 31 | #discretizing the grid 32 | mc = qe.markov.approximation.tauchen(rho,stdz,0,m,nz) 33 | P = mc.P 34 | zs = np.exp(mc.state_values) 35 | 36 | 37 | #steady state quantities 38 | k_ss = ((beta*theta)/(1-beta*(1-delta)))**(1/(1-theta)) 39 | y_ss = k_ss**theta 40 | c_ss = k_ss*(1-beta*(1-delta)-beta*theta*delta)/(beta*theta) 41 | 42 | 43 | #discretizing the k grid 44 | kmin = 0.8*k_ss 45 | kmax = 1.2*k_ss 46 | k = np.linspace(kmin,kmax,nk) 47 | 48 | 49 | #create value and policy function objects 50 | g = np.ones((nz,nk))*c_ss 51 | newg = np.copy(g) 52 | V = np.copy(g) 53 | newV = np.copy(V) 54 | indk = np.copy(V) 55 | 56 | 57 | # Function to search nearest value on the grid 58 | @jit 59 | def find_nearest(array, value): 60 | array = np.asarray(array) 61 | idx = (np.abs(array - value)).argmin() 62 | return array[idx] 63 | 64 | 65 | # Finding the indicator for the value function iteration 66 | for ik in range(nk): 67 | for iz in range(nz): 68 | nearestk = find_nearest(k, zs[iz]*k[ik]**theta + (1-delta)*k[ik] - g[iz,ik]) 69 | indk[iz,ik] = np.where(k == nearestk)[0] 70 | 71 | 72 | #tolerance levels 73 | tolv = 10**-8 74 | maxiter = 1000 75 | 76 | 77 | # Howard improvement algorithm 78 | @jit 79 | def policy(g = g, newg = newg, V = V, newV = newV, indk = indk, sigma = sigma, beta = beta, nk = nk, nz = nz, k = k, zs = zs, P = P): 80 | iter = 0 81 | iter1 = iter 82 | diffg = 10 83 | start = time.time() 84 | 85 | while (diffg > tolv and iter tolv and iter11: 109 | raise RuntimeError("Model does not satisfy BK conditions -- non-existence") 110 | 111 | if np.max(np.linalg.eig(S)[0]) >1: 112 | raise RuntimeError("Model does not satisfy BK conditions -- mulitple stable solutions") 113 | 114 | # Impact matrix 115 | # Solution is x_{t}=P*x_{t-1}+Q*eps_t 116 | Q = -np.linalg.inv(B+A@P) @ E 117 | 118 | return P, Q 119 | 120 | 121 | # Using the function to solve the system 122 | P, Q = SolveSystem(A,B,C,E) 123 | 124 | 125 | # Calculate an impulse response 126 | T = 200 127 | IRF_RBC = np.zeros((nX,T)) 128 | IRF_RBC[:,0] = Q * 0.01 129 | 130 | 131 | # Impulse response functions for 100 periods 132 | for t in range(1,T): 133 | IRF_RBC[:,t] = P@IRF_RBC[:,t-1] 134 | 135 | # Normalizing with respect to the steady state 136 | for i in range(nX): 137 | IRF_RBC[i,:] = IRF_RBC[i,:] / X_SS[i] * 100 138 | # Normalizing the interest rate into percentage points difference 139 | IRF_RBC[1] = IRF_RBC[1] * X_SS[1] 140 | 141 | 142 | # List with the variable names 143 | names = ["TFP", "Interest", "Wage", "Capital", "Output", "Consumption"] 144 | 145 | # Plotting the results of the IRF 146 | fig, axes = plt.subplots(nrows = 2, ncols = 3, figsize = (10,5)) 147 | for i in range(nX): 148 | row = i // 3 149 | 
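    # i // 3 above picks the subplot row; i % 3 below picks the column of the 2x3 panel grid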
col = i % 3 150 | axes[row, col].plot(IRF_RBC[i,:]) 151 | axes[row, col].plot(np.zeros(T)) 152 | axes[row, col].set_title(names[i]) 153 | fig.tight_layout() 154 | # Dropping the empty plot 155 | #fig.delaxes(axes[1][2]) 156 | plt.show() 157 | -------------------------------------------------------------------------------- /01 Growth-Value Function.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | 4 | This solves the stochastic growth model with value function iteration 5 | 6 | """ 7 | 8 | import time 9 | import numpy as np 10 | import matplotlib.pyplot as plt 11 | import quantecon as qe 12 | from numba import jit 13 | 14 | # Supress warning 15 | import warnings 16 | warnings.filterwarnings("ignore") 17 | 18 | 19 | class HH(): 20 | 21 | def __init__(self,theta = 0.4, beta = 0.99, delta = 0.019, sigma = 2, 22 | rho = 0.95, stdz = 0.007, nz = 21, nk = 500, m = 3): 23 | """Initialize the class with standard parameters""" 24 | self.theta, self.beta, self.delta = theta, beta, delta 25 | self.sigma, self.rho, self.stdz = sigma, rho, stdz 26 | self.nz, self.nk, self.m = nz, nk, m 27 | 28 | #steady state quantities 29 | self.k_ss = ((beta*theta)/(1-beta*(1-delta)))**(1/(1-theta)) 30 | self.c_ss = self.k_ss*(1-beta*(1-delta)-beta*theta*delta)/(beta*theta) 31 | 32 | #discretizing the k grid 33 | kmin = 0.8*self.k_ss 34 | kmax = 1.2*self.k_ss 35 | self.k = np.linspace(kmin,kmax,self.nk) 36 | 37 | def utiltiy(self, c): 38 | """Utility function, dependent on consumption c and sigma""" 39 | if self.sigma == 1: 40 | util = np.log(c) 41 | else: 42 | util = c**(1-self.sigma)/(1-self.sigma) 43 | return util 44 | 45 | def markov(self): 46 | """Approximates the transistion probability of an AR(1) process 47 | using the methodology of Tauchen (1986) using the quantecon package 48 | 49 | Gives back the markov chain, the transition probabilities and the 50 | respective values of the states. 51 | """ 52 | self.mc = qe.markov.approximation.tauchen(self.rho,self.stdz,0,self.m, 53 | self.nz) 54 | self.P = self.mc.P 55 | self.zs = np.exp(self.mc.state_values) 56 | return self.mc, self.P, self.zs 57 | 58 | 59 | # Generating a HH class: 60 | hh = HH(nk=400) 61 | mc, P, zs = hh.markov() 62 | 63 | 64 | #Value function iteration function 65 | @jit 66 | def value_function(HH, tolv=10**(-8), maxiter=5000): 67 | """ 68 | Value function iteration that, given a class of HH solves for the 69 | optimal value function, and its associated policy function. 
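    Each iteration applies the Bellman operator on the (z,k) grid,
        V_new(z,k) = max_{k'} { u(z*k^theta + (1-delta)*k - k') + beta * E[ V(z',k') | z ] },
    with the expectation taken over the Tauchen transition matrix P, and stops
    once the norm of the change in V falls below tolv.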
70 | """ 71 | # Extracting the values 72 | theta, beta, delta, sigma = HH.theta, HH.beta, HH.delta, HH.sigma 73 | nz, nk, zs, k, P = HH.nz, HH.nk, HH.zs, HH.k, HH.P 74 | c_ss = HH.c_ss 75 | 76 | start = time.time() 77 | # Setting up the initial problem 78 | u = c_ss**(1-sigma)/(1-sigma) 79 | V = np.ones((nz,nk))*u/(1-beta) 80 | v = np.zeros((nz,nk)) 81 | g = np.zeros((nz,nk)) 82 | newV = np.zeros((nz,nk)) 83 | iter = 0 84 | diffV = 10 85 | # Checking the continuation criteria 86 | test1 = (iter < maxiter) 87 | test2 = (diffV > tolv) 88 | while (test1 and test2): 89 | iter = iter+1 90 | for iz in range(0,nz): 91 | for ik in range(0,nk): 92 | c = zs[iz]*k[ik]**theta+(1-delta)*k[ik]-k 93 | u = c**(1-sigma)/(1-sigma) 94 | u[c<0]= -1000000000 95 | v = u+beta*(np.dot(P[iz,:],V)) 96 | newV[iz,ik] = max(v) 97 | ind = np.argmax(v) 98 | g[iz,ik] = k[ind] 99 | diffV = np.linalg.norm(newV-V) 100 | V = newV.copy() 101 | print(iter, diffV) 102 | test1 = (itertolv) 104 | stop = time.time() 105 | print("\nValue function iteration converged after %.0F iterations and %.5F seconds" % (iter, (stop-start))) 106 | return V, g 107 | 108 | 109 | # Running the function 110 | V, g = value_function(hh) 111 | 112 | 113 | #Plotting the value and policy function 114 | fig, axes = plt.subplots(nrows = 2, ncols = 1, figsize=(10,5)) 115 | axes[0].plot(hh.k,V.transpose()) 116 | axes[0].set_title("Value functions") 117 | 118 | axes[1].plot(hh.k,g.transpose()) 119 | axes[1].plot(hh.k,hh.k) 120 | axes[1].set_title('Policy functions') 121 | plt.show() 122 | plt.savefig("convergence.png") 123 | 124 | 125 | # Simulate the economy 126 | T = 5000 127 | 128 | 129 | # Setup the arrays for the later simulation 130 | A = hh.mc.simulate(T, init = mc.state_values[int((hh.nz-1)/2)]) 131 | Aind = mc.get_index(A) 132 | A = np.exp(A) 133 | K = np.zeros(T) 134 | Kind = np.copy(K) 135 | Kind[0] = hh.nk/2 136 | K[0] = hh.k[int(Kind[0])] 137 | 138 | 139 | # Simulating the economy period by period 140 | for t in range(1,T): 141 | K[t] = g[int(Aind[t-1]),int(Kind[t-1])] 142 | Kind[t] = np.where(K[t] == hh.k)[0] 143 | out = A*K**hh.theta 144 | cons = out - g[np.int64(Aind),np.int64(Kind)] + (1-hh.delta)*K 145 | inv = out - cons 146 | 147 | 148 | # Plot the simulation of the economy 149 | t = range(T) 150 | fig, axes = plt.subplots(nrows = 2, ncols = 1, figsize=(10,5)) 151 | 152 | axes[0].plot(t, K) 153 | axes[0].set_title("Trajectory of capital") 154 | axes[0].set_xlabel("Period") 155 | axes[0].set_ylabel("Capital") 156 | 157 | axes[1].plot(t, out, label = "Output") 158 | axes[1].plot(t, cons, label = "Consumption") 159 | axes[1].plot(t, inv, label = "Investment") 160 | axes[1].set_title("GDP components") 161 | axes[1].set_xlabel("Period") 162 | axes[1].set_ylabel("GDP components") 163 | axes[1].legend(loc=5) 164 | plt.show() 165 | plt.savefig("simulation.png") 166 | 167 | 168 | print("\nThe stochastic steady state is %F, with the true being %F" 169 | % (np.mean(K), hh.k_ss)) 170 | print("\nThe volatility of output, consumption and investment are %F, %F, and %F." 
171 | % (np.std(out)*100/np.mean(out),np.std(cons)*100/np.mean(cons), 172 | np.std(inv)*100/np.mean(inv))) 173 | 174 | -------------------------------------------------------------------------------- /02 RBC Money-Perturbation.py: -------------------------------------------------------------------------------- 1 | """ 2 | Solves the RBC model with money and without capital using Perturbation 3 | Extension of the McKay material with respect to plotting and statistics 4 | Reference: https://alisdairmckay.com/Notes/HetAgents/index.html 5 | """ 6 | 7 | 8 | import autograd.numpy as np 9 | from autograd import jacobian 10 | np.set_printoptions(suppress=True,precision=4) 11 | import matplotlib.pyplot as plt 12 | import warnings 13 | 14 | 15 | # Number of Variables 16 | nX = 9 17 | # Number of shocks 18 | nEps = 2 19 | # Indexing the variables 20 | iZ, iY, iC, iW, iI, iM, iPI, iL, iEPS = range(nX) 21 | 22 | 23 | # Parameters 24 | alpha = 0.4 25 | beta = 0.99 26 | gamma = 1 27 | # Disutility from labor 28 | psi = 2 29 | 30 | # Chi of 5 gives approximately 1/3 of hours work in equilibrium 31 | chi = 20 32 | 33 | # Utility from money 34 | phi = 1 35 | 36 | # Growth rate of money supply 37 | mu = 0.01 38 | 39 | # Autocorrelation of the shock to money supply and technology 40 | rho_m = 0.0 41 | rho_a = 0.98 42 | 43 | # Defining a function, which gives back the steady state 44 | def SteadyState(): 45 | Z = 1. 46 | PI = mu 47 | I = (1+PI)/beta - 1 48 | L = ((1-alpha)/chi*Z**(gamma-1))**(1/(alpha+gamma*(1-alpha)+psi)) 49 | W = (1-alpha)*Z*L**(-alpha) 50 | Y = Z*L**(1-alpha) 51 | C = Y 52 | M = (C**(-gamma)*I/(1+I))**(-1/phi) 53 | EPS = 1 54 | 55 | X = np.zeros(nX) 56 | X[[iZ, iY, iC, iW, iI, iM, iPI, iL, iEPS]] = (Z, Y, C, W, I, M, PI, L, EPS) 57 | return X 58 | 59 | 60 | # Get the steady state 61 | X_SS = SteadyState() 62 | X_EXP = np.array(("Prod.", "Output", "Consumption", "Wage", "Interest", "Real Money", "Inflation", "Labour", "Aux", )) 63 | epsilon_SS = np.zeros(nEps) 64 | print("Variables: {}".format(X_EXP)) 65 | print("Steady state: {}".format(X_SS)) 66 | 67 | 68 | # Model equations 69 | def F(X_Lag,X,X_Prime,epsilon): 70 | # Unpack 71 | Z, Y, C, W, I, M, PI, L, EPS = X 72 | Z_L, Y_L, C_L, W_L, I_L, M_L, PI_L, L_L, EPS_L = X_Lag 73 | Z_P, Y_P, C_P, W_P, I_P, M_P, PI_P, L_P, EPS_P = X_Prime 74 | epsilon_a, epsilon_m = epsilon 75 | return np.hstack(( 76 | rho_a*np.log(Z_L) + epsilon_a - np.log(Z), # TFP evolution 77 | Z*L**(1-alpha) - Y, # Production function 78 | Y - C, # Aggregate resource constraint 79 | (1-alpha)*Z*L**(-alpha) - W, # MPL 80 | C**(-gamma) - beta*C_P**(-gamma)*(1+I)/(1+PI_P), # Euler equation 81 | C**(-gamma)*I/(1+I) - (M)**(-phi), # Real Money demand 82 | np.log(EPS) - rho_m*np.log(EPS_L) - epsilon_m, # Auxilary equation for epsilon 83 | M - (1+mu)/(1+PI)*M_L*np.exp(np.log(EPS)), # Real Money supply development 84 | W - chi*L**psi*C**gamma, # Labour allocation 85 | )) 86 | 87 | 88 | # Check whether at the steady state F is zero 89 | assert(np.allclose( F(X_SS,X_SS,X_SS,epsilon_SS) , np.zeros(nX))) 90 | 91 | 92 | # Compute the numerical derivative 93 | A = jacobian(lambda x: F(X_SS,X_SS,x,epsilon_SS))(X_SS) 94 | B = jacobian(lambda x: F(X_SS,x,X_SS,epsilon_SS))(X_SS) 95 | C = jacobian(lambda x: F(x,X_SS,X_SS,epsilon_SS))(X_SS) 96 | E = jacobian(lambda x: F(X_SS,X_SS,X_SS,x))(epsilon_SS) 97 | 98 | 99 | # Function to solve the system based on McKays material 100 | def SolveSystem(A,B,C,E,P0=None): 101 | # Solve the system using linear time iteration as in Rendahl (2017) 102 | 
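    # The linearised model stacks the equilibrium conditions as
    #   A*E_t[x_{t+1}] + B*x_t + C*x_{t-1} + E*eps_t = 0.
    # Guessing x_t = P*x_{t-1} + Q*eps_t reduces this to the quadratic matrix
    # equation A*P^2 + B*P + C = 0, which the loop below solves by iterating
    # P <- -inv(B + A*P)*C until the residual is numerically zero. S solves the
    # mirror-image problem and is only used for the Blanchard-Kahn checks
    # (P stable: a solution exists; S stable: it is the unique stable one).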
#print("Solving the system") 103 | MAXIT = 1000 104 | if P0 is None: 105 | P = np.zeros(A.shape) 106 | else: 107 | P = P0 108 | 109 | S = np.zeros(A.shape) 110 | 111 | for it in range(MAXIT): 112 | P = -np.linalg.lstsq(B+A@P,C,rcond=None)[0] 113 | S = -np.linalg.lstsq(B+C@S,A,rcond=None)[0] 114 | test = np.max(np.abs(C+B@P+A@P@P)) 115 | #if it % 20 == 0: 116 | #print(test) 117 | if test < 1e-10: 118 | break 119 | 120 | 121 | if it == MAXIT-1: 122 | warnings.warn('LTI did not converge.') 123 | 124 | 125 | # test Blanchard-Kahn conditions 126 | if np.max(np.linalg.eig(P)[0]) >1: 127 | raise RuntimeError("Model does not satisfy BK conditions -- non-existence") 128 | 129 | if np.max(np.linalg.eig(S)[0]) >1: 130 | raise RuntimeError("Model does not satisfy BK conditions -- mulitple stable solutions") 131 | 132 | # Impact matrix 133 | # Solution is x_{t}=P*x_{t-1}+Q*eps_t 134 | Q = -np.linalg.inv(B+A@P) @ E 135 | 136 | return P, Q 137 | 138 | 139 | # Using the function to solve the system 140 | P, Q = SolveSystem(A,B,C,E) 141 | 142 | # Calculate an impulse response for a real monetary shock 143 | T = 40 144 | IRF_RBC = np.zeros((nX,T)) 145 | # First entry is productivity shock and second is the monetary shock 146 | shock = np.array((0,0.01)) 147 | IRF_RBC[:,0] = np.transpose(Q @ shock) 148 | 149 | # Impulse response functions for 100 periods 150 | for t in range(1,T): 151 | IRF_RBC[:,t] = P@IRF_RBC[:,t-1] 152 | 153 | 154 | # Normalizing with respect to the steady state 155 | for i in range(nX): 156 | IRF_RBC[i,:] = IRF_RBC[i,:] / X_SS[i] * 100 157 | # Normalizing the interest rate and inflation into percentage points difference 158 | IRF_RBC[4] = IRF_RBC[4] * X_SS[4] 159 | IRF_RBC[6] = IRF_RBC[6] * X_SS[6] 160 | 161 | # Drop all IRFs that are below e**(-15) 162 | criterion = ((np.abs(IRF_RBC) < 10**(-10))) 163 | IRF_RBC[criterion] = 0.0 164 | 165 | 166 | # List with the variable names 167 | names = ["Prod.", "Output", "Consumption", "Wage", "Interest", "Money", "Inflation", "Labour"] 168 | 169 | 170 | # Plotting the results of the IRF 171 | fig, axes = plt.subplots(nrows = 2, ncols = 4, figsize = (10,5)) 172 | for i in range(nX-1): 173 | row = i // 4 174 | col = i % 4 175 | axes[row, col].plot(IRF_RBC[i,:]) 176 | axes[row, col].plot(np.zeros(T)) 177 | axes[row, col].set_title(names[i]) 178 | fig.tight_layout() 179 | 180 | 181 | 182 | # Comparison of the volatility of real variables and the model variables 183 | sigma_a = np.sqrt(0.000049) 184 | sigma_m = np.sqrt(0.0001) 185 | T = 5000 186 | TT = 500 # Periods that are plotted in the end 187 | 188 | # Defining empty matrices for simulation and drawing shocks 189 | SIM_RBC = np.zeros((nX,T)) 190 | eps_a = np.random.normal(0,sigma_a,T) 191 | eps_m = np.random.normal(0,sigma_m,T) 192 | eps_t = np.array((eps_a, eps_m)) 193 | 194 | # Calculating the intercept for the simulation 195 | intercept = (np.eye(nX) - P)@X_SS 196 | 197 | # Initialize the variables at their steady state 198 | SIM_RBC[:,0] = X_SS 199 | for t in range(1,T): 200 | # Development of individual variables 201 | SIM_RBC[:,t] = intercept + P@SIM_RBC[:,t-1] + Q@eps_t[:,t] 202 | # Transition of shock in logs 203 | SIM_RBC[0,t] = np.exp(P[0,0]*np.log(SIM_RBC[0,t-1]) + eps_t[0,t]) 204 | SIM_RBC[8,t] = np.exp(P[8,8]*np.log(SIM_RBC[8,t-1]) + eps_t[1,t]) 205 | 206 | 207 | # Plotting the development 208 | fig, axes = plt.subplots(nrows = 2, ncols = 4, figsize = (18,9)) 209 | for i in range(nX-1): 210 | row = i // 4 211 | col = i % 4 212 | axes[row, col].plot(SIM_RBC[i,0:TT]) 213 | axes[row, 
col].plot(np.ones(TT)*X_SS[i]) 214 | axes[row, col].set_title(names[i]) 215 | fig.tight_layout() 216 | plt.show() -------------------------------------------------------------------------------- /02 RBC with Adjustment Costs-Perturbation.py: -------------------------------------------------------------------------------- 1 | """ 2 | Solves an RBC model with capital adjustment costs using Perturbation 3 | Extension of the McKay material introducing investment adjustment costs 4 | """ 5 | 6 | 7 | import autograd.numpy as np 8 | from autograd import jacobian 9 | np.set_printoptions(suppress=True,precision=4) 10 | import matplotlib.pyplot as plt 11 | import warnings 12 | 13 | 14 | # Number of Variables 15 | nX = 9 16 | # Number of shocks 17 | nEps = 1 18 | # Indexing the variables 19 | iZ, iY, iC, iI, iR, iQ, iK, iW, iL = range(nX) 20 | 21 | 22 | # Parameters 23 | alpha = 0.4 24 | beta = 0.99 25 | gamma = 2 26 | vega = 0.36 27 | delta = 0.019 28 | phi = 2 29 | rho = 0.95 30 | 31 | 32 | # Defining a function, which gives back the steady state 33 | def SteadyState(): 34 | Z = 1. 35 | R = 1/beta - (1-delta) 36 | Q = 1 37 | W = (1-alpha)*((alpha*Z)/R)**(alpha/(1-alpha)) 38 | KL = (R/alpha)**(1./(alpha-1)) 39 | YL = KL**alpha 40 | CL = YL - delta*KL 41 | Ll = (1-vega)/vega*CL/(1-alpha)*KL**(-alpha) 42 | L = 1/(1+Ll) 43 | K = KL*L 44 | Y = YL*L 45 | C = CL*L 46 | I = Y - C 47 | 48 | X = np.zeros(nX) 49 | X[[iZ, iY, iC, iI, iR, iQ, iK, iW, iL]] = (Z, Y, C, I, R, Q, K, W, L) 50 | return X 51 | 52 | 53 | # Get the steady state 54 | X_SS = SteadyState() 55 | X_EXP = np.array(("Prod.", "Output", "Consumption", "Investment", "Interest", "Price of Capital", "Capital", "Wage", "Labour", )) 56 | epsilon_SS = 0.0 57 | print("Variables: {}".format(X_EXP)) 58 | print("Steady state: {}".format(X_SS)) 59 | 60 | 61 | # Model equations 62 | def F(X_Lag,X,X_Prime,epsilon): 63 | 64 | # Unpack 65 | Z, Y, C, I, R, Q, K, W, L = X 66 | Z_L, Y_L, C_L, I_L, R_L, Q_L, K_L, W_L, L_L = X_Lag 67 | Z_P, Y_P, C_P, I_P, R_P, Q_P, K_P, W_P, L_P = X_Prime 68 | return np.hstack(( 69 | beta * (R_P + (1-delta))*Q_P * vega/C_P*(C_P**vega*(1-L_P)**(1-vega))**(1-gamma) / 70 | (vega/C*(C**vega*(1-L)**(1-vega))**(1-gamma)) - Q, # Euler equation 71 | Q*(1-phi/2*(I/I_L-1)**2-phi*(I/I_L-1)*I/I_L) + beta*vega/C_P*(C_P**vega*(1-L_P)**(1-vega))**(1-gamma) / 72 | (vega/C*(C**vega*(1-L)**(1-vega))**(1-gamma)) * Q_P * phi *(I_P/I-1)*(I_P/I)**2 - 1, # Development of Q 73 | alpha * Z * (K_L/L) **(alpha-1) - R, # MPK 74 | (1-alpha)*Z*(K_L/L)**(alpha) - W, # MPL 75 | C/(1-L) - vega/(1-vega)*(1-alpha)*Z*(K_L/L)**alpha, # Labour allocation 76 | (1-delta) * K_L + Y - C - K, # Aggregate resource constraint 77 | Z*K_L**alpha * (L)**(1-alpha) - Y, # Production function 78 | (1-delta) * K_L + I - K, # Investment 79 | rho * np.log(Z_L) + epsilon - np.log(Z) # TFP evolution 80 | )) 81 | 82 | 83 | # Check whether at the steady state F is zero 84 | assert(np.allclose( F(X_SS,X_SS,X_SS,epsilon_SS) , np.zeros(nX))) 85 | 86 | 87 | # Compute the numerical derivative 88 | A = jacobian(lambda x: F(X_SS,X_SS,x,epsilon_SS))(X_SS) 89 | B = jacobian(lambda x: F(X_SS,x,X_SS,epsilon_SS))(X_SS) 90 | C = jacobian(lambda x: F(x,X_SS,X_SS,epsilon_SS))(X_SS) 91 | E = jacobian(lambda x: F(X_SS,X_SS,X_SS,x))(epsilon_SS) 92 | 93 | 94 | # Function to solve the system based on McKays material 95 | def SolveSystem(A,B,C,E,P0=None): 96 | # Solve the system using linear time iteration as in Rendahl (2017) 97 | #print("Solving the system") 98 | MAXIT = 1000 99 | if P0 is None: 100 | P = 
np.zeros(A.shape) 101 | else: 102 | P = P0 103 | 104 | S = np.zeros(A.shape) 105 | 106 | for it in range(MAXIT): 107 | P = -np.linalg.lstsq(B+A@P,C,rcond=None)[0] 108 | S = -np.linalg.lstsq(B+C@S,A,rcond=None)[0] 109 | test = np.max(np.abs(C+B@P+A@P@P)) 110 | #if it % 20 == 0: 111 | #print(test) 112 | if test < 1e-10: 113 | break 114 | 115 | 116 | if it == MAXIT-1: 117 | warnings.warn('LTI did not converge.') 118 | 119 | 120 | # test Blanchard-Kahn conditions 121 | if np.max(np.linalg.eig(P)[0]) >1: 122 | raise RuntimeError("Model does not satisfy BK conditions -- non-existence") 123 | 124 | if np.max(np.linalg.eig(S)[0]) >1: 125 | raise RuntimeError("Model does not satisfy BK conditions -- mulitple stable solutions") 126 | 127 | # Impact matrix 128 | # Solution is x_{t}=P*x_{t-1}+Q*eps_t 129 | Q = -np.linalg.inv(B+A@P) @ E 130 | 131 | return P, Q 132 | 133 | 134 | # Using the function to solve the system 135 | P, Q = SolveSystem(A,B,C,E) 136 | 137 | 138 | # Calculate an impulse response 139 | T = 200 140 | IRF_RBC = np.zeros((nX,T)) 141 | IRF_RBC[:,0] = Q * 0.01 142 | 143 | 144 | # Impulse response functions for 100 periods 145 | for t in range(1,T): 146 | IRF_RBC[:,t] = P@IRF_RBC[:,t-1] 147 | 148 | 149 | # Normalizing with respect to the steady state 150 | for i in range(nX): 151 | IRF_RBC[i,:] = IRF_RBC[i,:] / X_SS[i] * 100 152 | # Normalizing the interest rate into percentage points difference 153 | IRF_RBC[1] = IRF_RBC[1] * X_SS[1] 154 | 155 | 156 | # List with the variable names 157 | names = ["TFP", "Output", "Consumption", "Investment", "Interest", "Price of Capital", "Capital", "Wage", "Labour"] 158 | 159 | # Drop all IRFs that are below e**(-15) 160 | criterion = ((np.abs(IRF_RBC) < 10**(-10))) 161 | IRF_RBC[criterion] = 0.0 162 | 163 | 164 | # Plotting the results of the IRF 165 | fig, axes = plt.subplots(nrows = 3, ncols = 3, figsize = (10,5)) 166 | for i in range(nX): 167 | row = i // 3 168 | col = i % 3 169 | axes[row, col].plot(IRF_RBC[i,:]) 170 | axes[row, col].plot(np.zeros(T)) 171 | axes[row, col].set_title(names[i]) 172 | fig.tight_layout() 173 | plt.show() 174 | 175 | 176 | # Comparison of the volatility of real variables and the model variables 177 | sigma = np.sqrt(0.000049) 178 | T = 5000 179 | TT = 500 # Periods that are plotted in the end 180 | # Defining empty matrices for simulation and drawing shocks 181 | SIM_RBC = np.zeros((nX,T)) 182 | eps_t = np.random.normal(0,sigma,T) 183 | # Calculating the intercept for the simulation 184 | intercept = (np.eye(nX) - P)@X_SS 185 | # Initialize the variables at their steady state 186 | SIM_RBC[:,0] = X_SS 187 | for t in range(1,T): 188 | # Development of individual variables 189 | SIM_RBC[:,t] = intercept + P@SIM_RBC[:,t-1] + eps_t[t]*Q 190 | # Transition of shock in logs 191 | SIM_RBC[0,t] = np.exp(P[0,0]*np.log(SIM_RBC[0,t-1]) + Q[0] * eps_t[t]) 192 | 193 | 194 | # Plotting the development 195 | fig, axes = plt.subplots(nrows = 3, ncols = 3, figsize = (10,5)) 196 | for i in range(nX): 197 | row = i // 3 198 | col = i % 3 199 | axes[row, col].plot(SIM_RBC[i,0:TT]) 200 | axes[row, col].plot(np.ones(TT)*X_SS[i]) 201 | axes[row, col].set_title(names[i]) 202 | fig.tight_layout() 203 | plt.show() 204 | 205 | 206 | # Quickly renaming for easier reference 207 | Y = SIM_RBC 208 | 209 | 210 | # Print the results of the simulation 211 | print("\nThe stochastic steady state is %F, with the true being %F" % (np.mean(Y[iK,:]), X_SS[iK])) 212 | print("The volatility of output, consumption and investment are %F, %F, and %F." 
% (np.std(Y[iY])*100/np.mean(Y[iY]),np.std(Y[iC])*100/np.mean(Y[iC]), np.std(Y[iI])*100/np.mean(Y[iI]))) 213 | print("The mean of consumption, investment, capital, and labor in relation to output are %F, %F, %F, and %F." % (np.mean(Y[iC]*100/Y[iY]), np.mean(Y[iI]*100/Y[iY]), np.mean(Y[iK]*100/(4*Y[iY])), np.mean(Y[iL]*100))) 214 | print("The CV of consumption, investment and labor in relation to the CV of output are %F, %F, and %F." % ((np.std(Y[iC])*100/np.mean(Y[iC]))/(np.std(Y[iY])*100/np.mean(Y[iY])),(np.std(Y[iI])*100/np.mean(Y[iI]))/(np.std(Y[iY])*100/np.mean(Y[iY])),(np.std(Y[iL])*100/np.mean(Y[iL]))/(np.std(Y[iY])*100/np.mean(Y[iY])))) 215 | print("The correlation of consumption, investment and labor with output are %F, %F, and %F." %(np.corrcoef(Y[iY],Y[iC])[0,1], np.corrcoef(Y[iY],Y[iI])[0,1], np.corrcoef(Y[iY], Y[iL])[0,1])) 216 | 217 | print("\nThe volatility, the relative CV and correlation with output of Q are %F, %F, and %F." %(np.std(Y[iQ])*100/np.mean(Y[iQ]),(np.std(Y[iQ])*100/np.mean(Y[iQ]))/(np.std(Y[iY])*100/np.mean(Y[iY])), np.corrcoef(Y[iY], Y[iQ])[1,0])) 218 | -------------------------------------------------------------------------------- /02 RBC with Money-Perturbation.py: -------------------------------------------------------------------------------- 1 | """ 2 | Solves the RBC model with money and without capital using Perturbation 3 | Extension of the McKay material with respect to plotting and statistics 4 | Reference: https://alisdairmckay.com/Notes/HetAgents/index.html 5 | """ 6 | 7 | 8 | import autograd.numpy as np 9 | from autograd import jacobian 10 | np.set_printoptions(suppress=True,precision=4) 11 | import matplotlib.pyplot as plt 12 | import warnings 13 | 14 | 15 | # Number of Variables 16 | nX = 9 17 | # Number of shocks 18 | nEps = 2 19 | # Indexing the variables 20 | iZ, iY, iC, iW, iI, iM, iPI, iL, iEPS = range(nX) 21 | 22 | 23 | # Parameters 24 | alpha = 0.4 25 | beta = 0.99 26 | gamma = 1 27 | # Disutility from labor 28 | psi = 2 29 | 30 | # Chi of 5 gives approximately 1/3 of hours work in equilibrium 31 | chi = 20 32 | 33 | # Utility from money 34 | phi = 2 35 | 36 | # Growth rate of money supply 37 | mu = 0.01 38 | 39 | # Autocorrelation of the shock to money supply and technology 40 | rho_m = 0.75 41 | rho_a = 0.98 42 | 43 | # Defining a function, which gives back the steady state 44 | def SteadyState(): 45 | Z = 1. 
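    # Steady state: inflation equals money growth (PI = mu); the Euler equation
    # gives the Fisher relation 1 + I = (1 + PI)/beta; labour follows from
    # combining W = chi*L^psi*C^gamma with the MPL and C = Y = Z*L^(1-alpha);
    # real balances M then come from inverting the money-demand condition
    # C^(-gamma)*I/(1+I) = M^(-phi).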
46 | PI = mu 47 | I = (1+PI)/beta - 1 48 | L = ((1-alpha)/chi*Z**(gamma-1))**(1/(alpha+gamma*(1-alpha)+psi)) 49 | W = (1-alpha)*Z*L**(-alpha) 50 | Y = Z*L**(1-alpha) 51 | C = Y 52 | M = (C**(-gamma)*I/(1+I))**(-1/phi) 53 | EPS = 1 54 | 55 | X = np.zeros(nX) 56 | X[[iZ, iY, iC, iW, iI, iM, iPI, iL, iEPS]] = (Z, Y, C, W, I, M, PI, L, EPS) 57 | return X 58 | 59 | 60 | # Get the steady state 61 | X_SS = SteadyState() 62 | X_EXP = np.array(("Prod.", "Output", "Consumption", "Wage", "Interest", "Real Money", "Inflation", "Labour", "Aux", )) 63 | epsilon_SS = np.zeros(nEps) 64 | print("Variables: {}".format(X_EXP)) 65 | print("Steady state: {}".format(X_SS)) 66 | 67 | 68 | # Model equations 69 | def F(X_Lag,X,X_Prime,epsilon): 70 | # Unpack 71 | Z, Y, C, W, I, M, PI, L, EPS = X 72 | Z_L, Y_L, C_L, W_L, I_L, M_L, PI_L, L_L, EPS_L = X_Lag 73 | Z_P, Y_P, C_P, W_P, I_P, M_P, PI_P, L_P, EPS_P = X_Prime 74 | epsilon_a, epsilon_m = epsilon 75 | return np.hstack(( 76 | rho_a*np.log(Z_L) + epsilon_a - np.log(Z), # TFP evolution 77 | Z*L**(1-alpha) - Y, # Production function 78 | Y - C, # Aggregate resource constraint 79 | (1-alpha)*Z*L**(-alpha) - W, # MPL 80 | C**(-gamma) - beta*C_P**(-gamma)*(1+I)/(1+PI_P), # Euler equation 81 | C**(-gamma)*I/(1+I) - (M)**(-phi), # Real Money demand 82 | np.log(EPS) - rho_m*np.log(EPS_L) - epsilon_m, # Auxilary equation for epsilon 83 | M - (1+mu)/(1+PI)*M_L*np.exp(np.log(EPS)), # Real Money supply development 84 | W - chi*L**psi*C**gamma, # Labour allocation 85 | )) 86 | 87 | 88 | # Check whether at the steady state F is zero 89 | assert(np.allclose( F(X_SS,X_SS,X_SS,epsilon_SS) , np.zeros(nX))) 90 | 91 | 92 | # Compute the numerical derivative 93 | A = jacobian(lambda x: F(X_SS,X_SS,x,epsilon_SS))(X_SS) 94 | B = jacobian(lambda x: F(X_SS,x,X_SS,epsilon_SS))(X_SS) 95 | C = jacobian(lambda x: F(x,X_SS,X_SS,epsilon_SS))(X_SS) 96 | E = jacobian(lambda x: F(X_SS,X_SS,X_SS,x))(epsilon_SS) 97 | 98 | 99 | # Function to solve the system based on McKays material 100 | def SolveSystem(A,B,C,E,P0=None): 101 | # Solve the system using linear time iteration as in Rendahl (2017) 102 | #print("Solving the system") 103 | MAXIT = 1000 104 | if P0 is None: 105 | P = np.zeros(A.shape) 106 | else: 107 | P = P0 108 | 109 | S = np.zeros(A.shape) 110 | 111 | for it in range(MAXIT): 112 | P = -np.linalg.lstsq(B+A@P,C,rcond=None)[0] 113 | S = -np.linalg.lstsq(B+C@S,A,rcond=None)[0] 114 | test = np.max(np.abs(C+B@P+A@P@P)) 115 | #if it % 20 == 0: 116 | #print(test) 117 | if test < 1e-10: 118 | break 119 | 120 | 121 | if it == MAXIT-1: 122 | warnings.warn('LTI did not converge.') 123 | 124 | 125 | # test Blanchard-Kahn conditions 126 | if np.max(np.linalg.eig(P)[0]) >1: 127 | raise RuntimeError("Model does not satisfy BK conditions -- non-existence") 128 | 129 | if np.max(np.linalg.eig(S)[0]) >1: 130 | raise RuntimeError("Model does not satisfy BK conditions -- mulitple stable solutions") 131 | 132 | # Impact matrix 133 | # Solution is x_{t}=P*x_{t-1}+Q*eps_t 134 | Q = -np.linalg.inv(B+A@P) @ E 135 | 136 | return P, Q 137 | 138 | 139 | # Using the function to solve the system 140 | P, Q = SolveSystem(A,B,C,E) 141 | 142 | # Calculate an impulse response for a real monetary shock 143 | T = 40 144 | IRF_RBC = np.zeros((nX,T)) 145 | # First shock is productivity and second shock is monetary 146 | shock = np.array((0,0.01)) 147 | IRF_RBC[:,0] = np.transpose(Q @ shock) 148 | 149 | # Impulse response functions for 100 periods 150 | for t in range(1,T): 151 | IRF_RBC[:,t] = P@IRF_RBC[:,t-1] 152 | 153 | 
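# Each column of IRF_RBC now equals P^t @ (Q @ shock): the deviation of the
# variables from their steady state t periods after the one-off monetary impulse.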
154 | # Normalizing with respect to the steady state 155 | for i in range(nX): 156 | IRF_RBC[i,:] = IRF_RBC[i,:] / X_SS[i] * 100 157 | # Normalizing the interest rate and inflation into percentage points difference 158 | IRF_RBC[4] = IRF_RBC[4] * X_SS[4] 159 | IRF_RBC[6] = IRF_RBC[6] * X_SS[6] 160 | 161 | # Drop all IRFs that are below e**(-15) 162 | criterion = ((np.abs(IRF_RBC) < 10**(-10))) 163 | IRF_RBC[criterion] = 0.0 164 | 165 | 166 | # List with the variable names 167 | names = ["Prod.", "Output", "Consumption", "Wage", "Interest", "Money", "Inflation", "Labour"] 168 | 169 | 170 | # Plotting the results of the IRF 171 | fig, axes = plt.subplots(nrows = 2, ncols = 4, figsize = (10,5)) 172 | for i in range(nX-1): 173 | row = i // 4 174 | col = i % 4 175 | axes[row, col].plot(IRF_RBC[i,:]) 176 | axes[row, col].plot(np.zeros(T)) 177 | axes[row, col].set_title(names[i]) 178 | fig.tight_layout() 179 | 180 | 181 | 182 | # Comparison of the volatility of real variables and the model variables 183 | sigma_a = np.sqrt(0.000049) 184 | sigma_m = np.sqrt(0.0001) 185 | T = 5000 186 | TT = 500 # Periods that are plotted in the end 187 | 188 | # Defining empty matrices for simulation and drawing shocks 189 | SIM_RBC = np.zeros((nX,T)) 190 | eps_a = np.random.normal(0,sigma_a,T) 191 | eps_m = np.random.normal(0,sigma_m,T) 192 | eps_t = np.array((eps_a, eps_m)) 193 | 194 | # Calculating the intercept for the simulation 195 | intercept = (np.eye(nX) - P)@X_SS 196 | 197 | # Initialize the variables at their steady state 198 | SIM_RBC[:,0] = X_SS 199 | for t in range(1,T): 200 | # Development of individual variables 201 | SIM_RBC[:,t] = intercept + P@SIM_RBC[:,t-1] + Q@eps_t[:,t] 202 | # Transition of shock in logs 203 | SIM_RBC[0,t] = np.exp(P[0,0]*np.log(SIM_RBC[0,t-1]) + eps_t[0,t]) 204 | SIM_RBC[8,t] = np.exp(P[8,8]*np.log(SIM_RBC[8,t-1]) + eps_t[1,t]) 205 | 206 | 207 | # Plotting the development 208 | fig, axes = plt.subplots(nrows = 2, ncols = 4, figsize = (10,5)) 209 | for i in range(nX-1): 210 | row = i // 4 211 | col = i % 4 212 | axes[row, col].plot(SIM_RBC[i,0:TT]) 213 | axes[row, col].plot(np.ones(TT)*X_SS[i]) 214 | axes[row, col].set_title(names[i]) 215 | fig.tight_layout() 216 | plt.show() 217 | 218 | -------------------------------------------------------------------------------- /02 RBC-Howard Improvement.py: -------------------------------------------------------------------------------- 1 | """ 2 | This solves the RBC model with endogeneous labor supply 3 | with the howard improvement algorithm as illustrated in 4 | Ljungqvist and Sargent (2012), p. 
106 5 | """ 6 | 7 | import time 8 | import numpy as np 9 | import matplotlib.pyplot as plt 10 | import quantecon as qe 11 | import scipy.optimize as opt 12 | from numba import jit 13 | 14 | # Supress warning 15 | import warnings 16 | warnings.filterwarnings("ignore") 17 | 18 | 19 | #parameters 20 | theta = 0.4; 21 | delta = 0.019; 22 | sigma = 2; 23 | vega = 0.36; 24 | beta = 0.99; 25 | nz = np.int(21); 26 | rho = 0.95; 27 | stdz = np.sqrt(0.000049); 28 | m = 3; 29 | sims = 10; 30 | nk = np.int(250); 31 | 32 | #discretizing the grid 33 | mc = qe.markov.approximation.tauchen(rho,stdz,0,m,nz) 34 | P = mc.P 35 | zs = np.exp(mc.state_values) 36 | #zs = mc.state_values+1 # As alternative symetric shocks 37 | 38 | #steady state quantities 39 | kl_ss = ((beta*theta)/(1-beta*(1-delta)))**(1/(1-theta)) 40 | yl_ss = kl_ss**theta 41 | cl_ss = yl_ss - delta*kl_ss 42 | ll_ss = (1-vega)/vega*cl_ss/(1-theta)*kl_ss**(-theta) 43 | l_ss = 1/(1+ll_ss) 44 | k_ss = kl_ss*l_ss 45 | y_ss = k_ss**theta 46 | c_ss = y_ss - delta*k_ss 47 | 48 | 49 | #discretizing the k grid 50 | kmin = 0.8*k_ss 51 | kmax = 1.2*k_ss 52 | k = np.linspace(kmin,kmax,nk) 53 | 54 | 55 | #create value and policy function objects 56 | g = np.ones((nz,nk))*c_ss 57 | h = np.ones((nz,nk))*l_ss 58 | newg = np.copy(g) 59 | newh = np.copy(h) 60 | V = np.copy(g) 61 | newV = np.copy(V) 62 | indk = np.copy(V) 63 | 64 | 65 | starts = time.time() 66 | # Setting up the matrixes as a function of future k 67 | consumption = np.zeros((nz,nk,nk)) 68 | labor = np.copy(consumption) 69 | for iz in range(nz): 70 | for ik in range(nk): 71 | for jk in range(nk): 72 | res = lambda l: zs[iz]*k[ik]**theta*l**(1-theta) + (1-delta)*k[ik] - k[jk] - vega/(1-vega)*(1-l)*(1-theta)*zs[iz]*(k[ik]/l)**theta 73 | labor[iz,ik,jk] = opt.fsolve(res, l_ss) 74 | consumption[iz,ik,jk] = vega/(1-vega)*(1-labor[iz,ik,jk])*(1-theta)*(k[ik]/labor[iz,ik,jk])**(theta) 75 | stops = time.time() 76 | print("Interpolation completed after %F seconds." %(stops - starts)) 77 | 78 | 79 | # Function to search nearest value on the grid 80 | @jit 81 | def find_nearest(array, value): 82 | array = np.asarray(array) 83 | idx = (np.abs(array - value)).argmin() 84 | return array[idx] 85 | 86 | 87 | # Finding the indicator for the value function iteration 88 | for ik in range(nk): 89 | for iz in range(nz): 90 | nearestk = find_nearest(k, zs[iz]*k[ik]**theta*h[iz,ik]**(1-theta) + (1-delta)*k[ik] - g[iz,ik]) 91 | indk[iz,ik] = np.where(k == nearestk)[0] 92 | 93 | 94 | #tolerance levels 95 | tolv = 10**-8 96 | maxiter = 1000 97 | 98 | 99 | # Howard improvement algorithm 100 | @jit 101 | def policy(g = g, h = h, newg = newg, newh = newh, V = V, newV = newV, indk = indk, sigma = sigma, vega = vega, beta = beta, nk = nk, nz = nz, k = k, zs = zs, P = P): 102 | iter = 0 103 | iter1 = iter 104 | diffg = 10 105 | start = time.time() 106 | 107 | while (diffg > tolv and iter tolv and iter11] = -1000000 133 | v = u + beta*(np.dot(P[iz,:], V)) 134 | ind = np.argmax(v) 135 | indk[iz,ik] = ind 136 | newg[iz,ik] = c[ind] 137 | newh[iz,ik] = l[ind] 138 | diffg = max(np.linalg.norm(newg-g),np.linalg.norm(newh-h)) 139 | g = np.copy(newg) 140 | h = np.copy(newh) 141 | print(iter, diffg) 142 | stop = time.time() 143 | print("\nPolicy function iteration converged after %.0F iterations and %.5F seconds" % (iter, (stop-start))) 144 | return g, h, V 145 | 146 | 147 | # Running the function 148 | g, h, V = policy() 149 | print("\nThe population of the matrix took %F seconds." 
% (stops-starts)) 150 | 151 | 152 | # Transforming the policy function to be for capital 153 | kbar, zbar = np.meshgrid(k,zs) 154 | g = zbar*kbar**theta*h**(1-theta) + (1-delta)*kbar - g 155 | 156 | 157 | # Plotting the Value function, and the policy function 158 | fig, axes = plt.subplots(nrows = 2, ncols = 1, figsize=(10,5)) 159 | axes[0].plot(k,V.transpose()) 160 | axes[0].set_title("Value functions") 161 | 162 | axes[1].plot(k,g.transpose()) 163 | axes[1].plot(k,k) 164 | axes[1].set_title('Policy functions') 165 | #plt.show() 166 | #plt.savefig("convergence.png") 167 | 168 | 169 | # Simulate the economy 170 | T = 5000 171 | 172 | 173 | # Setup the arrays 174 | A = mc.simulate(T, init = mc.state_values[int((nz-1)/2)]) 175 | Aind = mc.get_index(A) 176 | A = np.exp(A) 177 | K = np.zeros(T) 178 | Kind = np.copy(K) 179 | Kind[0] = nk/2 180 | K[0] = k[int(Kind[0])] 181 | 182 | 183 | # Simulating the economy period by period 184 | for t in range(1,T): 185 | K[t] = g[int(Aind[t-1]),int(Kind[t-1])] 186 | Kind[t] = np.where(find_nearest(k,K[t]) == k)[0] 187 | 188 | 189 | lab = h[np.int64(Aind),np.int64(Kind)] 190 | out = A*K**theta*lab**(1-theta) 191 | cons = out - g[np.int64(Aind),np.int64(Kind)] + (1-delta)*K 192 | inv = out - cons 193 | 194 | 195 | # Plot the development of the economy 196 | t = range(T) 197 | fig, axes = plt.subplots(nrows = 2, ncols = 1, figsize=(10,5)) 198 | 199 | axes[0].plot(t, K) 200 | axes[0].set_title("Trajectory of capital") 201 | axes[0].set_xlabel("Period") 202 | axes[0].set_ylabel("Capital") 203 | 204 | axes[1].plot(t, out, label = "Output") 205 | axes[1].plot(t, cons, label = "Consumption") 206 | axes[1].plot(t, inv, label = "Investment") 207 | axes[1].set_title("GDP components") 208 | axes[1].set_xlabel("Period") 209 | axes[1].set_ylabel("GDP components") 210 | axes[1].legend(loc=5) 211 | plt.show() 212 | #plt.savefig("simulation.png") 213 | 214 | 215 | print("\nThe stochastic steady state is %F, with the true being %F" % (np.mean(K), k_ss)) 216 | print("\nThe volatility of output, consumption and investment are %F, %F, and %F." % (np.std(out)*100/np.mean(out),np.std(cons)*100/np.mean(cons), np.std(inv)*100/np.mean(inv))) 217 | print("\nThe mean of consumption, investment, capital, and labor in relation to output are %F, %F, %F, and %F." % (np.mean(cons*100/out), np.mean(inv*100/out), np.mean(K*100/(4*out)), np.mean(lab*100))) 218 | print("\nThe CV of consumption, investment and labor in relation to the CV of output are %F, %F, and %F." % ((np.std(cons)*100/np.mean(cons))/(np.std(out)*100/np.mean(out)),(np.std(inv)*100/np.mean(inv))/(np.std(out)*100/np.mean(out)),(np.std(lab)*100/np.mean(lab))/(np.std(out)*100/np.mean(out)))) 219 | print("\nThe correlation of consumption, investment and labor with output are %F, %F, and %F." 
%(np.corrcoef(out,cons)[0,1], np.corrcoef(out,inv)[0,1], np.corrcoef(out, lab)[0,1])) -------------------------------------------------------------------------------- /02 RBC-Perturbation - Fiscal Monetary Interaction.py: -------------------------------------------------------------------------------- 1 | """ 2 | Solves the classic RBC model using Perturbation 3 | Extension of the McKay material with respect to plotting and statistics 4 | Reference: https://alisdairmckay.com/Notes/HetAgents/index.html 5 | """ 6 | 7 | 8 | import autograd.numpy as np 9 | from autograd import jacobian 10 | np.set_printoptions(suppress=True,precision=4) 11 | import matplotlib.pyplot as plt 12 | import warnings 13 | 14 | 15 | # Number of Variables 16 | nX = 4 17 | # Number of shocks 18 | nEps = 2 19 | # Indexing the variables 20 | ib, iR, iPi, iS = range(nX) 21 | 22 | 23 | # Parameters 24 | alpha = 0.0 25 | beta = 0.99 26 | gamma = 0.0 27 | 28 | 29 | # Defining a function, which gives back the steady state 30 | def SteadyState(): 31 | B = 10 32 | R = 1 / beta 33 | Pi = 1 34 | S = B * (R / Pi - 1) 35 | 36 | X = np.zeros(nX) 37 | X[[ib, iR, iPi, iS]] = (B, R, Pi, S) 38 | return X 39 | 40 | 41 | # Get the steady state 42 | X_SS = SteadyState() 43 | X_EXP = np.array(("Bonds", "Interest", "Inflation", "Surplus", )) 44 | epsilon_SS = np.zeros(nEps) 45 | print("Variables: {}".format(X_EXP)) 46 | print("Steady state: {}".format(X_SS)) 47 | 48 | 49 | # Model equations 50 | def F(X_Lag,X,X_Prime,epsilon): 51 | 52 | # Unpack 53 | B, R, Pi, S = X 54 | B_L, R_L, Pi_L, S_L = X_Lag 55 | B_P, R_P, Pi_P, S_P = X_Prime 56 | return np.hstack(( 57 | 1 / beta - R / Pi_P, # Euler equation 58 | B + S - B_L * R / Pi, # Government BC 59 | S - X_SS[iS] - gamma * (B_L / R_L - X_SS[ib] / X_SS[iR]) - epsilon[0], # Behavior Government 60 | R - X_SS[iR] - alpha * beta * (Pi - X_SS[iPi]) - epsilon[1], # Taylor rule 61 | )) 62 | 63 | 64 | # Check whether at the steady state F is zero 65 | assert(np.allclose( F(X_SS,X_SS,X_SS,epsilon_SS) , np.zeros(nX))) 66 | 67 | 68 | # Compute the numerical derivative 69 | A = jacobian(lambda x: F(X_SS,X_SS,x,epsilon_SS))(X_SS) 70 | B = jacobian(lambda x: F(X_SS,x,X_SS,epsilon_SS))(X_SS) 71 | C = jacobian(lambda x: F(x,X_SS,X_SS,epsilon_SS))(X_SS) 72 | E = jacobian(lambda x: F(X_SS,X_SS,X_SS,x))(epsilon_SS) 73 | 74 | 75 | # Function to solve the system based on McKays material 76 | def SolveSystem(A,B,C,E,P0=None): 77 | # Solve the system using linear time iteration as in Rendahl (2017) 78 | #print("Solving the system") 79 | MAXIT = 1000 80 | if P0 is None: 81 | P = np.zeros(A.shape) 82 | else: 83 | P = P0 84 | 85 | S = np.zeros(A.shape) 86 | 87 | for it in range(MAXIT): 88 | P = -np.linalg.lstsq(B+A@P,C,rcond=None)[0] 89 | S = -np.linalg.lstsq(B+C@S,A,rcond=None)[0] 90 | test = np.max(np.abs(C+B@P+A@P@P)) 91 | #if it % 20 == 0: 92 | #print(test) 93 | if test < 1e-10: 94 | break 95 | 96 | 97 | if it == MAXIT-1: 98 | warnings.warn('LTI did not converge.') 99 | 100 | 101 | # test Blanchard-Kahn conditions 102 | if np.max(np.linalg.eig(P)[0]) >1: 103 | raise RuntimeError("Model does not satisfy BK conditions -- non-existence") 104 | 105 | if np.max(np.linalg.eig(S)[0]) >1: 106 | raise RuntimeError("Model does not satisfy BK conditions -- mulitple stable solutions") 107 | 108 | # Impact matrix 109 | # Solution is x_{t}=P*x_{t-1}+Q*eps_t 110 | Q = -np.linalg.inv(B+A@P) @ E 111 | 112 | return P, Q 113 | 114 | 115 | # Using the function to solve the system 116 | P, Q = SolveSystem(A,B,C,E) 117 | 118 | 119 | # 
Calculate an impulse response 120 | T = 20 121 | IRF_RBC = np.zeros((nX,T)) 122 | IRF_RBC[:,0] = Q[:, 1] * 0.01 123 | 124 | 125 | # Impulse response functions for T periods 126 | for t in range(1,T): 127 | IRF_RBC[:,t] = P@IRF_RBC[:,t-1] 128 | 129 | # Drop all IRFs that are below e**(-15) 130 | criterion = ((np.abs(IRF_RBC) < 10**(-10))) 131 | IRF_RBC[criterion] = 0.0 132 | 133 | 134 | ## Normalizing with respect to the steady state 135 | #for i in range(nX): 136 | # IRF_RBC[i,:] = IRF_RBC[i,:] / X_SS[i] * 100 137 | 138 | 139 | # List with the variable names 140 | names = ["Bonds", "Interest", "Inflation", "Surplus",] 141 | 142 | 143 | # Plotting the results of the IRF 144 | fig, axes = plt.subplots(nrows = 2, ncols = 2, figsize = (10,5)) 145 | for i in range(nX): 146 | row = i // 2 147 | col = i % 2 148 | axes[row, col].plot(IRF_RBC[i,:]) 149 | axes[row, col].plot(np.zeros(T)) 150 | axes[row, col].set_title(names[i]) 151 | fig.tight_layout() 152 | plt.show() 153 | 154 | 155 | ## Comparison of the volatility of real variables and the model variables 156 | #sigma = np.sqrt(0.000049) 157 | #T = 5000 158 | #TT = 500 # Periods that are plotted in the end 159 | ## Defining empty matrices for simulation and drawing shocks 160 | #SIM_RBC = np.zeros((nX,T)) 161 | #eps_t = np.random.normal(0,sigma,T) 162 | ## Calculating the intercept for the simulation 163 | #intercept = (np.eye(nX) - P)@X_SS 164 | ## Initialize the variables at their steady state 165 | #SIM_RBC[:,0] = X_SS 166 | #for t in range(1,T): 167 | # # Development of individual variables 168 | # SIM_RBC[:,t] = intercept + P@SIM_RBC[:,t-1] + eps_t[t]*Q 169 | # # Transition of shock in logs 170 | # SIM_RBC[0,t] = np.exp(P[0,0]*np.log(SIM_RBC[0,t-1]) + Q[0] * eps_t[t]) 171 | # 172 | # 173 | ## Plotting the development 174 | #fig, axes = plt.subplots(nrows = 2, ncols = 4, figsize = (18,9)) 175 | #for i in range(nX): 176 | # row = i // 4 177 | # col = i % 4 178 | # axes[row, col].plot(SIM_RBC[i,0:TT]) 179 | # axes[row, col].plot(np.ones(TT)*X_SS[i]) 180 | # axes[row, col].set_title(names[i]) 181 | #fig.tight_layout() 182 | #plt.show() 183 | # 184 | # 185 | ## Quickly renaming for easier reference 186 | #Y = SIM_RBC 187 | # 188 | # 189 | ## Print the results of the simulation 190 | #print("\nThe stochastic steady state is %F, with the true being %F" % (np.mean(Y[iK,:]), X_SS[iK])) 191 | #print("The volatility of output, consumption and investment are %F, %F, and %F." % (np.std(Y[iY])*100/np.mean(Y[iY]),np.std(Y[iC])*100/np.mean(Y[iC]), np.std(Y[iI])*100/np.mean(Y[iI]))) 192 | #print("The mean of consumption, investment, capital, and labor in relation to output are %F, %F, %F, and %F." % (np.mean(Y[iC]*100/Y[iY]), np.mean(Y[iI]*100/Y[iY]), np.mean(Y[iK]*100/(4*Y[iY])), np.mean(Y[iL]*100))) 193 | #print("The CV of consumption, investment and labor in relation to the CV of output are %F, %F, and %F." % ((np.std(Y[iC])*100/np.mean(Y[iC]))/(np.std(Y[iY])*100/np.mean(Y[iY])),(np.std(Y[iI])*100/np.mean(Y[iI]))/(np.std(Y[iY])*100/np.mean(Y[iY])),(np.std(Y[iL])*100/np.mean(Y[iL]))/(np.std(Y[iY])*100/np.mean(Y[iY])))) 194 | #print("The correlation of consumption, investment and labor with output are %F, %F, and %F." 
%(np.corrcoef(Y[iY],Y[iC])[0,1], np.corrcoef(Y[iY],Y[iI])[0,1], np.corrcoef(Y[iY], Y[iL])[0,1])) 195 | # 196 | # -------------------------------------------------------------------------------- /02 RBC-Perturbation Government.py: -------------------------------------------------------------------------------- 1 | """ 2 | Solves the classic RBC model using Perturbation 3 | Extension of the McKay material with respect to plotting and statistics 4 | Reference: https://alisdairmckay.com/Notes/HetAgents/index.html 5 | """ 6 | 7 | 8 | import autograd.numpy as np 9 | from autograd import jacobian 10 | np.set_printoptions(suppress=True,precision=4) 11 | import matplotlib.pyplot as plt 12 | import warnings 13 | 14 | 15 | 16 | # Number of Variables 17 | nX = 9 18 | # Number of shocks 19 | nEps = 1 20 | # Indexing the variables 21 | iZ, iY, iC, iI, iG, iR, iK, iW, iL = range(nX) 22 | 23 | 24 | # Parameters 25 | alpha = 0.4 26 | beta = 0.99 27 | gamma = 2 28 | vega = 0.36 29 | delta = 0.019 30 | rho = 0.95 31 | rho_g = 0.9 32 | omega = 0.2 33 | sigma_z = np.sqrt(0.000049) 34 | sigma_g = np.sqrt(0.000009) 35 | 36 | # Defining a function, which gives back the steady state 37 | def SteadyState(): 38 | Z = 1. 39 | R = 1/beta 40 | W = (1-alpha)*((alpha*Z)/(R-(1-delta)))**(alpha/(1-alpha)) 41 | KL = ((R-1+delta)/alpha)**(1./(alpha-1)) 42 | YL = KL**alpha 43 | CL = (1-omega)*YL - delta*KL 44 | Ll = (1-vega)/vega*CL/(1-alpha)*KL**(-alpha) 45 | L = 1/(1+Ll) 46 | K = KL*L 47 | Y = YL*L 48 | G = 0.2*Y 49 | C = CL*L 50 | I = Y - C - G 51 | 52 | X = np.zeros(nX) 53 | X[[iZ, iY, iC, iI, iG, iR, iK, iW, iL]] = (Z, Y, C, I, G, R, K, W, L) 54 | return X 55 | 56 | 57 | # Get the steady state 58 | X_SS = SteadyState() 59 | X_EXP = np.array(("Prod.", "Output", "Consumption", "Investment", "Government Exp.", "Interest", "Capital", "Wage", "Labour", )) 60 | epsilon_SS = np.zeros(2) 61 | print("Variables: {}".format(X_EXP)) 62 | print("Steady state: {}".format(X_SS)) 63 | 64 | 65 | # Model equations 66 | def F(X_Lag,X,X_Prime,epsilon,X_SS): 67 | 68 | # Unpack 69 | epsilon_z, epsilon_g = epsilon 70 | Z_SS, Y_SS, C_SS, I_SS, G_SS, R_SS, K_SS, W_SS, L_SS = X_SS 71 | Z, Y, C, I, G, R, K, W, L = X 72 | Z_L, Y_L, C_L, I_L, G_L, R_L, K_L, W_L, L_L = X_Lag 73 | Z_P, Y_P, C_P, I_P, G_P, R_P, K_P, W_P, L_P = X_Prime 74 | return np.hstack(( 75 | beta * R_P * vega/C_P*(C_P**vega*(1-L_P)**(1-vega))**(1-gamma) / 76 | (vega/C*(C**vega*(1-L)**(1-vega))**(1-gamma)) - 1.0, # Euler equation 77 | alpha * Z * (K_L/L) **(alpha-1) + 1 -delta - R, # MPK 78 | (1-alpha)*Z*(K_L/L)**(alpha) - W, # MPL 79 | C/(1-L) - vega/(1-vega)*(1-alpha)*Z*(K_L/L)**alpha, # Labour allocation 80 | Y - C - G - I, # Aggregate resource constraint 81 | Z*K_L**alpha * (L)**(1-alpha) - Y, # Production function 82 | (1-delta) * K_L + I - K, # Investment 83 | rho * np.log(Z_L) + epsilon_z - np.log(Z), # TFP evolution 84 | rho_g * np.log(G_L) + (1-rho_g)*np.log(omega*Y_SS) + epsilon_g - np.log(G) # Law of motion for G 85 | )) 86 | 87 | 88 | # Check whether at the steady state F is zero 89 | print(F(X_SS,X_SS,X_SS,epsilon_SS, X_SS)) 90 | assert(np.allclose( F(X_SS,X_SS,X_SS,epsilon_SS, X_SS) , np.zeros(nX))) 91 | 92 | 93 | # Compute the numerical derivative 94 | A = jacobian(lambda x: F(X_SS,X_SS,x,epsilon_SS, X_SS))(X_SS) 95 | B = jacobian(lambda x: F(X_SS,x,X_SS,epsilon_SS, X_SS))(X_SS) 96 | C = jacobian(lambda x: F(x,X_SS,X_SS,epsilon_SS, X_SS))(X_SS) 97 | E = jacobian(lambda x: F(X_SS,X_SS,X_SS,x, X_SS))(epsilon_SS) 98 | 99 | 100 | # Function to solve the system based on 
McKays material 101 | def SolveSystem(A,B,C,E,P0=None): 102 | # Solve the system using linear time iteration as in Rendahl (2017) 103 | #print("Solving the system") 104 | MAXIT = 1000 105 | if P0 is None: 106 | P = np.zeros(A.shape) 107 | else: 108 | P = P0 109 | 110 | S = np.zeros(A.shape) 111 | 112 | for it in range(MAXIT): 113 | P = -np.linalg.lstsq(B+A@P,C,rcond=None)[0] 114 | S = -np.linalg.lstsq(B+C@S,A,rcond=None)[0] 115 | test = np.max(np.abs(C+B@P+A@P@P)) 116 | #if it % 20 == 0: 117 | #print(test) 118 | if test < 1e-10: 119 | break 120 | 121 | 122 | if it == MAXIT-1: 123 | warnings.warn('LTI did not converge.') 124 | 125 | 126 | # test Blanchard-Kahn conditions 127 | if np.max(np.linalg.eig(P)[0]) >1: 128 | raise RuntimeError("Model does not satisfy BK conditions -- non-existence") 129 | 130 | if np.max(np.linalg.eig(S)[0]) >1: 131 | raise RuntimeError("Model does not satisfy BK conditions -- mulitple stable solutions") 132 | 133 | # Impact matrix 134 | # Solution is x_{t}=P*x_{t-1}+Q*eps_t 135 | Q = -np.linalg.inv(B+A@P) @ E 136 | 137 | return P, Q 138 | 139 | 140 | # Using the function to solve the system 141 | P, Q = SolveSystem(A,B,C,E) 142 | 143 | 144 | # Calculate an impulse response 145 | T = 200 146 | IRF_RBC = np.zeros((nX,T)) 147 | IRF_RBC[:,0] = np.dot(Q, np.array((0,0.01))) 148 | 149 | 150 | # Impulse response functions for 100 periods 151 | for t in range(1,T): 152 | IRF_RBC[:,t] = P@IRF_RBC[:,t-1] 153 | 154 | 155 | # Normalizing with respect to the steady state 156 | for i in range(nX): 157 | IRF_RBC[i,:] = IRF_RBC[i,:] / X_SS[i] * 100 158 | # Normalizing the interest rate into percentage points difference 159 | IRF_RBC[1] = IRF_RBC[1] * X_SS[1] 160 | 161 | 162 | # List with the variable names 163 | names = ["TFP", "Output", "Consumption", "Investment", "Government Exp.", "Interest", "Capital", "Wage", "Labour"] 164 | 165 | 166 | # Plotting the results of the IRF 167 | fig, axes = plt.subplots(nrows = 3, ncols = 3, figsize = (10,5)) 168 | for i in range(nX): 169 | row = i // 3 170 | col = i % 3 171 | axes[row, col].plot(IRF_RBC[i,:]) 172 | axes[row, col].plot(np.zeros(T)) 173 | axes[row, col].set_title(names[i]) 174 | fig.tight_layout() 175 | #plt.show() 176 | 177 | 178 | # Comparison of the volatility of real variables and the model variables 179 | T = 50000 180 | TT = 500 # Periods that are plotted in the end 181 | # Defining empty matrices for simulation and drawing shocks 182 | SIM_RBC = np.zeros((nX,T)) 183 | mean = np.array([0,0]) 184 | cov = np.array([[sigma_z,-0.0005],[-0.0005, sigma_g]]) 185 | eps_t = np.random.multivariate_normal(mean,cov,T) 186 | 187 | # Calculating the intercept for the simulation 188 | intercept = (np.eye(nX) - P)@X_SS 189 | # Initialize the variables at their steady state 190 | SIM_RBC[:,0] = X_SS 191 | for t in range(1,T): 192 | # Development of individual variables 193 | SIM_RBC[:,t] = intercept + P@SIM_RBC[:,t-1] + Q@eps_t[t] 194 | # Transition of shock in logs, first is TFP, second is Gov. Exp. 
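    # (Editorial note) The two overwrites below handle the exogenous processes in logs:
    # TFP (row 0, iZ) and government spending (row 4, iG) follow AR(1) processes in logs,
    # so their levels are updated as exp(own persistence * log(previous level) + shock),
    # with G additionally receiving its intercept (1 - rho_g) * log(omega * Y_SS).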
195 | SIM_RBC[0,t] = np.exp(P[0,0]*np.log(SIM_RBC[0,t-1]) + Q[0]@eps_t[t,:]) 196 | SIM_RBC[4,t] = np.exp(P[4,4]*np.log(SIM_RBC[4,t-1]) + (1-rho_g)*np.log(omega*X_SS[iY]) + Q[4]@eps_t[t,:]) 197 | 198 | 199 | # Plotting the development 200 | fig, axes = plt.subplots(nrows = 3, ncols = 3, figsize = (10,5)) 201 | for i in range(nX): 202 | row = i // 3 203 | col = i % 3 204 | axes[row, col].plot(SIM_RBC[i,0:TT]) 205 | axes[row, col].plot(np.ones(TT)*X_SS[i]) 206 | axes[row, col].set_title(names[i]) 207 | fig.tight_layout() 208 | plt.show() 209 | 210 | 211 | # Quickly renaming for easier reference 212 | Y = SIM_RBC 213 | 214 | 215 | # Print the results of the simulation 216 | print("\nThe stochastic steady state is %F, with the true being %F" % (np.mean(Y[iK,:]), X_SS[iK])) 217 | print("The volatility of output, consumption and investment are %F, %F, and %F." % (np.std(Y[iY])*100/np.mean(Y[iY]),np.std(Y[iC])*100/np.mean(Y[iC]), np.std(Y[iI])*100/np.mean(Y[iI]))) 218 | print("The mean of consumption, investment, capital, and labor in relation to output are %F, %F, %F, and %F." % (np.mean(Y[iC]*100/Y[iY]), np.mean(Y[iI]*100/Y[iY]), np.mean(Y[iK]*100/(4*Y[iY])), np.mean(Y[iL]*100))) 219 | print("The CV of consumption, investment and labor in relation to the CV of output are %F, %F, and %F." % ((np.std(Y[iC])*100/np.mean(Y[iC]))/(np.std(Y[iY])*100/np.mean(Y[iY])),(np.std(Y[iI])*100/np.mean(Y[iI]))/(np.std(Y[iY])*100/np.mean(Y[iY])),(np.std(Y[iL])*100/np.mean(Y[iL]))/(np.std(Y[iY])*100/np.mean(Y[iY])))) 220 | print("The correlation of consumption, investment and labor with output are %F, %F, and %F." %(np.corrcoef(Y[iY],Y[iC])[0,1], np.corrcoef(Y[iY],Y[iI])[0,1], np.corrcoef(Y[iY], Y[iL])[0,1])) 221 | 222 | -------------------------------------------------------------------------------- /02 RBC-Perturbation Seperable Utility.py: -------------------------------------------------------------------------------- 1 | """ 2 | Solves the classic RBC model using Perturbation 3 | Extension of the McKay material with respect to plotting and statistics 4 | Reference: https://alisdairmckay.com/Notes/HetAgents/index.html 5 | """ 6 | 7 | 8 | import autograd.numpy as np 9 | from autograd import jacobian 10 | np.set_printoptions(suppress=True,precision=4) 11 | import matplotlib.pyplot as plt 12 | import warnings 13 | 14 | 15 | # Number of Variables 16 | nX = 8 17 | # Number of shocks 18 | nEps = 1 19 | # Indexing the variables 20 | iZ, iY, iC, iI, iR, iK, iW, iL = range(nX) 21 | 22 | 23 | # Parameters, calibrated to quarterly frequency 24 | alpha = 1/3 25 | beta = 0.99 26 | gamma = 1 27 | psi = 1 28 | delta = 0.025 29 | rho = 0.98 30 | sigmaz = 0.007 31 | 32 | # Calibration of chi 33 | Z = 1. 34 | R = 1/beta 35 | W = (1-alpha)*((alpha*Z)/(R-(1-delta)))**(alpha/(1-alpha)) 36 | KL = ((R-1+delta)/alpha)**(1./(alpha-1)) 37 | YL = KL**alpha 38 | CH = YL - delta*KL 39 | C = CH*1/3 40 | chi = W/((1/3)**psi*C) 41 | 42 | 43 | # Defining a function, which gives back the steady state 44 | def SteadyState(): 45 | Z = 1. 
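    # (Editorial note) The steady state is available in closed form: the Euler equation
    # pins down R = 1/beta, the MPK condition gives the capital-labour ratio KL, and YL
    # and CL follow; the intratemporal condition with the chi calibrated above (which
    # targets L = 1/3) then determines L and hence the levels K, Y, C and I.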
46 | R = 1/beta 47 | W = (1-alpha)*((alpha*Z)/(R-(1-delta)))**(alpha/(1-alpha)) 48 | KL = ((R-1+delta)/alpha)**(1./(alpha-1)) 49 | YL = KL**alpha 50 | CL = YL - delta*KL 51 | YC = YL/CL 52 | L = ((1-alpha)*YC/chi)**(1/(1+psi)) 53 | K = KL*L 54 | Y = YL*L 55 | C = CL*L 56 | I = Y - C 57 | 58 | X = np.zeros(nX) 59 | X[[iZ, iY, iC, iI, iR, iK, iW, iL]] = (Z, Y, C, I, R, K, W, L) 60 | return X 61 | 62 | 63 | # Get the steady state 64 | X_SS = SteadyState() 65 | X_EXP = np.array(("Prod.", "Output", "Consumption", "Investment", "Interest", "Capital", "Wage", "Labour", )) 66 | epsilon_SS = 0.0 67 | print("Variables: {}".format(X_EXP)) 68 | print("Steady state: {}".format(X_SS)) 69 | 70 | 71 | # Model equations 72 | def F(X_Lag,X,X_Prime,epsilon): 73 | 74 | # Unpack 75 | Z, Y, C, I, R, K, W, L = X 76 | Z_L, Y_L, C_L, I_L, R_L, K_L, W_L, L_L = X_Lag 77 | Z_P, Y_P, C_P, I_P, R_P, K_P, W_P, L_P = X_Prime 78 | return np.hstack(( 79 | beta * R_P * (C/C_P) - 1.0, # Euler equation 80 | alpha * Z * (K_L/L) **(alpha-1) + 1 - delta - R, # MPK 81 | (1-alpha)*Z*(K_L/L)**(alpha) - W, # MPL 82 | W - chi*L**psi*C, # Labour allocation 83 | (1-delta) * K_L + Y - C - K, # Aggregate resource constraint 84 | Z*K_L**alpha * (L)**(1-alpha) - Y, # Production function 85 | (1-delta) * K_L + I - K, # Investment 86 | rho * np.log(Z_L) + epsilon - np.log(Z) # TFP evolution 87 | )) 88 | 89 | 90 | # Check whether at the steady state F is zero 91 | assert(np.allclose( F(X_SS,X_SS,X_SS,epsilon_SS) , np.zeros(nX))) 92 | 93 | 94 | # Compute the numerical derivative 95 | A = jacobian(lambda x: F(X_SS,X_SS,x,epsilon_SS))(X_SS) 96 | B = jacobian(lambda x: F(X_SS,x,X_SS,epsilon_SS))(X_SS) 97 | C = jacobian(lambda x: F(x,X_SS,X_SS,epsilon_SS))(X_SS) 98 | E = jacobian(lambda x: F(X_SS,X_SS,X_SS,x))(epsilon_SS) 99 | 100 | 101 | # Function to solve the system based on McKays material 102 | def SolveSystem(A,B,C,E,P0=None): 103 | # Solve the system using linear time iteration as in Rendahl (2017) 104 | #print("Solving the system") 105 | MAXIT = 1000 106 | if P0 is None: 107 | P = np.zeros(A.shape) 108 | else: 109 | P = P0 110 | 111 | S = np.zeros(A.shape) 112 | 113 | for it in range(MAXIT): 114 | P = -np.linalg.lstsq(B+A@P,C,rcond=None)[0] 115 | S = -np.linalg.lstsq(B+C@S,A,rcond=None)[0] 116 | test = np.max(np.abs(C+B@P+A@P@P)) 117 | #if it % 20 == 0: 118 | #print(test) 119 | if test < 1e-10: 120 | break 121 | 122 | 123 | if it == MAXIT-1: 124 | warnings.warn('LTI did not converge.') 125 | print(test) 126 | 127 | 128 | # test Blanchard-Kahn conditions 129 | if np.max(np.linalg.eig(P)[0]) >1: 130 | raise RuntimeError("Model does not satisfy BK conditions -- non-existence") 131 | 132 | if np.max(np.linalg.eig(S)[0]) >1: 133 | raise RuntimeError("Model does not satisfy BK conditions -- mulitple stable solutions") 134 | 135 | # Impact matrix 136 | # Solution is x_{t}=P*x_{t-1}+Q*eps_t 137 | Q = -np.linalg.inv(B+A@P) @ E 138 | 139 | return P, Q 140 | 141 | 142 | # Using the function to solve the system 143 | P, Q = SolveSystem(A,B,C,E) 144 | 145 | 146 | # Calculate an impulse response to a 1% (=0.01) shock to tfp 147 | T = 40 148 | IRF_RBC = np.zeros((nX,T)) 149 | IRF_RBC[:,0] = Q * 0.01 150 | 151 | 152 | # Impulse response functions for 100 periods 153 | for t in range(1,T): 154 | IRF_RBC[:,t] = P@IRF_RBC[:,t-1] 155 | 156 | 157 | # Normalizing with respect to the steady state 158 | for i in range(nX): 159 | IRF_RBC[i,:] = IRF_RBC[i,:] / X_SS[i] * 100 160 | # Normalizing the interest rate into percentage points difference 161 | IRF_RBC[1] = 
IRF_RBC[1] * X_SS[1] 162 | 163 | # Drop all IRFs that are below e**(-15) 164 | criterion = ((np.abs(IRF_RBC) < 10**(-10))) 165 | IRF_RBC[criterion] = 0.0 166 | 167 | # List with the variable names 168 | names = ["TFP", "Output", "Consumption", "Investment", "Interest", "Capital", "Wage", "Labour"] 169 | 170 | 171 | # Plotting the results of the IRF 172 | fig, axes = plt.subplots(nrows = 2, ncols = 4, figsize = (10,5)) 173 | for i in range(nX): 174 | row = i // 4 175 | col = i % 4 176 | axes[row, col].plot(IRF_RBC[i,:]) 177 | axes[row, col].plot(np.zeros(T)) 178 | axes[row, col].set_title(names[i]) 179 | fig.tight_layout() 180 | 181 | 182 | # Comparison of the volatility of real variables and the model variables 183 | sigma = sigmaz 184 | T = 10000 185 | 186 | # Periods that are plotted in the end (equal to 25 years) 187 | TT = 1000 188 | 189 | # Defining empty matrices for simulation and drawing shocks 190 | SIM_RBC = np.zeros((nX,T)) 191 | eps_t = np.random.normal(0,sigma,T) 192 | 193 | # Calculating the intercept for the simulation 194 | intercept = (np.eye(nX) - P)@X_SS 195 | 196 | # Initialize the variables at their steady state 197 | SIM_RBC[:,0] = X_SS 198 | for t in range(1,T): 199 | # Development of individual variables 200 | SIM_RBC[:,t] = intercept + P@SIM_RBC[:,t-1] + eps_t[t]*Q 201 | # Transition of shock in logs 202 | SIM_RBC[0,t] = np.exp(P[0,0]*np.log(SIM_RBC[0,t-1]) + Q[0] * eps_t[t]) 203 | 204 | 205 | # Plotting the development 206 | fig, axes = plt.subplots(nrows = 2, ncols = 4, figsize = (10,5)) 207 | for i in range(nX): 208 | row = i // 4 209 | col = i % 4 210 | axes[row, col].plot(SIM_RBC[i,0:TT]) 211 | axes[row, col].plot(np.ones(TT)*X_SS[i]) 212 | axes[row, col].set_title(names[i]) 213 | fig.tight_layout() 214 | plt.show() 215 | 216 | 217 | # Quickly renaming for easier reference 218 | Y = SIM_RBC 219 | 220 | 221 | # Print the results of the simulation 222 | print("\nThe stochastic steady state is %F, with the true being %F" % (np.mean(Y[iK,:]), X_SS[iK])) 223 | print("The volatility of output, consumption and investment are %F, %F, and %F." % (np.std(Y[iY])*100/np.mean(Y[iY]),np.std(Y[iC])*100/np.mean(Y[iC]), np.std(Y[iI])*100/np.mean(Y[iI]))) 224 | print("The mean of consumption, investment, capital, and labor in relation to output are %F, %F, %F, and %F." % (np.mean(Y[iC]*100/Y[iY]), np.mean(Y[iI]*100/Y[iY]), np.mean(Y[iK]*100/(4*Y[iY])), np.mean(Y[iL]*100))) 225 | print("The CV of consumption, investment and labor in relation to the CV of output are %F, %F, and %F." % ((np.std(Y[iC])*100/np.mean(Y[iC]))/(np.std(Y[iY])*100/np.mean(Y[iY])),(np.std(Y[iI])*100/np.mean(Y[iI]))/(np.std(Y[iY])*100/np.mean(Y[iY])),(np.std(Y[iL])*100/np.mean(Y[iL]))/(np.std(Y[iY])*100/np.mean(Y[iY])))) 226 | print("The correlation of consumption, investment and labor with output are %F, %F, and %F." 
%(np.corrcoef(Y[iY],Y[iC])[0,1], np.corrcoef(Y[iY],Y[iI])[0,1], np.corrcoef(Y[iY], Y[iL])[0,1])) 227 | 228 | print(Q) -------------------------------------------------------------------------------- /02 RBC-Perturbation.py: -------------------------------------------------------------------------------- 1 | """ 2 | Solves the classic RBC model using Perturbation 3 | Extension of the McKay material with respect to plotting and statistics 4 | Reference: https://alisdairmckay.com/Notes/HetAgents/index.html 5 | """ 6 | 7 | 8 | import autograd.numpy as np 9 | from autograd import jacobian 10 | np.set_printoptions(suppress=True,precision=4) 11 | import matplotlib.pyplot as plt 12 | import warnings 13 | 14 | 15 | # Number of Variables 16 | nX = 8 17 | # Number of shocks 18 | nEps = 1 19 | # Indexing the variables 20 | iZ, iY, iC, iI, iR, iK, iW, iL = range(nX) 21 | 22 | 23 | # Parameters 24 | alpha = 0.4 25 | beta = 0.99 26 | gamma = 2 27 | vega = 0.36 28 | delta = 0.019 29 | rho = 0.99 30 | 31 | 32 | # Defining a function, which gives back the steady state 33 | def SteadyState(): 34 | Z = 1. 35 | R = 1/beta 36 | W = (1-alpha)*((alpha*Z)/(R-(1-delta)))**(alpha/(1-alpha)) 37 | KL = ((R-1+delta)/alpha)**(1./(alpha-1)) 38 | YL = KL**alpha 39 | CL = YL - delta*KL 40 | Ll = (1-vega)/vega*CL/(1-alpha)*KL**(-alpha) 41 | L = 1/(1+Ll) 42 | K = KL*L 43 | Y = YL*L 44 | C = CL*L 45 | I = Y - C 46 | 47 | X = np.zeros(nX) 48 | X[[iZ, iY, iC, iI, iR, iK, iW, iL]] = (Z, Y, C, I, R, K, W, L) 49 | return X 50 | 51 | 52 | # Get the steady state 53 | X_SS = SteadyState() 54 | X_EXP = np.array(("Prod.", "Output", "Consumption", "Investment", "Interest", "Capital", "Wage", "Labour", )) 55 | epsilon_SS = 0.0 56 | print("Variables: {}".format(X_EXP)) 57 | print("Steady state: {}".format(X_SS)) 58 | 59 | 60 | # Model equations 61 | def F(X_Lag,X,X_Prime,epsilon): 62 | 63 | # Unpack 64 | Z, Y, C, I, R, K, W, L = X 65 | Z_L, Y_L, C_L, I_L, R_L, K_L, W_L, L_L = X_Lag 66 | Z_P, Y_P, C_P, I_P, R_P, K_P, W_P, L_P = X_Prime 67 | return np.hstack(( 68 | beta * R_P * vega/C_P*(C_P**vega*(1-L_P)**(1-vega))**(1-gamma) / 69 | (vega/C*(C**vega*(1-L)**(1-vega))**(1-gamma)) - 1.0, # Euler equation 70 | alpha * Z * (K_L/L) **(alpha-1) + 1 -delta - R, # MPK 71 | (1-alpha)*Z*(K_L/L)**(alpha) - W, # MPL 72 | C/(1-L) - vega/(1-vega)*(1-alpha)*Z*(K_L/L)**alpha, # Labour allocation 73 | (1-delta) * K_L + Y - C - K, # Aggregate resource constraint 74 | Z*K_L**alpha * (L)**(1-alpha) - Y, # Production function 75 | (1-delta) * K_L + I - K, # Investment 76 | rho * np.log(Z_L) + epsilon - np.log(Z) # TFP evolution 77 | )) 78 | 79 | 80 | # Check whether at the steady state F is zero 81 | assert(np.allclose( F(X_SS,X_SS,X_SS,epsilon_SS) , np.zeros(nX))) 82 | 83 | 84 | # Compute the numerical derivative 85 | A = jacobian(lambda x: F(X_SS,X_SS,x,epsilon_SS))(X_SS) 86 | B = jacobian(lambda x: F(X_SS,x,X_SS,epsilon_SS))(X_SS) 87 | C = jacobian(lambda x: F(x,X_SS,X_SS,epsilon_SS))(X_SS) 88 | E = jacobian(lambda x: F(X_SS,X_SS,X_SS,x))(epsilon_SS) 89 | 90 | 91 | # Function to solve the system based on McKays material 92 | def SolveSystem(A,B,C,E,P0=None): 93 | # Solve the system using linear time iteration as in Rendahl (2017) 94 | #print("Solving the system") 95 | MAXIT = 1000 96 | if P0 is None: 97 | P = np.zeros(A.shape) 98 | else: 99 | P = P0 100 | 101 | S = np.zeros(A.shape) 102 | 103 | for it in range(MAXIT): 104 | P = -np.linalg.lstsq(B+A@P,C,rcond=None)[0] 105 | S = -np.linalg.lstsq(B+C@S,A,rcond=None)[0] 106 | test = np.max(np.abs(C+B@P+A@P@P)) 
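        # (Editorial note) Convergence check for the linear time iteration: the candidate
        # law of motion x_t = P x_{t-1} + Q eps_t must solve the quadratic matrix equation
        # A P^2 + B P + C = 0 implied by A x_{t+1} + B x_t + C x_{t-1} + E eps_t = 0;
        # "test" is the largest absolute entry of that residual, and Q follows from
        # Q = -(B + A P)^{-1} E once P has converged.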
107 | #if it % 20 == 0: 108 | #print(test) 109 | if test < 1e-10: 110 | break 111 | 112 | 113 | if it == MAXIT-1: 114 | warnings.warn('LTI did not converge.') 115 | 116 | 117 | # test Blanchard-Kahn conditions 118 | if np.max(np.linalg.eig(P)[0]) >1: 119 | raise RuntimeError("Model does not satisfy BK conditions -- non-existence") 120 | 121 | if np.max(np.linalg.eig(S)[0]) >1: 122 | raise RuntimeError("Model does not satisfy BK conditions -- mulitple stable solutions") 123 | 124 | # Impact matrix 125 | # Solution is x_{t}=P*x_{t-1}+Q*eps_t 126 | Q = -np.linalg.inv(B+A@P) @ E 127 | 128 | return P, Q 129 | 130 | 131 | # Using the function to solve the system 132 | P, Q = SolveSystem(A,B,C,E) 133 | 134 | 135 | # Calculate an impulse response 136 | T = 200 137 | IRF_RBC = np.zeros((nX,T)) 138 | IRF_RBC[:,0] = Q * 0.01 139 | 140 | 141 | # Impulse response functions for 100 periods 142 | for t in range(1,T): 143 | IRF_RBC[:,t] = P@IRF_RBC[:,t-1] 144 | 145 | 146 | # Normalizing with respect to the steady state 147 | for i in range(nX): 148 | IRF_RBC[i,:] = IRF_RBC[i,:] / X_SS[i] * 100 149 | # Normalizing the interest rate into percentage points difference 150 | IRF_RBC[1] = IRF_RBC[1] * X_SS[1] 151 | 152 | 153 | # List with the variable names 154 | names = ["TFP", "Output", "Consumption", "Investment", "Interest", "Capital", "Wage", "Labour"] 155 | 156 | 157 | # Plotting the results of the IRF 158 | fig, axes = plt.subplots(nrows = 2, ncols = 4, figsize = (10,5)) 159 | for i in range(nX): 160 | row = i // 4 161 | col = i % 4 162 | axes[row, col].plot(IRF_RBC[i,:]) 163 | axes[row, col].plot(np.zeros(T)) 164 | axes[row, col].set_title(names[i]) 165 | fig.tight_layout() 166 | #plt.show() 167 | 168 | 169 | # Comparison of the volatility of real variables and the model variables 170 | sigma = np.sqrt(0.000049) 171 | T = 5000 172 | TT = 500 # Periods that are plotted in the end 173 | # Defining empty matrices for simulation and drawing shocks 174 | SIM_RBC = np.zeros((nX,T)) 175 | eps_t = np.random.normal(0,sigma,T) 176 | # Calculating the intercept for the simulation 177 | intercept = (np.eye(nX) - P)@X_SS 178 | # Initialize the variables at their steady state 179 | SIM_RBC[:,0] = X_SS 180 | for t in range(1,T): 181 | # Development of individual variables 182 | SIM_RBC[:,t] = intercept + P@SIM_RBC[:,t-1] + eps_t[t]*Q 183 | # Transition of shock in logs 184 | SIM_RBC[0,t] = np.exp(P[0,0]*np.log(SIM_RBC[0,t-1]) + Q[0] * eps_t[t]) 185 | 186 | 187 | # Plotting the development 188 | fig, axes = plt.subplots(nrows = 2, ncols = 4, figsize = (10,5)) 189 | for i in range(nX): 190 | row = i // 4 191 | col = i % 4 192 | axes[row, col].plot(SIM_RBC[i,0:TT]) 193 | axes[row, col].plot(np.ones(TT)*X_SS[i]) 194 | axes[row, col].set_title(names[i]) 195 | fig.tight_layout() 196 | plt.show() 197 | 198 | 199 | # Quickly renaming for easier reference 200 | Y = SIM_RBC 201 | 202 | 203 | # Print the results of the simulation 204 | print("\nThe stochastic steady state is %F, with the true being %F" % (np.mean(Y[iK,:]), X_SS[iK])) 205 | print("The volatility of output, consumption and investment are %F, %F, and %F." % (np.std(Y[iY])*100/np.mean(Y[iY]),np.std(Y[iC])*100/np.mean(Y[iC]), np.std(Y[iI])*100/np.mean(Y[iI]))) 206 | print("The mean of consumption, investment, capital, and labor in relation to output are %F, %F, %F, and %F." 
% (np.mean(Y[iC]*100/Y[iY]), np.mean(Y[iI]*100/Y[iY]), np.mean(Y[iK]*100/(4*Y[iY])), np.mean(Y[iL]*100))) 207 | print("The CV of consumption, investment and labor in relation to the CV of output are %F, %F, and %F." % ((np.std(Y[iC])*100/np.mean(Y[iC]))/(np.std(Y[iY])*100/np.mean(Y[iY])),(np.std(Y[iI])*100/np.mean(Y[iI]))/(np.std(Y[iY])*100/np.mean(Y[iY])),(np.std(Y[iL])*100/np.mean(Y[iL]))/(np.std(Y[iY])*100/np.mean(Y[iY])))) 208 | print("The correlation of consumption, investment and labor with output are %F, %F, and %F." %(np.corrcoef(Y[iY],Y[iC])[0,1], np.corrcoef(Y[iY],Y[iI])[0,1], np.corrcoef(Y[iY], Y[iL])[0,1])) 209 | 210 | -------------------------------------------------------------------------------- /02 RBC-Value Function.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Spyder Editor 4 | 5 | This solves the RBC model with value function iteration and 6 | endogeneous labor supply 7 | 8 | """ 9 | 10 | import time 11 | import numpy as np 12 | import matplotlib.pyplot as plt 13 | import scipy.optimize as opt 14 | import quantecon as qe 15 | from numba import jit 16 | 17 | # Supress warning 18 | import warnings 19 | warnings.filterwarnings("ignore") 20 | 21 | 22 | #parameters 23 | theta = 0.4; 24 | delta = 0.019; 25 | sigma = 2; 26 | vega = 0.36; 27 | beta = 0.99; 28 | nk = 100; 29 | nz = np.int(21); 30 | rho = 0.95; 31 | stdz = np.sqrt(0.000049); 32 | m = 3; 33 | sims = 10; 34 | nk = np.int(250); 35 | 36 | #discretizing the grid 37 | mc = qe.markov.approximation.tauchen(rho,stdz,0,m,nz) 38 | P = mc.P 39 | zs = np.exp(mc.state_values) 40 | #zs = mc.state_values+1 # As alternative symetric shocks 41 | 42 | #steady state quantities 43 | kl_ss = ((beta*theta)/(1-beta*(1-delta)))**(1/(1-theta)) 44 | yl_ss = kl_ss**theta 45 | cl_ss = yl_ss - delta*kl_ss 46 | ll_ss = (1-vega)/vega*cl_ss/(1-theta)*kl_ss**(-theta) 47 | l_ss = 1/(1+ll_ss) 48 | k_ss = kl_ss*l_ss 49 | y_ss = k_ss**theta 50 | c_ss = y_ss - delta*k_ss 51 | 52 | 53 | #discretizing the k grid 54 | kmin = 0.8*k_ss 55 | kmax = 1.2*k_ss 56 | k = np.linspace(kmin,kmax,nk) 57 | 58 | 59 | #tolerance levels 60 | tolv = 10**-8 61 | maxiter = 5000 62 | 63 | starts = time.time() 64 | # Setting up the matrixes as a function of future k 65 | consumption = np.zeros((nz,nk,nk)) 66 | labor = np.copy(consumption) 67 | for iz in range(nz): 68 | for ik in range(nk): 69 | for jk in range(nk): 70 | res = lambda l: zs[iz]*k[ik]**theta*l**(1-theta) + (1-delta)*k[ik] - k[jk] - vega/(1-vega)*(1-l)*(1-theta)*zs[iz]*(k[ik]/l)**theta 71 | labor[iz,ik,jk] = opt.fsolve(res, l_ss) 72 | consumption[iz,ik,jk] = vega/(1-vega)*(1-labor[iz,ik,jk])*(1-theta)*(k[ik]/labor[iz,ik,jk])**(theta) 73 | stops = time.time() 74 | 75 | #Value function iteration 76 | @jit 77 | def value_function(nz = nz, nk = nk, consumption = consumption, labor = labor, vega = vega, beta = beta, sigma = sigma, P = P, k = k): 78 | iter = 0 79 | diffV = 10 80 | u = c_ss**(1-sigma)/(1-sigma) 81 | V = np.ones((nz,nk))*u/(1-beta) 82 | v = np.zeros((nz,nk)) 83 | g = np.zeros((nz,nk)) 84 | h = np.copy(g) 85 | newV = np.zeros((nz,nk)) 86 | start = time.time() 87 | while (diffV > tolv and iter1] = -1000000000 96 | v = u+beta*(np.dot(P[iz,:],V)) 97 | newV[iz,ik] = max(v) 98 | ind = np.argmax(v) 99 | g[iz,ik] = k[ind] 100 | h[iz,ik] = l[ind] 101 | diffV = np.linalg.norm(newV-V) 102 | V = newV.copy() 103 | print(iter, diffV) 104 | stop = time.time() 105 | print("\nValue function iteration converged after %.0F iterations and %.5F 
seconds" % (iter, (stop-start))) 106 | return g, h, V 107 | 108 | 109 | # Running the function 110 | g, h, V = value_function() 111 | print("\nThe population of the matrix took %F seconds." % (stops-starts)) 112 | 113 | # Plotting the value function 114 | fig, axes = plt.subplots(nrows = 2, ncols = 1, figsize=(10,5)) 115 | axes[0].plot(k,V.transpose()) 116 | axes[0].set_title("Value functions") 117 | 118 | axes[1].plot(k,g.transpose()) 119 | axes[1].plot(k,k) 120 | axes[1].set_title('Policy functions') 121 | #plt.show() 122 | #plt.savefig("convergence.png") 123 | 124 | 125 | # Simulate the economy 126 | T = 5000 127 | 128 | # Setup the arrays 129 | A = mc.simulate(T, init = mc.state_values[int((nz-1)/2)]) 130 | Aind = mc.get_index(A) 131 | A = np.exp(A) 132 | K = np.zeros(T) 133 | Kind = np.copy(K) 134 | Kind[0] = nk/2 135 | K[0] = k[int(Kind[0])] 136 | 137 | 138 | # Simulating the economy period by period 139 | for t in range(1,T): 140 | K[t] = g[int(Aind[t-1]),int(Kind[t-1])] 141 | Kind[t] = np.where(K[t] == k)[0] 142 | 143 | 144 | lab = h[np.int64(Aind),np.int64(Kind)] 145 | out = A*K**theta*lab**(1-theta) 146 | cons = out - g[np.int64(Aind),np.int64(Kind)] + (1-delta)*K 147 | inv = out - cons 148 | 149 | 150 | # Plot the development of the economy 151 | t = range(T) 152 | fig, axes = plt.subplots(nrows = 2, ncols = 1, figsize=(10,5)) 153 | 154 | axes[0].plot(t, K) 155 | axes[0].set_title("Trajectory of capital") 156 | axes[0].set_xlabel("Period") 157 | axes[0].set_ylabel("Capital") 158 | 159 | axes[1].plot(t, out, label = "Output") 160 | axes[1].plot(t, cons, label = "Consumption") 161 | axes[1].plot(t, inv, label = "Investment") 162 | axes[1].set_title("GDP components") 163 | axes[1].set_xlabel("Period") 164 | axes[1].set_ylabel("GDP components") 165 | axes[1].legend(loc=5) 166 | plt.show() 167 | #plt.savefig("simulation.png") 168 | 169 | 170 | print("\nThe stochastic steady state is %F, with the true being %F" % (np.mean(K), k_ss)) 171 | print("\nThe volatility of output, consumption and investment are %F, %F, and %F." % (np.std(out)*100/np.mean(out),np.std(cons)*100/np.mean(cons), np.std(inv)*100/np.mean(inv))) 172 | print("\nThe mean of consumption, investment, capital, and labor in relation to output are %F, %F, %F, and %F." % (np.mean(cons*100/out), np.mean(inv*100/out), np.mean(K*100/(4*out)), np.mean(lab*100))) 173 | print("\nThe CV of consumption, investment and labor in relation to the CV of output are %F, %F, and %F." % ((np.std(cons)*100/np.mean(cons))/(np.std(out)*100/np.mean(out)),(np.std(inv)*100/np.mean(inv))/(np.std(out)*100/np.mean(out)),(np.std(lab)*100/np.mean(lab))/(np.std(out)*100/np.mean(out)))) 174 | print("\nThe correlation of consumption, investment and labor with output are %F, %F, and %F." 
%(np.corrcoef(out,cons)[0,1], np.corrcoef(out,inv)[0,1], np.corrcoef(out, lab)[0,1])) -------------------------------------------------------------------------------- /03 Aiyagari-Howard Improvement.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | This solves the Aiyagari model with policy function iteration 4 | Furthermore, it aggreagtes the economy with the invariate distribution 5 | 6 | """ 7 | 8 | 9 | import numpy as np 10 | import time 11 | import matplotlib.pyplot as plt 12 | import quantecon as qe 13 | from numba import jit 14 | 15 | # Supress warning 16 | import warnings 17 | warnings.filterwarnings("ignore") 18 | 19 | 20 | class HH: 21 | """ 22 | Setups a class containing all necessary information to solve the 23 | Aiyagari (1994) model. 24 | """ 25 | 26 | def __init__(self, theta=0.36, delta=0.08, sigma=3, 27 | beta=0.96, nz=7, rho=0.9, stdev=0.2, 28 | m=3, nk=500, kmin=10**(-5), kmax=50): 29 | """Initializes the class with standard parameters""" 30 | self.theta, self.delta, self.sigma = theta, delta, sigma 31 | self.beta, self.nz, self.nk, self.m = beta, nz, nk, m 32 | self.rho, self.stdev = rho, stdev 33 | self.stdz = stdev * (1 - rho**2)**(1 / 2) 34 | self.kmin, self.kmax = kmin, kmax 35 | 36 | # Setting up the grid 37 | self.k = np.zeros(nk) 38 | for i in range(nk): 39 | self.k[i] = kmin + kmax / ((nk + 1)**2.35) * (i**2.35) 40 | 41 | def utility(self, c): 42 | """Utility function, dependent on the value of sigma""" 43 | if self.sigma == 1: 44 | u = np.log(c) 45 | else: 46 | u = c**(1 - self.sigma) / (1 - self.sigma) 47 | return u 48 | 49 | def interest(self, k): 50 | """Gives back the interest rate given a capital value""" 51 | return self.theta * (k / self.l_s)**(self.theta - 1) - self.delta 52 | 53 | def interest_reverse(self, r): 54 | """Gives back the capital value for an interest rate""" 55 | return (self.theta / (r + self.delta) 56 | )**(1 / (1 - self.theta)) * self.l_s 57 | 58 | def r_to_w(self, r): 59 | """Transforms an interest rate into a wage rate""" 60 | return (1 - self.theta) * ((self.theta / (r + self.delta)) 61 | )**(self.theta / (1 - self.theta)) 62 | 63 | def markov(self): 64 | """Approximates the transistion probability of an AR(1) process 65 | using the methodology of Tauchen (1986) using the quantecon package 66 | 67 | Uses the states, and the transition matrix to give back the 68 | transition matrix P, as well as invariante labor supply l_s 69 | """ 70 | self.mc = qe.markov.approximation.tauchen(self.rho, self.stdz, 71 | 0, self.m, self.nz) 72 | self.P = self.mc.P 73 | self.labor_states = np.exp(self.mc.state_values) 74 | inv_l = np.linalg.matrix_power(self.P, 1000) 75 | inv_dist = inv_l[0, :] 76 | inv_l = inv_l / inv_l.sum() 77 | self.l_s = np.dot(self.labor_states, inv_dist) 78 | return self.P, self.l_s 79 | 80 | 81 | # Generating a class 82 | nz = 7 83 | nk = 500 84 | sigma = 3 85 | rho = 0.6 86 | hh = HH(nz=nz, nk=nk, rho=rho, sigma=sigma) 87 | 88 | 89 | # Current level 90 | P, l_s = hh.markov() 91 | r = (3.87 - 1) / 100 92 | k_t = hh.interest_reverse(r) 93 | 94 | 95 | # Extrcting a policy function, given a Value function 96 | @jit 97 | def get_policy(V, r, HH): 98 | """Given a value function V, an interest rate r, as well as a HH class, 99 | the function computes a new policy function g, as well as the 100 | corresponding indicator function indk""" 101 | # Unpacking of parameters 102 | nz, nk, P = HH.nz, HH.nk, HH.P 103 | labor, k = HH.labor_states, HH.k 104 | beta = HH.beta 105 | R = 1 + r 106 | 
w = HH.r_to_w(r) 107 | 108 | # Setting up the empty matrices 109 | g = np.zeros((nz, nk)) 110 | indk = np.copy(V) 111 | for iz in range(nz): 112 | for ik in range(nk): 113 | # Calculating consumption and its corresponding utility 114 | c = w * labor[iz] + R * k[ik] - k 115 | u = HH.utility(c) 116 | # Penalize negative consumption 117 | u[c < 0] = -1000000 118 | # Get value function, maximize and get new policy function 119 | v = u + beta * (np.dot(P[iz, :], V)) 120 | ind = np.argmax(v) 121 | indk[iz, ik] = ind 122 | g[iz, ik] = k[ind] 123 | return g, indk 124 | 125 | 126 | # Function to setup J given a guessed policy function 127 | @jit 128 | def get_J(g, HH): 129 | """Using a policy function, as well as a HH class object, the function 130 | computes the J matrix according to Ljundgqvist and Sargent to 131 | calculate a new value function by matrix inversion.""" 132 | # Extracting the parameters 133 | k, nz, nk = HH.k, HH.nz, HH.nk 134 | J = np.zeros((nz, nk, nk)) 135 | for i in range(nz): 136 | for j in range(nk): 137 | J[i, j, :] = (g[i, j] == k) 138 | return J 139 | 140 | 141 | # Function to setup a similar matrix as the Q matrix to Ljundgqvist and 142 | # Sargent: 143 | @jit 144 | def get_Q(P, J): 145 | """Given the transition matrix P and the J matrix from the get_J function 146 | this function gives back the stochastic transition matrix Q as in 147 | Ljungqvist and Sargent (2012).""" 148 | # Setup empty matrices 149 | shape1 = np.shape(P)[0] 150 | shape2 = np.shape(J)[1] 151 | shape3 = shape1 * shape2 152 | Q = np.zeros((shape3, shape3)) 153 | for i in range(shape1): 154 | for j in range(shape1): 155 | pos11 = int(i * shape2) 156 | pos12 = int((i + 1) * shape2) 157 | pos21 = int(j * shape2) 158 | pos22 = int((j + 1) * shape2) 159 | Q[pos11:pos12, pos21:pos22] = P[i, j] * J[i, :] 160 | return Q 161 | 162 | 163 | # Generate a new value function 164 | @jit 165 | def get_value(Q, re, HH): 166 | """Given the stochastic transition matrix Q, a reward vector re, and 167 | a HH class instance this function gives back a new value function.""" 168 | nz, nk, beta = HH.nz, HH.nk, HH.beta 169 | matrix = np.eye(np.shape(Q)[0]) - beta * Q 170 | inverse = np.linalg.inv(matrix) 171 | # Calculating new value function as a vector 172 | v = np.dot(inverse, re) 173 | # Putting the vector back in required shape 174 | v = v.reshape(nz, nk) 175 | return v 176 | 177 | 178 | # Generate a reward vector 179 | @jit 180 | def reward(r, HH, g): 181 | """Given an interest rate r, an HH class instance and a policy function g, 182 | the function gives back a vector of utility required for the get_Value 183 | function.""" 184 | nk, nz, k, labor_states = HH.nk, HH.nz, HH.k, HH.labor_states 185 | w = hh.r_to_w(r) 186 | # Calculate the utility 187 | re = hh.utility((1 + r) * k.reshape(1, nk) + w 188 | * labor_states.reshape(nz, 1) - g) 189 | # Transform into the required format for the function 190 | return re.flatten() 191 | 192 | 193 | # Policy function iteration 194 | def policy(g, r, HH, maxiter=1000, tol=10**(-11)): 195 | """Given a guess for the policy function g, an interest rate r, and 196 | an instance of the HH class, the function performs a full policy 197 | function iteration and solves for a new policy function g, value 198 | function v, and indicator function indk. 
199 | """ 200 | error = 1 201 | iter = 0 202 | # Checking, whether the requirements are met 203 | test1 = (error > tol) 204 | test2 = (iter < maxiter) 205 | while (test1 and test2): 206 | # Generate J 207 | j = get_J(g, HH) 208 | 209 | # Generate Q 210 | q = get_Q(HH.P, j) 211 | 212 | # Getting a reward vector 213 | re = reward(r, HH, g) 214 | 215 | # Generate a new value function 216 | v = get_value(q, re, HH) 217 | 218 | # Extract a policy function 219 | gnew, indk = get_policy(v, r, HH) 220 | 221 | # Compute error for this iteration 222 | error = np.linalg.norm(gnew - g) 223 | g = np.copy(gnew) 224 | iter = iter + 1 225 | 226 | # Generate new test criteria 227 | test1 = (error > tol) 228 | test2 = (iter < maxiter) 229 | return g, v, indk 230 | 231 | 232 | # Calculating the invariate distribution 233 | @jit 234 | def distribution(indk, HH, tol=10**(-11), maxiter=10000): 235 | """Given an indicator function indk, and an instance of a household HH, 236 | the function calculates an invariante distribution of households over 237 | the asset and productivity space. 238 | """ 239 | # Setup empty matrices 240 | nz, nk = HH.nz, HH.nk 241 | dist = np.ones((nz, nk)) / (nz * nk) 242 | 243 | error = 1 244 | iter = 0 245 | # Evaluate criteria 246 | test1 = (error > tol) 247 | test2 = (iter < maxiter) 248 | while (test1 and test2): 249 | distnew = np.zeros((nz, nk)) 250 | for j in range(nk): 251 | for i in range(nz): 252 | # Next periods distribution is the cummulative of the 253 | # households which migrate through the policy function and 254 | # through stochastic transition 255 | distnew[:, int(indk[i, j])] = distnew[:, int( 256 | indk[i, j])] + dist[i, j] * P[i, :] 257 | # Evaluating the error, as well as the criteria 258 | error = np.linalg.norm(distnew - dist) 259 | dist = np.copy(distnew) 260 | test1 = (error > tol) 261 | test2 = (iter < maxiter) 262 | iter = iter + 1 263 | return dist 264 | 265 | 266 | # Function to solve for the equilibrium 267 | @jit 268 | def Aiyagari(k_t, HH): 269 | """Function that completely solves the Aiyagari (1994) model""" 270 | 271 | # Unpacking parameters and setting up matrices 272 | beta = HH.beta 273 | iter = 0 274 | error = 10 275 | tol = 0.01 276 | g = np.zeros((nz, nk)) 277 | # Evaluating criteria 278 | test = (error > tol) 279 | while test: 280 | r = HH.interest(k_t) 281 | # Setting maximum interest rate 282 | if r > 1 / beta - 1: 283 | r = 1 / beta - 1 284 | iter = iter + 1 285 | 286 | # Value function iteration 287 | start1 = time.time() 288 | g, V, indk = policy(g, r, hh) 289 | stop1 = time.time() 290 | 291 | # Find capital supply 292 | start2 = time.time() 293 | dist = distribution(indk, hh) 294 | k_s = np.sum(hh.k * np.sum(dist, axis=0)) 295 | stop2 = time.time() 296 | # Getting a new interest rate 297 | r1 = HH.interest(k_s) 298 | # Evaluating error in this iteration 299 | error = np.abs(r - r1) * 100 300 | print("\n------------------------------------------------------------") 301 | print("The error in iteration %.0F is %F." % (iter, error)) 302 | print( 303 | "The capital supply is %F, and the interest rate is %F." 
% 304 | (k_s, r * 100)) 305 | print( 306 | "PFI and simulation took %.3F and %.3F seconds respectively" % 307 | ((stop1 - start1), (stop2 - start2))) 308 | 309 | # To make convergence easier, we altered the updating function 310 | # the closer we get to our accepted tolerance level 311 | if error > 2: 312 | k_t = 0.95 * k_t + 0.05 * k_s 313 | elif error > 0.5: 314 | k_t = 0.99 * k_t + 0.01 * k_s 315 | elif error > 0.05: 316 | k_t = 0.995 * k_t + 0.005 * k_s 317 | elif error > 0.02: 318 | k_t = 0.9975 * k_t + 0.0025 * k_s 319 | else: 320 | k_t = 0.999 * k_t + 0.001 * k_s 321 | test = (error > tol) 322 | print("\nThe equilibrium interest rate is %F." % (r * 100)) 323 | return V, g, indk, dist, r 324 | 325 | 326 | # Running the function 327 | start = time.time() 328 | V, g, indk, dist, r = Aiyagari(k_t, hh) 329 | stop = time.time() 330 | print("Solving the model took %F minutes." % ((stop - start) / 60)) 331 | 332 | # Plot the value function and the policy function 333 | fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(15, 10)) 334 | axes[0].plot(hh.k, V.transpose()) 335 | axes[0].set_title("Value functions") 336 | 337 | axes[1].plot(hh.k, g.transpose()) 338 | axes[1].plot(hh.k, hh.k) 339 | axes[1].set_title('Policy functions') 340 | #plt.show() 341 | #plt.savefig("convergence.png") 342 | 343 | 344 | # Generating the distribution 345 | dist1 = distribution(indk, hh) 346 | dist1 = np.sum(dist1, axis=0) 347 | 348 | 349 | # Density function 350 | plt.figure(figsize=(15, 10)) 351 | plt.plot(hh.k, dist1) 352 | plt.xlabel('Asset Value') 353 | plt.ylabel('Frequency') 354 | plt.title('Asset Distribution') 355 | plt.show() 356 | 357 | 358 | # Monte-Carlo simulation for asset distribution and gini 359 | T = 1000000 360 | mc = hh.mc 361 | k = hh.k 362 | sim = hh.mc.simulate_indices(T, init=int((nz - 1) / 2)) 363 | @jit 364 | def simulate(mc, indk, labor_sim=sim, k=k, N=10000, T=T): 365 | """Simulate a cross-section of households over time to derive an 366 | invariante distribution of households assets for later plotting.""" 367 | nz = np.shape(mc.P)[0] 368 | m = T / N 369 | ind = np.zeros(N) 370 | for n in range(N): 371 | labor_sim = np.concatenate((labor_sim[int(n * m):T], 372 | labor_sim[0:int(n * m)])) 373 | temp = indk[int((nz - 1) / 2), int(nk / 2)] 374 | for t in range(T): 375 | temp = indk[int(labor_sim[t]), int(temp)] 376 | ind[n] = temp 377 | a = k[np.int64(ind)] 378 | return a 379 | 380 | 381 | # Generate a new distribution 382 | dist2 = simulate(mc, indk, N=10000) 383 | 384 | 385 | # Plot the distribution 386 | plt.figure(figsize=(15, 10)) 387 | n, bins, patches = plt.hist(x=dist2, bins='auto', color='#0504aa', 388 | alpha=0.7, rwidth=0.85, histtype="stepfilled") 389 | plt.xlabel('Asset Value') 390 | plt.ylabel('Frequency') 391 | plt.title('Asset Distribution') 392 | plt.show() 393 | #plt.savefig("distribution.png") 394 | 395 | 396 | # Print the output 397 | print("\nThe equilibrium interest rate is %F." % (r * 100)) 398 | print("Solving the model took %F minutes." % ((stop - start) / 60)) 399 | print("The gini coefficient for the distribution is %F." 400 | % (qe.gini_coefficient(dist2))) 401 | -------------------------------------------------------------------------------- /03 Aiyagari-Value Function-Distribution.py: -------------------------------------------------------------------------------- 1 | # -*- coding: utf-8 -*- 2 | """ 3 | Spyder Editor 4 | 5 | This solves the Aiyagari model with value function iteration. 6 | It uses the invariante simulation to aggregate the economy. 
7 | 8 | """ 9 | 10 | import numpy as np 11 | import time 12 | import matplotlib.pyplot as plt 13 | import quantecon as qe 14 | from numba import jit 15 | 16 | # Supress warning 17 | import warnings 18 | warnings.filterwarnings("ignore") 19 | 20 | 21 | class HH: 22 | """ 23 | Setups a class containing all necessary information to solve the 24 | Aiyagari (1994) model. 25 | """ 26 | 27 | def __init__(self, theta = 0.36, delta = 0.08, sigma = 3, 28 | beta = 0.96, nz = 7, rho = 0.9, stdev = 0.2, 29 | m = 3, nk = 500, kmin = 10**(-5), kmax = 50): 30 | """Initialize the class with standard parameters""" 31 | # Setup parameters 32 | self.theta, self.delta, self.sigma = theta, delta, sigma 33 | self.beta, self.nz, self.nk, self.m = beta, nz, nk, m 34 | self.rho, self.stdev = rho, stdev 35 | self.stdz = stdev*(1-rho**2)**(1/2) 36 | self.kmin, self.kmax = kmin, kmax 37 | 38 | # Setting up the grid 39 | self.k = np.zeros(nk) 40 | for i in range(nk): 41 | self.k[i] = kmin + kmax/((nk+1)**2.35)*(i**2.35) 42 | 43 | def utility(self, c): 44 | """Utility function depending on the value of sigma""" 45 | if self.sigma == 1: 46 | u = np.log(c) 47 | else: 48 | u = c**(1-self.sigma)/(1-self.sigma) 49 | return u 50 | 51 | def interest(self,k): 52 | """Gives back the interest rate, given a capital supply""" 53 | return self.theta*(k/self.l_s)**(self.theta-1) - self.delta 54 | 55 | def interest_reverse(self,r): 56 | """Given an interest rate, gives back the capital demand""" 57 | return (self.theta/(r+self.delta))**(1/(1-self.theta))*self.l_s 58 | 59 | def r_to_w(self, r): 60 | """Given an interest rate, the function calculates the wage""" 61 | return (1-self.theta)*((self.theta/(r+self.delta)))**(self.theta/(1-self.theta)) 62 | 63 | def markov(self): 64 | """Approximates the transistion probability of an AR(1) process 65 | using the methodology of Tauchen (1986) using the quantecon package 66 | 67 | Uses the states, and the transition matrix to give back the 68 | transition matrix P, as well as invariante labor supply l_s 69 | """ 70 | self.mc = qe.markov.approximation.tauchen(self.rho,self.stdz, 71 | 0,self.m,self.nz) 72 | self.P = self.mc.P 73 | self.l = np.exp(self.mc.state_values) 74 | inv_l = np.linalg.matrix_power(self.P,1000) 75 | inv_dist = inv_l[0,:] 76 | inv_l = inv_l / inv_l.sum() 77 | self.l_s = np.dot(self.l, inv_dist) 78 | return self.P, self.l_s 79 | 80 | 81 | # Generating a class 82 | nz = 7 83 | nk = 500 84 | sigma = 3 85 | rho = 0.6 86 | hh = HH(nz = nz, nk = nk, rho = rho, sigma = sigma) 87 | 88 | 89 | # Current level of initial guess 90 | P, l_s = hh.markov() 91 | r = (3.87-1)/100 92 | k_t = hh.interest_reverse(r) 93 | 94 | 95 | #Value function iteration 96 | @jit 97 | def Value(r, HH): 98 | """Given a guess for the interest rate, and a HH class the function 99 | calculates a optimal policy function, as well as the associated 100 | value function and indicator function.""" 101 | sigma, beta, P = HH.sigma, HH.beta, HH.P 102 | l, k = HH.l, HH.k 103 | w = hh.r_to_w(r) 104 | 105 | diff = 10 106 | maxiter = 1000 107 | tolv = 10**-9 108 | iter = 1 109 | 110 | # Empty matrices 111 | g = np.zeros((nz,nk)) 112 | V = np.copy(g) 113 | newV = np.copy(g) 114 | indk = np.copy(V) 115 | while (diff > tolv and iter tol) 149 | test2 = (iter < maxiter) 150 | while (test1 and test2): 151 | distnew = np.zeros((nz,nk)) 152 | for j in range(nk): 153 | for i in range(nz): 154 | distnew[:,int(indk[i,j])] = distnew[:,int(indk[i,j])] + dist[i,j]*P[i,:] 155 | error = np.linalg.norm(distnew - dist) 156 | dist = 
np.copy(distnew) 157 | test1 = (error > tol) 158 | test2 = (iter < maxiter) 159 | iter = iter+1 160 | return dist 161 | 162 | 163 | # Function to solve for the equilibrium 164 | @jit 165 | def Aiyagari(k_t,HH): 166 | """Function that completely solves the Aiyagari (1994) model""" 167 | beta, k = HH.beta, HH.k 168 | 169 | iter = 0 170 | error = 10 171 | tol = 0.01 172 | test = (error > tol) 173 | while test: 174 | r = HH.interest(k_t) 175 | if r > 1/beta - 1: 176 | r = 1/beta - 1 177 | iter = iter+1 178 | 179 | # Value function iteration 180 | start1 = time.time() 181 | V, g, indk = Value(r,HH) 182 | stop1 = time.time() 183 | 184 | # Find capital supply 185 | start2 = time.time() 186 | dist = distribution(indk,HH) 187 | stop2 = time.time() 188 | k_s = np.sum(k*np.sum(dist, axis = 0)) 189 | r1 = HH.interest(k_s) 190 | error = np.abs(r-r1)*100 191 | print("\n--------------------------------------------------------------------------------------") 192 | print("The error in iteration %.0F is %F." % (iter, error)) 193 | print("The capital supply is %F, and the interest rate is %F." %(k_s, r*100)) 194 | print("Value function and simulation took %.3F and %.3F seconds respectively" % ((stop1-start1), (stop2-start2))) 195 | if error > 10: 196 | k_t = 0.9*k_t + 0.1*k_s 197 | elif error > 5: 198 | k_t = 0.95*k_t + 0.05*k_s 199 | elif error > 1: 200 | k_t = 0.99*k_t + 0.01*k_s 201 | else: 202 | k_t = 0.995*k_t + 0.005*k_s 203 | #elif error > 0.5: 204 | # k_t = 0.995*k_t + 0.005*k_s 205 | #else: 206 | # k_t = 0.9975*k_t + 0.0025*k_s 207 | #k_t = 0.99*k_t + 0.01*k_s 208 | test = (error > tol) 209 | print("\nThe equilibrium interest rate is %F." % (r*100)) 210 | # Das Ziel sollte 3.87 sein 211 | # Resultat war 3.6498 (Check against QE results) 212 | return V, g, indk, dist, r 213 | 214 | 215 | # Running the function 216 | start = time.time() 217 | V, g, indk, dist, r = Aiyagari(k_t,hh) 218 | stop = time.time() 219 | print("Solving the model took %F seconds." 
%((stop - start))) 220 | 221 | 222 | # Plot the value function and the policy function 223 | fig, axes = plt.subplots(nrows = 2, ncols = 1, figsize=(15,10)) 224 | axes[0].plot(hh.k,V.transpose()) 225 | axes[0].set_title("Value functions") 226 | 227 | axes[1].plot(hh.k,g.transpose()) 228 | axes[1].plot(hh.k,hh.k) 229 | axes[1].set_title('Policy functions') 230 | #plt.show() 231 | #plt.savefig("convergence.png") 232 | 233 | 234 | # Generating the distribution 235 | dist1 = distribution(indk, hh) 236 | dist1 = np.sum(dist1, axis = 0) 237 | 238 | 239 | # Density function 240 | plt.figure(figsize = (15,10)) 241 | plt.plot(hh.k, dist1) 242 | plt.xlabel('Asset Value') 243 | plt.ylabel('Frequency') 244 | plt.title('Asset Distribution') 245 | plt.show() 246 | 247 | 248 | # Monte-Carlo simulation for asset distribution and gini 249 | T = 1000000 250 | mc = hh.mc 251 | k = hh.k 252 | sim = hh.mc.simulate_indices(T, init = int((nz-1)/2)) 253 | @jit 254 | def simulate(mc, indk, l = sim, k = k, N = 5000, T = T): 255 | nz = np.shape(mc.P)[0] 256 | T = np.shape(l)[0] 257 | m = T/N 258 | ind = np.zeros(N) 259 | for n in range(N): 260 | l = np.concatenate((l[int(n*m):T],l[0:int(n*m)])) 261 | temp = indk[int((nz-1)/2),int(nk/2)] 262 | for t in range(T): 263 | temp = indk[int(l[t]),int(temp)] 264 | ind[n] = temp 265 | a = k[np.int64(ind)] 266 | return a 267 | 268 | 269 | # Generate a new distribution 270 | dist2 = simulate(mc,indk,N = 10000) 271 | 272 | 273 | # Plot the distribution 274 | plt.figure(figsize = (15,10)) 275 | n, bins, patches = plt.hist(x=dist2, bins='auto', color='#0504aa',alpha=0.7, rwidth=0.85,histtype="stepfilled") 276 | plt.xlabel('Asset Value') 277 | plt.ylabel('Frequency') 278 | plt.title('Asset Distribution') 279 | plt.show() 280 | #plt.savefig("distribution.png") 281 | 282 | 283 | # Function for the gini coefficient 284 | def gini(x): 285 | # (Warning: This is a concise implementation, but it is O(n**2) 286 | # in time and memory, where n = len(x). *Don't* pass in huge 287 | # samples!) 288 | 289 | # Mean absolute difference 290 | mad = np.abs(np.subtract.outer(x, x)).mean() 291 | # Relative mean absolute difference 292 | rmad = mad/np.mean(x) 293 | # Gini coefficient 294 | g = 0.5 * rmad 295 | return g 296 | 297 | 298 | # Print the output (2.28) 299 | print("\nThe equilibrium interest rate is %F." % (r*100)) 300 | print("Solving the model took %F minutes." %((stop - start)/60)) 301 | print("The gini coefficient for the distribution is %F." %(gini(dist2))) -------------------------------------------------------------------------------- /03 Aiyagari-Value Function-Monte Carlo.py: -------------------------------------------------------------------------------- 1 | """ 2 | 3 | This solves the Aiyagari model with value function iteration. 4 | It uses Monte-Carlo simulation to aggregate the economy. 5 | 6 | """ 7 | 8 | 9 | import numpy as np 10 | import time 11 | import matplotlib.pyplot as plt 12 | import quantecon as qe 13 | from numba import jit 14 | 15 | # Supress warning 16 | import warnings 17 | warnings.filterwarnings("ignore") 18 | 19 | 20 | class HH: 21 | """ 22 | Setups a class containing all necessary information to solve the 23 | Aiyagari (1994) model. 
24 | """ 25 | 26 | def __init__(self, theta=0.36, delta=0.08, sigma=3, 27 | beta=0.96, nz=7, rho=0.9, stdev=0.2, 28 | m=3, nk=500, kmin=10**(-5), kmax=50, 29 | T=1000000): 30 | """Initialize the class with standard parameters""" 31 | self.theta, self.delta, self.sigma = theta, delta, sigma 32 | self.beta, self.nz, self.nk, self.m = beta, nz, nk, m 33 | self.rho, self.stdev, self.T = rho, stdev, T 34 | self.stdz = stdev * (1 - rho**2)**(1 / 2) 35 | self.kmin, self.kmax = kmin, kmax 36 | 37 | # Setting up the grid 38 | self.k = np.zeros(nk) 39 | for i in range(nk): 40 | self.k[i] = kmin + kmax / ((nk + 1)**2.35) * (i**2.35) 41 | 42 | def utility(self, c): 43 | """Utility function depending on the value of sigma""" 44 | if self.sigma == 1: 45 | u = np.log(c) 46 | else: 47 | u = c**(1 - self.sigma) / (1 - self.sigma) 48 | return u 49 | 50 | def interest(self, k): 51 | """Gives back the interest rate, given a capital supply""" 52 | return self.theta * (k / self.l_s)**(self.theta - 1) - self.delta 53 | 54 | def interest_reverse(self, r): 55 | """Given an interest rate, gives back the capital demand""" 56 | return (self.theta / (r + self.delta) 57 | )**(1 / (1 - self.theta)) * self.l_s 58 | 59 | def r_to_w(self, r): 60 | """Given an interest rate, the function calculates the wage""" 61 | return (1 - self.theta) * ((self.theta / (r + self.delta)) 62 | )**(self.theta / (1 - self.theta)) 63 | 64 | def markov(self): 65 | """Approximates the transistion probability of an AR(1) process 66 | using the methodology of Tauchen (1986) using the quantecon package 67 | 68 | Uses the states, and the transition matrix to give back the 69 | transition matrix P, as well as invariante labor supply l_s 70 | """ 71 | self.mc = qe.markov.approximation.tauchen(self.rho, self.stdz, 72 | 0, self.m, self.nz) 73 | 74 | self.sim = self.mc.simulate_indices( 75 | self.T, init=int((self.nz - 1) / 2)) 76 | 77 | self.P = self.mc.P 78 | self.labor_states = np.exp(self.mc.state_values) 79 | inv_l = np.linalg.matrix_power(self.P, 1000) 80 | inv_dist = inv_l[0, :] 81 | inv_l = inv_l / inv_l.sum() 82 | self.l_s = np.dot(self.labor_states, inv_dist) 83 | return self.P, self.l_s, self.sim 84 | 85 | 86 | # Generating a class 87 | nz = 7 88 | nk = 500 89 | sigma = 3 90 | rho = 0.6 91 | hh = HH(nz=nz, nk=nk, rho=rho, sigma=sigma) 92 | 93 | 94 | # Current level of initial guess 95 | P, l_s, sim = hh.markov() 96 | r = (3.87 - 1) / 100 97 | k_t = hh.interest_reverse(r) 98 | 99 | 100 | # Value function iteration 101 | @jit 102 | def Value(r, HH, tolv=10**(-8), maxiter=1000): 103 | """Given a guess for the interest rate, and a HH class the function 104 | calculates a optimal policy function, as well as the associated 105 | value function and indicator function.""" 106 | # Unpacking 107 | w = HH.r_to_w(r) 108 | sigma, beta, P = HH.sigma, HH.beta, HH.P 109 | labor, k = hh.labor_states, hh.k 110 | diff = 10 111 | iter = 1 112 | 113 | # Setting up empty matrices 114 | g = np.zeros((nz, nk)) 115 | V = np.copy(g) 116 | newV = np.copy(g) 117 | indk = np.copy(V) 118 | 119 | # Evaluating the criteria 120 | test1 = (diff > tolv) 121 | test2 = (iter < maxiter) 122 | while (test1 and test2): 123 | for iz in range(nz): 124 | for ik in range(nk): 125 | # Calculating consumption and it's utility 126 | c = w * labor[iz] + (1 + r) * k[ik] - k 127 | if sigma != 1: 128 | u = c**(1 - sigma) / (1 - sigma) 129 | else: 130 | u = np.log(np.abs(c)) 131 | # Panelizing negative consumption 132 | u[c < 0] = -1000000 133 | # Get the value function, maximize and update 
policy function 134 | v = u + beta * (np.dot(P[iz, :], V)) 135 | ind = np.argmax(v) 136 | newV[iz, ik] = v[ind] 137 | indk[iz, ik] = ind 138 | g[iz, ik] = k[ind] 139 | diff = np.linalg.norm(newV - V) 140 | V = np.copy(newV) 141 | iter += 1 142 | return V, g, indk 143 | 144 | 145 | # Simulate the economy and find capital supply 146 | @jit 147 | def simulate(HH, indk, N=5000): 148 | """Given the HH class and an indicator function associated to a policy 149 | function, we simulate the economy forward for N=5000 individuals, 150 | until we reach a stationary distribution.""" 151 | # Unpacking parameters 152 | mc, lab, k, T = HH.mc, HH.sim, HH.k, HH.T 153 | nz = np.shape(mc.P)[0] 154 | T = np.shape(lab)[0] 155 | m = T / N 156 | ind = np.zeros(N) 157 | for n in range(N): 158 | # Resample the original markov chain 159 | lab = np.concatenate((lab[int(n * m):T], lab[0:int(n * m)])) 160 | temp = indk[int((nz - 1) / 2), int(nk / 2)] 161 | # Given the markov chain simulate the wealth development 162 | for t in range(T): 163 | temp = indk[int(lab[t]), int(temp)] 164 | ind[n] = temp 165 | a = k[np.int64(ind)] 166 | return a 167 | 168 | 169 | # Function to solve for the equilibrium 170 | @jit 171 | def Aiyagari(HH, k_t): 172 | """Function that completely solves the Aiyagari (1994) model.""" 173 | # Extracting parameters and setting up initial values 174 | beta = HH.beta 175 | iter = 0 176 | error = 10 177 | tol = 0.01 178 | test = (error > tol) 179 | while test: 180 | # Extracting the interest rate and providing an upper bound 181 | r = HH.interest(k_t) 182 | if r > 1 / beta - 1: 183 | r = 1 / beta - 1 184 | iter = iter + 1 185 | 186 | # Value function iteration 187 | start1 = time.time() 188 | V, g, indk = Value(r, HH) 189 | stop1 = time.time() 190 | 191 | # Find capital supply 192 | start2 = time.time() 193 | a = simulate(HH, indk) 194 | stop2 = time.time() 195 | k_s = np.mean(a) 196 | # Getting an interest rate and checking the error 197 | r1 = HH.interest(k_s) 198 | error = np.abs(r - r1) * 100 199 | print("\n--------------------------------------------------------------------------------------") 200 | print("The error in iteration %.0F is %F." % (iter, error)) 201 | print("The capital supply is %F, and the interest rate is %F." % 202 | (k_s, r * 100)) 203 | print("Value function and simulation took %.3F and %.3F seconds respectively" % ( 204 | (stop1 - start1), (stop2 - start2))) 205 | # Gradient updating of the capital guess 206 | if error > 2: 207 | k_t = 0.95 * k_t + 0.05 * k_s 208 | elif error > 0.5: 209 | k_t = 0.99 * k_t + 0.01 * k_s 210 | elif error > 0.05: 211 | k_t = 0.995 * k_t + 0.005 * k_s 212 | else: 213 | k_t = 0.999 * k_t + 0.001 * k_s 214 | test = (error > tol) 215 | print("\nThe equilibrium interest rate is %F." % (r * 100)) 216 | return V, g, indk, a, r 217 | 218 | 219 | # Running the function 220 | start = time.time() 221 | V, g, indk, a, r = Aiyagari(hh, k_t) 222 | stop = time.time() 223 | print("Solving the model took %F minutes." 
% ((stop - start) / 60)) 224 | 225 | 226 | # Plot the value function and the policy function 227 | fig, axes = plt.subplots(nrows=2, ncols=1, figsize=(15, 10)) 228 | axes[0].plot(hh.k, V.transpose()) 229 | axes[0].set_title("Value functions") 230 | 231 | axes[1].plot(hh.k, g.transpose()) 232 | axes[1].plot(hh.k, hh.k) 233 | axes[1].set_title('Policy functions') 234 | #plt.show() 235 | #plt.savefig("convergence.png") 236 | 237 | 238 | # Generate a new distribution 239 | dist = simulate(hh, indk) 240 | 241 | 242 | # Plot the distribution 243 | plt.figure(figsize=(15, 10)) 244 | n, bins, patches = plt.hist( 245 | x=dist, bins='auto', color='#0504aa', alpha=0.7, rwidth=0.85, histtype="stepfilled") 246 | plt.xlabel('Asset Value') 247 | plt.ylabel('Frequency') 248 | plt.title('Asset Distribution') 249 | plt.show() 250 | #plt.savefig("distribution.png") 251 | 252 | 253 | # Function for the gini coefficient 254 | def gini(x): 255 | # (Warning: This is a concise implementation, but it is O(n**2) 256 | # in time and memory, where n = len(x). *Don't* pass in huge 257 | # samples!) 258 | 259 | # Mean absolute difference 260 | mad = np.abs(np.subtract.outer(x, x)).mean() 261 | # Relative mean absolute difference 262 | rmad = mad / np.mean(x) 263 | # Gini coefficient 264 | g = 0.5 * rmad 265 | return g 266 | 267 | 268 | print("\nThe equilibrium interest rate is %F." % (r * 100)) 269 | print("Solving the model took %F minutes." % ((stop - start) / 60)) 270 | print("The gini coefficient for the distribution is %F." % (gini(dist))) 271 | -------------------------------------------------------------------------------- /04 Bilbiie Income Risk.py: -------------------------------------------------------------------------------- 1 | """ 2 | Solves the classic RBC model using Perturbation 3 | Extension of the McKay material with respect to plotting and statistics 4 | Reference: https://alisdairmckay.com/Notes/HetAgents/index.html 5 | """ 6 | 7 | import autograd.numpy as np 8 | from autograd import jacobian 9 | np.set_printoptions(suppress=True,precision=4) 10 | import matplotlib.pyplot as plt 11 | import warnings 12 | import scipy.optimize as opt 13 | from prettytable import PrettyTable 14 | 15 | 16 | # Number of Variables 17 | nX = 16 18 | # Number of shocks 19 | nEps = 2 20 | # Indexing the variables 21 | iCs, iCh, iC, iNs, iNh, iN, iY, iPi, iD, iV, iW, iI, iL, iS, iXI, iXS = range(nX) 22 | 23 | 24 | # Parameters 25 | beta = 0.99 26 | sigma = 2.0 27 | varphi = 2.0 28 | eta = 11 29 | psi = 200 30 | tauD = 0.15 31 | tauS = 0.0 # no profits under: eta / (eta - 1.0) - 1.0 32 | L = 0.15 # Share of HtM 33 | s = 0.96 34 | h = 2 - s - (1 - s) / L 35 | rhoI = 0.75 36 | rhoS = 0.5 37 | ps_Y = 0.0 38 | phi = 2.5 39 | 40 | 41 | # Defining the marginal utility functions 42 | Uc = lambda C: C ** (-sigma) 43 | inv_Uc = lambda Uc: Uc ** (-1 / sigma) 44 | Un = lambda N, chi: chi * N ** varphi 45 | inv_Un = lambda Un, chi: (1 / chi * Un) ** (1 / varphi) 46 | 47 | 48 | # Given an initial guess for consumption gives back error 49 | def fChi(Chi): 50 | C = 1.0 51 | L = (1 - s) / (2 - s - h) 52 | S = s 53 | W = (1 + tauS) * (eta - 1.0) / eta 54 | Y = C 55 | N = Y 56 | 57 | # Calculated values 58 | D = (1+tauS) * Y - W * N - tauS * Y 59 | fCh = lambda Ch: W * inv_Un(W * Uc(Ch), Chi) + tauD / L * D - Ch 60 | fCs = lambda Cs: W * inv_Un(W * Uc(Cs), Chi) + (1 - tauD) / (1 - L) * D - Cs 61 | Ch = opt.fsolve(fCh, 0.95)[0] 62 | Cs = opt.fsolve(fCs, 1.05)[0] 63 | 64 | return Ch * L + Cs * (1 - L) - C 65 | 66 | # Finding the value for Chi 
s.t. C = 1.0 67 | Chi = opt.fsolve(fChi, 1.0)[0] 68 | 69 | 70 | 71 | # Defining a function, which gives back the steady state, Y = C = 1.0 normalized 72 | def SteadyState(): 73 | # Given / normalized values 74 | L = (1 - s) / (2 - s - h) 75 | Pi = 1.0 76 | S = s 77 | W = (1 + tauS) * (eta - 1.0) / eta 78 | C = 1.0 79 | Y = C 80 | N = Y 81 | 82 | # Calculated values 83 | D = (1+tauS) * Y - W * N - tauS * Y 84 | V = beta / (1-beta) * D 85 | 86 | # Finding consumption choice 87 | fCh = lambda Ch: W * inv_Un(W * Uc(Ch), Chi) + tauD / L * D - Ch 88 | fCs = lambda Cs: W * inv_Un(W * Uc(Cs), Chi) + (1 - tauD) / (1 - L) * D - Cs 89 | Ch = opt.fsolve(fCh, 0.95)[0] 90 | Cs = opt.fsolve(fCs, 1.05)[0] 91 | 92 | # Finding labor choice 93 | Ns = inv_Un(W * Uc(Cs), Chi) ** (1 / varphi) 94 | Nh = inv_Un(W * Uc(Ch), Chi) ** (1 / varphi) 95 | 96 | # Aggregation 97 | N = L * Nh + (1 - L) * Ns 98 | 99 | # Closing the system 100 | I = 1.0 / (beta * (S + (1 - S) * Uc(Ch/Cs))) * Pi ** phi 101 | XI = 1.0 102 | XS = 1.0 103 | 104 | X = np.zeros(nX) 105 | X[[iCs, iCh, iC, iNs, iNh, iN, iY, iPi, iD, iV, iW, iI, iL, iS, iXI, iXS]] = (Cs, Ch, C, Ns, Nh, N, Y, Pi, D, V, W, I, L, S, XI, XS) 106 | return X 107 | 108 | # Adjusting the necessary parameters to incorporate inequality 109 | 110 | 111 | 112 | # Get the steady state 113 | table = PrettyTable() 114 | X_SS = SteadyState() 115 | table.add_column("Variables", ["CS", "CH", "C", "Ns", "Nh", "N", "Y", "Pi", "D", "V", "W", "I", "L", "S", "Shock I", "Shock S"]) 116 | epsilon_SS = np.zeros(2) 117 | table.add_column("Values", np.round(X_SS, 4)) 118 | print(" ") 119 | print(table) 120 | # print("Variables: {}".format(X_EXP)) 121 | # print("Steady state: {}".format(X_SS)) 122 | 123 | 124 | # Model equations 125 | def F(X_Lag,X,X_Prime,epsilon,XSS): 126 | 127 | # Unpack 128 | epsilon_i, epsilon_s = epsilon 129 | Cs, Ch, C, Ns, Nh, N, Y, Pi, D, V, W, I, L, S, XI, XS = X 130 | Cs_L, Ch_L, C_L, Ns_L, Nh_L, N_L, Y_L, Pi_L, D_L, V_L, W_L, I_L, L_L, S_L, XI_L, XS_L = X_Lag 131 | Cs_P, Ch_P, C_P, Ns_P, Nh_P, N_P, Y_P, Pi_P, D_P, V_P, W_P, I_P, L_P, S_P, XI_P, XS_P = X_Prime 132 | Cs_SS, Ch_SS, C_SS, Ns_SS, Nh_SS, N_SS, Y_SS, Pi_SS, D_SS, V_SS, W_SS, I_SS, L_SS, S_SS, XI_SS, XS_SS = XSS 133 | return np.hstack(( 134 | # Shocks 135 | XI - XI_L ** rhoI * np.exp(epsilon_i), # Transition of MP shock 136 | XS - XS_L ** rhoS * np.exp(epsilon_s), # Transition of risk shock 137 | 138 | # Household 139 | S / s - XS * (Y / Y_SS) ** ps_Y, # Idiosyncratic risk 140 | Uc(Cs) - beta * I / Pi_P * (S_P * Uc(Cs_P) + (1 - S_P) * Uc(Ch_P)), # Euler equation bonds 141 | Uc(Cs) - beta * (V_P + D_P) / V * Uc(Cs_P), # Euler equation stocks 142 | Cs - W * Ns - (1 - tauD) / (1 - L) * D, # BC of saver 143 | Ch - W * Nh - tauD / L * D, # BC of HtM household 144 | Un(Nh, Chi) - W * Uc(Ch), # Labor supply of HtM household 145 | Un(Ns, Chi) - W * Uc(Cs), # Labor supply of Saver household 146 | 147 | # Distributional changes 148 | L - h * L_L - (1 - S) * (1 - L_L), # Distribution changes over time 149 | 150 | # Aggregation 151 | C - L * Ch - (1-L) * Cs, # Aggregate consumption 152 | N - L * Nh - (1-L) * Ns, # Aggregate labor supply 153 | #C - (1 - psi / 2 * (Pi - 1.0) ** 2) * Y, # Goods market clearing 154 | 155 | # Firms 156 | Y - N, # Production function 157 | D - (1 + tauS) * Y + W * N + tauS * Y, # Profits 158 | (Pi - 1.0) * Pi - beta * (Uc(Cs_P / Cs) * Y_P / Y * (Pi_P - 1.0) * Pi_P) - eta / psi * (W - 1 / (1 / (1 + tauS) * eta / (eta - 1))), # Phillips curve 159 | 160 | # Monetary policy 161 | I - I_SS * Pi ** 
phi * XI # Taylor rule 162 | )) 163 | 164 | 165 | # Check whether at the steady state F is zero 166 | print(" ") 167 | print("F at the steady state has the values:") 168 | print(F(X_SS,X_SS,X_SS,epsilon_SS, X_SS)) 169 | assert(np.allclose( F(X_SS,X_SS,X_SS,epsilon_SS, X_SS) , np.zeros(nX))) 170 | 171 | 172 | # Compute the numerical derivative 173 | A = jacobian(lambda x: F(X_SS,X_SS,x,epsilon_SS,X_SS))(X_SS) 174 | B = jacobian(lambda x: F(X_SS,x,X_SS,epsilon_SS,X_SS))(X_SS) 175 | C = jacobian(lambda x: F(x,X_SS,X_SS,epsilon_SS,X_SS))(X_SS) 176 | E = jacobian(lambda x: F(X_SS,X_SS,X_SS,x,X_SS))(epsilon_SS) 177 | 178 | 179 | # Function to solve the system based on McKays material 180 | def SolveSystem(A,B,C,E,P0=None): 181 | # Solve the system using linear time iteration as in Rendahl (2017) 182 | #print("Solving the system") 183 | MAXIT = 1000 184 | if P0 is None: 185 | P = np.zeros(A.shape) 186 | else: 187 | P = P0 188 | 189 | S = np.zeros(A.shape) 190 | 191 | for it in range(MAXIT): 192 | P = -np.linalg.lstsq(B+A@P,C,rcond=None)[0] 193 | S = -np.linalg.lstsq(B+C@S,A,rcond=None)[0] 194 | test = np.max(np.abs(C+B@P+A@P@P)) 195 | #if it % 20 == 0: 196 | #print(test) 197 | if test < 1e-10: 198 | break 199 | 200 | 201 | if it == MAXIT-1: 202 | warnings.warn('LTI did not converge.') 203 | 204 | 205 | # test Blanchard-Kahn conditions 206 | if np.max(np.linalg.eig(P)[0]) >1: 207 | raise RuntimeError("Model does not satisfy BK conditions -- non-existence") 208 | 209 | if np.max(np.linalg.eig(S)[0]) >1: 210 | raise RuntimeError("Model does not satisfy BK conditions -- mulitple stable solutions") 211 | 212 | # Impact matrix 213 | # Solution is x_{t}=P*x_{t-1}+Q*eps_t 214 | Q = -np.linalg.inv(B+A@P) @ E 215 | 216 | return P, Q 217 | 218 | 219 | # Using the function to solve the system 220 | P, Q = SolveSystem(A,B,C,E) 221 | 222 | 223 | # Calculate an impulse response 224 | T = 15 225 | IRF_MP = np.zeros((nX,T)) 226 | IRF_S = np.copy(IRF_MP) 227 | # First shock is monetary, second is risk 228 | shockMP = np.array((0.01, 0.0)) 229 | shockS = np.array((0.0, -0.025)) 230 | IRF_MP[:,0] = np.transpose(Q @ shockMP) 231 | IRF_S[:,0] = np.transpose(Q @ shockS) 232 | 233 | # Impulse response functions for 100 periods 234 | for t in range(1,T): 235 | IRF_MP[:,t] = P @ IRF_MP[:,t-1] 236 | IRF_S[:,t] = P @ IRF_S[:,t-1] 237 | 238 | # Drop all IRFs that are below e**(-15) 239 | criterion_MP = ((np.abs(IRF_MP) < 10**(-10))) 240 | criterion_S = ((np.abs(IRF_S) < 10**(-10))) 241 | IRF_MP[criterion_MP] = 0.0 242 | IRF_S[criterion_S] = 0.0 243 | 244 | # Dividend is zero in the steady state 245 | floors = [ f for f in range(nX) if f != 8 or 12] 246 | 247 | # Normalizing with respect to the steady state 248 | for i in floors: 249 | IRF_MP[i,:] = IRF_MP[i,:] * 100 250 | IRF_S[i,:] = IRF_S[i,:] * 100 251 | 252 | 253 | # List with the variable names 254 | names = ["CS", "CH", "C", "Ns", "Nh", "N", "Y", "Pi", "D", "V", "W", "I", "L", "S", "XI", "XS"] 255 | 256 | IRFS = PrettyTable() 257 | IRFS.add_column("IRFs", IRF_MP) 258 | print(IRFS) 259 | 260 | # Plotting the results of the IRF to a MP shock 261 | fig, axes = plt.subplots(nrows = 4, ncols = 4, figsize = (10,6)) 262 | for i in range(nX): 263 | row = i // 4 # Ganzahlige Division 264 | col = i % 4 # Rest 265 | axes[row, col].plot(IRF_MP[i,:]) 266 | axes[row, col].plot(np.zeros(T)) 267 | axes[row, col].set_title(names[i]) 268 | fig.tight_layout() 269 | plt.show() 270 | 271 | # Plotting the results of the IRF to a S shock 272 | fig, axes = plt.subplots(nrows = 4, ncols = 4, 
figsize = (10,6)) 273 | for i in range(nX): 274 | row = i // 4 # Ganzahlige Division 275 | col = i % 4 # Rest 276 | axes[row, col].plot(IRF_S[i,:]) 277 | axes[row, col].plot(np.zeros(T)) 278 | axes[row, col].set_title(names[i]) 279 | fig.tight_layout() 280 | plt.show() -------------------------------------------------------------------------------- /04 Bilbiie Sticky Wages.py: -------------------------------------------------------------------------------- 1 | """ 2 | Solves the classic RBC model using Perturbation 3 | Extension of the McKay material with respect to plotting and statistics 4 | Reference: https://alisdairmckay.com/Notes/HetAgents/index.html 5 | """ 6 | 7 | 8 | import autograd.numpy as np 9 | from autograd import jacobian 10 | np.set_printoptions(suppress=True,precision=4) 11 | import matplotlib.pyplot as plt 12 | import warnings 13 | 14 | 15 | 16 | # Number of Variables 17 | nX = 12 18 | # Number of shocks 19 | nEps = 1 20 | # Indexing the variables 21 | iCs, iCh, iC, iNs, iNh, iN, iY, iPi, iD, iW, iI, iX = range(nX) 22 | 23 | 24 | # Parameters 25 | beta = 0.99 26 | sigma = 2 27 | varphi = 1.0 28 | eta = 11 29 | psi = 500 30 | tauD = 0 31 | tauS = eta / (eta - 1.0) - 1.0 32 | s = 0.98 33 | rho = 0.7 34 | lambdas = 0.22 35 | phi = 1.5 36 | 37 | 38 | # Defining a function, which gives back the steady state 39 | def SteadyState(): 40 | Cs = 1.0 41 | Ch = 1.0 42 | C = lambdas * Ch + (1-lambdas) * Cs 43 | W = 1.0 44 | Ns = (W * Cs ** (-sigma)) ** (1 / varphi) 45 | Nh = (W * Ch ** (-sigma)) ** (1 / varphi) 46 | N = lambdas * Nh + (1-lambdas) * Ns 47 | Pi = 1.0 48 | Y = C 49 | D = (1+tauS) * Y - W * N - tauS * Y 50 | I = 1.0 / beta * Pi ** phi 51 | EX = 1.0 52 | 53 | X = np.zeros(nX) 54 | X[[iCs, iCh, iC, iNs, iNh, iN, iY, iPi, iD, iW, iI, iX]] = (Cs, Ch, C, Ns, Nh, N, Y, Pi, D, W, I, EX) 55 | return X 56 | 57 | 58 | # Get the steady state 59 | X_SS = SteadyState() 60 | X_EXP = np.array(("CS", "CH", "C", "Ns", "Nh", "N", "Y", "Pi", "D", "W", "I", "P", "Shock")) 61 | epsilon_SS = np.zeros(1) 62 | print("Variables: {}".format(X_EXP)) 63 | print("Steady state: {}".format(X_SS)) 64 | 65 | 66 | # Model equations 67 | def F(X_Lag,X,X_Prime,epsilon): 68 | 69 | # Unpack 70 | epsilon_m = epsilon 71 | Cs, Ch, C, Ns, Nh, N, Y, Pi, D, W, I, EX = X 72 | Cs_L, Ch_L, C_L, Ns_L, Nh_L, N_L, Y_L, Pi_L, D_L, W_L, I_L, EX_L = X_Lag 73 | Cs_P, Ch_P, C_P, Ns_P, Nh_P, N_P, Y_P, Pi_P, D_P, W_P, I_P, EX_P = X_Prime 74 | return np.hstack(( 75 | Cs ** (-sigma) - beta * I / Pi_P * (s * Cs_P ** (-sigma) + (1 - s) * Ch_P ** (-sigma)), # Euler equation 76 | Cs - W * Ns - (1 - tauD) / (1 - lambdas) * D, # BC of saver 77 | Ch - W * Nh - tauD / lambdas * D, # BC of HtM household 78 | Nh ** varphi - W * Ch ** (-sigma), # Labor supply of HtM household 79 | Ns ** varphi - W * Cs ** (-sigma), # Labor supply of Saver household 80 | C - lambdas * Ch - (1-lambdas) * Cs, # Aggregate consumption 81 | N - lambdas * Nh - (1-lambdas) * Ns, # Aggregate labor supply 82 | D - (1 + tauS) * Y + W * N + tauS * Y, # Profits 83 | (Pi - 1.0) * Pi - beta * ((Cs_P / Cs) ** (-sigma) * Y / Y_P * (Pi_P - 1.0) * Pi_P) - eta / psi * (W - 1 / (1 / (1 + tauS) * eta / (eta - 1))), # Phillips curve 84 | Y - N, # Production function 85 | #C - (1 - psi / 2 * (Pi - 1.0) ** 2) * Y, # Goods market clearing 86 | I - 1 / beta * Pi ** phi * EX, # Taylor rule 87 | EX - EX_L ** rho * np.exp(epsilon_m) # Transition of shock 88 | )) 89 | 90 | 91 | # Check whether at the steady state F is zero 92 | print(F(X_SS,X_SS,X_SS,epsilon_SS)) 93 | assert(np.allclose( 
F(X_SS,X_SS,X_SS,epsilon_SS) , np.zeros(nX))) 94 | 95 | 96 | # Compute the numerical derivative 97 | A = jacobian(lambda x: F(X_SS,X_SS,x,epsilon_SS))(X_SS) 98 | B = jacobian(lambda x: F(X_SS,x,X_SS,epsilon_SS))(X_SS) 99 | C = jacobian(lambda x: F(x,X_SS,X_SS,epsilon_SS))(X_SS) 100 | E = jacobian(lambda x: F(X_SS,X_SS,X_SS,x))(epsilon_SS) 101 | 102 | 103 | # Function to solve the system based on McKays material 104 | def SolveSystem(A,B,C,E,P0=None): 105 | # Solve the system using linear time iteration as in Rendahl (2017) 106 | #print("Solving the system") 107 | MAXIT = 1000 108 | if P0 is None: 109 | P = np.zeros(A.shape) 110 | else: 111 | P = P0 112 | 113 | S = np.zeros(A.shape) 114 | 115 | for it in range(MAXIT): 116 | P = -np.linalg.lstsq(B+A@P,C,rcond=None)[0] 117 | S = -np.linalg.lstsq(B+C@S,A,rcond=None)[0] 118 | test = np.max(np.abs(C+B@P+A@P@P)) 119 | #if it % 20 == 0: 120 | #print(test) 121 | if test < 1e-10: 122 | break 123 | 124 | 125 | if it == MAXIT-1: 126 | warnings.warn('LTI did not converge.') 127 | 128 | 129 | # test Blanchard-Kahn conditions 130 | if np.max(np.linalg.eig(P)[0]) >1: 131 | raise RuntimeError("Model does not satisfy BK conditions -- non-existence") 132 | 133 | if np.max(np.linalg.eig(S)[0]) >1: 134 | raise RuntimeError("Model does not satisfy BK conditions -- mulitple stable solutions") 135 | 136 | # Impact matrix 137 | # Solution is x_{t}=P*x_{t-1}+Q*eps_t 138 | Q = -np.linalg.inv(B+A@P) @ E 139 | 140 | return P, Q 141 | 142 | 143 | # Using the function to solve the system 144 | P, Q = SolveSystem(A,B,C,E) 145 | 146 | 147 | # Calculate an impulse response 148 | T = 20 149 | IRF_RBC = np.zeros((nX,T)) 150 | IRF_RBC[:,0] = np.dot(Q, np.array(0.01))[:,0] 151 | 152 | 153 | # Impulse response functions for 100 periods 154 | for t in range(1,T): 155 | IRF_RBC[:,t] = P@IRF_RBC[:,t-1] 156 | 157 | 158 | # Normalizing with respect to the steady state 159 | floors = [ f for f in range(nX) if f != 8 ] 160 | for i in floors: 161 | IRF_RBC[i,:] = IRF_RBC[i,:] / X_SS[i] * 100 162 | 163 | 164 | # List with the variable names 165 | names = ["CS", "CH", "C", "Ns", "Nh", "N", "Y", "Pi", "D", "W", "I", "X"] 166 | 167 | 168 | # Plotting the results of the IRF 169 | fig, axes = plt.subplots(nrows = 4, ncols = 3, figsize = (10,5)) 170 | for i in range(nX): 171 | row = i // 3 # Ganzahlige Division 172 | col = i % 3 # Rest 173 | axes[row, col].plot(IRF_RBC[i,:]) 174 | axes[row, col].plot(np.zeros(T)) 175 | axes[row, col].set_title(names[i]) 176 | fig.tight_layout() 177 | plt.show() 178 | 179 | -------------------------------------------------------------------------------- /04 Bilbiie.py: -------------------------------------------------------------------------------- 1 | """ 2 | Solves the classic RBC model using Perturbation 3 | Extension of the McKay material with respect to plotting and statistics 4 | Reference: https://alisdairmckay.com/Notes/HetAgents/index.html 5 | """ 6 | 7 | 8 | import autograd.numpy as np 9 | from autograd import jacobian 10 | np.set_printoptions(suppress=True,precision=4) 11 | import matplotlib.pyplot as plt 12 | import warnings 13 | from prettytable import PrettyTable 14 | 15 | 16 | # Number of Variables 17 | nX = 12 18 | # Number of shocks 19 | nEps = 1 20 | # Indexing the variables 21 | iCs, iCh, iC, iNs, iNh, iN, iY, iPi, iD, iW, iI, iX = range(nX) 22 | 23 | 24 | # Parameters 25 | beta = 0.99 26 | sigma = 2 27 | varphi = 1.0 28 | eta = 11 29 | psi = 500 30 | tauD = 0 31 | tauS = eta / (eta - 1.0) - 1.0 32 | s = 0.98 33 | rho = 0.7 34 | lambdas = 
0.22 35 | phi = 1.5 36 | 37 | 38 | # Defining a function, which gives back the steady state 39 | def SteadyState(): 40 | Cs = 1.0 41 | Ch = 1.0 42 | C = lambdas * Ch + (1-lambdas) * Cs 43 | W = 1.0 44 | Ns = (W * Cs ** (-sigma)) ** (1 / varphi) 45 | Nh = (W * Ch ** (-sigma)) ** (1 / varphi) 46 | N = lambdas * Nh + (1-lambdas) * Ns 47 | Pi = 1.0 48 | Y = C 49 | D = (1+tauS) * Y - W * N - tauS * Y 50 | I = 1.0 / beta * Pi ** phi 51 | EX = 1.0 52 | 53 | X = np.zeros(nX) 54 | X[[iCs, iCh, iC, iNs, iNh, iN, iY, iPi, iD, iW, iI, iX]] = (Cs, Ch, C, Ns, Nh, N, Y, Pi, D, W, I, EX) 55 | return X 56 | 57 | 58 | # Get the steady state 59 | table = PrettyTable() 60 | X_SS = SteadyState() 61 | table.add_column("Variables", ["CS", "CH", "C", "Ns", "Nh", "N", "Y", "Pi", "D", "W", "I", "Shock"]) 62 | epsilon_SS = np.zeros(1) 63 | table.add_column("Values", np.round(X_SS, 4)) 64 | print(" ") 65 | print(table) 66 | # print("Variables: {}".format(X_EXP)) 67 | # print("Steady state: {}".format(X_SS)) 68 | 69 | 70 | # Model equations 71 | def F(X_Lag,X,X_Prime,epsilon): 72 | 73 | # Unpack 74 | epsilon_m = epsilon 75 | Cs, Ch, C, Ns, Nh, N, Y, Pi, D, W, I, EX = X 76 | Cs_L, Ch_L, C_L, Ns_L, Nh_L, N_L, Y_L, Pi_L, D_L, W_L, I_L, EX_L = X_Lag 77 | Cs_P, Ch_P, C_P, Ns_P, Nh_P, N_P, Y_P, Pi_P, D_P, W_P, I_P, EX_P = X_Prime 78 | return np.hstack(( 79 | Cs ** (-sigma) - beta * I / Pi_P * (s * Cs_P ** (-sigma) + (1 - s) * Ch_P ** (-sigma)), # Euler equation 80 | Cs - W * Ns - (1 - tauD) / (1 - lambdas) * D, # BC of saver 81 | Ch - W * Nh - tauD / lambdas * D, # BC of HtM household 82 | Nh ** varphi - W * Ch ** (-sigma), # Labor supply of HtM household 83 | Ns ** varphi - W * Cs ** (-sigma), # Labor supply of Saver household 84 | C - lambdas * Ch - (1-lambdas) * Cs, # Aggregate consumption 85 | N - lambdas * Nh - (1-lambdas) * Ns, # Aggregate labor supply 86 | D - (1 + tauS) * Y + W * N + tauS * Y, # Profits 87 | (Pi - 1.0) * Pi - beta * ((Cs_P / Cs) ** (-sigma) * Y_P / Y * (Pi_P - 1.0) * Pi_P) - eta / psi * (W - 1 / (1 / (1 + tauS) * eta / (eta - 1))), # Phillips curve 88 | Y - N, # Production function 89 | #C - (1 - psi / 2 * (Pi - 1.0) ** 2) * Y, # Goods market clearing 90 | I - 1 / beta * Pi ** phi * EX, # Taylor rule 91 | EX - EX_L ** rho * np.exp(epsilon_m) # Transition of shock 92 | )) 93 | 94 | 95 | # Check whether at the steady state F is zero 96 | print(F(X_SS,X_SS,X_SS,epsilon_SS)) 97 | assert(np.allclose( F(X_SS,X_SS,X_SS,epsilon_SS) , np.zeros(nX))) 98 | 99 | 100 | # Compute the numerical derivative 101 | A = jacobian(lambda x: F(X_SS,X_SS,x,epsilon_SS))(X_SS) 102 | B = jacobian(lambda x: F(X_SS,x,X_SS,epsilon_SS))(X_SS) 103 | C = jacobian(lambda x: F(x,X_SS,X_SS,epsilon_SS))(X_SS) 104 | E = jacobian(lambda x: F(X_SS,X_SS,X_SS,x))(epsilon_SS) 105 | 106 | 107 | # Function to solve the system based on McKays material 108 | def SolveSystem(A,B,C,E,P0=None): 109 | # Solve the system using linear time iteration as in Rendahl (2017) 110 | #print("Solving the system") 111 | MAXIT = 1000 112 | if P0 is None: 113 | P = np.zeros(A.shape) 114 | else: 115 | P = P0 116 | 117 | S = np.zeros(A.shape) 118 | 119 | for it in range(MAXIT): 120 | P = -np.linalg.lstsq(B+A@P,C,rcond=None)[0] 121 | S = -np.linalg.lstsq(B+C@S,A,rcond=None)[0] 122 | test = np.max(np.abs(C+B@P+A@P@P)) 123 | #if it % 20 == 0: 124 | #print(test) 125 | if test < 1e-10: 126 | break 127 | 128 | 129 | if it == MAXIT-1: 130 | warnings.warn('LTI did not converge.') 131 | 132 | 133 | # test Blanchard-Kahn conditions 134 | if np.max(np.linalg.eig(P)[0]) >1: 
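        # The solution has the recursive form x_t = P @ x_{t-1} + Q @ eps_t (see the
        # impact-matrix step below), so an eigenvalue of P outside the unit circle
        # makes the state recursion explosive -- the non-existence case flagged here.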
135 |         raise RuntimeError("Model does not satisfy BK conditions -- non-existence")
136 | 
137 |     if np.max(np.linalg.eig(S)[0]) >1:
138 |         raise RuntimeError("Model does not satisfy BK conditions -- multiple stable solutions")
139 | 
140 |     # Impact matrix
141 |     # Solution is x_{t}=P*x_{t-1}+Q*eps_t
142 |     Q = -np.linalg.inv(B+A@P) @ E
143 | 
144 |     return P, Q
145 | 
146 | 
147 | # Using the function to solve the system
148 | P, Q = SolveSystem(A,B,C,E)
149 | 
150 | 
151 | # Calculate an impulse response
152 | T = 20
153 | IRF_RBC = np.zeros((nX,T))
154 | IRF_RBC[:,0] = np.dot(Q, np.array(0.01))[:,0]
155 | 
156 | 
157 | # Impulse response functions for the remaining periods
158 | for t in range(1,T):
159 |     IRF_RBC[:,t] = P@IRF_RBC[:,t-1]
160 | 
161 | 
162 | # Normalizing with respect to the steady state
163 | floors = [ f for f in range(nX) if f != 8 ]
164 | for i in floors:
165 |     IRF_RBC[i,:] = IRF_RBC[i,:] / X_SS[i] * 100
166 | 
167 | 
168 | # List with the variable names
169 | names = ["CS", "CH", "C", "Ns", "Nh", "N", "Y", "Pi", "D", "W", "I", "X"]
170 | 
171 | 
172 | # Plotting the results of the IRF
173 | fig, axes = plt.subplots(nrows = 4, ncols = 3, figsize = (10,5))
174 | for i in range(nX):
175 |     row = i // 3 # Integer division
176 |     col = i % 3  # Remainder
177 |     axes[row, col].plot(IRF_RBC[i,:])
178 |     axes[row, col].plot(np.zeros(T))
179 |     axes[row, col].set_title(names[i])
180 | fig.tight_layout()
181 | plt.show()
182 | 
183 | 
--------------------------------------------------------------------------------
/README.md:
--------------------------------------------------------------------------------
1 | # Macro-in-Python
2 | ### Solution of Macroeconomic Models in Python
3 | 
4 | These files contain my (amateur) approach to solving macroeconomic models in Python. The code is written neither for elegance nor for speed, so there is plenty of room for optimization, and comments are welcome.
5 | 
6 | A large part of the code is based on the following resources:
7 | 
8 | - Introduction to Computational Economics Using Fortran by Fehr and Kindermann, 2018
9 | - Recursive Macroeconomic Theory by Ljungqvist and Sargent, 2012
10 | - Dynamic General Equilibrium Modeling by Heer and Maussner, 2009
11 | - João B. Duarte's Ph.D. course on Macro in Python (https://github.com/jbduarte/Advanced_Macro)
12 | - McKay's short course on heterogeneous agent macroeconomics (https://alisdairmckay.com/Notes/HetAgents/index.html)
13 | 
14 | At the moment the code includes:
15 | 
16 | - Simple stochastic growth model (Value function iteration, Howard improvement algorithm, Endogenous Grid Method, and Perturbation)
17 | - Simple RBC model with labor choice (Value function iteration, Howard improvement algorithm, and Perturbation)
18 | - A variety of versions of the RBC model with different frictions (Perturbation only)
19 | - Aiyagari model with aggregation using Monte Carlo simulation and an invariant distribution (Value function iteration, and Howard improvement algorithm)
--------------------------------------------------------------------------------