Coverage for HARK/ConsumptionSaving/ConsNewKeynesianModel.py: 89% (287 statements)
1"""
2This file has a slightly modified and extended version of ConsIndShock that is
3meant to be used in heterogeneous agents new Keynesian (HANK) models. The micro-
4economic model is identical, but additional primitive parameters have been added
5to the specification of the income process. These parameters would have no inde-
6pendent meaning in a "micro only" setting, but with dynamic equilibrium elements
7(as in HANK models), they can have meaning.
8"""
from copy import deepcopy
import numpy as np
from scipy import sparse as sp

from HARK.ConsumptionSaving.ConsIndShockModel import (
    IndShockConsumerType,
    make_basic_CRRA_solution_terminal,
    solve_one_period_ConsIndShock,
    make_lognormal_kNrm_init_dstn,
    make_lognormal_pLvl_init_dstn,
)

from HARK.Calibration.Income.IncomeProcesses import (
    construct_HANK_lognormal_income_process_unemployment,
    get_PermShkDstn_from_IncShkDstn,
    get_TranShkDstn_from_IncShkDstn,
)

from HARK.utilities import (
    gen_tran_matrix_1D,
    gen_tran_matrix_2D,
    jump_to_grid_1D,
    jump_to_grid_2D,
    make_grid_exp_mult,
    make_assets_grid,
)

# Make a dictionary of constructors for the new Keynesian consumer type
newkeynesian_constructor_dict = {
    "IncShkDstn": construct_HANK_lognormal_income_process_unemployment,
    "PermShkDstn": get_PermShkDstn_from_IncShkDstn,
    "TranShkDstn": get_TranShkDstn_from_IncShkDstn,
    "aXtraGrid": make_assets_grid,
    "kNrmInitDstn": make_lognormal_kNrm_init_dstn,
    "pLvlInitDstn": make_lognormal_pLvl_init_dstn,
    "solution_terminal": make_basic_CRRA_solution_terminal,
}

# Make a dictionary with parameters for the default constructor for kNrmInitDstn
default_kNrmInitDstn_params = {
    "kLogInitMean": 0.0,  # Mean of log initial capital
    "kLogInitStd": 1.0,  # Stdev of log initial capital
    "kNrmInitCount": 15,  # Number of points in initial capital discretization
}

# Make a dictionary with parameters for the default constructor for pLvlInitDstn
default_pLvlInitDstn_params = {
    "pLogInitMean": 0.0,  # Mean of log initial permanent income
    "pLogInitStd": 0.0,  # Stdev of log initial permanent income
    "pLvlInitCount": 15,  # Number of points in initial permanent income discretization
}

# Default parameters to make IncShkDstn using construct_HANK_lognormal_income_process_unemployment
default_IncShkDstn_params = {
    "PermShkStd": [0.1],  # Standard deviation of log permanent income shocks
    "PermShkCount": 7,  # Number of points in discrete approximation to permanent income shocks
    "TranShkStd": [0.1],  # Standard deviation of log transitory income shocks
    "TranShkCount": 7,  # Number of points in discrete approximation to transitory income shocks
    "UnempPrb": 0.05,  # Probability of unemployment while working
    "IncUnemp": 0.3,  # Unemployment benefits replacement rate while working
    "T_retire": 0,  # Period of retirement (0 --> no retirement)
    "UnempPrbRet": 0.005,  # Probability of "unemployment" while retired
    "IncUnempRet": 0.0,  # "Unemployment" benefits when retired
    "tax_rate": [0.0],  # Flat tax rate on labor income NEW FOR HANK
    "labor": [1.0],  # Intensive margin labor supply NEW FOR HANK
    "wage": [1.0],  # Wage rate scaling factor NEW FOR HANK
}

# Default parameters to make aXtraGrid using make_assets_grid
default_aXtraGrid_params = {
    "aXtraMin": 0.001,  # Minimum end-of-period "assets above minimum" value
    "aXtraMax": 50,  # Maximum end-of-period "assets above minimum" value
    "aXtraNestFac": 3,  # Exponential nesting factor for aXtraGrid
    "aXtraCount": 100,  # Number of points in the grid of "assets above minimum"
    "aXtraExtra": None,  # Additional other values to add in grid (optional)
}

# Make a dictionary to specify a new Keynesian consumer type
init_newkeynesian = {
    # BASIC HARK PARAMETERS REQUIRED TO SOLVE THE MODEL
    "cycles": 0,  # Infinite horizon model
    "T_cycle": 1,  # Number of periods in the cycle for this agent type
    "constructors": newkeynesian_constructor_dict,  # See dictionary above
    # PRIMITIVE RAW PARAMETERS REQUIRED TO SOLVE THE MODEL
    "CRRA": 2.0,  # Coefficient of relative risk aversion
    "Rfree": [1.03],  # Interest factor on retained assets
    "DiscFac": 0.96,  # Intertemporal discount factor
    "LivPrb": [0.98],  # Survival probability after each period
    "PermGroFac": [1.0],  # Permanent income growth factor
    "BoroCnstArt": 0.0,  # Artificial borrowing constraint
    "vFuncBool": False,  # Whether to calculate the value function during solution
    "CubicBool": False,  # Whether to use cubic spline interpolation for cFunc
    # (uses linear spline interpolation when False)
    # PARAMETERS REQUIRED TO SIMULATE THE MODEL
    "AgentCount": 10000,  # Number of agents of this type
    "T_age": None,  # Age after which simulated agents are automatically killed
    "PermGroFacAgg": 1.0,  # Aggregate permanent income growth factor
    # (The portion of PermGroFac attributable to aggregate productivity growth)
    "NewbornTransShk": False,  # Whether newborns receive a transitory income shock
    # ADDITIONAL OPTIONAL PARAMETERS
    "PerfMITShk": False,  # Do Perfect Foresight MIT Shock
    # (Forces Newborns to follow solution path of the agent they replaced if True)
    "neutral_measure": False,  # Whether to use permanent income neutral measure (see Harmenberg 2021)
    # ADDITIONAL PARAMETERS FOR GRID-BASED TRANSITION SIMULATION
    "mMin": 0.001,  # Minimum value of the market resources grid
    "mMax": 50,  # Maximum value of the market resources grid
    "mCount": 200,  # Number of points in the market resources grid
    "mFac": 3,  # Exponential nesting factor for the market resources grid
}
init_newkeynesian.update(default_kNrmInitDstn_params)
init_newkeynesian.update(default_pLvlInitDstn_params)
init_newkeynesian.update(default_IncShkDstn_params)
init_newkeynesian.update(default_aXtraGrid_params)
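
# Illustrative sketch (not part of the library): the HANK-specific income process
# parameters above can be overridden before constructing an agent, for example to
# study a labor income tax or a wage change. The names used here are exactly the
# keys defined in default_IncShkDstn_params above.
#
#     my_params = deepcopy(init_newkeynesian)
#     my_params["tax_rate"] = [0.1]  # 10% flat tax on labor income
#     my_params["wage"] = [1.05]     # 5% higher wage scaling factor
#     my_agent = NewKeynesianConsumerType(**my_params)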


class NewKeynesianConsumerType(IndShockConsumerType):
    """
    A slight extension of IndShockConsumerType that permits individual labor supply,
    the wage rate, and the labor income tax rate to enter the income shock process.
    """

    default_ = {
        "params": init_newkeynesian,
        "solver": solve_one_period_ConsIndShock,
        "track_vars": ["aNrm", "cNrm", "mNrm", "pLvl"],
    }

    def define_distribution_grid(
        self,
        dist_mGrid=None,
        dist_pGrid=None,
        m_density=0,
        num_pointsM=None,
        timestonest=None,
        num_pointsP=55,
        max_p_fac=30.0,
    ):
        """
        Defines the grid on which the distribution of agents is defined, storing the
        grids of market resources and permanent income as attributes of self. Grids
        for normalized market resources and permanent income may be prespecified as
        dist_mGrid and dist_pGrid, respectively; if not, default grids are computed
        from the given parameters.

        Parameters
        ----------
        dist_mGrid : np.array
            Prespecified grid for the distribution over normalized market resources.

        dist_pGrid : np.array
            Prespecified grid for the distribution over permanent income.

        m_density : int
            Density of the normalized market resources grid. Default value is
            m_density = 0. Only affects the market resources grid if dist_mGrid=None.

        num_pointsM : int
            Number of gridpoints for the market resources grid.

        timestonest : int or float
            Exponential nesting factor for the market resources grid; defaults to
            self.mFac when None.

        num_pointsP : int
            Number of gridpoints for permanent income.
            This grid will be exponentiated by the function make_grid_exp_mult.

        max_p_fac : float
            Factor that scales the maximum value of the permanent income grid.
            Larger values increase the maximum value of the permanent income grid.

        Returns
        -------
        None
        """

        # If true Use Harmenberg 2021's Neutral Measure. For more information, see https://econ-ark.org/materials/harmenberg-aggregation?launch
        if not hasattr(self, "neutral_measure"):
            self.neutral_measure = False

        if num_pointsM is None:
            m_points = self.mCount
        else:
            m_points = num_pointsM

        if timestonest is None:
            timestonest = self.mFac
        elif not isinstance(timestonest, (int, float)):
            raise TypeError("timestonest must be a numeric value (int or float).")

        if self.cycles == 0:
            if not hasattr(dist_mGrid, "__len__"):
                mGrid = make_grid_exp_mult(
                    ming=self.mMin,
                    maxg=self.mMax,
                    ng=m_points,
                    timestonest=timestonest,
                )  # Generate Market resources grid given density and number of points

                for i in range(m_density):
                    m_shifted = np.delete(mGrid, -1)
                    m_shifted = np.insert(m_shifted, 0, 1.00000000e-04)
                    dist_betw_pts = mGrid - m_shifted
                    dist_betw_pts_half = dist_betw_pts / 2
                    new_A_grid = m_shifted + dist_betw_pts_half
                    mGrid = np.concatenate((mGrid, new_A_grid))
                    mGrid = np.sort(mGrid)

                self.dist_mGrid = mGrid

            else:
                # If grid of market resources prespecified then use as mGrid
                self.dist_mGrid = dist_mGrid

            if not hasattr(dist_pGrid, "__len__"):
                num_points = num_pointsP  # Number of permanent income gridpoints
                # Dist_pGrid is taken to cover most of the ergodic distribution
                # set variance of permanent income shocks
                p_variance = self.PermShkStd[0] ** 2
                # Maximum Permanent income value
                max_p = max_p_fac * (p_variance / (1 - self.LivPrb[0])) ** 0.5
                one_sided_grid = make_grid_exp_mult(
                    1.05 + 1e-3, np.exp(max_p), num_points, 3
                )
                self.dist_pGrid = np.append(
                    np.append(1.0 / np.fliplr([one_sided_grid])[0], np.ones(1)),
                    one_sided_grid,
                )  # Compute permanent income grid
            else:
                # If grid of permanent income prespecified then use it as pGrid
                self.dist_pGrid = dist_pGrid

            if (
                self.neutral_measure is True
            ):  # If true Use Harmenberg 2021's Neutral Measure. For more information, see https://econ-ark.org/materials/harmenberg-aggregation?launch
                self.dist_pGrid = np.array([1])

        elif self.cycles > 1:
            raise Exception(
                "define_distribution_grid requires cycles = 0 or cycles = 1"
            )

        elif self.T_cycle != 0:
            if num_pointsM is None:
                m_points = self.mCount
            else:
                m_points = num_pointsM

            if not hasattr(dist_mGrid, "__len__"):
                mGrid = make_grid_exp_mult(
                    ming=self.mMin,
                    maxg=self.mMax,
                    ng=m_points,
                    timestonest=timestonest,
                )  # Generate Market resources grid given density and number of points

                for i in range(m_density):
                    m_shifted = np.delete(mGrid, -1)
                    m_shifted = np.insert(m_shifted, 0, 1.00000000e-04)
                    dist_betw_pts = mGrid - m_shifted
                    dist_betw_pts_half = dist_betw_pts / 2
                    new_A_grid = m_shifted + dist_betw_pts_half
                    mGrid = np.concatenate((mGrid, new_A_grid))
                    mGrid = np.sort(mGrid)

                self.dist_mGrid = mGrid

            else:
                # If grid of market resources prespecified then use as mGrid
                self.dist_mGrid = dist_mGrid

            if not hasattr(dist_pGrid, "__len__"):
                self.dist_pGrid = []  # list of grids of permanent income

                for i in range(self.T_cycle):
                    num_points = num_pointsP
                    # Dist_pGrid is taken to cover most of the ergodic distribution
                    # set variance of permanent income shocks this period
                    p_variance = self.PermShkStd[i] ** 2
                    # Consider probability of staying alive this period
                    max_p = max_p_fac * (p_variance / (1 - self.LivPrb[i])) ** 0.5
                    one_sided_grid = make_grid_exp_mult(
                        1.05 + 1e-3, np.exp(max_p), num_points, 2
                    )

                    # Compute permanent income grid this period. Grid of permanent income may differ dependent on PermShkStd
                    dist_pGrid = np.append(
                        np.append(1.0 / np.fliplr([one_sided_grid])[0], np.ones(1)),
                        one_sided_grid,
                    )
                    self.dist_pGrid.append(dist_pGrid)

            else:
                # If grid of permanent income prespecified then use as pGrid
                self.dist_pGrid = dist_pGrid

            if (
                self.neutral_measure is True
            ):  # If true Use Harmenberg 2021's Neutral Measure. For more information, see https://econ-ark.org/materials/harmenberg-aggregation?launch
                self.dist_pGrid = self.T_cycle * [np.array([1])]
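
    # Illustrative sketch (not part of the library): with the default parameters, the
    # call below stores self.dist_mGrid (mCount points between mMin and mMax) and
    # self.dist_pGrid (2 * num_pointsP + 1 points centered on 1.0, or a single point
    # when neutral_measure is True).
    #
    #     agent = NewKeynesianConsumerType()
    #     agent.define_distribution_grid()                                  # default grids
    #     agent.define_distribution_grid(dist_mGrid=np.linspace(1e-3, 50, 200))  # custom m grid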

    def calc_transition_matrix(self, shk_dstn=None):
        """
        Calculates how the distribution of agents across market resources
        transitions from one period to the next. For a finite horizon problem,
        calculates a list of transition matrices, and consumption and asset policy
        grids, for each period of the problem. The transition matrix (or matrices)
        and the consumption and asset policy grid(s) are stored as attributes of self.

        Parameters
        ----------
        shk_dstn: list
            List of income shock distributions. Each income shock distribution should
            be a DiscreteDistribution object (see Distribution.py).

        Returns
        -------
        None
        """

        if self.cycles == 0:  # Infinite Horizon Problem
            if not hasattr(shk_dstn, "pmv"):
                shk_dstn = self.IncShkDstn

            dist_mGrid = self.dist_mGrid  # Grid of market resources
            dist_pGrid = self.dist_pGrid  # Grid of permanent incomes
            # assets next period
            aNext = dist_mGrid - self.solution[0].cFunc(dist_mGrid)

            self.aPol_Grid = aNext  # Steady State Asset Policy Grid
            # Steady State Consumption Policy Grid
            self.cPol_Grid = self.solution[0].cFunc(dist_mGrid)

            # Obtain shock values and shock probabilities from income distribution
            # Bank balances next period (interest factor * assets)
            bNext = self.Rfree[0] * aNext
            shk_prbs = shk_dstn[0].pmv  # Probability of shocks
            tran_shks = shk_dstn[0].atoms[1]  # Transitory shocks
            perm_shks = shk_dstn[0].atoms[0]  # Permanent shocks
            LivPrb = self.LivPrb[0]  # Update probability of staying alive

            # Newborns have this distribution (assumes they start with no assets and permanent income = 1)
            NewBornDist = jump_to_grid_2D(
                tran_shks, np.ones_like(tran_shks), shk_prbs, dist_mGrid, dist_pGrid
            )

            if len(dist_pGrid) == 1:
                NewBornDist = jump_to_grid_1D(
                    np.ones_like(tran_shks), shk_prbs, dist_mGrid
                )
                # Compute Transition Matrix given shocks and grids.
                self.tran_matrix = gen_tran_matrix_1D(
                    dist_mGrid,
                    bNext,
                    shk_prbs,
                    perm_shks,
                    tran_shks,
                    LivPrb,
                    NewBornDist,
                )

            else:
                NewBornDist = jump_to_grid_2D(
                    np.ones_like(tran_shks),
                    np.ones_like(tran_shks),
                    shk_prbs,
                    dist_mGrid,
                    dist_pGrid,
                )

                # Compute Transition Matrix given shocks and grids.
                self.tran_matrix = gen_tran_matrix_2D(
                    dist_mGrid,
                    dist_pGrid,
                    bNext,
                    shk_prbs,
                    perm_shks,
                    tran_shks,
                    LivPrb,
                    NewBornDist,
                )

        elif self.cycles > 1:
            raise Exception("calc_transition_matrix requires cycles = 0 or cycles = 1")

        elif self.T_cycle != 0:  # finite horizon problem
            if not hasattr(shk_dstn, "pmv"):
                shk_dstn = self.IncShkDstn

            self.cPol_Grid = []
            # List of consumption policy grids for each period in T_cycle
            self.aPol_Grid = []
            # List of asset policy grids for each period in T_cycle
            self.tran_matrix = []  # List of transition matrices

            dist_mGrid = self.dist_mGrid

            for k in range(self.T_cycle):
                if type(self.dist_pGrid) == list:
                    # Permanent income grid this period
                    dist_pGrid = self.dist_pGrid[k]
                else:
                    # If here then use prespecified permanent income grid
                    dist_pGrid = self.dist_pGrid

                # Consumption policy grid in period k
                Cnow = self.solution[k].cFunc(dist_mGrid)
                self.cPol_Grid.append(Cnow)  # Add to list

                aNext = dist_mGrid - Cnow  # Asset policy grid in period k
                self.aPol_Grid.append(aNext)  # Add to list

                bNext = self.Rfree[k] * aNext

                # Obtain shocks and shock probabilities from income distribution this period
                shk_prbs = shk_dstn[k].pmv  # Probability of shocks this period
                # Transitory shocks this period
                tran_shks = shk_dstn[k].atoms[1]
                # Permanent shocks this period
                perm_shks = shk_dstn[k].atoms[0]
                # Update probability of staying alive this period
                LivPrb = self.LivPrb[k]

                if len(dist_pGrid) == 1:
                    # Newborns have this distribution (assumes they start with no assets and permanent income = 1)
                    NewBornDist = jump_to_grid_1D(
                        np.ones_like(tran_shks), shk_prbs, dist_mGrid
                    )
                    # Compute Transition Matrix given shocks and grids.
                    TranMatrix_M = gen_tran_matrix_1D(
                        dist_mGrid,
                        bNext,
                        shk_prbs,
                        perm_shks,
                        tran_shks,
                        LivPrb,
                        NewBornDist,
                    )
                    self.tran_matrix.append(TranMatrix_M)

                else:
                    NewBornDist = jump_to_grid_2D(
                        np.ones_like(tran_shks),
                        np.ones_like(tran_shks),
                        shk_prbs,
                        dist_mGrid,
                        dist_pGrid,
                    )
                    # Compute Transition Matrix given shocks and grids.
                    TranMatrix = gen_tran_matrix_2D(
                        dist_mGrid,
                        dist_pGrid,
                        bNext,
                        shk_prbs,
                        perm_shks,
                        tran_shks,
                        LivPrb,
                        NewBornDist,
                    )
                    self.tran_matrix.append(TranMatrix)
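
    # Illustrative note (not part of the library): the transition matrix built above is
    # used column-wise by calc_ergodic_dist and calc_jacobian below, i.e. a distribution
    # D over the grid evolves as D' = tran_matrix @ D, so element (i, j) gives the
    # probability of moving from gridpoint j to gridpoint i in one period. In the finite
    # horizon case, self.tran_matrix is instead a list with one matrix per period.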

    def calc_ergodic_dist(self, transition_matrix=None):
        """
        Calculates the ergodic distribution across normalized market resources and
        permanent income as the eigenvector associated with the eigenvalue 1.
        The distribution is stored as attributes of self, both as a vector and as a
        reshaped array whose (i, j) element is the probability of being at the i'th
        point on the mGrid and the j'th point on the pGrid.

        Parameters
        ----------
        transition_matrix: list
            List with one transition matrix whose ergodic distribution is to be solved.

        Returns
        -------
        None
        """

        if not isinstance(transition_matrix, list):
            transition_matrix = [self.tran_matrix]

        eigen, ergodic_distr = sp.linalg.eigs(
            transition_matrix[0], v0=np.ones(len(transition_matrix[0])), k=1, which="LM"
        )  # Solve for ergodic distribution
        ergodic_distr = ergodic_distr.real / np.sum(ergodic_distr.real)

        self.vec_erg_dstn = ergodic_distr  # distribution as a vector
        # distribution reshaped into len(mGrid) by len(pGrid) array
        self.erg_dstn = ergodic_distr.reshape(
            (len(self.dist_mGrid), len(self.dist_pGrid))
        )

    def compute_steady_state(self):
        # Compute steady state to perturb around
        self.cycles = 0
        self.solve()

        # Use Harmenberg Measure
        self.neutral_measure = True
        self.construct("IncShkDstn", "TranShkDstn", "PermShkDstn")

        # Non-stochastic simulation
        self.define_distribution_grid()
        self.calc_transition_matrix()

        self.c_ss = self.cPol_Grid  # Normalized Consumption Policy grid
        self.a_ss = self.aPol_Grid  # Normalized Asset Policy grid

        self.calc_ergodic_dist()  # Calculate ergodic distribution
        # Steady State Distribution as a vector (m*p x 1) where m is the number of gridpoints on the market resources grid
        ss_dstn = self.vec_erg_dstn

        self.A_ss = np.dot(self.a_ss, ss_dstn)[0]
        self.C_ss = np.dot(self.c_ss, ss_dstn)[0]

        return self.A_ss, self.C_ss
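
    # Illustrative sketch (not part of the library): typical workflow for the grid-based
    # (non-stochastic) simulation tools defined above, using the default calibration.
    #
    #     agent = NewKeynesianConsumerType()
    #     A_ss, C_ss = agent.compute_steady_state()  # aggregate assets and consumption
    #     dstn_ss = agent.vec_erg_dstn               # ergodic distribution over the grid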

    def calc_jacobian(self, shk_param, T):
        """
        Calculates the Jacobians of aggregate consumption and aggregate assets.
        Parameters that can be shocked are LivPrb, PermShkStd, TranShkStd, DiscFac,
        UnempPrb, Rfree, and IncUnemp.

        Parameters
        ----------
        shk_param: string
            Name of the parameter to be shocked.

        T: int
            Dimension of the Jacobian matrix. The Jacobian matrix is a TxT square matrix.

        Returns
        -------
        CJAC: numpy.array
            TxT Jacobian matrix of aggregate consumption with respect to shk_param.

        AJAC: numpy.array
            TxT Jacobian matrix of aggregate assets with respect to shk_param.
        """

        # Set up finite horizon dictionary
        params = deepcopy(self.__dict__["parameters"])
        params["T_cycle"] = T  # Dimension of Jacobian Matrix

        # Specify a dictionary of lists because the problem we are solving is
        # technically finite horizon, so variables can be time varying (see
        # section on the fake news algorithm in
        # https://onlinelibrary.wiley.com/doi/abs/10.3982/ECTA17434 )
        params["LivPrb"] = params["T_cycle"] * [self.LivPrb[0]]
        params["PermGroFac"] = params["T_cycle"] * [self.PermGroFac[0]]
        params["PermShkStd"] = params["T_cycle"] * [self.PermShkStd[0]]
        params["TranShkStd"] = params["T_cycle"] * [self.TranShkStd[0]]
        params["Rfree"] = params["T_cycle"] * [self.Rfree[0]]
        params["UnempPrb"] = params["T_cycle"] * [self.UnempPrb]
        params["IncUnemp"] = params["T_cycle"] * [self.IncUnemp]
        params["wage"] = params["T_cycle"] * [self.wage[0]]
        params["labor"] = params["T_cycle"] * [self.labor[0]]
        params["tax_rate"] = params["T_cycle"] * [self.tax_rate[0]]
        params["cycles"] = 1  # "finite horizon", sort of

        # Create instance of a finite horizon agent
        FinHorizonAgent = NewKeynesianConsumerType(**params)

        dx = 0.0001  # Size of perturbation
        # Period in which the change in the shocked parameter occurs (second to last period)
        i = params["T_cycle"] - 1

        FinHorizonAgent.IncShkDstn = params["T_cycle"] * [self.IncShkDstn[0]]

        # If parameter is in time invariant list then add it to time vary list
        FinHorizonAgent.del_from_time_inv(shk_param)
        FinHorizonAgent.add_to_time_vary(shk_param)

        # This condition is needed because some attributes are specified as lists while others are floats
        if type(getattr(self, shk_param)) == list:
            perturbed_list = (
                (i) * [getattr(self, shk_param)[0]]
                + [getattr(self, shk_param)[0] + dx]
                + (params["T_cycle"] - i - 1) * [getattr(self, shk_param)[0]]
            )  # Sequence of the shocked parameter that the agent faces
        else:
            perturbed_list = (
                (i) * [getattr(self, shk_param)]
                + [getattr(self, shk_param) + dx]
                + (params["T_cycle"] - i - 1) * [getattr(self, shk_param)]
            )  # Sequence of the shocked parameter that the agent faces
        setattr(FinHorizonAgent, shk_param, perturbed_list)
        self.parameters[shk_param] = perturbed_list

        # Update income process if perturbed parameter enters the income shock distribution
        FinHorizonAgent.construct("IncShkDstn", "TranShkDstn", "PermShkDstn")

        # Solve the "finite horizon" model assuming that it ends back in steady state
        FinHorizonAgent.solve(presolve=False, from_solution=self.solution[0])

        # Use Harmenberg Neutral Measure
        FinHorizonAgent.neutral_measure = True
        FinHorizonAgent.construct("IncShkDstn", "TranShkDstn", "PermShkDstn")

        # Calculate Transition Matrices
        FinHorizonAgent.define_distribution_grid()
        FinHorizonAgent.calc_transition_matrix()

        # Normalized consumption policy grids across time
        c_t = FinHorizonAgent.cPol_Grid
        a_t = FinHorizonAgent.aPol_Grid

        # Append steady state policy grid into list of policy grids as HARK does not provide the initial policy
        c_t.append(self.c_ss)
        a_t.append(self.a_ss)

        # The Fake News Algorithm begins below (see page 2388 of https://onlinelibrary.wiley.com/doi/abs/10.3982/ECTA17434 )

        ##########
        # STEP 1 # of the fake news algorithm, as in the paper for curly Y and curly D. Here the policies are over assets and consumption, so we denote them curly A and curly C.
        ##########
        a_ss = self.aPol_Grid  # Steady state asset policy
        c_ss = self.cPol_Grid  # Steady state consumption policy
        tranmat_ss = self.tran_matrix  # Steady state transition matrix

        # List of asset policy grids where households expect the shock to occur in the second to last period
        a_t = FinHorizonAgent.aPol_Grid
        # Add steady state assets to list as it does not get appended in calc_transition_matrix method
        a_t.append(self.a_ss)

        # List of consumption policy grids where households expect the shock to occur in the second to last period
        c_t = FinHorizonAgent.cPol_Grid
        # Add steady state consumption to list as it does not get appended in calc_transition_matrix method
        c_t.append(self.c_ss)

        da0_s = []  # Deviation of asset policy from steady state policy
        dc0_s = []  # Deviation of consumption policy from steady state policy
        for i in range(T):
            da0_s.append(a_t[T - i] - a_ss)
            dc0_s.append(c_t[T - i] - c_ss)

        da0_s = np.array(da0_s)
        dc0_s = np.array(dc0_s)

        # Steady state distribution of market resources (permanent income weighted distribution)
        D_ss = self.vec_erg_dstn.T[0]
        dA0_s = []
        dC0_s = []
        for i in range(T):
            dA0_s.append(np.dot(da0_s[i], D_ss))
            dC0_s.append(np.dot(dc0_s[i], D_ss))

        dA0_s = np.array(dA0_s)
        # This is equivalent to the curly Y scalar detailed in the first step of the algorithm
        A_curl_s = dA0_s / dx

        dC0_s = np.array(dC0_s)
        C_curl_s = dC0_s / dx

        # List of computed transition matrices for each period
        tranmat_t = FinHorizonAgent.tran_matrix
        tranmat_t.append(tranmat_ss)

        # List of changes in the transition matrix relative to the steady state transition matrix
        dlambda0_s = []
        for i in range(T):
            dlambda0_s.append(tranmat_t[T - i] - tranmat_ss)

        dlambda0_s = np.array(dlambda0_s)

        dD0_s = []
        for i in range(T):
            dD0_s.append(np.dot(dlambda0_s[i], D_ss))

        dD0_s = np.array(dD0_s)
        D_curl_s = dD0_s / dx  # Curly D in the sequence space Jacobian

        ########
        # STEP 2 # of the fake news algorithm
        ########

        # Expectation vectors
        exp_vecs_a = []
        exp_vecs_c = []

        # First expectation vector is the steady state policy
        exp_vec_a = a_ss
        exp_vec_c = c_ss
        for i in range(T):
            exp_vecs_a.append(exp_vec_a)
            exp_vec_a = np.dot(tranmat_ss.T, exp_vec_a)

            exp_vecs_c.append(exp_vec_c)
            exp_vec_c = np.dot(tranmat_ss.T, exp_vec_c)

        # Turn expectation vectors into arrays
        exp_vecs_a = np.array(exp_vecs_a)
        exp_vecs_c = np.array(exp_vecs_c)

        #########
        # STEP 3 # of the algorithm. In particular equation 26 of the published paper.
        #########
        # Fake news matrices
        Curl_F_A = np.zeros((T, T))  # Fake news matrix for assets
        Curl_F_C = np.zeros((T, T))  # Fake news matrix for consumption

        # First row of the fake news matrices
        Curl_F_A[0] = A_curl_s
        Curl_F_C[0] = C_curl_s

        for i in range(T - 1):
            for j in range(T):
                Curl_F_A[i + 1][j] = np.dot(exp_vecs_a[i], D_curl_s[j])
                Curl_F_C[i + 1][j] = np.dot(exp_vecs_c[i], D_curl_s[j])

        ########
        # STEP 4 # of the algorithm
        ########

        # Function to compute the Jacobian matrix from the fake news matrix
        def J_from_F(F):
            J = F.copy()
            for t in range(1, F.shape[0]):
                J[1:, t] += J[:-1, t - 1]
            return J
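
        # Note: J_from_F implements the recursion J[t, s] = F[t, s] + J[t-1, s-1]
        # (with the first row and first column equal to those of F), which accumulates
        # the fake news matrix F into the Jacobian in the final step of the fake news
        # algorithm.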

        J_A = J_from_F(Curl_F_A)
        J_C = J_from_F(Curl_F_C)

        ########
        # Additional step to compute the zeroth column of the Jacobian
        ########

        params = deepcopy(self.__dict__["parameters"])
        params["T_cycle"] = 2  # Just need one transition matrix
        params["LivPrb"] = params["T_cycle"] * [self.LivPrb[0]]
        params["PermGroFac"] = params["T_cycle"] * [self.PermGroFac[0]]
        params["PermShkStd"] = params["T_cycle"] * [self.PermShkStd[0]]
        params["TranShkStd"] = params["T_cycle"] * [self.TranShkStd[0]]
        params["Rfree"] = params["T_cycle"] * [self.Rfree[0]]
        params["UnempPrb"] = params["T_cycle"] * [self.UnempPrb]
        params["IncUnemp"] = params["T_cycle"] * [self.IncUnemp]
        params["IncShkDstn"] = params["T_cycle"] * [self.IncShkDstn[0]]
        params["wage"] = params["T_cycle"] * [self.wage[0]]
        params["labor"] = params["T_cycle"] * [self.labor[0]]
        params["tax_rate"] = params["T_cycle"] * [self.tax_rate[0]]
        params["cycles"] = 1  # Now it's "finite" horizon while things are changing

        # Create instance of a finite horizon agent for calculation of the zeroth column
        ZerothColAgent = NewKeynesianConsumerType(**params)

        # If parameter is in time invariant list then add it to time vary list
        ZerothColAgent.del_from_time_inv(shk_param)
        ZerothColAgent.add_to_time_vary(shk_param)

        # Update income process if perturbed parameter enters the income shock distribution
        ZerothColAgent.construct("IncShkDstn", "TranShkDstn", "PermShkDstn")

        # Solve the "finite horizon" problem, again assuming that steady state comes
        # after the shocks
        ZerothColAgent.solve(presolve=False, from_solution=self.solution[0])

        # This condition is needed because some attributes are specified as lists while others are floats
        if type(getattr(self, shk_param)) == list:
            perturbed_list = [getattr(self, shk_param)[0] + dx] + (
                params["T_cycle"] - 1
            ) * [
                getattr(self, shk_param)[0]
            ]  # Sequence of the shocked parameter that the agent faces
        else:
            perturbed_list = [getattr(self, shk_param) + dx] + (
                params["T_cycle"] - 1
            ) * [getattr(self, shk_param)]
            # Sequence of the shocked parameter that the agent faces

        setattr(ZerothColAgent, shk_param, perturbed_list)  # Set attribute to agent
        self.parameters[shk_param] = perturbed_list

        # Use Harmenberg Neutral Measure
        ZerothColAgent.neutral_measure = True
        ZerothColAgent.construct("IncShkDstn", "TranShkDstn", "PermShkDstn")

        # Calculate Transition Matrices
        ZerothColAgent.define_distribution_grid()
        ZerothColAgent.calc_transition_matrix()

        tranmat_t_zeroth_col = ZerothColAgent.tran_matrix
        dstn_t_zeroth_col = self.vec_erg_dstn.T[0]

        C_t_no_sim = np.zeros(T)
        A_t_no_sim = np.zeros(T)

        for i in range(T):
            if i == 0:
                dstn_t_zeroth_col = np.dot(tranmat_t_zeroth_col[i], dstn_t_zeroth_col)
            else:
                dstn_t_zeroth_col = np.dot(tranmat_ss, dstn_t_zeroth_col)

            C_t_no_sim[i] = np.dot(self.cPol_Grid, dstn_t_zeroth_col)
            A_t_no_sim[i] = np.dot(self.aPol_Grid, dstn_t_zeroth_col)

        J_A.T[0] = (A_t_no_sim - self.A_ss) / dx
        J_C.T[0] = (C_t_no_sim - self.C_ss) / dx

        return J_C, J_A
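

###############################################################################
# Illustrative usage sketch, not part of the HARK library itself. It relies only
# on the class and defaults defined above; the horizon length and the choice of
# shocked parameter are arbitrary examples.
if __name__ == "__main__":
    # Build an agent with the default new Keynesian calibration
    example_agent = NewKeynesianConsumerType()

    # Steady state aggregates from the grid-based (non-stochastic) simulation
    A_ss, C_ss = example_agent.compute_steady_state()
    print("Steady state aggregate assets:", A_ss)
    print("Steady state aggregate consumption:", C_ss)

    # Jacobians of aggregate consumption and assets with respect to the interest
    # factor over a 50-period horizon (each is a 50x50 matrix)
    CJAC_Rfree, AJAC_Rfree = example_agent.calc_jacobian("Rfree", 50)
    print("Consumption response on impact to a time-0 Rfree shock:", CJAC_Rfree[0, 0])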