Coverage for HARK/ConsumptionSaving/ConsNewKeynesianModel.py: 89%

287 statements  

coverage.py v7.11.0, created at 2025-11-02 05:14 +0000

1""" 

2This file has a slightly modified and extended version of ConsIndShock that is 

3meant to be used in heterogeneous agents new Keynesian (HANK) models. The micro- 

4economic model is identical, but additional primitive parameters have been added 

5to the specification of the income process. These parameters would have no inde- 

6pendent meaning in a "micro only" setting, but with dynamic equilibrium elements 

7(as in HANK models), they can have meaning. 

8""" 

9 

10from copy import deepcopy 

11import numpy as np 

12from scipy import sparse as sp 

13 

14from HARK.ConsumptionSaving.ConsIndShockModel import ( 

15 IndShockConsumerType, 

16 make_basic_CRRA_solution_terminal, 

17 solve_one_period_ConsIndShock, 

18 make_lognormal_kNrm_init_dstn, 

19 make_lognormal_pLvl_init_dstn, 

20) 

21 

22from HARK.Calibration.Income.IncomeProcesses import ( 

23 construct_HANK_lognormal_income_process_unemployment, 

24 get_PermShkDstn_from_IncShkDstn, 

25 get_TranShkDstn_from_IncShkDstn, 

26) 

27 

28from HARK.utilities import ( 

29 gen_tran_matrix_1D, 

30 gen_tran_matrix_2D, 

31 jump_to_grid_1D, 

32 jump_to_grid_2D, 

33 make_grid_exp_mult, 

34 make_assets_grid, 

35) 

36 

# Make a dictionary of constructors for the idiosyncratic income shocks model
newkeynesian_constructor_dict = {
    "IncShkDstn": construct_HANK_lognormal_income_process_unemployment,
    "PermShkDstn": get_PermShkDstn_from_IncShkDstn,
    "TranShkDstn": get_TranShkDstn_from_IncShkDstn,
    "aXtraGrid": make_assets_grid,
    "kNrmInitDstn": make_lognormal_kNrm_init_dstn,
    "pLvlInitDstn": make_lognormal_pLvl_init_dstn,
    "solution_terminal": make_basic_CRRA_solution_terminal,
}

# Make a dictionary with parameters for the default constructor for kNrmInitDstn
default_kNrmInitDstn_params = {
    "kLogInitMean": 0.0,  # Mean of log initial capital
    "kLogInitStd": 1.0,  # Stdev of log initial capital
    "kNrmInitCount": 15,  # Number of points in initial capital discretization
}

# Make a dictionary with parameters for the default constructor for pLvlInitDstn
default_pLvlInitDstn_params = {
    "pLogInitMean": 0.0,  # Mean of log permanent income
    "pLogInitStd": 0.0,  # Stdev of log permanent income
    "pLvlInitCount": 15,  # Number of points in initial permanent income discretization
}

# Default parameters to make IncShkDstn using construct_HANK_lognormal_income_process_unemployment
default_IncShkDstn_params = {
    "PermShkStd": [0.1],  # Standard deviation of log permanent income shocks
    "PermShkCount": 7,  # Number of points in discrete approximation to permanent income shocks
    "TranShkStd": [0.1],  # Standard deviation of log transitory income shocks
    "TranShkCount": 7,  # Number of points in discrete approximation to transitory income shocks
    "UnempPrb": 0.05,  # Probability of unemployment while working
    "IncUnemp": 0.3,  # Unemployment benefits replacement rate while working
    "T_retire": 0,  # Period of retirement (0 --> no retirement)
    "UnempPrbRet": 0.005,  # Probability of "unemployment" while retired
    "IncUnempRet": 0.0,  # "Unemployment" benefits when retired
    "tax_rate": [0.0],  # Flat tax rate on labor income NEW FOR HANK
    "labor": [1.0],  # Intensive margin labor supply NEW FOR HANK
    "wage": [1.0],  # Wage rate scaling factor NEW FOR HANK
}

# Default parameters to make aXtraGrid using make_assets_grid
default_aXtraGrid_params = {
    "aXtraMin": 0.001,  # Minimum end-of-period "assets above minimum" value
    "aXtraMax": 50,  # Maximum end-of-period "assets above minimum" value
    "aXtraNestFac": 3,  # Exponential nesting factor for aXtraGrid
    "aXtraCount": 100,  # Number of points in the grid of "assets above minimum"
    "aXtraExtra": None,  # Additional other values to add in grid (optional)
}

# Make a dictionary to specify an idiosyncratic income shocks consumer type
init_newkeynesian = {
    # BASIC HARK PARAMETERS REQUIRED TO SOLVE THE MODEL
    "cycles": 0,  # Infinite horizon model
    "T_cycle": 1,  # Number of periods in the cycle for this agent type
    "constructors": newkeynesian_constructor_dict,  # See dictionary above
    # PRIMITIVE RAW PARAMETERS REQUIRED TO SOLVE THE MODEL
    "CRRA": 2.0,  # Coefficient of relative risk aversion
    "Rfree": [1.03],  # Interest factor on retained assets
    "DiscFac": 0.96,  # Intertemporal discount factor
    "LivPrb": [0.98],  # Survival probability after each period
    "PermGroFac": [1.0],  # Permanent income growth factor
    "BoroCnstArt": 0.0,  # Artificial borrowing constraint
    "vFuncBool": False,  # Whether to calculate the value function during solution
    "CubicBool": False,  # Whether to use cubic spline interpolation for cFunc
    # (Uses linear spline interpolation for cFunc when False)
    # PARAMETERS REQUIRED TO SIMULATE THE MODEL
    "AgentCount": 10000,  # Number of agents of this type
    "T_age": None,  # Age after which simulated agents are automatically killed
    "PermGroFacAgg": 1.0,  # Aggregate permanent income growth factor
    # (The portion of PermGroFac attributable to aggregate productivity growth)
    "NewbornTransShk": False,  # Whether newborns have a transitory shock
    # ADDITIONAL OPTIONAL PARAMETERS
    "PerfMITShk": False,  # Do perfect foresight MIT shock
    # (Forces newborns to follow solution path of the agent they replaced if True)
    "neutral_measure": False,  # Whether to use permanent income neutral measure (see Harmenberg 2021)
    # ADDITIONAL PARAMETERS FOR GRID-BASED TRANSITION SIMULATION
    "mMin": 0.001,
    "mMax": 50,
    "mCount": 200,
    "mFac": 3,
}
init_newkeynesian.update(default_kNrmInitDstn_params)
init_newkeynesian.update(default_pLvlInitDstn_params)
init_newkeynesian.update(default_IncShkDstn_params)
init_newkeynesian.update(default_aXtraGrid_params)
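
# A hedged illustration (commented out, not executed here): a user would typically
# copy init_newkeynesian and override entries before constructing an agent, e.g.
# to change the HANK-specific wage or tax rate entries. The names below are just
# the dictionary keys defined above and the class defined below, not a new API.
#
#     my_params = deepcopy(init_newkeynesian)
#     my_params["wage"] = [1.02]      # scale the wage rate
#     my_params["tax_rate"] = [0.1]   # flat labor income tax
#     agent = NewKeynesianConsumerType(**my_params)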


class NewKeynesianConsumerType(IndShockConsumerType):
    """
    A slight extension of IndShockConsumerType that permits individual labor supply,
    the wage rate, and the labor income tax rate to enter the income shock process.
    """

    default_ = {
        "params": init_newkeynesian,
        "solver": solve_one_period_ConsIndShock,
    }

    def define_distribution_grid(
        self,
        dist_mGrid=None,
        dist_pGrid=None,
        m_density=0,
        num_pointsM=None,
        timestonest=None,
        num_pointsP=55,
        max_p_fac=30.0,
    ):
        """
        Defines the grid on which the distribution is defined. Stores the grids of
        market resources and permanent income as attributes of self. Grids for
        normalized market resources and permanent income may be prespecified as
        dist_mGrid and dist_pGrid, respectively. If not, default grids are computed
        from the given parameters.

        Parameters
        ----------
        dist_mGrid : np.array
            Prespecified grid for the distribution over normalized market resources.

        dist_pGrid : np.array
            Prespecified grid for the distribution over permanent income.

        m_density : float
            Density of the normalized market resources grid. Default value is
            m_density = 0. Only affects the grid of market resources if
            dist_mGrid=None.

        num_pointsM : int
            Number of gridpoints for the market resources grid.

        timestonest : int or float
            Exponential nesting factor passed to make_grid_exp_mult when building
            the market resources grid. Defaults to self.mFac.

        num_pointsP : int
            Number of gridpoints for permanent income.
            This grid will be exponentiated by the function make_grid_exp_mult.

        max_p_fac : float
            Factor that scales the maximum value of the permanent income grid.
            Larger values increase the maximum value of the permanent income grid.

        Returns
        -------
        None
        """

        # If true, use Harmenberg (2021)'s neutral measure. For more information, see https://econ-ark.org/materials/harmenberg-aggregation?launch
        if not hasattr(self, "neutral_measure"):
            self.neutral_measure = False

        if num_pointsM is None:
            m_points = self.mCount
        else:
            m_points = num_pointsM

        if timestonest is None:
            timestonest = self.mFac
        elif not isinstance(timestonest, (int, float)):
            raise TypeError("timestonest must be a numeric value (int or float).")

        if self.cycles == 0:
            if not hasattr(dist_mGrid, "__len__"):
                mGrid = make_grid_exp_mult(
                    ming=self.mMin,
                    maxg=self.mMax,
                    ng=m_points,
                    timestonest=timestonest,
                )  # Generate market resources grid given density and number of points

                for i in range(m_density):
                    m_shifted = np.delete(mGrid, -1)
                    m_shifted = np.insert(m_shifted, 0, 1.00000000e-04)
                    dist_betw_pts = mGrid - m_shifted
                    dist_betw_pts_half = dist_betw_pts / 2
                    new_A_grid = m_shifted + dist_betw_pts_half
                    mGrid = np.concatenate((mGrid, new_A_grid))
                    mGrid = np.sort(mGrid)

                self.dist_mGrid = mGrid

            else:
                # If grid of market resources is prespecified, then use it as mGrid
                self.dist_mGrid = dist_mGrid

            if not hasattr(dist_pGrid, "__len__"):
                num_points = num_pointsP  # Number of permanent income gridpoints
                # dist_pGrid is taken to cover most of the ergodic distribution
                # set variance of permanent income shocks
                p_variance = self.PermShkStd[0] ** 2
                # Maximum permanent income value
                max_p = max_p_fac * (p_variance / (1 - self.LivPrb[0])) ** 0.5
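                # The permanent income grid constructed below is a one-sided,
                # exponentially spaced grid up to exp(max_p) that is then mirrored by
                # taking reciprocals, so the full grid is (roughly) symmetric in logs
                # around the normalization point p = 1.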

                one_sided_grid = make_grid_exp_mult(
                    1.05 + 1e-3, np.exp(max_p), num_points, 3
                )
                self.dist_pGrid = np.append(
                    np.append(1.0 / np.fliplr([one_sided_grid])[0], np.ones(1)),
                    one_sided_grid,
                )  # Compute permanent income grid
            else:
                # If grid of permanent income is prespecified, then use it as pGrid
                self.dist_pGrid = dist_pGrid

            if (
                self.neutral_measure is True
            ):  # If true, use Harmenberg (2021)'s neutral measure. For more information, see https://econ-ark.org/materials/harmenberg-aggregation?launch
                self.dist_pGrid = np.array([1])

        elif self.cycles > 1:
            raise Exception(
                "define_distribution_grid requires cycles = 0 or cycles = 1"
            )

        elif self.T_cycle != 0:
            if num_pointsM is None:
                m_points = self.mCount
            else:
                m_points = num_pointsM

            if not hasattr(dist_mGrid, "__len__"):
                mGrid = make_grid_exp_mult(
                    ming=self.mMin,
                    maxg=self.mMax,
                    ng=m_points,
                    timestonest=timestonest,
                )  # Generate market resources grid given density and number of points

                for i in range(m_density):
                    m_shifted = np.delete(mGrid, -1)
                    m_shifted = np.insert(m_shifted, 0, 1.00000000e-04)
                    dist_betw_pts = mGrid - m_shifted
                    dist_betw_pts_half = dist_betw_pts / 2
                    new_A_grid = m_shifted + dist_betw_pts_half
                    mGrid = np.concatenate((mGrid, new_A_grid))
                    mGrid = np.sort(mGrid)

                self.dist_mGrid = mGrid

            else:
                # If grid of market resources is prespecified, then use it as mGrid
                self.dist_mGrid = dist_mGrid

            if not hasattr(dist_pGrid, "__len__"):
                self.dist_pGrid = []  # list of grids of permanent income

                for i in range(self.T_cycle):
                    num_points = num_pointsP
                    # dist_pGrid is taken to cover most of the ergodic distribution
                    # set variance of permanent income shocks this period
                    p_variance = self.PermShkStd[i] ** 2
                    # Consider probability of staying alive this period
                    max_p = max_p_fac * (p_variance / (1 - self.LivPrb[i])) ** 0.5
                    one_sided_grid = make_grid_exp_mult(
                        1.05 + 1e-3, np.exp(max_p), num_points, 2
                    )

                    # Compute permanent income grid this period. The grid of permanent income may differ depending on PermShkStd
                    dist_pGrid = np.append(
                        np.append(1.0 / np.fliplr([one_sided_grid])[0], np.ones(1)),
                        one_sided_grid,
                    )
                    self.dist_pGrid.append(dist_pGrid)

            else:
                # If grid of permanent income is prespecified, then use it as pGrid
                self.dist_pGrid = dist_pGrid

            if (
                self.neutral_measure is True
            ):  # If true, use Harmenberg (2021)'s neutral measure. For more information, see https://econ-ark.org/materials/harmenberg-aggregation?launch
                self.dist_pGrid = self.T_cycle * [np.array([1])]

    def calc_transition_matrix(self, shk_dstn=None):
        """
        Calculates how the distribution of agents across market resources
        transitions from one period to the next. For a finite horizon problem, it calculates
        a list of transition matrices, and consumption and asset policy grids for each period of the problem.
        The transition matrix/matrices and consumption and asset policy grid(s) are stored as attributes of self.


        Parameters
        ----------
        shk_dstn : list
            List of income shock distributions. Each income shock distribution should be a DiscreteDistribution object (see Distribution.py).

        Returns
        -------
        None

        """

        if self.cycles == 0:  # Infinite Horizon Problem
            if not hasattr(shk_dstn, "pmv"):
                shk_dstn = self.IncShkDstn

            dist_mGrid = self.dist_mGrid  # Grid of market resources
            dist_pGrid = self.dist_pGrid  # Grid of permanent incomes
            # assets next period
            aNext = dist_mGrid - self.solution[0].cFunc(dist_mGrid)

            self.aPol_Grid = aNext  # Steady state asset policy grid
            # Steady state consumption policy grid
            self.cPol_Grid = self.solution[0].cFunc(dist_mGrid)

            # Obtain shock values and shock probabilities from income distribution
            # Bank balances next period (interest factor * assets)
            bNext = self.Rfree[0] * aNext
            shk_prbs = shk_dstn[0].pmv  # Probability of shocks
            tran_shks = shk_dstn[0].atoms[1]  # Transitory shocks
            perm_shks = shk_dstn[0].atoms[0]  # Permanent shocks
            LivPrb = self.LivPrb[0]  # Update probability of staying alive

            # Newborns have this distribution (assumes they start with no assets and permanent income = 1)
            NewBornDist = jump_to_grid_2D(
                tran_shks, np.ones_like(tran_shks), shk_prbs, dist_mGrid, dist_pGrid
            )

            if len(dist_pGrid) == 1:
                NewBornDist = jump_to_grid_1D(
                    np.ones_like(tran_shks), shk_prbs, dist_mGrid
                )
                # Compute transition matrix given shocks and grids.
                self.tran_matrix = gen_tran_matrix_1D(
                    dist_mGrid,
                    bNext,
                    shk_prbs,
                    perm_shks,
                    tran_shks,
                    LivPrb,
                    NewBornDist,
                )

            else:
                NewBornDist = jump_to_grid_2D(
                    np.ones_like(tran_shks),
                    np.ones_like(tran_shks),
                    shk_prbs,
                    dist_mGrid,
                    dist_pGrid,
                )

                # Generate transition matrix
                # Compute transition matrix given shocks and grids.
                self.tran_matrix = gen_tran_matrix_2D(
                    dist_mGrid,
                    dist_pGrid,
                    bNext,
                    shk_prbs,
                    perm_shks,
                    tran_shks,
                    LivPrb,
                    NewBornDist,
                )
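                # The resulting transition matrix maps today's distribution over
                # (m, p) into next period's, D_{t+1} = tran_matrix @ D_t; each column
                # should sum to one because every current state maps into a probability
                # distribution over future states (with deceased agents replaced by newborns).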


        elif self.cycles > 1:
            raise Exception("calc_transition_matrix requires cycles = 0 or cycles = 1")

        elif self.T_cycle != 0:  # finite horizon problem
            if not hasattr(shk_dstn, "pmv"):
                shk_dstn = self.IncShkDstn

            self.cPol_Grid = []
            # List of consumption policy grids for each period in T_cycle
            self.aPol_Grid = []
            # List of asset policy grids for each period in T_cycle
            self.tran_matrix = []  # List of transition matrices

            dist_mGrid = self.dist_mGrid

            for k in range(self.T_cycle):
                if type(self.dist_pGrid) == list:
                    # Permanent income grid this period
                    dist_pGrid = self.dist_pGrid[k]
                else:
                    dist_pGrid = (
                        self.dist_pGrid
                    )  # If here, then use the prespecified permanent income grid

                # Consumption policy grid in period k
                Cnow = self.solution[k].cFunc(dist_mGrid)
                self.cPol_Grid.append(Cnow)  # Add to list

                aNext = dist_mGrid - Cnow  # Asset policy grid in period k
                self.aPol_Grid.append(aNext)  # Add to list

                bNext = self.Rfree[k] * aNext

                # Obtain shocks and shock probabilities from income distribution this period
                shk_prbs = shk_dstn[k].pmv  # Probability of shocks this period
                # Transitory shocks this period
                tran_shks = shk_dstn[k].atoms[1]
                # Permanent shocks this period
                perm_shks = shk_dstn[k].atoms[0]
                # Update probability of staying alive this period
                LivPrb = self.LivPrb[k]

                if len(dist_pGrid) == 1:
                    # Newborns have this distribution (assumes they start with no assets and permanent income = 1)
                    NewBornDist = jump_to_grid_1D(
                        np.ones_like(tran_shks), shk_prbs, dist_mGrid
                    )
                    # Compute transition matrix given shocks and grids.
                    TranMatrix_M = gen_tran_matrix_1D(
                        dist_mGrid,
                        bNext,
                        shk_prbs,
                        perm_shks,
                        tran_shks,
                        LivPrb,
                        NewBornDist,
                    )
                    self.tran_matrix.append(TranMatrix_M)

                else:
                    NewBornDist = jump_to_grid_2D(
                        np.ones_like(tran_shks),
                        np.ones_like(tran_shks),
                        shk_prbs,
                        dist_mGrid,
                        dist_pGrid,
                    )
                    # Compute transition matrix given shocks and grids.
                    TranMatrix = gen_tran_matrix_2D(
                        dist_mGrid,
                        dist_pGrid,
                        bNext,
                        shk_prbs,
                        perm_shks,
                        tran_shks,
                        LivPrb,
                        NewBornDist,
                    )
                    self.tran_matrix.append(TranMatrix)

    def calc_ergodic_dist(self, transition_matrix=None):
        """
        Calculates the ergodic distribution across normalized market resources and
        permanent income as the eigenvector associated with the eigenvalue 1.
        The distribution is stored as attributes of self, both as a vector and as a
        reshaped array with the ij'th element representing the probability of being
        at the i'th point on the mGrid and the j'th point on the pGrid.

        Parameters
        ----------
        transition_matrix : list
            List with one transition matrix whose ergodic distribution is to be solved.

        Returns
        -------
        None
        """

        if not isinstance(transition_matrix, list):
            transition_matrix = [self.tran_matrix]
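        # The stationary distribution is the eigenvector of the (column-stochastic)
        # transition matrix associated with its largest-magnitude eigenvalue, which is 1,
        # so scipy.sparse.linalg.eigs is asked for a single eigenpair with which="LM".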


        eigen, ergodic_distr = sp.linalg.eigs(
            transition_matrix[0], v0=np.ones(len(transition_matrix[0])), k=1, which="LM"
        )  # Solve for ergodic distribution
        ergodic_distr = ergodic_distr.real / np.sum(ergodic_distr.real)

        self.vec_erg_dstn = ergodic_distr  # distribution as a vector
        # distribution reshaped into len(mGrid) by len(pGrid) array
        self.erg_dstn = ergodic_distr.reshape(
            (len(self.dist_mGrid), len(self.dist_pGrid))
        )

    def compute_steady_state(self):
        # Compute steady state to perturb around
        self.cycles = 0
        self.solve()

        # Use Harmenberg measure
        self.neutral_measure = True
        self.construct("IncShkDstn", "TranShkDstn", "PermShkDstn")

        # Non-stochastic simulation
        self.define_distribution_grid()
        self.calc_transition_matrix()

        self.c_ss = self.cPol_Grid  # Normalized consumption policy grid
        self.a_ss = self.aPol_Grid  # Normalized asset policy grid

        self.calc_ergodic_dist()  # Calculate ergodic distribution
        # Steady state distribution as a vector (m*p x 1), where m is the number of gridpoints on the market resources grid
        ss_dstn = self.vec_erg_dstn
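        # Under the Harmenberg neutral measure the permanent income grid collapses to a
        # single point, so ss_dstn is a distribution over market resources only and the
        # dot products below are permanent-income-weighted aggregates.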


        self.A_ss = np.dot(self.a_ss, ss_dstn)[0]
        self.C_ss = np.dot(self.c_ss, ss_dstn)[0]

        return self.A_ss, self.C_ss

    def calc_jacobian(self, shk_param, T):
        """
        Calculates the Jacobians of aggregate consumption and aggregate assets.
        Parameters that can be shocked are LivPrb, PermShkStd, TranShkStd, DiscFac,
        UnempPrb, Rfree, and IncUnemp.

        Parameters
        ----------
        shk_param : string
            Name of the parameter to be shocked.

        T : int
            Dimension of the Jacobian matrix. The Jacobian matrix is a TxT square matrix.

        Returns
        -------
        CJAC : numpy.array
            TxT Jacobian matrix of aggregate consumption with respect to shk_param.

        AJAC : numpy.array
            TxT Jacobian matrix of aggregate assets with respect to shk_param.
        """

        # Set up finite horizon dictionary
        params = deepcopy(self.__dict__["parameters"])
        params["T_cycle"] = T  # Dimension of Jacobian matrix

        # Specify a dictionary of lists because the problem we are solving is
        # technically finite horizon, so variables can be time varying (see
        # section on the fake news algorithm in
        # https://onlinelibrary.wiley.com/doi/abs/10.3982/ECTA17434 )
        params["LivPrb"] = params["T_cycle"] * [self.LivPrb[0]]
        params["PermGroFac"] = params["T_cycle"] * [self.PermGroFac[0]]
        params["PermShkStd"] = params["T_cycle"] * [self.PermShkStd[0]]
        params["TranShkStd"] = params["T_cycle"] * [self.TranShkStd[0]]
        params["Rfree"] = params["T_cycle"] * [self.Rfree[0]]
        params["UnempPrb"] = params["T_cycle"] * [self.UnempPrb]
        params["IncUnemp"] = params["T_cycle"] * [self.IncUnemp]
        params["wage"] = params["T_cycle"] * [self.wage[0]]
        params["labor"] = params["T_cycle"] * [self.labor[0]]
        params["tax_rate"] = params["T_cycle"] * [self.tax_rate[0]]
        params["cycles"] = 1  # "finite horizon", sort of

        # Create instance of a finite horizon agent
        FinHorizonAgent = NewKeynesianConsumerType(**params)

        dx = 0.0001  # Size of perturbation
        # Period in which the change in the perturbed parameter occurs (second to last period)
        i = params["T_cycle"] - 1

        FinHorizonAgent.IncShkDstn = params["T_cycle"] * [self.IncShkDstn[0]]

        # If parameter is in the time invariant list, then move it to the time varying list
        FinHorizonAgent.del_from_time_inv(shk_param)
        FinHorizonAgent.add_to_time_vary(shk_param)

        # This condition is needed because some attributes are specified as lists while others are floats
        if type(getattr(self, shk_param)) == list:
            perturbed_list = (
                (i) * [getattr(self, shk_param)[0]]
                + [getattr(self, shk_param)[0] + dx]
                + (params["T_cycle"] - i - 1) * [getattr(self, shk_param)[0]]
            )  # Sequence of the shocked parameter's values that the agent faces
        else:
            perturbed_list = (
                (i) * [getattr(self, shk_param)]
                + [getattr(self, shk_param) + dx]
                + (params["T_cycle"] - i - 1) * [getattr(self, shk_param)]
            )  # Sequence of the shocked parameter's values that the agent faces
        setattr(FinHorizonAgent, shk_param, perturbed_list)
        self.parameters[shk_param] = perturbed_list
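        # Timing convention: the perturbation dx hits in period i = T - 1 of the finite
        # horizon problem, so the policy in period T - 1 - s is that of a household that
        # anticipates the shock s periods in the future.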


        # Update income process if perturbed parameter enters the income shock distribution
        FinHorizonAgent.construct("IncShkDstn", "TranShkDstn", "PermShkDstn")

        # Solve the "finite horizon" model, assuming that it ends back in the steady state
        FinHorizonAgent.solve(presolve=False, from_solution=self.solution[0])

        # Use Harmenberg neutral measure
        FinHorizonAgent.neutral_measure = True
        FinHorizonAgent.construct("IncShkDstn", "TranShkDstn", "PermShkDstn")

        # Calculate transition matrices
        FinHorizonAgent.define_distribution_grid()
        FinHorizonAgent.calc_transition_matrix()

        # Normalized consumption policy grids across time
        c_t = FinHorizonAgent.cPol_Grid
        a_t = FinHorizonAgent.aPol_Grid

        # Append steady state policy grid into the list of policy grids, as HARK does not provide the initial policy
        c_t.append(self.c_ss)
        a_t.append(self.a_ss)

        # The fake news algorithm begins below (see page 2388 of https://onlinelibrary.wiley.com/doi/abs/10.3982/ECTA17434 )

        ##########
        # STEP 1 # of the fake news algorithm, as in the paper for curly Y and curly D. Here the outcomes are aggregate assets and consumption, so their curly-Y analogues are denoted curly A and curly C below.
        ##########
        a_ss = self.aPol_Grid  # steady state asset policy
        c_ss = self.cPol_Grid  # steady state consumption policy
        tranmat_ss = self.tran_matrix  # steady state transition matrix

        # List of asset policy grids where households expect the shock to occur in the second to last period
        a_t = FinHorizonAgent.aPol_Grid
        # add steady state assets to list as it does not get appended in calc_transition_matrix method
        a_t.append(self.a_ss)

        # List of consumption policy grids where households expect the shock to occur in the second to last period
        c_t = FinHorizonAgent.cPol_Grid
        # add steady state consumption to list as it does not get appended in calc_transition_matrix method
        c_t.append(self.c_ss)

        da0_s = []  # Deviation of asset policy from steady state policy
        dc0_s = []  # Deviation of consumption policy from steady state policy
        for i in range(T):
            da0_s.append(a_t[T - i] - a_ss)
            dc0_s.append(c_t[T - i] - c_ss)

        da0_s = np.array(da0_s)
        dc0_s = np.array(dc0_s)

        # Steady state distribution of market resources (permanent income weighted distribution)
        D_ss = self.vec_erg_dstn.T[0]
        dA0_s = []
        dC0_s = []
        for i in range(T):
            dA0_s.append(np.dot(da0_s[i], D_ss))
            dC0_s.append(np.dot(dc0_s[i], D_ss))

        dA0_s = np.array(dA0_s)
        # This is equivalent to the curly Y scalar detailed in the first step of the algorithm
        A_curl_s = dA0_s / dx

        dC0_s = np.array(dC0_s)
        C_curl_s = dC0_s / dx

        # List of computed transition matrices for each period
        tranmat_t = FinHorizonAgent.tran_matrix
        tranmat_t.append(tranmat_ss)

        # List of changes in the transition matrix relative to the steady state transition matrix
        dlambda0_s = []
        for i in range(T):
            dlambda0_s.append(tranmat_t[T - i] - tranmat_ss)

        dlambda0_s = np.array(dlambda0_s)

        dD0_s = []
        for i in range(T):
            dD0_s.append(np.dot(dlambda0_s[i], D_ss))

        dD0_s = np.array(dD0_s)
        D_curl_s = dD0_s / dx  # Curly D in the sequence space Jacobian
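        # Roughly, entry s of A_curl_s / C_curl_s is the impact response of aggregate
        # assets / consumption when the perturbation lies s periods in the future, and
        # D_curl_s[s] is the corresponding response of the distribution; these are the
        # analogues of the paper's curly Y and curly D objects. (The s = 0 column of the
        # Jacobian is recomputed separately further below.)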


        ########
        # STEP 2 # of the fake news algorithm
        ########

        # Expectation vectors
        exp_vecs_a = []
        exp_vecs_c = []

        # First expectation vector is the steady state policy
        exp_vec_a = a_ss
        exp_vec_c = c_ss
        for i in range(T):
            exp_vecs_a.append(exp_vec_a)
            exp_vec_a = np.dot(tranmat_ss.T, exp_vec_a)

            exp_vecs_c.append(exp_vec_c)
            exp_vec_c = np.dot(tranmat_ss.T, exp_vec_c)

        # Turn expectation vectors into arrays
        exp_vecs_a = np.array(exp_vecs_a)
        exp_vecs_c = np.array(exp_vecs_c)
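        # exp_vecs_a[i] equals (tranmat_ss.T)^i applied to a_ss, i.e. (roughly) the
        # expected asset policy value i periods ahead for an agent currently in each
        # state, under the steady state law of motion; likewise for consumption.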


        #########
        # STEP 3 # of the algorithm. In particular, equation 26 of the published paper.
        #########
        # Fake news matrices
        Curl_F_A = np.zeros((T, T))  # Fake news matrix for assets
        Curl_F_C = np.zeros((T, T))  # Fake news matrix for consumption

        # First row of the fake news matrix
        Curl_F_A[0] = A_curl_s
        Curl_F_C[0] = C_curl_s

        for i in range(T - 1):
            for j in range(T):
                Curl_F_A[i + 1][j] = np.dot(exp_vecs_a[i], D_curl_s[j])
                Curl_F_C[i + 1][j] = np.dot(exp_vecs_c[i], D_curl_s[j])

        ########
        # STEP 4 # of the algorithm
        ########

        # Function to compute the Jacobian matrix from the fake news matrix
        def J_from_F(F):
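            # Accumulate the fake news matrix along diagonals:
            # J[t, s] = F[t, s] + J[t - 1, s - 1], with J[0, s] = F[0, s]
            # and J[t, 0] = F[t, 0].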

            J = F.copy()
            for t in range(1, F.shape[0]):
                J[1:, t] += J[:-1, t - 1]
            return J

        J_A = J_from_F(Curl_F_A)
        J_C = J_from_F(Curl_F_C)

        ########
        # Additional step to compute the zeroth column of the Jacobian
        ########

        params = deepcopy(self.__dict__["parameters"])
        params["T_cycle"] = 2  # Just need one transition matrix
        params["LivPrb"] = params["T_cycle"] * [self.LivPrb[0]]
        params["PermGroFac"] = params["T_cycle"] * [self.PermGroFac[0]]
        params["PermShkStd"] = params["T_cycle"] * [self.PermShkStd[0]]
        params["TranShkStd"] = params["T_cycle"] * [self.TranShkStd[0]]
        params["Rfree"] = params["T_cycle"] * [self.Rfree[0]]
        params["UnempPrb"] = params["T_cycle"] * [self.UnempPrb]
        params["IncUnemp"] = params["T_cycle"] * [self.IncUnemp]
        params["IncShkDstn"] = params["T_cycle"] * [self.IncShkDstn[0]]
        params["wage"] = params["T_cycle"] * [self.wage[0]]
        params["labor"] = params["T_cycle"] * [self.labor[0]]
        params["tax_rate"] = params["T_cycle"] * [self.tax_rate[0]]
        params["cycles"] = 1  # Now it's "finite" horizon while things are changing

        # Create instance of a finite horizon agent for calculation of the zeroth column
        ZerothColAgent = NewKeynesianConsumerType(**params)

        # If parameter is in the time invariant list, then move it to the time varying list
        ZerothColAgent.del_from_time_inv(shk_param)
        ZerothColAgent.add_to_time_vary(shk_param)

        # Update income process if perturbed parameter enters the income shock distribution
        ZerothColAgent.construct("IncShkDstn", "TranShkDstn", "PermShkDstn")

        # Solve the "finite horizon" problem, again assuming that the steady state comes
        # after the shocks
        ZerothColAgent.solve(presolve=False, from_solution=self.solution[0])

        # This condition is needed because some attributes are specified as lists while others are floats
        if type(getattr(self, shk_param)) == list:
            perturbed_list = [getattr(self, shk_param)[0] + dx] + (
                params["T_cycle"] - 1
            ) * [
                getattr(self, shk_param)[0]
            ]  # Sequence of the shocked parameter's values that the agent faces
        else:
            perturbed_list = [getattr(self, shk_param) + dx] + (
                params["T_cycle"] - 1
            ) * [getattr(self, shk_param)]
            # Sequence of the shocked parameter's values that the agent faces

        setattr(ZerothColAgent, shk_param, perturbed_list)  # Set attribute to agent
        self.parameters[shk_param] = perturbed_list

        # Use Harmenberg neutral measure
        ZerothColAgent.neutral_measure = True
        ZerothColAgent.construct("IncShkDstn", "TranShkDstn", "PermShkDstn")

        # Calculate transition matrices
        ZerothColAgent.define_distribution_grid()
        ZerothColAgent.calc_transition_matrix()

        tranmat_t_zeroth_col = ZerothColAgent.tran_matrix
        dstn_t_zeroth_col = self.vec_erg_dstn.T[0]

        C_t_no_sim = np.zeros(T)
        A_t_no_sim = np.zeros(T)

        for i in range(T):
            if i == 0:
                dstn_t_zeroth_col = np.dot(tranmat_t_zeroth_col[i], dstn_t_zeroth_col)
            else:
                dstn_t_zeroth_col = np.dot(tranmat_ss, dstn_t_zeroth_col)

            C_t_no_sim[i] = np.dot(self.cPol_Grid, dstn_t_zeroth_col)
            A_t_no_sim[i] = np.dot(self.aPol_Grid, dstn_t_zeroth_col)

        J_A.T[0] = (A_t_no_sim - self.A_ss) / dx
        J_C.T[0] = (C_t_no_sim - self.C_ss) / dx

        return J_C, J_A
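

# ---------------------------------------------------------------------------
# Minimal usage sketch (illustrative only; not part of the class above). It
# exercises the workflow defined in this file: build an agent with the default
# HANK calibration, compute the steady state, then compute sequence-space
# Jacobians. Solving the model and building the Jacobians can take a while.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    agent = NewKeynesianConsumerType()  # defaults come from init_newkeynesian

    # Steady state aggregates (assets and consumption) under the ergodic distribution
    A_ss, C_ss = agent.compute_steady_state()
    print("Steady state assets:", A_ss, "consumption:", C_ss)

    # TxT Jacobians of aggregate consumption and assets with respect to Rfree,
    # one of the parameters listed in calc_jacobian's docstring; T = 50 here.
    CJAC_R, AJAC_R = agent.calc_jacobian("Rfree", 50)
    print("Consumption response at t=0 to a date-0 Rfree shock:", CJAC_R[0, 0])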