Coverage for HARK / ConsumptionSaving / ConsNewKeynesianModel.py: 89%

287 statements  

« prev     ^ index     » next       coverage.py v7.13.4, created at 2026-03-08 05:31 +0000

"""
This file has a slightly modified and extended version of ConsIndShock that is
meant to be used in heterogeneous agents new Keynesian (HANK) models. The micro-
economic model is identical, but additional primitive parameters have been added
to the specification of the income process. These parameters would have no inde-
pendent meaning in a "micro only" setting, but with dynamic equilibrium elements
(as in HANK models), they can have meaning.
"""

9 

10from copy import deepcopy 

11import numpy as np 

12from scipy import sparse as sp 

13 

14from HARK.ConsumptionSaving.ConsIndShockModel import ( 

15 IndShockConsumerType, 

16 make_basic_CRRA_solution_terminal, 

17 solve_one_period_ConsIndShock, 

18 make_lognormal_kNrm_init_dstn, 

19 make_lognormal_pLvl_init_dstn, 

20) 

21 

22from HARK.Calibration.Income.IncomeProcesses import ( 

23 construct_HANK_lognormal_income_process_unemployment, 

24 get_PermShkDstn_from_IncShkDstn, 

25 get_TranShkDstn_from_IncShkDstn, 

26) 

27 

28from HARK.utilities import ( 

29 gen_tran_matrix_1D, 

30 gen_tran_matrix_2D, 

31 jump_to_grid_1D, 

32 jump_to_grid_2D, 

33 make_grid_exp_mult, 

34 make_assets_grid, 

35) 

36 

# Make a dictionary of constructors for the idiosyncratic income shocks model.
# Each entry maps a constructed attribute name to the function that builds it.
newkeynesian_constructor_dict = {
    "IncShkDstn": construct_HANK_lognormal_income_process_unemployment,
    "PermShkDstn": get_PermShkDstn_from_IncShkDstn,
    "TranShkDstn": get_TranShkDstn_from_IncShkDstn,
    "aXtraGrid": make_assets_grid,
    "kNrmInitDstn": make_lognormal_kNrm_init_dstn,
    "pLvlInitDstn": make_lognormal_pLvl_init_dstn,
    "solution_terminal": make_basic_CRRA_solution_terminal,
}

# Make a dictionary with parameters for the default constructor for kNrmInitDstn
default_kNrmInitDstn_params = {
    "kLogInitMean": 0.0,  # Mean of log initial capital
    "kLogInitStd": 1.0,  # Stdev of log initial capital
    "kNrmInitCount": 15,  # Number of points in initial capital discretization
}

# Make a dictionary with parameters for the default constructor for pLvlInitDstn
default_pLvlInitDstn_params = {
    "pLogInitMean": 0.0,  # Mean of log permanent income
    "pLogInitStd": 0.0,  # Stdev of log permanent income
    "pLvlInitCount": 15,  # Number of points in initial permanent income discretization
}

# Default parameters to make IncShkDstn using construct_lognormal_income_process_unemployment
default_IncShkDstn_params = {
    "PermShkStd": [0.1],  # Standard deviation of log permanent income shocks
    "PermShkCount": 7,  # Number of points in discrete approximation to permanent income shocks
    "TranShkStd": [0.1],  # Standard deviation of log transitory income shocks
    "TranShkCount": 7,  # Number of points in discrete approximation to transitory income shocks
    "UnempPrb": 0.05,  # Probability of unemployment while working
    "IncUnemp": 0.3,  # Unemployment benefits replacement rate while working
    "T_retire": 0,  # Period of retirement (0 --> no retirement)
    "UnempPrbRet": 0.005,  # Probability of "unemployment" while retired
    "IncUnempRet": 0.0,  # "Unemployment" benefits when retired
    "tax_rate": [0.0],  # Flat tax rate on labor income NEW FOR HANK
    "labor": [1.0],  # Intensive margin labor supply NEW FOR HANK
    "wage": [1.0],  # Wage rate scaling factor NEW FOR HANK
}

# Default parameters to make aXtraGrid using make_assets_grid
default_aXtraGrid_params = {
    "aXtraMin": 0.001,  # Minimum end-of-period "assets above minimum" value
    "aXtraMax": 50,  # Maximum end-of-period "assets above minimum" value
    "aXtraNestFac": 3,  # Exponential nesting factor for aXtraGrid
    "aXtraCount": 100,  # Number of points in the grid of "assets above minimum"
    "aXtraExtra": None,  # Additional other values to add in grid (optional)
}

# Make a dictionary to specify an idiosyncratic income shocks consumer type
init_newkeynesian = {
    # BASIC HARK PARAMETERS REQUIRED TO SOLVE THE MODEL
    "cycles": 0,  # Infinite horizon model
    "T_cycle": 1,  # Number of periods in the cycle for this agent type
    "constructors": newkeynesian_constructor_dict,  # See dictionary above
    # PRIMITIVE RAW PARAMETERS REQUIRED TO SOLVE THE MODEL
    "CRRA": 2.0,  # Coefficient of relative risk aversion
    "Rfree": [1.03],  # Interest factor on retained assets
    "DiscFac": 0.96,  # Intertemporal discount factor
    "LivPrb": [0.98],  # Survival probability after each period
    "PermGroFac": [1.0],  # Permanent income growth factor
    "BoroCnstArt": 0.0,  # Artificial borrowing constraint
    "vFuncBool": False,  # Whether to calculate the value function during solution
    "CubicBool": False,  # Whether to use cubic spline interpolation when True
    # (Uses linear spline interpolation for cFunc when False)
    # PARAMETERS REQUIRED TO SIMULATE THE MODEL
    "AgentCount": 10000,  # Number of agents of this type
    "T_age": None,  # Age after which simulated agents are automatically killed
    "PermGroFacAgg": 1.0,  # Aggregate permanent income growth factor
    # (The portion of PermGroFac attributable to aggregate productivity growth)
    "NewbornTransShk": False,  # Whether Newborns have transitory shock
    # ADDITIONAL OPTIONAL PARAMETERS
    "PerfMITShk": False,  # Do Perfect Foresight MIT Shock
    # (Forces Newborns to follow solution path of the agent they replaced if True)
    "neutral_measure": False,  # Whether to use permanent income neutral measure (see Harmenberg 2021)
    # ADDITIONAL PARAMETERS FOR GRID-BASED TRANSITION SIMULATION
    "mMin": 0.001,  # Minimum of market resources grid for distribution
    "mMax": 50,  # Maximum of market resources grid for distribution
    "mCount": 200,  # Number of points in market resources grid for distribution
    "mFac": 3,  # Exponential nesting factor for market resources grid
}
# Merge the constructor-specific default parameter dictionaries into the
# top-level parameter dictionary for this agent type.
init_newkeynesian.update(default_kNrmInitDstn_params)
init_newkeynesian.update(default_pLvlInitDstn_params)
init_newkeynesian.update(default_IncShkDstn_params)
init_newkeynesian.update(default_aXtraGrid_params)

123 

124 

class NewKeynesianConsumerType(IndShockConsumerType):
    """
    A slight extension of IndShockConsumerType that permits individual labor supply,
    the wage rate, and the labor income tax rate to enter the income shock process.
    """

    default_ = {
        "params": init_newkeynesian,
        "solver": solve_one_period_ConsIndShock,
        "track_vars": ["aNrm", "cNrm", "mNrm", "pLvl"],
    }

    def define_distribution_grid(
        self,
        dist_mGrid=None,
        dist_pGrid=None,
        m_density=0,
        num_pointsM=None,
        timestonest=None,
        num_pointsP=55,
        max_p_fac=30.0,
    ):
        """
        Defines the grid on which the distribution is defined. Stores the grid of market resources and permanent income as attributes of self.
        Grid for normalized market resources and permanent income may be prespecified
        as dist_mGrid and dist_pGrid, respectively. If not then default grid is computed based off given parameters.

        Parameters
        ----------
        dist_mGrid : np.array
            Prespecified grid for distribution over normalized market resources

        dist_pGrid : np.array
            Prespecified grid for distribution over permanent income.

        m_density: float
            Density of normalized market resources grid. Default value is mdensity = 0.
            Only affects grid of market resources if dist_mGrid=None.

        num_pointsM: float
            Number of gridpoints for market resources grid.

        timestonest: int or float
            Exponential nesting factor for the market resources grid; defaults
            to self.mFac when None.

        num_pointsP: float
            Number of gridpoints for permanent income.
            This grid will be exponentiated by the function make_grid_exp_mult.

        max_p_fac : float
            Factor that scales the maximum value of permanent income grid.
            Larger values increases the maximum value of permanent income grid.

        Returns
        -------
        None
        """

        # If true Use Harmenberg 2021's Neutral Measure. For more information, see https://econ-ark.org/materials/harmenberg-aggregation?launch
        if not hasattr(self, "neutral_measure"):
            self.neutral_measure = False

        # Number of market resources gridpoints: explicit argument overrides self.mCount
        if num_pointsM is None:
            m_points = self.mCount
        else:
            m_points = num_pointsM

        if timestonest is None:
            timestonest = self.mFac
        elif not isinstance(timestonest, (int, float)):
            raise TypeError("timestonest must be a numeric value (int or float).")

        if self.cycles == 0:  # Infinite horizon problem
            if not hasattr(dist_mGrid, "__len__"):
                mGrid = make_grid_exp_mult(
                    ming=self.mMin,
                    maxg=self.mMax,
                    ng=m_points,
                    timestonest=timestonest,
                )  # Generate Market resources grid given density and number of points

                # Each density pass inserts midpoints between consecutive grid
                # points, roughly doubling grid resolution per iteration.
                for i in range(m_density):
                    m_shifted = np.delete(mGrid, -1)
                    m_shifted = np.insert(m_shifted, 0, 1.00000000e-04)
                    dist_betw_pts = mGrid - m_shifted
                    dist_betw_pts_half = dist_betw_pts / 2
                    new_A_grid = m_shifted + dist_betw_pts_half
                    mGrid = np.concatenate((mGrid, new_A_grid))
                    mGrid = np.sort(mGrid)

                self.dist_mGrid = mGrid

            else:
                # If grid of market resources prespecified then use as mgrid
                self.dist_mGrid = dist_mGrid

            if not hasattr(dist_pGrid, "__len__"):
                num_points = num_pointsP  # Number of permanent income gridpoints
                # Dist_pGrid is taken to cover most of the ergodic distribution
                # set variance of permanent income shocks
                p_variance = self.PermShkStd[0] ** 2
                # Maximum Permanent income value
                max_p = max_p_fac * (p_variance / (1 - self.LivPrb[0])) ** 0.5
                one_sided_grid = make_grid_exp_mult(
                    1.05 + 1e-3, np.exp(max_p), num_points, 3
                )
                # Grid is symmetric in logs: reciprocals of the one-sided grid
                # (reversed), then 1.0, then the one-sided grid itself.
                self.dist_pGrid = np.append(
                    np.append(1.0 / np.fliplr([one_sided_grid])[0], np.ones(1)),
                    one_sided_grid,
                )  # Compute permanent income grid
            else:
                # If grid of permanent income prespecified then use it as pgrid
                self.dist_pGrid = dist_pGrid

            if (
                self.neutral_measure is True
            ):  # If true Use Harmenberg 2021's Neutral Measure. For more information, see https://econ-ark.org/materials/harmenberg-aggregation?launch
                # Under the neutral measure permanent income is integrated out,
                # so the permanent income grid collapses to a single point.
                self.dist_pGrid = np.array([1])

        elif self.cycles > 1:
            raise Exception(
                "define_distribution_grid requires cycles = 0 or cycles = 1"
            )

        elif self.T_cycle != 0:  # Finite horizon ("lifecycle") problem
            if num_pointsM is None:
                m_points = self.mCount
            else:
                m_points = num_pointsM

            if not hasattr(dist_mGrid, "__len__"):
                mGrid = make_grid_exp_mult(
                    ming=self.mMin,
                    maxg=self.mMax,
                    ng=m_points,
                    timestonest=timestonest,
                )  # Generate Market resources grid given density and number of points

                # Same midpoint-insertion refinement as in the infinite horizon case
                for i in range(m_density):
                    m_shifted = np.delete(mGrid, -1)
                    m_shifted = np.insert(m_shifted, 0, 1.00000000e-04)
                    dist_betw_pts = mGrid - m_shifted
                    dist_betw_pts_half = dist_betw_pts / 2
                    new_A_grid = m_shifted + dist_betw_pts_half
                    mGrid = np.concatenate((mGrid, new_A_grid))
                    mGrid = np.sort(mGrid)

                self.dist_mGrid = mGrid

            else:
                # If grid of market resources prespecified then use as mgrid
                self.dist_mGrid = dist_mGrid

            if not hasattr(dist_pGrid, "__len__"):
                self.dist_pGrid = []  # list of grids of permanent income

                for i in range(self.T_cycle):
                    num_points = num_pointsP
                    # Dist_pGrid is taken to cover most of the ergodic distribution
                    # set variance of permanent income shocks this period
                    p_variance = self.PermShkStd[i] ** 2
                    # Consider probability of staying alive this period
                    max_p = max_p_fac * (p_variance / (1 - self.LivPrb[i])) ** 0.5
                    # NOTE(review): nesting factor is 2 here but 3 in the
                    # infinite-horizon branch above — confirm this asymmetry
                    # is intentional.
                    one_sided_grid = make_grid_exp_mult(
                        1.05 + 1e-3, np.exp(max_p), num_points, 2
                    )

                    # Compute permanent income grid this period. Grid of permanent income may differ dependent on PermShkStd
                    dist_pGrid = np.append(
                        np.append(1.0 / np.fliplr([one_sided_grid])[0], np.ones(1)),
                        one_sided_grid,
                    )
                    self.dist_pGrid.append(dist_pGrid)

            else:
                # If grid of permanent income prespecified then use as pgrid
                self.dist_pGrid = dist_pGrid

            if (
                self.neutral_measure is True
            ):  # If true Use Harmenberg 2021's Neutral Measure. For more information, see https://econ-ark.org/materials/harmenberg-aggregation?launch
                # One single-point permanent income grid per period
                self.dist_pGrid = self.T_cycle * [np.array([1])]

    def calc_transition_matrix(self, shk_dstn=None):
        """
        Calculates how the distribution of agents across market resources
        transitions from one period to the next. If finite horizon problem, then calculates
        a list of transition matrices, consumption and asset policy grids for each period of the problem.
        The transition matrix/matrices and consumption and asset policy grid(s) are stored as attributes of self.


        Parameters
        ----------
        shk_dstn: list
            list of income shock distributions. Each Income Shock Distribution should be a DiscreteDistribution Object (see Distribution.py)
        Returns
        -------
        None

        """

        if self.cycles == 0:  # Infinite Horizon Problem
            # Fall back to the agent's own income distribution if none given
            if not hasattr(shk_dstn, "pmv"):
                shk_dstn = self.IncShkDstn

            dist_mGrid = self.dist_mGrid  # Grid of market resources
            dist_pGrid = self.dist_pGrid  # Grid of permanent incomes
            # assets next period
            aNext = dist_mGrid - self.solution[0].cFunc(dist_mGrid)

            self.aPol_Grid = aNext  # Steady State Asset Policy Grid
            # Steady State Consumption Policy Grid
            self.cPol_Grid = self.solution[0].cFunc(dist_mGrid)

            # Obtain shock values and shock probabilities from income distribution
            # Bank Balances next period (Interest rate * assets)
            bNext = self.Rfree[0] * aNext
            shk_prbs = shk_dstn[0].pmv  # Probability of shocks
            tran_shks = shk_dstn[0].atoms[1]  # Transitory shocks
            perm_shks = shk_dstn[0].atoms[0]  # Permanent shocks
            LivPrb = self.LivPrb[0]  # Update probability of staying alive

            # New borns have this distribution (assumes start with no assets and permanent income=1)
            # NOTE(review): this 2D value is recomputed in the else-branch below
            # and overwritten in the 1D branch, so this first assignment is only
            # used as a default — confirm it is intentional.
            NewBornDist = jump_to_grid_2D(
                tran_shks, np.ones_like(tran_shks), shk_prbs, dist_mGrid, dist_pGrid
            )

            if len(dist_pGrid) == 1:
                # Single-point permanent income grid (neutral measure): use the
                # 1D machinery over market resources only.
                NewBornDist = jump_to_grid_1D(
                    np.ones_like(tran_shks), shk_prbs, dist_mGrid
                )
                # Compute Transition Matrix given shocks and grids.
                self.tran_matrix = gen_tran_matrix_1D(
                    dist_mGrid,
                    bNext,
                    shk_prbs,
                    perm_shks,
                    tran_shks,
                    LivPrb,
                    NewBornDist,
                )

            else:
                NewBornDist = jump_to_grid_2D(
                    np.ones_like(tran_shks),
                    np.ones_like(tran_shks),
                    shk_prbs,
                    dist_mGrid,
                    dist_pGrid,
                )

                # Generate Transition Matrix
                # Compute Transition Matrix given shocks and grids.
                self.tran_matrix = gen_tran_matrix_2D(
                    dist_mGrid,
                    dist_pGrid,
                    bNext,
                    shk_prbs,
                    perm_shks,
                    tran_shks,
                    LivPrb,
                    NewBornDist,
                )

        elif self.cycles > 1:
            raise Exception("calc_transition_matrix requires cycles = 0 or cycles = 1")

        elif self.T_cycle != 0:  # finite horizon problem
            # Fall back to the agent's own income distribution if none given
            if not hasattr(shk_dstn, "pmv"):
                shk_dstn = self.IncShkDstn

            self.cPol_Grid = []
            # List of consumption policy grids for each period in T_cycle
            self.aPol_Grid = []
            # List of asset policy grids for each period in T_cycle
            self.tran_matrix = []  # List of transition matrices

            dist_mGrid = self.dist_mGrid

            for k in range(self.T_cycle):
                if type(self.dist_pGrid) == list:
                    # Permanent income grid this period
                    dist_pGrid = self.dist_pGrid[k]
                else:
                    dist_pGrid = (
                        self.dist_pGrid
                    )  # If here then use prespecified permanent income grid

                # Consumption policy grid in period k
                Cnow = self.solution[k].cFunc(dist_mGrid)
                self.cPol_Grid.append(Cnow)  # Add to list

                aNext = dist_mGrid - Cnow  # Asset policy grid in period k
                self.aPol_Grid.append(aNext)  # Add to list

                bNext = self.Rfree[k] * aNext

                # Obtain shocks and shock probabilities from income distribution this period
                shk_prbs = shk_dstn[k].pmv  # Probability of shocks this period
                # Transitory shocks this period
                tran_shks = shk_dstn[k].atoms[1]
                # Permanent shocks this period
                perm_shks = shk_dstn[k].atoms[0]
                # Update probability of staying alive this period
                LivPrb = self.LivPrb[k]

                if len(dist_pGrid) == 1:
                    # New borns have this distribution (assumes start with no assets and permanent income=1)
                    NewBornDist = jump_to_grid_1D(
                        np.ones_like(tran_shks), shk_prbs, dist_mGrid
                    )
                    # Compute Transition Matrix given shocks and grids.
                    TranMatrix_M = gen_tran_matrix_1D(
                        dist_mGrid,
                        bNext,
                        shk_prbs,
                        perm_shks,
                        tran_shks,
                        LivPrb,
                        NewBornDist,
                    )
                    self.tran_matrix.append(TranMatrix_M)

                else:
                    NewBornDist = jump_to_grid_2D(
                        np.ones_like(tran_shks),
                        np.ones_like(tran_shks),
                        shk_prbs,
                        dist_mGrid,
                        dist_pGrid,
                    )
                    # Compute Transition Matrix given shocks and grids.
                    TranMatrix = gen_tran_matrix_2D(
                        dist_mGrid,
                        dist_pGrid,
                        bNext,
                        shk_prbs,
                        perm_shks,
                        tran_shks,
                        LivPrb,
                        NewBornDist,
                    )
                    self.tran_matrix.append(TranMatrix)

    def calc_ergodic_dist(self, transition_matrix=None):
        """
        Calculates the ergodic distribution across normalized market resources and
        permanent income as the eigenvector associated with the eigenvalue 1.
        The distribution is stored as attributes of self both as a vector and as a reshaped array with the ij'th element representing
        the probability of being at the i'th point on the mGrid and the j'th
        point on the pGrid.

        Parameters
        ----------
        transition_matrix: List
            list with one transition matrix whose ergodic distribution is to be solved
        Returns
        -------
        None
        """

        # Default to the transition matrix computed by calc_transition_matrix
        if not isinstance(transition_matrix, list):
            transition_matrix = [self.tran_matrix]

        # Largest-magnitude eigenvalue of a stochastic matrix is 1; its
        # eigenvector (normalized to sum to 1) is the stationary distribution.
        eigen, ergodic_distr = sp.linalg.eigs(
            transition_matrix[0], v0=np.ones(len(transition_matrix[0])), k=1, which="LM"
        )  # Solve for ergodic distribution
        ergodic_distr = ergodic_distr.real / np.sum(ergodic_distr.real)

        self.vec_erg_dstn = ergodic_distr  # distribution as a vector
        # distribution reshaped into len(mgrid) by len(pgrid) array
        self.erg_dstn = ergodic_distr.reshape(
            (len(self.dist_mGrid), len(self.dist_pGrid))
        )

    def compute_pe_steady_state(self):
        """
        Compute the partial equilibrium steady state levels of aggregate assets
        and consumption, storing them in attributes A_ss and C_ss. General method:

        1. Solve the agents' infinite horizon model.
        2. Build transition matrices on a discretized state space using policy functions.
        3. Find the ergodic distribution of idiosyncratic states.
        4. Calculate average consumption and assets using policy functions and ergodic distribution.

        Parameters
        ----------
        None

        Returns
        -------
        A_ss : float
            Partial equilibrium steady state average level of (end-of-period) assets,
            which also represent aggregate capital holdings in a general equilibrium framework.
        C_ss : float
            Partial equilibrium steady state average level of consumption.
        """
        # Compute steady state to perturb around
        self.cycles = 0
        self.solve()

        # Use Harmenberg Measure; rebuild income process under the new measure
        self.neutral_measure = True
        self.construct("IncShkDstn", "TranShkDstn", "PermShkDstn")

        # Non stochastic simulation via discretized distribution dynamics
        self.define_distribution_grid()
        self.calc_transition_matrix()

        self.c_ss = self.cPol_Grid  # Normalized Consumption Policy grid
        self.a_ss = self.aPol_Grid  # Normalized Asset Policy grid

        self.calc_ergodic_dist()  # Calculate ergodic distribution
        # Steady State Distribution as a vector (m*p x 1) where m is the number of gridpoints on the market resources grid
        ss_dstn = self.vec_erg_dstn

        # Aggregates are policy grids weighted by the ergodic distribution
        self.A_ss = np.dot(self.a_ss, ss_dstn)[0]
        self.C_ss = np.dot(self.c_ss, ss_dstn)[0]

        return self.A_ss, self.C_ss

    def calc_jacobian(self, shk_param, T):
        """
        Calculates the Jacobians of aggregate consumption and aggregate assets.
        Parameters that can be shocked are LivPrb, PermShkStd, TranShkStd, DiscFac,
        UnempPrb, Rfree, IncUnemp, and DiscFac.

        Assumes compute_pe_steady_state has already been run, so that the steady
        state attributes (c_ss, a_ss, A_ss, C_ss, vec_erg_dstn, tran_matrix,
        cPol_Grid, aPol_Grid) are available.

        Parameters:
        -----------

        shk_param: string
            name of variable to be shocked

        T: int
            dimension of Jacobian Matrix. Jacobian Matrix is a TxT square Matrix


        Returns
        ----------
        CJAC: numpy.array
            TxT Jacobian Matrix of Aggregate Consumption with respect to shk_param

        AJAC: numpy.array
            TxT Jacobian Matrix of Aggregate Assets with respect to shk_param

        """

        # Set up finite Horizon dictionary
        params = deepcopy(self.__dict__["parameters"])
        params["T_cycle"] = T  # Dimension of Jacobian Matrix

        # Specify a dictionary of lists because problem we are solving is
        # technically finite horizon so variables can be time varying (see
        # section on fake news algorithm in
        # https://onlinelibrary.wiley.com/doi/abs/10.3982/ECTA17434 )
        params["LivPrb"] = params["T_cycle"] * [self.LivPrb[0]]
        params["PermGroFac"] = params["T_cycle"] * [self.PermGroFac[0]]
        params["PermShkStd"] = params["T_cycle"] * [self.PermShkStd[0]]
        params["TranShkStd"] = params["T_cycle"] * [self.TranShkStd[0]]
        params["Rfree"] = params["T_cycle"] * [self.Rfree[0]]
        params["UnempPrb"] = params["T_cycle"] * [self.UnempPrb]
        params["IncUnemp"] = params["T_cycle"] * [self.IncUnemp]
        params["wage"] = params["T_cycle"] * [self.wage[0]]
        params["labor"] = params["T_cycle"] * [self.labor[0]]
        params["tax_rate"] = params["T_cycle"] * [self.tax_rate[0]]
        params["cycles"] = 1  # "finite horizon", sort of

        # Create instance of a finite horizon agent
        FinHorizonAgent = NewKeynesianConsumerType(**params)

        dx = 0.0001  # Size of perturbation
        # Period in which the change in the interest rate occurs (second to last period)
        i = params["T_cycle"] - 1

        FinHorizonAgent.IncShkDstn = params["T_cycle"] * [self.IncShkDstn[0]]

        # If parameter is in time invariant list then add it to time vary list
        FinHorizonAgent.del_from_time_inv(shk_param)
        FinHorizonAgent.add_to_time_vary(shk_param)

        # this condition is because some attributes are specified as lists while other as floats
        if type(getattr(self, shk_param)) == list:
            perturbed_list = (
                (i) * [getattr(self, shk_param)[0]]
                + [getattr(self, shk_param)[0] + dx]
                + (params["T_cycle"] - i - 1) * [getattr(self, shk_param)[0]]
            )  # Sequence of interest rates the agent faces
        else:
            perturbed_list = (
                (i) * [getattr(self, shk_param)]
                + [getattr(self, shk_param) + dx]
                + (params["T_cycle"] - i - 1) * [getattr(self, shk_param)]
            )  # Sequence of interest rates the agent faces
        setattr(FinHorizonAgent, shk_param, perturbed_list)
        self.parameters[shk_param] = perturbed_list

        # Update income process if perturbed parameter enters the income shock distribution
        FinHorizonAgent.construct("IncShkDstn", "TranShkDstn", "PermShkDstn")

        # Solve the "finite horizon" model assuming that it ends back in steady state
        FinHorizonAgent.solve(presolve=False, from_solution=self.solution[0])

        # Use Harmenberg Neutral Measure
        FinHorizonAgent.neutral_measure = True
        FinHorizonAgent.construct("IncShkDstn", "TranShkDstn", "PermShkDstn")

        # Calculate Transition Matrices
        FinHorizonAgent.define_distribution_grid()
        FinHorizonAgent.calc_transition_matrix()

        # Normalized consumption Policy Grids across time
        c_t = FinHorizonAgent.cPol_Grid
        a_t = FinHorizonAgent.aPol_Grid

        # Append steady state policy grid into list of policy grids as HARK does not provide the initial policy
        c_t.append(self.c_ss)
        a_t.append(self.a_ss)

        # Fake News Algorithm begins below ( To find fake news algorithm See page 2388 of https://onlinelibrary.wiley.com/doi/abs/10.3982/ECTA17434 )

        ##########
        # STEP 1 # of fake news algorithm, As in the paper for Curly Y and Curly D. Here the policies are over assets and consumption so we denote them as curly C and curly D.
        ##########
        a_ss = self.aPol_Grid  # steady state Asset Policy
        c_ss = self.cPol_Grid  # steady state Consumption Policy
        tranmat_ss = self.tran_matrix  # Steady State Transition Matrix

        # List of asset policies grids where households expect the shock to occur in the second to last Period
        a_t = FinHorizonAgent.aPol_Grid
        # add steady state assets to list as it does not get appended in calc_transition_matrix method
        # NOTE(review): a_t is the same list object appended to above, so the
        # steady state grid is appended twice; only index T is read below, so
        # the duplicate at index T+1 appears harmless — confirm.
        a_t.append(self.a_ss)

        # List of consumption policies grids where households expect the shock to occur in the second to last Period
        c_t = FinHorizonAgent.cPol_Grid
        # add steady state consumption to list as it does not get appended in calc_transition_matrix method
        # NOTE(review): same double-append as a_t above.
        c_t.append(self.c_ss)

        da0_s = []  # Deviation of asset policy from steady state policy
        dc0_s = []  # Deviation of Consumption policy from steady state policy
        for i in range(T):
            da0_s.append(a_t[T - i] - a_ss)
            dc0_s.append(c_t[T - i] - c_ss)

        da0_s = np.array(da0_s)
        dc0_s = np.array(dc0_s)

        # Steady state distribution of market resources (permanent income weighted distribution)
        D_ss = self.vec_erg_dstn.T[0]
        dA0_s = []
        dC0_s = []
        for i in range(T):
            dA0_s.append(np.dot(da0_s[i], D_ss))
            dC0_s.append(np.dot(dc0_s[i], D_ss))

        dA0_s = np.array(dA0_s)
        # This is equivalent to the curly Y scalar detailed in the first step of the algorithm
        A_curl_s = dA0_s / dx

        dC0_s = np.array(dC0_s)
        C_curl_s = dC0_s / dx

        # List of computed transition matrices for each period
        tranmat_t = FinHorizonAgent.tran_matrix
        tranmat_t.append(tranmat_ss)

        # List of change in transition matrix relative to the steady state transition matrix
        dlambda0_s = []
        for i in range(T):
            dlambda0_s.append(tranmat_t[T - i] - tranmat_ss)

        dlambda0_s = np.array(dlambda0_s)

        dD0_s = []
        for i in range(T):
            dD0_s.append(np.dot(dlambda0_s[i], D_ss))

        dD0_s = np.array(dD0_s)
        D_curl_s = dD0_s / dx  # Curly D in the sequence space jacobian

        ########
        # STEP2 # of fake news algorithm
        ########

        # Expectation Vectors
        exp_vecs_a = []
        exp_vecs_c = []

        # First expectation vector is the steady state policy
        exp_vec_a = a_ss
        exp_vec_c = c_ss
        for i in range(T):
            exp_vecs_a.append(exp_vec_a)
            exp_vec_a = np.dot(tranmat_ss.T, exp_vec_a)

            exp_vecs_c.append(exp_vec_c)
            exp_vec_c = np.dot(tranmat_ss.T, exp_vec_c)

        # Turn expectation vectors into arrays
        exp_vecs_a = np.array(exp_vecs_a)
        exp_vecs_c = np.array(exp_vecs_c)

        #########
        # STEP3 # of the algorithm. In particular equation 26 of the published paper.
        #########
        # Fake news matrices
        Curl_F_A = np.zeros((T, T))  # Fake news matrix for assets
        Curl_F_C = np.zeros((T, T))  # Fake news matrix for consumption

        # First row of Fake News Matrix
        Curl_F_A[0] = A_curl_s
        Curl_F_C[0] = C_curl_s

        for i in range(T - 1):
            for j in range(T):
                Curl_F_A[i + 1][j] = np.dot(exp_vecs_a[i], D_curl_s[j])
                Curl_F_C[i + 1][j] = np.dot(exp_vecs_c[i], D_curl_s[j])

        ########
        # STEP4 # of the algorithm
        ########

        # Function to compute jacobian matrix from fake news matrix
        # (recursion J[i,j] = F[i,j] + J[i-1,j-1], accumulated column by column)
        def J_from_F(F):
            J = F.copy()
            for t in range(1, F.shape[0]):
                J[1:, t] += J[:-1, t - 1]
            return J

        J_A = J_from_F(Curl_F_A)
        J_C = J_from_F(Curl_F_C)

        ########
        # Additional step due to compute Zeroth Column of the Jacobian
        ########

        params = deepcopy(self.__dict__["parameters"])
        params["T_cycle"] = 2  # Just need one transition matrix
        params["LivPrb"] = params["T_cycle"] * [self.LivPrb[0]]
        params["PermGroFac"] = params["T_cycle"] * [self.PermGroFac[0]]
        params["PermShkStd"] = params["T_cycle"] * [self.PermShkStd[0]]
        params["TranShkStd"] = params["T_cycle"] * [self.TranShkStd[0]]
        params["Rfree"] = params["T_cycle"] * [self.Rfree[0]]
        params["UnempPrb"] = params["T_cycle"] * [self.UnempPrb]
        params["IncUnemp"] = params["T_cycle"] * [self.IncUnemp]
        params["IncShkDstn"] = params["T_cycle"] * [self.IncShkDstn[0]]
        params["wage"] = params["T_cycle"] * [self.wage[0]]
        params["labor"] = params["T_cycle"] * [self.labor[0]]
        params["tax_rate"] = params["T_cycle"] * [self.tax_rate[0]]
        params["cycles"] = 1  # Now it's "finite" horizon while things are changing

        # Create instance of a finite horizon agent for calculation of zeroth
        ZerothColAgent = NewKeynesianConsumerType(**params)

        # If parameter is in time invariant list then add it to time vary list
        ZerothColAgent.del_from_time_inv(shk_param)
        ZerothColAgent.add_to_time_vary(shk_param)

        # Update income process if perturbed parameter enters the income shock distribution
        ZerothColAgent.construct("IncShkDstn", "TranShkDstn", "PermShkDstn")

        # Solve the "finite horizon" problem, again assuming that steady state comes
        # after the shocks
        ZerothColAgent.solve(presolve=False, from_solution=self.solution[0])

        # this condition is because some attributes are specified as lists while other as floats
        if type(getattr(self, shk_param)) == list:
            perturbed_list = [getattr(self, shk_param)[0] + dx] + (
                params["T_cycle"] - 1
            ) * [
                getattr(self, shk_param)[0]
            ]  # Sequence of interest rates the agent faces
        else:
            perturbed_list = [getattr(self, shk_param) + dx] + (
                params["T_cycle"] - 1
            ) * [getattr(self, shk_param)]
            # Sequence of interest rates the agent faces

        setattr(ZerothColAgent, shk_param, perturbed_list)  # Set attribute to agent
        self.parameters[shk_param] = perturbed_list

        # Use Harmenberg Neutral Measure
        ZerothColAgent.neutral_measure = True
        ZerothColAgent.construct("IncShkDstn", "TranShkDstn", "PermShkDstn")

        # Calculate Transition Matrices
        ZerothColAgent.define_distribution_grid()
        ZerothColAgent.calc_transition_matrix()

        tranmat_t_zeroth_col = ZerothColAgent.tran_matrix
        dstn_t_zeroth_col = self.vec_erg_dstn.T[0]

        C_t_no_sim = np.zeros(T)
        A_t_no_sim = np.zeros(T)

        # Push the steady state distribution forward: the shocked transition
        # matrix applies only in the first period, the steady state one after.
        for i in range(T):
            if i == 0:
                dstn_t_zeroth_col = np.dot(tranmat_t_zeroth_col[i], dstn_t_zeroth_col)
            else:
                dstn_t_zeroth_col = np.dot(tranmat_ss, dstn_t_zeroth_col)

            C_t_no_sim[i] = np.dot(self.cPol_Grid, dstn_t_zeroth_col)
            A_t_no_sim[i] = np.dot(self.aPol_Grid, dstn_t_zeroth_col)

        # Zeroth column: impulse responses to a period-0 shock, per unit of dx
        J_A.T[0] = (A_t_no_sim - self.A_ss) / dx
        J_C.T[0] = (C_t_no_sim - self.C_ss) / dx

        return J_C, J_A