Coverage for HARK/ConsumptionSaving/ConsMarkovModel.py: 97%
350 statements
coverage.py v7.13.1, created at 2026-01-25 05:22 +0000
1"""
2Classes to solve and simulate consumption-savings model with a discrete, exogenous,
3stochastic Markov state. The only solver here extends ConsIndShockModel to
4include a Markov state; the interest factor, permanent growth factor, and income
5distribution can vary with the discrete state.
6"""
8import numpy as np
10from HARK import AgentType, NullFunc
11from HARK.Calibration.Income.IncomeProcesses import (
12 construct_markov_lognormal_income_process_unemployment,
13 get_PermShkDstn_from_IncShkDstn_markov,
14 get_TranShkDstn_from_IncShkDstn_markov,
15)
16from HARK.ConsumptionSaving.ConsIndShockModel import (
17 ConsumerSolution,
18 IndShockConsumerType,
19 make_basic_CRRA_solution_terminal,
20 make_lognormal_kNrm_init_dstn,
21 make_lognormal_pLvl_init_dstn,
22)
23from HARK.distributions import MarkovProcess, Uniform, expected, DiscreteDistribution
24from HARK.interpolation import (
25 CubicInterp,
26 LinearInterp,
27 LowerEnvelope,
28 IndexedInterp,
29 MargMargValueFuncCRRA,
30 MargValueFuncCRRA,
31 ValueFuncCRRA,
32)
33from HARK.rewards import (
34 UtilityFuncCRRA,
35 CRRAutility,
36 CRRAutility_inv,
37 CRRAutility_invP,
38 CRRAutilityP,
39 CRRAutilityP_inv,
40 CRRAutilityP_invP,
41 CRRAutilityPP,
42)
43from HARK.utilities import make_assets_grid
45__all__ = ["MarkovConsumerType"]
47utility = CRRAutility
48utilityP = CRRAutilityP
49utilityPP = CRRAutilityPP
50utilityP_inv = CRRAutilityP_inv
51utility_invP = CRRAutility_invP
52utility_inv = CRRAutility_inv
53utilityP_invP = CRRAutilityP_invP
56###############################################################################
58# Define some functions that can be used as constructors for MrkvArray
61def make_simple_binary_markov(T_cycle, Mrkv_p11, Mrkv_p22):
62 """
63 Make a list of very simple Markov arrays between two binary states by specifying
64 diagonal elements in each period (probability of remaining in that state).
66 Parameters
67 ----------
68 T_cycle : int
69 Number of non-terminal periods in this instance's sequential problem.
70 Mrkv_p11 : [float]
71 List or array of probabilities of remaining in the first state between periods.
72 Mrkv_p22 : [float]
73 List or array of probabilities of remaining in the second state between periods.
75 Returns
76 -------
77 MrkvArray : [np.array]
78 List of 2x2 Markov transition arrays, one for each non-terminal period.
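
    Examples
    --------
    A small usage sketch; each probability list has one entry per non-terminal period:

    >>> arrays = make_simple_binary_markov(2, [0.9, 0.8], [0.5, 0.6])
    >>> len(arrays), arrays[0].shape
    (2, (2, 2))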
79 """
80 p11 = np.array(Mrkv_p11)
81 p22 = np.array(Mrkv_p22)
83 if len(p11) != T_cycle or len(p22) != T_cycle:
84 raise ValueError("Length of p11 and p22 probabilities must equal T_cycle!")
85 if np.any(p11 > 1.0) or np.any(p22 > 1.0):
86 raise ValueError("The p11 and p22 probabilities must not exceed 1!")
87 if np.any(p11 < 0.0) or np.any(p22 < 0.0):
88 raise ValueError("The p11 and p22 probabilities must not be less than 0!")
90 MrkvArray = [
91 np.array([[p11[t], 1.0 - p11[t]], [1.0 - p22[t], p22[t]]])
92 for t in range(T_cycle)
93 ]
94 return MrkvArray


def make_ratchet_markov(T_cycle, Mrkv_ratchet_probs):
    """
    Make a list of "ratchet-style" Markov transition arrays, in which transitions
    are strictly *one way* and only by one step. Element t of the ratchet_probs
    list is a size-N vector giving the probability of progressing from state i
    to state i+1 in that period; progress from the topmost state reverts the
    agent to the 0th state. Set ratchet_probs[t][-1] to zero to make the top
    state absorbing.

    Parameters
    ----------
    T_cycle : int
        Number of non-terminal periods in this instance's sequential problem.
    Mrkv_ratchet_probs : [np.array]
        List of vectors of "ratchet probabilities" for each period.

    Returns
    -------
    MrkvArray : [np.array]
        List of NxN Markov transition arrays, one for each non-terminal period.
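
    Examples
    --------
    A small usage sketch with three states and one period, where the top state
    is absorbing:

    >>> arrays = make_ratchet_markov(1, [np.array([0.2, 0.1, 0.0])])
    >>> arrays[0].shape
    (3, 3)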
116 """
117 if len(Mrkv_ratchet_probs) != T_cycle:
118 raise ValueError("Length of Mrkv_ratchet_probs must equal T_cycle!")
120 N = Mrkv_ratchet_probs[0].size # number of discrete states
121 StateCount = np.array([Mrkv_ratchet_probs[t].size for t in range(T_cycle)])
122 if np.any(StateCount != N):
123 raise ValueError(
124 "All periods of the problem must have the same number of discrete states!"
125 )
127 MrkvArray = []
128 for t in range(T_cycle):
129 if np.any(Mrkv_ratchet_probs[t] > 1.0):
130 raise ValueError("Ratchet probabilities cannot exceed 1!")
131 if np.any(Mrkv_ratchet_probs[t] < 0.0):
132 raise ValueError("Ratchet probabilities cannot be below 0!")
134 MrkvArray_t = np.zeros((N, N))
135 for i in range(N):
136 p_go = Mrkv_ratchet_probs[t][i]
137 p_stay = 1.0 - p_go
138 if i < (N - 1):
139 i_next = i + 1
140 else:
141 i_next = 0
142 MrkvArray_t[i, i] = p_stay
143 MrkvArray_t[i, i_next] = p_go
145 MrkvArray.append(MrkvArray_t)
147 return MrkvArray


def make_MrkvInitDstn(MrkvPrbsInit, RNG):
    """
    The constructor function for MrkvInitDstn, the distribution of Markov states
    at model birth.

    Parameters
    ----------
    MrkvPrbsInit : np.array
        Stochastic vector specifying the distribution of initial discrete states.
    RNG : np.random.Generator
        Agent's internal random number generator.

    Returns
    -------
    MrkvInitDstn : DiscreteDistribution
        Distribution from which discrete states at birth can be drawn.
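
    Examples
    --------
    A minimal sketch; any NumPy generator with an ``integers`` method works:

    >>> dstn = make_MrkvInitDstn(np.array([0.6, 0.4]), np.random.default_rng(0))
    >>> draws = dstn.draw(5)  # five initial Markov states in {0, 1}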
166 """
167 seed = RNG.integers(0, 2**31 - 1)
168 vals = np.arange(MrkvPrbsInit.size, dtype=int)
169 MrkvInitDstn = DiscreteDistribution(pmv=MrkvPrbsInit, atoms=vals, seed=seed)
170 return MrkvInitDstn
173###############################################################################


def make_markov_solution_terminal(CRRA, MrkvArray):
    """
    Make the terminal period solution for a consumption-saving model with a discrete
    Markov state. Simply makes a basic terminal solution for IndShockConsumerType
    and then replicates its attributes N times for the N states in the terminal period.

    Parameters
    ----------
    CRRA : float
        Coefficient of relative risk aversion.
    MrkvArray : [np.array]
        List of Markov transition probability arrays. Only used to find the
        number of discrete states in the terminal period.

    Returns
    -------
    solution_terminal : ConsumerSolution
        Terminal period solution to the Markov consumption-saving problem.
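
    Examples
    --------
    A minimal sketch; only the shape of the last transition array matters here:

    >>> term = make_markov_solution_terminal(2.0, [np.eye(2)])
    >>> len(term.cFunc)
    2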
194 """
195 solution_terminal_basic = make_basic_CRRA_solution_terminal(CRRA)
196 StateCount_T = MrkvArray[-1].shape[1]
197 N = StateCount_T # for shorter typing
199 # Make replicated terminal period solution: consume all resources, no human wealth, minimum m is 0
200 solution_terminal = ConsumerSolution(
201 cFunc=N * [solution_terminal_basic.cFunc],
202 vFunc=N * [solution_terminal_basic.vFunc],
203 vPfunc=N * [solution_terminal_basic.vPfunc],
204 vPPfunc=N * [solution_terminal_basic.vPPfunc],
205 mNrmMin=np.zeros(N),
206 hNrm=np.zeros(N),
207 MPCmin=np.ones(N),
208 MPCmax=np.ones(N),
209 )
210 solution_terminal.cFuncX = IndexedInterp(solution_terminal.cFunc)
211 return solution_terminal


def solve_one_period_ConsMarkov(
    solution_next,
    IncShkDstn,
    LivPrb,
    DiscFac,
    CRRA,
    Rfree,
    PermGroFac,
    MrkvArray,
    BoroCnstArt,
    aXtraGrid,
    vFuncBool,
    CubicBool,
):
    """
    Solves a single period consumption-saving problem with risky income and
    stochastic transitions between discrete states, in a Markov fashion. Has
    identical inputs to the ConsIndShock solver, except for a discrete Markov
    transition rule MrkvArray. Markov states can differ in their interest factor,
    permanent growth factor, and income distribution, so the inputs Rfree,
    PermGroFac, and IncShkDstn are lists specifying those values in each
    (succeeding) Markov state.

    Parameters
    ----------
    solution_next : ConsumerSolution
        The solution to next period's one period problem.
    IncShkDstn : [distribution.Distribution]
        A length N list of income distributions in each succeeding Markov state.
        Each income distribution is a discrete approximation to the income process
        at the beginning of the succeeding period.
    LivPrb : [float]
        Survival probability; likelihood of being alive at the beginning of the
        succeeding period conditional on the current state.
    DiscFac : float
        Intertemporal discount factor for future utility.
    CRRA : float
        Coefficient of relative risk aversion.
    Rfree : [float]
        Risk free interest factor on end-of-period assets for each Markov
        state in the succeeding period.
    PermGroFac : [float]
        Expected permanent income growth factor at the end of this period
        for each Markov state in the succeeding period.
    MrkvArray : numpy.array
        An NxN array representing a Markov transition matrix between discrete
        states. The i,j-th element of MrkvArray is the probability of
        moving from state i in period t to state j in period t+1.
    BoroCnstArt : float or None
        Borrowing constraint for the minimum allowable assets to end the
        period with. If it is less than the natural borrowing constraint,
        then it is irrelevant; BoroCnstArt=None indicates no artificial
        borrowing constraint.
    aXtraGrid : np.array
        Array of "extra" end-of-period asset values, i.e. assets above the
        absolute minimum acceptable level.
    vFuncBool : boolean
        An indicator for whether the value function should be computed and
        included in the reported solution.
    CubicBool : boolean
        An indicator for whether the solver should use cubic or linear
        interpolation.

    Returns
    -------
    solution : ConsumerSolution
        The solution to the single period consumption-saving problem. Includes
        a consumption function cFunc (using cubic or linear splines), a marginal
        value function vPfunc, a minimum acceptable level of normalized market
        resources mNrmMin, normalized human wealth hNrm, and bounding MPCs
        MPCmin and MPCmax. It might also have a value function vFunc and
        marginal marginal value function vPPfunc. All of these attributes are
        lists or arrays, with elements corresponding to the current Markov
        state. E.g. solution.cFunc[0] is the consumption function when in the
        i=0 Markov state this period.
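
    Notes
    -----
    For reference (this mirrors the computation in the function body), the natural
    borrowing constraint conditional on reaching succeeding state j is::

        BoroCnstNat[j] = (mNrmMin_next[j] - TranShkMin[j])
                         * PermGroFac[j] * PermShkMin[j] / Rfree[j]

    and the constraint for a current state is the maximum of BoroCnstNat[j] over
    the states j that are reachable from it.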
288 """
289 # Relabel the inputs that vary across Markov states
290 IncShkDstn_list = IncShkDstn
291 Rfree_list = np.array(Rfree)
292 LivPrb_list = np.array(LivPrb)
293 PermGroFac_list = np.array(PermGroFac)
294 StateCountNow = MrkvArray.shape[0]
295 StateCountNext = MrkvArray.shape[1]
297 # Define the utility function
298 uFunc = UtilityFuncCRRA(CRRA)
300 # Initialize the natural borrowing constraint when entering each succeeding state
301 BoroCnstNat_temp = np.zeros(StateCountNext) + np.nan
303 # Find the natural borrowing constraint conditional on next period's state
304 for j in range(StateCountNext):
305 PermShkMinNext = np.min(IncShkDstn_list[j].atoms[0])
306 TranShkMinNext = np.min(IncShkDstn_list[j].atoms[1])
307 BoroCnstNat_temp[j] = (
308 (solution_next.mNrmMin[j] - TranShkMinNext)
309 * (PermGroFac_list[j] * PermShkMinNext)
310 / Rfree_list[j]
311 )
313 # Initialize the natural borrowing constraint and minimum value of mNrm for
314 # *this* period's Markov states, as well as a "dependency table"
315 BoroCnstNat_list = np.zeros(StateCountNow) + np.nan
316 mNrmMin_list = np.zeros(StateCountNow) + np.nan
317 BoroCnstDependency = np.zeros((StateCountNow, StateCountNext)) + np.nan
319 # The natural borrowing constraint in each current state is the *highest*
320 # among next-state-conditional natural borrowing constraints that could
321 # occur from this current state.
322 for i in range(StateCountNow):
323 possible_next_states = MrkvArray[i, :] > 0
324 BoroCnstNat_list[i] = np.max(BoroCnstNat_temp[possible_next_states])
326 # Explicitly handle the "None" case:
327 if BoroCnstArt is None:
328 mNrmMin_list[i] = BoroCnstNat_list[i]
329 else:
330 mNrmMin_list[i] = np.max([BoroCnstNat_list[i], BoroCnstArt])
331 BoroCnstDependency[i, :] = BoroCnstNat_list[i] == BoroCnstNat_temp
332 # Also creates a Boolean array indicating whether the natural borrowing
333 # constraint *could* be hit when transitioning from i to j.

    # Initialize beginning-of-period (marginal) value function lists, expected
    # income conditional on the next state, and the probability of getting the
    # worst income shock in each succeeding period state
    BegOfPrd_vFunc_list = []
    BegOfPrd_vPfunc_list = []
    Ex_IncNextAll = np.zeros(StateCountNext) + np.nan
    WorstIncPrbAll = np.zeros(StateCountNext) + np.nan

    # Loop through each next-period-state and calculate the beginning-of-period
    # (marginal) value function
    for j in range(StateCountNext):
        # Condition values on next period's state (and record a couple for later use)
        Rfree = Rfree_list[j]
        PermGroFac = PermGroFac_list[j]
        LivPrb = LivPrb_list[j]
        BoroCnstNat = BoroCnstNat_temp[j]

        # Unpack the income distribution in next period's Markov state
        IncShkDstn = IncShkDstn_list[j]
        ShkPrbsNext = IncShkDstn.pmv
        PermShkValsNext = IncShkDstn.atoms[0]
        TranShkValsNext = IncShkDstn.atoms[1]
        PermShkMinNext = np.min(PermShkValsNext)
        TranShkMinNext = np.min(TranShkValsNext)

        # Calculate the probability that we get the worst possible income draw
        IncNext = PermShkValsNext * TranShkValsNext
        WorstIncNext = PermShkMinNext * TranShkMinNext
        WorstIncPrb = np.sum(ShkPrbsNext[IncNext == WorstIncNext])
        # WorstIncPrb is the "Weierstrass p" concept: the odds we get the WORST thing

        # The survival probability LivPrb is conditional on the *current* state,
        # so the effective discount factor is just DiscFac for now
        DiscFacEff = DiscFac

        # Unpack next period's (marginal) value function
        vFuncNext = solution_next.vFunc[j]  # This is None when vFuncBool is False
        vPfuncNext = solution_next.vPfunc[j]
        vPPfuncNext = solution_next.vPPfunc[j]  # This is None when CubicBool is False

        # Compute expected income next period and record worst income probability
        Ex_IncNextAll[j] = np.dot(ShkPrbsNext, PermShkValsNext * TranShkValsNext)
        WorstIncPrbAll[j] = WorstIncPrb

        # Construct the BEGINNING-of-period marginal value function conditional
        # on next period's state and add it to the list of value functions

        # Get data to construct the end-of-period marginal value function (conditional on next state)
        aNrmNext = np.asarray(aXtraGrid) + BoroCnstNat

        # Define local functions for taking future expectations
        def calc_mNrmNext(S, a, R):
            return R / (PermGroFac * S["PermShk"]) * a + S["TranShk"]

        def calc_vNext(S, a, R):
            return (
                S["PermShk"] ** (1.0 - CRRA) * PermGroFac ** (1.0 - CRRA)
            ) * vFuncNext(calc_mNrmNext(S, a, R))

        def calc_vPnext(S, a, R):
            return S["PermShk"] ** (-CRRA) * vPfuncNext(calc_mNrmNext(S, a, R))

        def calc_vPPnext(S, a, R):
            return S["PermShk"] ** (-CRRA - 1.0) * vPPfuncNext(calc_mNrmNext(S, a, R))

        # Calculate beginning-of-period marginal value of assets at each gridpoint
        vPfacEff = DiscFacEff * Rfree * PermGroFac ** (-CRRA)
        BegOfPrd_vPnext = vPfacEff * expected(
            calc_vPnext, IncShkDstn, args=(aNrmNext, Rfree)
        )

        # "Decurved" marginal value
        BegOfPrd_vPnvrsNext = uFunc.derinv(BegOfPrd_vPnext, order=(1, 0))

        # Make the beginning-of-period pseudo-inverse marginal value of assets
        # function conditional on next period's state
        if CubicBool:
            # Calculate beginning-of-period marginal marginal value of assets
            # at each gridpoint
            vPPfacEff = DiscFacEff * Rfree * Rfree * PermGroFac ** (-CRRA - 1.0)
            BegOfPrd_vPPnext = vPPfacEff * expected(
                calc_vPPnext, IncShkDstn, args=(aNrmNext, Rfree)
            )
            # "Decurved" marginal marginal value
            BegOfPrd_vPnvrsPnext = BegOfPrd_vPPnext * uFunc.derinv(
                BegOfPrd_vPnext, order=(1, 1)
            )

            # Construct the pseudo-inverse marginal value function conditional
            # on the next state
            BegOfPrd_vPnvrsFunc = CubicInterp(
                aNrmNext,
                BegOfPrd_vPnvrsNext,
                BegOfPrd_vPnvrsPnext,
                lower_extrap=True,
            )
            # TODO: Should not be lower extrap; add a point at BoroCnstNat
        else:
            BegOfPrd_vPnvrsFunc = LinearInterp(
                aNrmNext, BegOfPrd_vPnvrsNext, lower_extrap=True
            )
            # TODO: Should not be lower extrap; add a point at BoroCnstNat
436 # "Recurve" the pseudo-inverse marginal value function
437 BegOfPrd_vPfunc = MargValueFuncCRRA(BegOfPrd_vPnvrsFunc, CRRA)
438 BegOfPrd_vPfunc_list.append(BegOfPrd_vPfunc)
440 # Construct the beginning-of-period value functional conditional on next
441 # period's state and add it to the list of value functions
442 if vFuncBool:
443 # Calculate end-of-period value, its derivative, and their pseudo-inverse
444 BegOfPrd_vNext = DiscFacEff * expected(
445 calc_vNext, IncShkDstn, args=(aNrmNext, Rfree)
446 )
447 # value transformed through inverse utility
448 BegOfPrd_vNvrsNext = uFunc.inv(BegOfPrd_vNext)
449 BegOfPrd_vNvrsPnext = BegOfPrd_vPnext * uFunc.derinv(
450 BegOfPrd_vNext, order=(0, 1)
451 )
452 BegOfPrd_vNvrsNext = np.insert(BegOfPrd_vNvrsNext, 0, 0.0)
453 BegOfPrd_vNvrsPnext = np.insert(
454 BegOfPrd_vNvrsPnext, 0, BegOfPrd_vNvrsPnext[0]
455 )
456 # This is a very good approximation, vNvrsPP = 0 at the asset minimum
458 # Construct the end-of-period value function
459 aNrm_temp = np.insert(aNrmNext, 0, BoroCnstNat)
460 BegOfPrd_vNvrsFunc = LinearInterp(
461 aNrm_temp,
462 BegOfPrd_vNvrsNext,
463 )
464 BegOfPrd_vFunc = ValueFuncCRRA(BegOfPrd_vNvrsFunc, CRRA)
465 BegOfPrd_vFunc_list.append(BegOfPrd_vFunc)

    # BegOfPrd_vP is marginal value conditional on *next* period's state.
    # Take expectations over Markov transitions to get EndOfPrd_vP conditional on
    # *this* period's Markov state.

    # Find unique values of minimum acceptable end-of-period assets (and the
    # current period states for which they apply).
    aNrmMin_unique, Mrkv_inverse = np.unique(BoroCnstNat_list, return_inverse=True)
    possible_transitions = MrkvArray > 0

    # Initialize end-of-period marginal value (and marg marg value) at each
    # asset gridpoint for each current period state
    EndOfPrd_vP = np.zeros((StateCountNow, aXtraGrid.size))
    EndOfPrd_vPP = np.zeros((StateCountNow, aXtraGrid.size))

    # Calculate end-of-period marginal value (and marg marg value) at each
    # asset gridpoint for each current period state, grouping current states
    # by their natural borrowing constraint
    for k in range(aNrmMin_unique.size):
        # Get the states for which this minimum applies and the aNrm grid for
        # this set of current states
        aNrmMin = aNrmMin_unique[k]  # minimum assets for this pass
        which_states = Mrkv_inverse == k
        aNrmNow = aNrmMin + aXtraGrid  # assets grid for this pass

        # Make arrays to hold successor period's beginning-of-period (marginal)
        # marginal value if we transition to it
        BegOfPrd_vPnext = np.zeros((StateCountNext, aXtraGrid.size))
        BegOfPrd_vPPnext = np.zeros((StateCountNext, aXtraGrid.size))

        # Loop through future Markov states and fill in those values, but only
        # look at future states that can actually be reached from our current
        # set of states (for this natural borrowing constraint value)
        for j in range(StateCountNext):
            if not np.any(np.logical_and(possible_transitions[:, j], which_states)):
                continue

            BegOfPrd_vPnext[j, :] = BegOfPrd_vPfunc_list[j](aNrmNow)
            # Add conditional beginning-of-period marginal marginal value to the array
            if CubicBool:
                BegOfPrd_vPPnext[j, :] = BegOfPrd_vPfunc_list[j].derivativeX(aNrmNow)

        # Weight conditional marginal values by transition probabilities
        # to get unconditional marginal (marginal) value at each gridpoint.
        EndOfPrd_vP_temp = np.dot(MrkvArray, BegOfPrd_vPnext)

        # Only take the states for which this asset minimum applies
        EndOfPrd_vP[which_states, :] = EndOfPrd_vP_temp[which_states, :]

        # Do the same thing for marginal marginal value
        if CubicBool:
            EndOfPrd_vPP_temp = np.dot(MrkvArray, BegOfPrd_vPPnext)
            EndOfPrd_vPP[which_states, :] = EndOfPrd_vPP_temp[which_states, :]

    # Scale end-of-period marginal value by the survival probability from each
    # current state
    LivPrb_tiled = np.tile(
        np.reshape(LivPrb_list, (StateCountNow, 1)), (1, aXtraGrid.size)
    )
    EndOfPrd_vP = LivPrb_tiled * EndOfPrd_vP
    if CubicBool:
        EndOfPrd_vPP = LivPrb_tiled * EndOfPrd_vPP

    # Calculate the bounding MPCs and PDV of human wealth for each state

    # Calculate probability of getting the "worst" income shock and transition
    # from each current state
    WorstIncPrb_array = BoroCnstDependency * np.tile(
        np.reshape(WorstIncPrbAll, (1, StateCountNext)), (StateCountNow, 1)
    )
    temp_array = MrkvArray * WorstIncPrb_array
    WorstIncPrbNow = np.sum(temp_array, axis=1)

    # Calculate expectation of upper bound of next period's MPC
    Ex_MPCmaxNext = (
        np.dot(temp_array, Rfree_list ** (1.0 - CRRA) * solution_next.MPCmax ** (-CRRA))
        / WorstIncPrbNow
    ) ** (-1.0 / CRRA)

    # Calculate limiting upper bound of MPC this period for each Markov state
    DiscFacEff_temp = DiscFac * LivPrb_list
    MPCmaxNow = 1.0 / (
        1.0 + ((DiscFacEff_temp * WorstIncPrbNow) ** (1.0 / CRRA)) / Ex_MPCmaxNext
    )
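    # The effective maximum MPC is 1 wherever the artificial borrowing constraint
    # binds (BoroCnstNat < mNrmMin); note that MPCmaxEff below is the same array
    # object as MPCmaxNow, so the in-place assignment updates both names.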
    MPCmaxEff = MPCmaxNow
    MPCmaxEff[BoroCnstNat_list < mNrmMin_list] = 1.0

    # Calculate the current Markov-state-conditional PDV of human wealth, correctly
    # accounting for risky returns and risk aversion
    hNrmPlusIncNext = Ex_IncNextAll + solution_next.hNrm
    R_adj = np.dot(MrkvArray, Rfree_list ** (1.0 - CRRA))
    hNrmNow = (
        np.dot(MrkvArray, (PermGroFac_list / Rfree_list**CRRA) * hNrmPlusIncNext)
        / R_adj
    )

    # Calculate the lower bound on MPC as m gets arbitrarily large
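    # (This parallels the limiting MPC of the perfect foresight model, with the
    # expectation taken over the discrete states reachable next period.)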
    temp = (
        DiscFacEff_temp
        * np.dot(
            MrkvArray, solution_next.MPCmin ** (-CRRA) * Rfree_list ** (1.0 - CRRA)
        )
    ) ** (1.0 / CRRA)
    MPCminNow = 1.0 / (1.0 + temp)

    # Find consumption and market resources corresponding to each end-of-period
    # assets point for each state (and add an additional point at the lower bound)
    aNrmNow = aXtraGrid[np.newaxis, :] + np.array(BoroCnstNat_list)[:, np.newaxis]
    cNrmNow = uFunc.derinv(EndOfPrd_vP, order=(1, 0))
    mNrmNow = cNrmNow + aNrmNow  # Endogenous mNrm gridpoints
    cNrmNow = np.hstack((np.zeros((StateCountNow, 1)), cNrmNow))
    mNrmNow = np.hstack((np.reshape(mNrmMin_list, (StateCountNow, 1)), mNrmNow))

    # Calculate the MPC at each market resource gridpoint in each state (if desired)
    if CubicBool:
        dcda = EndOfPrd_vPP / uFunc.der(cNrmNow[:, 1:], order=2)  # drop first
        MPCnow = dcda / (dcda + 1.0)
        MPCnow = np.hstack((np.reshape(MPCmaxNow, (StateCountNow, 1)), MPCnow))

    # Initialize an empty solution to which we'll add state-conditional solutions
    solution = ConsumerSolution()

    # Loop through each current period state and add its solution to the overall solution
    for i in range(StateCountNow):
        # Set current-Markov-state-conditional human wealth and MPC bounds
        hNrmNow_i = hNrmNow[i]
        MPCminNow_i = MPCminNow[i]
        mNrmMin_i = mNrmMin_list[i]

        # Construct the consumption function by combining the constrained and unconstrained portions
        cFuncNowCnst = LinearInterp(
            np.array([mNrmMin_list[i], mNrmMin_list[i] + 1.0]), np.array([0.0, 1.0])
        )
        if CubicBool:
            cFuncNowUnc = CubicInterp(
                mNrmNow[i, :],
                cNrmNow[i, :],
                MPCnow[i, :],
                MPCminNow_i * hNrmNow_i,
                MPCminNow_i,
            )
        else:
            cFuncNowUnc = LinearInterp(
                mNrmNow[i, :], cNrmNow[i, :], MPCminNow_i * hNrmNow_i, MPCminNow_i
            )
        cFuncNow = LowerEnvelope(cFuncNowUnc, cFuncNowCnst)

        # Make the marginal (marginal) value function
        vPfuncNow = MargValueFuncCRRA(cFuncNow, CRRA)
        if CubicBool:
            vPPfuncNow = MargMargValueFuncCRRA(cFuncNow, CRRA)
        else:
            vPPfuncNow = NullFunc()

        # Make the value function for this state if requested
        if vFuncBool:
            # Make state-conditional grids of market resources and consumption
            mNrm_for_vFunc = mNrmMin_i + aXtraGrid
            cNrm_for_vFunc = cFuncNow(mNrm_for_vFunc)
            aNrm_for_vFunc = mNrm_for_vFunc - cNrm_for_vFunc

            # Calculate end-of-period value at each gridpoint
            BegOfPrd_v_temp = np.zeros((StateCountNow, aXtraGrid.size))
            for j in range(StateCountNext):
                if possible_transitions[i, j]:
                    BegOfPrd_v_temp[j, :] = BegOfPrd_vFunc_list[j](aNrm_for_vFunc)
            EndOfPrd_v = np.dot(MrkvArray[i, :], BegOfPrd_v_temp)

            # Calculate (normalized) value and marginal value at each gridpoint
            v_now = uFunc(cNrm_for_vFunc) + EndOfPrd_v
            vP_now = uFunc.der(cNrm_for_vFunc)

            # Make a "decurved" value function with the inverse utility function
            # value transformed through inverse utility
            vNvrs_now = uFunc.inv(v_now)
            vNvrsP_now = vP_now * uFunc.derinv(v_now, order=(0, 1))
            mNrm_temp = np.insert(mNrm_for_vFunc, 0, mNrmMin_i)  # add the lower bound
            vNvrs_now = np.insert(vNvrs_now, 0, 0.0)
            vNvrsP_now = np.insert(
                vNvrsP_now, 0, MPCmaxEff[i] ** (-CRRA / (1.0 - CRRA))
            )
            # MPCminNvrs = MPCminNow[i] ** (-CRRA / (1.0 - CRRA))
            vNvrsFuncNow = LinearInterp(
                mNrm_temp,
                vNvrs_now,
                # vNvrsP_now,
            )  # MPCminNvrs * hNrmNow_i, MPCminNvrs)
            # The bounding function for the pseudo-inverse value function is incorrect.
            # TODO: Resolve this strange issue; extrapolation is suppressed for now.

            # "Recurve" the decurved value function and add it to the list
            vFuncNow = ValueFuncCRRA(vNvrsFuncNow, CRRA)
        else:
            vFuncNow = NullFunc()

        # Make the current-Markov-state-conditional solution
        solution_cond = ConsumerSolution(
            cFunc=cFuncNow,
            vFunc=vFuncNow,
            vPfunc=vPfuncNow,
            vPPfunc=vPPfuncNow,
            mNrmMin=mNrmMin_i,
        )

        # Add the current-state-conditional solution to the overall period solution
        solution.append_solution(solution_cond)

    # Add the lower bounds of market resources, MPC limits, human resources,
    # and the value functions to the overall solution, then return it
    solution.mNrmMin = mNrmMin_list
    solution.hNrm = hNrmNow
    solution.MPCmin = MPCminNow
    solution.MPCmax = MPCmaxNow
    solution.cFuncX = IndexedInterp(solution.cFunc)
    return solution
683####################################################################################################
684####################################################################################################
686# Make a dictionary of constructors for the markov consumption-saving model
687markov_constructor_dict = {
688 "IncShkDstn": construct_markov_lognormal_income_process_unemployment,
689 "PermShkDstn": get_PermShkDstn_from_IncShkDstn_markov,
690 "TranShkDstn": get_TranShkDstn_from_IncShkDstn_markov,
691 "aXtraGrid": make_assets_grid,
692 "MrkvArray": make_simple_binary_markov,
693 "solution_terminal": make_markov_solution_terminal,
694 "kNrmInitDstn": make_lognormal_kNrm_init_dstn,
695 "pLvlInitDstn": make_lognormal_pLvl_init_dstn,
696 "MrkvInitDstn": make_MrkvInitDstn,
697}

# Make a dictionary with parameters for the default constructor for kNrmInitDstn
default_kNrmInitDstn_params = {
    "kLogInitMean": -12.0,  # Mean of log initial capital
    "kLogInitStd": 0.0,  # Stdev of log initial capital
    "kNrmInitCount": 15,  # Number of points in initial capital discretization
}

# Make a dictionary with parameters for the default constructor for pLvlInitDstn
default_pLvlInitDstn_params = {
    "pLogInitMean": 0.0,  # Mean of log permanent income
    "pLogInitStd": 0.0,  # Stdev of log permanent income
    "pLvlInitCount": 15,  # Number of points in initial permanent income discretization
}

# Default parameters to make IncShkDstn using
# construct_markov_lognormal_income_process_unemployment
default_IncShkDstn_params = {
    "PermShkStd": np.array(
        [[0.1, 0.1]]
    ),  # Standard deviation of log permanent income shocks
    "PermShkCount": 7,  # Number of points in discrete approximation to permanent income shocks
    "TranShkStd": np.array(
        [[0.1, 0.1]]
    ),  # Standard deviation of log transitory income shocks
    "TranShkCount": 7,  # Number of points in discrete approximation to transitory income shocks
    "UnempPrb": np.array([0.05, 0.05]),  # Probability of unemployment while working
    "IncUnemp": np.array(
        [0.3, 0.3]
    ),  # Unemployment benefits replacement rate
    "T_retire": 0,  # Period of retirement (0 --> no retirement)
    "UnempPrbRet": None,  # Probability of "unemployment" while retired
    "IncUnempRet": None,  # "Unemployment" benefits when retired
}

# Default parameters to make aXtraGrid using make_assets_grid
default_aXtraGrid_params = {
    "aXtraMin": 0.001,  # Minimum end-of-period "assets above minimum" value
    "aXtraMax": 20,  # Maximum end-of-period "assets above minimum" value
    "aXtraNestFac": 3,  # Exponential nesting factor for aXtraGrid
    "aXtraCount": 48,  # Number of points in the grid of "assets above minimum"
    "aXtraExtra": None,  # Additional other values to add in grid (optional)
}

# Default parameters to make MrkvArray using make_simple_binary_markov
default_MrkvArray_params = {
    "Mrkv_p11": [0.9],  # Probability of remaining in binary state 1
    "Mrkv_p22": [0.4],  # Probability of remaining in binary state 2
}

# Make a dictionary to specify a Markov idiosyncratic shocks consumer type
init_indshk_markov = {
    # BASIC HARK PARAMETERS REQUIRED TO SOLVE THE MODEL
    "cycles": 1,  # Finite, non-cyclic model
    "T_cycle": 1,  # Number of periods in the cycle for this agent type
    "constructors": markov_constructor_dict,  # See dictionary above
    "pseudo_terminal": False,  # Terminal period really does exist
    "global_markov": False,  # Whether the Markov state is shared across agents
    # PRIMITIVE RAW PARAMETERS REQUIRED TO SOLVE THE MODEL
    "CRRA": 2.0,  # Coefficient of relative risk aversion
    "Rfree": [np.array([1.03, 1.03])],  # Interest factor on retained assets
    "DiscFac": 0.96,  # Intertemporal discount factor
    "LivPrb": [np.array([0.98, 0.98])],  # Survival probability after each period
    "PermGroFac": [np.array([0.99, 1.03])],  # Permanent income growth factor
    "BoroCnstArt": 0.0,  # Artificial borrowing constraint
    "vFuncBool": False,  # Whether to calculate the value function during solution
    "CubicBool": False,  # Use cubic spline interpolation for cFunc when True
    # (uses linear spline interpolation when False)
    # PARAMETERS REQUIRED TO SIMULATE THE MODEL
    "AgentCount": 10000,  # Number of agents of this type
    "T_age": None,  # Age after which simulated agents are automatically killed
    "PermGroFacAgg": 1.0,  # Aggregate permanent income growth factor
    # (the portion of PermGroFac attributable to aggregate productivity growth)
    "MrkvPrbsInit": np.array([1.0, 0.0]),  # Initial distribution of discrete state
    "NewbornTransShk": False,  # Whether newborns receive a transitory shock
    # ADDITIONAL OPTIONAL PARAMETERS
    "PerfMITShk": False,  # Do Perfect Foresight MIT Shock
    # (forces newborns to follow solution path of the agent they replaced if True)
    "neutral_measure": False,  # Whether to use permanent income neutral measure (see Harmenberg 2021)
}
init_indshk_markov.update(default_IncShkDstn_params)
init_indshk_markov.update(default_aXtraGrid_params)
init_indshk_markov.update(default_MrkvArray_params)
init_indshk_markov.update(default_kNrmInitDstn_params)
init_indshk_markov.update(default_pLvlInitDstn_params)
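
# As a rough usage sketch (not executed here), a custom specification can be
# built by copying the default dictionary and overriding entries, e.g.:
#
#     my_params = init_indshk_markov.copy()
#     my_params["Rfree"] = [np.array([1.02, 1.04])]  # state-varying interest factor
#     my_params["Mrkv_p11"] = [0.95]  # stickier first state
#     agent = MarkovConsumerType(**my_params)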


class MarkovConsumerType(IndShockConsumerType):
    """
    An agent in the Markov consumption-saving model. His problem is defined by a
    sequence of income distributions, survival probabilities, discount factors, and
    permanent income growth rates, as well as time-invariant values for risk
    aversion, the interest rate, the grid of end-of-period assets, and how he is
    borrowing constrained.
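
    Examples
    --------
    A minimal sketch using the default parameter dictionary defined above:

    >>> agent = MarkovConsumerType()  # doctest: +SKIP
    >>> agent.solve()  # doctest: +SKIP
    >>> agent.solution[0].cFunc[0]  # state-0 consumption function  # doctest: +SKIP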
790 """
792 time_vary_ = IndShockConsumerType.time_vary_ + ["MrkvArray"]
794 # Mrkv is both a shock and a state
795 shock_vars_ = IndShockConsumerType.shock_vars_ + ["Mrkv"]
796 state_vars = IndShockConsumerType.state_vars + ["Mrkv"]
797 default_ = {
798 "params": init_indshk_markov,
799 "solver": solve_one_period_ConsMarkov,
800 "model": "ConsMarkov.yaml",
801 "track_vars": ["aNrm", "cNrm", "mNrm", "pLvl", "Mrkv"],
802 }
803 distributions = [
804 "IncShkDstn",
805 "PermShkDstn",
806 "TranShkDstn",
807 "kNrmInitDstn",
808 "pLvlInitDstn",
809 "MrkvInitDstn",
810 ]

    def check_markov_inputs(self):
        """
        Many parameters used by MarkovConsumerType are arrays. Make sure those
        arrays are the right shape.

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        StateCount = self.MrkvArray[0].shape[0]

        # Check that arrays are the right shape
        for t in range(self.T_cycle):
            if not isinstance(self.Rfree[t], np.ndarray) or self.Rfree[t].shape != (
                StateCount,
            ):
                raise ValueError(
                    "Rfree[t] is not the right shape; it should be an array with one Rfree value per state."
                )

        # Check that arrays in lists are the right shape
        for MrkvArray_t in self.MrkvArray:
            if not isinstance(MrkvArray_t, np.ndarray) or MrkvArray_t.shape != (
                StateCount,
                StateCount,
            ):
                raise ValueError(
                    "MrkvArray is not the right shape; it should be StateCount x StateCount."
                )
        for LivPrb_t in self.LivPrb:
            if not isinstance(LivPrb_t, np.ndarray) or LivPrb_t.shape != (StateCount,):
                raise ValueError(
                    "Array in LivPrb is not the right shape; it should have length equal to the number of states."
                )
        for PermGroFac_t in self.PermGroFac:
            if not isinstance(PermGroFac_t, np.ndarray) or PermGroFac_t.shape != (
                StateCount,
            ):
                raise ValueError(
                    "Array in PermGroFac is not the right shape; it should have length equal to the number of states."
                )

        # Now check the income distribution.
        # Note IncShkDstn is (potentially) time-varying, so it is in time_vary.
        # Therefore it is a list, and each element of that list corresponds to the
        # income distribution at a particular point in time.
        for IncShkDstn_t in self.IncShkDstn:
            if not isinstance(IncShkDstn_t, list):
                raise ValueError(
                    "self.IncShkDstn is time varying and so must be a list "
                    + "of lists of Distributions, one per Markov State. Found "
                    + f"{self.IncShkDstn} instead"
                )
            elif len(IncShkDstn_t) != StateCount:
                raise ValueError(
                    "List in IncShkDstn is not the right length; it should have length equal to the number of states."
                )

    def pre_solve(self):
        """
        Check to make sure that the inputs that are specific to MarkovConsumerType
        are of the right shape (if arrays) or length (if lists).

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        AgentType.pre_solve(self)
        self.check_markov_inputs()
        self.construct("solution_terminal")

    def initialize_sim(self):
        self.shocks["Mrkv"] = np.zeros(self.AgentCount, dtype=int)
        IndShockConsumerType.initialize_sim(self)

        # Need to initialize the Markov state to be the same for all agents
        if self.global_markov:
            base_draw = Uniform(seed=self.RNG.integers(0, 2**31 - 1)).draw(1)
            Cutoffs = np.cumsum(np.array(self.MrkvPrbsInit))
            self.shocks["Mrkv"] = np.ones(self.AgentCount) * np.searchsorted(
                Cutoffs, base_draw
            ).astype(int)
        self.shocks["Mrkv"] = self.shocks["Mrkv"].astype(int)

    def sim_death(self):
        """
        Determines which agents die this period and must be replaced. Uses the
        sequence in LivPrb to determine survival probabilities for each agent.

        Parameters
        ----------
        None

        Returns
        -------
        which_agents : np.array(bool)
            Boolean array of size AgentCount indicating which agents die.
        """
        # Determine who dies
        LivPrb = np.array(self.LivPrb)[
            self.t_cycle - 1, self.shocks["Mrkv"]
        ]  # Time has already advanced, so look back one period
        DiePrb = 1.0 - LivPrb
        DeathShks = Uniform(seed=self.RNG.integers(0, 2**31 - 1)).draw(
            N=self.AgentCount
        )
        which_agents = DeathShks < DiePrb
        if self.T_age is not None:  # Kill agents that have lived for too many periods
            too_old = self.t_age >= self.T_age
            which_agents = np.logical_or(which_agents, too_old)
        return which_agents

    def sim_birth(self, which_agents):
        """
        Makes new Markov consumers by drawing initial normalized assets, permanent
        income levels, and discrete states. Calls IndShockConsumerType.sim_birth,
        then draws from the initial Markov distribution.

        Parameters
        ----------
        which_agents : np.array(Bool)
            Boolean array of size self.AgentCount indicating which agents should be "born".

        Returns
        -------
        None
        """
        # Get initial assets and permanent income
        IndShockConsumerType.sim_birth(self, which_agents)

        # Markov state is not changed if it is set at the global level
        if not self.global_markov:
            N = np.sum(which_agents)
            self.state_now["Mrkv"][which_agents] = self.MrkvInitDstn.draw(N)

    def get_markov_states(self):
        """
        Draw new Markov states for each agent in the simulated population, using
        the attribute MrkvArray to determine transition probabilities.

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        # Don't change the Markov state for those who were just born (unless global_markov)
        dont_change = self.t_age == 0
        if self.t_sim == 0:  # Respect the initial distribution of Markov states
            dont_change[:] = True

        # Determine which agents are in which states right now
        MrkvPrev = self.shocks["Mrkv"]
        MrkvNow = np.zeros(self.AgentCount, dtype=int)

        # Draw new Markov states for each agent
        for t in range(self.T_cycle):
            markov_process = MarkovProcess(
                self.MrkvArray[t], seed=self.RNG.integers(0, 2**31 - 1)
            )
            right_age = self.t_cycle == t
            MrkvNow[right_age] = markov_process.draw(MrkvPrev[right_age])
        if not self.global_markov:
            MrkvNow[dont_change] = MrkvPrev[dont_change]

        self.shocks["Mrkv"] = MrkvNow.astype(int)

    def get_shocks(self):
        """
        Gets new Markov states and permanent and transitory income shocks for this
        period. Samples from IncShkDstn for each period-state in the cycle.

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        self.get_markov_states()
        MrkvNow = self.shocks["Mrkv"]

        # Now get income shocks for each consumer, by cycle-time and discrete state
        PermShkNow = np.zeros(self.AgentCount)  # Initialize shock arrays
        TranShkNow = np.zeros(self.AgentCount)
        for t in range(self.T_cycle):
            for j in range(self.MrkvArray[t].shape[0]):
                these = np.logical_and(t == self.t_cycle, j == MrkvNow)
                N = np.sum(these)
                if N > 0:
                    # Set the current income distribution and permanent growth factor
                    IncShkDstnNow = self.IncShkDstn[t - 1][j]
                    PermGroFacNow = self.PermGroFac[t - 1][j]

                    # Get random draws of income shocks from the discrete distribution
                    EventDraws = IncShkDstnNow.draw_events(N)
                    PermShkNow[these] = (
                        IncShkDstnNow.atoms[0][EventDraws] * PermGroFacNow
                    )  # permanent "shock" includes expected growth
                    TranShkNow[these] = IncShkDstnNow.atoms[1][EventDraws]
        newborn = self.t_age == 0
        PermShkNow[newborn] = 1.0
        TranShkNow[newborn] = 1.0
        self.shocks["PermShk"] = PermShkNow
        self.shocks["TranShk"] = TranShkNow

    def read_shocks_from_history(self):
        """
        A slight modification of AgentType.read_shocks_from_history that makes sure
        the Markov state is stored as int, not float.

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        IndShockConsumerType.read_shocks_from_history(self)
        self.shocks["Mrkv"] = self.shocks["Mrkv"].astype(int)

    def get_Rport(self):
        """
        Returns an array of size self.AgentCount with the interest factor, which
        varies with the discrete state. This represents the portfolio return in
        this model.

        Parameters
        ----------
        None

        Returns
        -------
        RfreeNow : np.array
            Array of size self.AgentCount with the interest factor for each agent.
        """
        RfreeNow = np.zeros(self.AgentCount)
        for t in range(self.T_cycle):
            these = self.t_cycle == t
            RfreeNow[these] = self.Rfree[t][self.shocks["Mrkv"][these]]
        return RfreeNow

    def get_controls(self):
        """
        Calculates consumption for each consumer of this type using the consumption functions.

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        cNrmNow = np.zeros(self.AgentCount) + np.nan
        MPCnow = np.zeros(self.AgentCount) + np.nan
        J = self.MrkvArray[0].shape[0]

        MrkvBoolArray = np.zeros((J, self.AgentCount), dtype=bool)
        for j in range(J):
            MrkvBoolArray[j, :] = j == self.shocks["Mrkv"]

        for t in range(self.T_cycle):
            right_t = t == self.t_cycle
            for j in range(J):
                these = np.logical_and(right_t, MrkvBoolArray[j, :])
                cNrmNow[these], MPCnow[these] = (
                    self.solution[t]
                    .cFunc[j]
                    .eval_with_derivative(self.state_now["mNrm"][these])
                )
        self.controls["cNrm"] = cNrmNow
        self.MPCnow = MPCnow

    def get_poststates(self):
        super().get_poststates()
        self.state_now["Mrkv"] = self.shocks["Mrkv"].copy()

    def calc_bounding_values(self):  # pragma: nocover
        """
        Calculate human wealth plus minimum and maximum MPC in an infinite
        horizon model with only one period repeated indefinitely. Store results
        as attributes of self. Human wealth is the present discounted value of
        expected future income after receiving income this period, ignoring
        mortality. The maximum MPC is the limit of the MPC as m --> mNrmMin. The
        minimum MPC is the limit of the MPC as m --> infty. Results are all
        np.array with elements corresponding to each Markov state.

        NOT YET IMPLEMENTED FOR THIS CLASS

        Parameters
        ----------
        None

        Returns
        -------
        None
        """
        raise NotImplementedError()

    def make_euler_error_func(self, mMax=100, approx_inc_dstn=True):  # pragma: nocover
        """
        Creates a "normalized Euler error" function for this instance, mapping
        from market resources to "consumption error per dollar of consumption."
        Stores result in attribute eulerErrorFunc as an interpolated function.
        Has option to use approximate income distribution stored in self.IncShkDstn
        or to use a (temporary) very dense approximation.

        NOT YET IMPLEMENTED FOR THIS CLASS

        Parameters
        ----------
        mMax : float
            Maximum normalized market resources for the Euler error function.
        approx_inc_dstn : Boolean
            Indicator for whether to use the approximate discrete income
            distribution stored in self.IncShkDstn[0], or to use a very accurate
            discrete approximation instead. When True, uses approximation in
            IncShkDstn; when False, makes and uses a very dense approximation.

        Returns
        -------
        None
        """
        raise NotImplementedError()

    def check_conditions(self, verbose=None):  # pragma: nocover
        raise NotImplementedError()

    def calc_limiting_values(self):  # pragma: nocover
        raise NotImplementedError()