Coverage for HARK / utilities.py: 93% (265 statements)
coverage.py v7.13.1, created at 2026-01-25 05:22 +0000
1"""
2General purpose / miscellaneous functions. Includes functions to approximate
3continuous distributions with discrete ones, utility functions (and their
4derivatives), manipulation of discrete distributions, and basic plotting tools.
5"""
7import cProfile
8import os
9import pstats
10import re
12import numba
13import numpy as np # Python's numeric library, abbreviated "np"
14from scipy.interpolate import interp1d
16from inspect import signature


class get_it_from:
    """
    Class whose instances act as a special case trivial constructor that merely
    grabs an attribute or entry from the named parent object. This is useful when
    there are constructed model inputs that are "built together". Simply have a
    constructor that makes a dictionary (or object) containing the several inputs,
    then use get_it_from(that_dict_name) as the constructor for each of them.

    Parameters
    ----------
    name : str
        Name of the parent dictionary or object from which to take the object.
    """

    def __init__(self, name):
        self.name = name

    def __call__(self, parent, query):
        if isinstance(parent, dict):
            return parent[query]
        else:
            return getattr(parent, query)
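
# A minimal usage sketch for get_it_from (the names below are illustrative, not
# from any HARK model): one constructor builds several related inputs at once,
# and get_it_from pulls each individual input out of the shared parent.
#   >>> shared = {"Rfree": 1.03, "LivPrb": 0.98}
#   >>> grab = get_it_from("shared_inputs")  # records the parent's name
#   >>> grab(shared, "Rfree")
#   1.03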


# ==============================================================================
# ============== Some basic function tools ====================================
# ==============================================================================


def get_arg_names(function):
    """
    Returns a list of strings naming all of the arguments for the passed function.

    Parameters
    ----------
    function : function
        A function whose argument names are wanted.

    Returns
    -------
    argNames : [string]
        The names of the arguments of function.
    """
    argCount = function.__code__.co_argcount
    argNames = function.__code__.co_varnames[:argCount]
    return argNames
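
# Example (illustrative):
#   >>> get_arg_names(lambda rho, c: c ** (1.0 - rho))
#   ('rho', 'c')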


class NullFunc:
    """
    A trivial class that acts as a placeholder "do nothing" function.
    """

    def __call__(self, *args):
        """
        Returns meaningless output no matter what the inputs are. If there is
        no input, returns None. Otherwise, returns an array of NaNs (or a
        single NaN) of the same size as the first input.
        """
        if len(args) == 0:
            return None
        else:
            arg = args[0]
            if hasattr(arg, "shape"):
                return np.zeros_like(arg) + np.nan
            else:
                return np.nan

    def distance(self, other):
        """
        Trivial distance metric that only cares whether the other object is also
        an instance of NullFunc. Intentionally does not inherit from HARKobject
        as this might create dependency problems.

        Parameters
        ----------
        other : any
            Any object for comparison to this instance of NullFunc.

        Returns
        -------
        (unnamed) : float
            The distance between self and other. Returns 0 if other is also a
            NullFunc; otherwise returns an arbitrary high number.
        """
        if other.__class__ is self.__class__:
            return 0.0
        else:
            return 1000.0


def apply_fun_to_vals(fun, vals):
    """
    Applies a function to the arguments defined in `vals`.
    This is equivalent to `fun(**vals)`, except
    that `vals` may contain keys that are not named arguments
    of `fun`.

    Parameters
    ----------
    fun : callable
        The function to be applied.
    vals : dict
        Dictionary mapping argument names to values; extra keys are ignored.
    """
    return fun(*[vals[var] for var in signature(fun).parameters])
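
# Example (illustrative): keys in vals that are not arguments of fun are ignored.
#   >>> apply_fun_to_vals(lambda a, b: a + b, {"a": 1.0, "b": 2.0, "c": 99.0})
#   3.0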


# =======================================================
# ================ Other useful functions ===============
# =======================================================


def make_assets_grid(aXtraMin, aXtraMax, aXtraCount, aXtraExtra, aXtraNestFac):
    """
    Constructs the base grid of post-decision states, representing end-of-period
    assets above the absolute minimum. Has three modes, depending on aXtraNestFac:

    aXtraNestFac = -1 : Uniformly spaced grid.
    aXtraNestFac = 0  : Ordinary exponentially spaced grid.
    aXtraNestFac >= 1 : Multi-exponentially nested grid.

    See :func:`HARK.utilities.make_grid_exp_mult` for more info.

    Parameters
    ----------
    aXtraMin : float
        Minimum value for the assets-above-minimum grid.
    aXtraMax : float
        Maximum value for the assets-above-minimum grid.
    aXtraCount : int
        Number of nodes in the assets-above-minimum grid, not counting extra values.
    aXtraExtra : [float]
        Additional values to insert in the assets-above-minimum grid.
    aXtraNestFac : int
        Level of exponential nesting for grid. If -1, the grid is linearly spaced.

    Returns
    -------
    aXtraGrid : np.ndarray
        Base array of values for the post-decision-state grid.
    """
    # Set up post decision state grid:
    if aXtraNestFac == -1:
        aXtraGrid = np.linspace(aXtraMin, aXtraMax, aXtraCount)
    elif (aXtraNestFac >= 0) and type(aXtraNestFac) is int:
        aXtraGrid = make_grid_exp_mult(
            ming=aXtraMin, maxg=aXtraMax, ng=aXtraCount, timestonest=aXtraNestFac
        )
    else:
        raise ValueError(
            "Please ensure aXtraNestFac is either -1 or a non-negative integer."
        )

    # Add in additional points for the grid:
    if aXtraExtra is not None:
        temp_list = []
        for i in aXtraExtra:
            if i is not None:
                temp_list.append(i)
        aXtraGrid = np.sort(np.unique(np.concatenate((aXtraGrid, temp_list))))

    return aXtraGrid
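
# Example (values are illustrative): a triple-nested grid on [0.001, 20] with
# 48 base points, plus 0.5 inserted as an extra gridpoint.
#   >>> aXtraGrid = make_assets_grid(0.001, 20.0, 48, [0.5], 3)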


# ==============================================================================
# ============== Functions for generating state space grids ===================
# ==============================================================================


def make_grid_exp_mult(ming, maxg, ng, timestonest=20):
    r"""
    Makes a multi-exponentially spaced grid.
    If the function :math:`\ln(1+x)` were applied timestonest times, the grid would
    become linearly spaced. If timestonest is 0, the grid is exponentially spaced.
    If timestonest is -1, the grid is linearly spaced.

    NOTE: The bounds of the grid must be non-negative, else this function will
    return an invalid grid with NaNs in it. If you want a non-linearly spaced
    grid that spans negative numbers, use make_exponential_grid; see below.

    Parameters
    ----------
    ming : float
        Minimum value of the grid, which must be non-negative.
    maxg : float
        Maximum value of the grid, which must be greater than ming.
    ng : int
        The number of grid points.
    timestonest : int
        The number of times to nest the exponentiation.

    Returns
    -------
    points : np.array
        A multi-exponentially spaced grid.

    Notes
    -----
    Original Matlab code can be found in Chris Carroll's
    [Solution Methods for Microeconomic Dynamic Optimization Problems]
    (https://www.econ2.jhu.edu/people/ccarroll/solvingmicrodsops/) toolkit.
    Latest update: 01 May 2015
    """
    if timestonest == -1:
        grid = np.linspace(ming, maxg, ng)
        return grid
    if timestonest > 0:
        Lming = ming
        Lmaxg = maxg
        for j in range(timestonest):
            Lming = np.log(Lming + 1)
            Lmaxg = np.log(Lmaxg + 1)
        Lgrid = np.linspace(Lming, Lmaxg, ng)
        grid = Lgrid
        for j in range(timestonest):
            grid = np.exp(grid) - 1
    else:
        Lming = np.log(ming)
        Lmaxg = np.log(maxg)
        Lgrid = np.linspace(Lming, Lmaxg, ng)
        grid = np.exp(Lgrid)
    return grid
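
# Example (illustrative): with timestonest=1 the grid is uniform in log(1+x),
# so points cluster toward the lower bound.
#   >>> grid = make_grid_exp_mult(0.0, 10.0, 5, timestonest=1)  # spans [0, 10]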


def make_exponential_grid(ming, maxg, ng, order=1.0):
    """
    Construct an exponentially spaced grid with chosen exponential order.
    A uniformly spaced grid on [0,1] is raised to the chosen order, then linearly
    remapped to the specified interval. Supports any real-valued grid bounds.

    Parameters
    ----------
    ming : float
        Lower bound of grid.
    maxg : float
        Upper bound of grid.
    ng : int
        Number of points in the grid.
    order : float, optional
        Exponential spacing order for the grid. The default is 1.0, or linear.

    Returns
    -------
    grid : np.array
        Exponentially spaced grid on [ming, maxg] with ng points.
    """
    grid = np.linspace(0.0, 1.0, ng) ** order * (maxg - ming) + ming
    return grid
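
# Example (illustrative): order=2.0 clusters points near the lower bound, and
# negative bounds are fine because the unit grid is linearly remapped.
#   >>> make_exponential_grid(-1.0, 1.0, 5, order=2.0)  # [-1.0, -0.875, -0.5, 0.125, 1.0]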


# ==============================================================================
# ============== Uncategorized general functions ===============================
# ==============================================================================


def get_percentiles(data, weights=None, percentiles=None, presorted=False):
    """
    Calculates the requested percentiles of (weighted) data. Median by default.

    Parameters
    ----------
    data : numpy.array
        A 1D array of float data.
    weights : np.array
        A weighting vector for the data.
    percentiles : [float]
        A list or numpy.array of percentiles to calculate for the data. Each
        element should be in (0,1).
    presorted : boolean
        Indicator for whether data has already been sorted.

    Returns
    -------
    pctl_out : numpy.array
        The requested percentiles of the data.
    """
    if percentiles is None:
        percentiles = [0.5]
    else:
        if (
            not isinstance(percentiles, (list, np.ndarray))
            or min(percentiles) <= 0
            or max(percentiles) >= 1
        ):
            raise ValueError(
                "Percentiles should be a list or numpy array of floats between 0 and 1"
            )

    if data.size < 2:
        return np.zeros(np.array(percentiles).shape) + np.nan

    if weights is None:  # Set equiprobable weights if none were passed
        weights = np.ones(data.size) / float(data.size)

    if presorted:  # Sort the data if it is not already
        data_sorted = data
        weights_sorted = weights
    else:
        order = np.argsort(data)
        data_sorted = data[order]
        weights_sorted = weights[order]

    # Cumulative probability distribution
    cum_dist = np.cumsum(weights_sorted) / np.sum(weights_sorted)

    # Calculate the requested percentiles by interpolating the data over the
    # cumulative distribution, then evaluating at the percentile values
    inv_CDF = interp1d(cum_dist, data_sorted, bounds_error=False, assume_sorted=True)
    pctl_out = inv_CDF(percentiles)
    return pctl_out
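
# Example (illustrative): interpolated quartiles of the integers 1-5 with
# equal weights.
#   >>> data = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
#   >>> get_percentiles(data, percentiles=[0.25, 0.5, 0.75])  # ~[1.25, 2.5, 3.75]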


def get_lorenz_shares(data, weights=None, percentiles=None, presorted=False):
    """
    Calculates the Lorenz curve at the requested percentiles of (weighted) data.
    Median by default.

    Parameters
    ----------
    data : numpy.array
        A 1D array of float data.
    weights : numpy.array
        A weighting vector for the data.
    percentiles : [float]
        A list or numpy.array of percentiles to calculate for the data. Each
        element should be in (0,1).
    presorted : boolean
        Indicator for whether data has already been sorted.

    Returns
    -------
    lorenz_out : numpy.array
        The requested Lorenz curve points of the data.
    """
    if percentiles is None:
        percentiles = [0.5]
    else:
        if (
            not isinstance(percentiles, (list, np.ndarray))
            or min(percentiles) <= 0
            or max(percentiles) >= 1
        ):
            raise ValueError(
                "Percentiles should be a list or numpy array of floats between 0 and 1"
            )
    if weights is None:  # Set equiprobable weights if none were given
        weights = np.ones(data.size)

    if presorted:  # Sort the data if it is not already
        data_sorted = data
        weights_sorted = weights
    else:
        order = np.argsort(data)
        data_sorted = data[order]
        weights_sorted = weights[order]

    # Cumulative probability distribution
    cum_dist = np.cumsum(weights_sorted) / np.sum(weights_sorted)
    temp = data_sorted * weights_sorted
    cum_data = np.cumsum(temp) / np.sum(temp)  # cumulative ownership shares

    # Calculate the requested Lorenz shares by interpolating the cumulative ownership
    # shares over the cumulative distribution, then evaluating at requested points
    lorenzFunc = interp1d(cum_dist, cum_data, bounds_error=False, assume_sorted=True)
    lorenz_out = lorenzFunc(percentiles)
    return lorenz_out
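
# Example (illustrative): the bottom half of this population owns 30% of the
# total, so the Lorenz curve at 0.5 is 0.3.
#   >>> data = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
#   >>> get_lorenz_shares(data, percentiles=[0.5])  # ~[0.3]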


def calc_subpop_avg(data, reference, cutoffs, weights=None):
    """
    Calculates the average of (weighted) data between cutoff percentiles of a
    reference variable.

    Parameters
    ----------
    data : numpy.array
        A 1D array of float data.
    reference : numpy.array
        A 1D array of float data of the same length as data.
    cutoffs : [(float, float)]
        A list of (lower, upper) percentile bounds; each bound should be
        in [0,1].
    weights : numpy.array
        A weighting vector for the data.

    Returns
    -------
    slice_avg : [float]
        The (weighted) average of data that falls within the cutoff percentiles
        of reference.
    """
    if weights is None:  # Set equiprobable weights if none were given
        weights = np.ones(data.size)

    # Sort the data and generate a cumulative distribution
    order = np.argsort(reference)
    data_sorted = data[order]
    weights_sorted = weights[order]
    cum_dist = np.cumsum(weights_sorted) / np.sum(weights_sorted)

    # For each set of cutoffs, calculate the average of data that falls within
    # the cutoff percentiles of reference
    slice_avg = []
    for j in range(len(cutoffs)):
        bot = np.searchsorted(cum_dist, cutoffs[j][0])
        top = np.searchsorted(cum_dist, cutoffs[j][1])
        slice_avg.append(
            np.sum(data_sorted[bot:top] * weights_sorted[bot:top])
            / np.sum(weights_sorted[bot:top])
        )
    return slice_avg
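
# Example (illustrative): average data among observations whose reference value
# falls in the bottom half vs the top half of the (weighted) distribution.
#   >>> data = np.array([10.0, 20.0, 30.0, 40.0, 50.0])
#   >>> ref = np.array([1.0, 2.0, 3.0, 4.0, 5.0])
#   >>> calc_subpop_avg(data, ref, cutoffs=[(0.0, 0.5), (0.5, 1.0)])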


def epanechnikov_kernel(x, ref_x, h=1.0):
    """
    The Epanechnikov kernel, which has been shown to be the most efficient kernel
    for non-parametric regression.

    Parameters
    ----------
    x : np.array
        Values at which to evaluate the kernel.
    ref_x : float
        The reference point.
    h : float
        Kernel bandwidth.

    Returns
    -------
    out : np.array
        Kernel values at each value of x.
    """
    u = (x - ref_x) / h  # Normalize distance by bandwidth
    out = 0.75 * (1.0 - u**2)
    out[np.abs(u) > 1.0] = 0.0  # Kernel = 0 outside [-1,1]
    return out


def triangle_kernel(x, ref_x, h=1.0):
    """
    The triangle or "hat" kernel.

    Parameters
    ----------
    x : np.array
        Values at which to evaluate the kernel.
    ref_x : float
        The reference point.
    h : float
        Kernel bandwidth.

    Returns
    -------
    out : np.array
        Kernel values at each value of x.
    """
    u = (x - ref_x) / h  # Normalize distance by bandwidth
    these = np.abs(u) <= 1.0  # Kernel = 0 outside [-1,1]
    out = np.zeros_like(u)  # Initialize kernel output
    out[these] = 1.0 - np.abs(u[these])  # Evaluate kernel
    return out


kernel_dict = {
    "epanechnikov": epanechnikov_kernel,
    "triangle": triangle_kernel,
    "hat": triangle_kernel,
}
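
# Example (illustrative): both kernels integrate to one and vanish outside a
# bandwidth h of the reference point; "hat" is just an alias for the triangle.
#   >>> x = np.linspace(-2.0, 2.0, 9)
#   >>> epanechnikov_kernel(x, 0.0, h=1.0)  # nonzero only where |x| <= 1
#   >>> kernel_dict["hat"](x, 0.0, h=1.0)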


def kernel_regression(x, y, bot=None, top=None, N=500, h=None, kernel="epanechnikov"):
    """
    Performs a non-parametric Nadaraya-Watson 1D kernel regression on given data
    with optionally specified range, number of points, and kernel bandwidth.

    Parameters
    ----------
    x : np.array
        The independent variable in the kernel regression.
    y : np.array
        The dependent variable in the kernel regression.
    bot : float
        Minimum value of interest in the regression; defaults to min(x).
    top : float
        Maximum value of interest in the regression; defaults to max(x).
    N : int
        Number of points to compute.
    h : float
        The bandwidth of the kernel; defaults to 2*(top - bot)/N.
    kernel : str
        Name of the kernel to use; must be a key of kernel_dict
        ("epanechnikov" by default).

    Returns
    -------
    regression : interp1d
        A piecewise locally linear kernel regression: y = f(x).
    """
    # Fix omitted inputs
    if bot is None:
        bot = np.min(x)
    if top is None:
        top = np.max(x)
    if h is None:
        h = 2.0 * (top - bot) / float(N)  # This is an arbitrary default

    # Get kernel if possible
    try:
        kern = kernel_dict[kernel]
    except KeyError:
        raise ValueError("Can't find a kernel named '" + kernel + "'!")

    # Construct a local linear approximation
    x_vec = np.linspace(bot, top, num=N)
    # Evaluate the kernel for all evaluation points at once
    weights = kern(x[:, None], x_vec[None, :], h)
    weight_sums = np.sum(weights, axis=0)
    # Avoid division by zero when weights are extremely small
    weight_sums[weight_sums == 0] = np.nan
    y_vec = np.dot(weights.T, y) / weight_sums
    regression = interp1d(x_vec, y_vec, bounds_error=False, assume_sorted=True)
    return regression
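
# Example (sketch, with synthetic data): recover a noisy sine curve. The
# bandwidth h=0.5 here is chosen by eye; the default is 2*(top - bot)/N.
#   >>> rng = np.random.default_rng(0)
#   >>> x = rng.uniform(0.0, 6.0, 500)
#   >>> y = np.sin(x) + 0.3 * rng.standard_normal(500)
#   >>> f = kernel_regression(x, y, N=100, h=0.5)
#   >>> f(3.0)  # approximately sin(3.0)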


def make_polynomial_params(coeffs, T, offset=0.0, step=1.0):
    """
    Make a T-length array of parameters using polynomial coefficients.

    Parameters
    ----------
    coeffs : [float]
        Arbitrary length list of polynomial coefficients, ordered from the
        constant term up to the highest power.
    T : int
        Number of elements in the output.
    offset : float, optional
        Value at which the X values should start. The default is 0.0.
    step : float, optional
        Increment of the X values. The default is 1.0.

    Returns
    -------
    param_vals : np.array
        T-length array of parameter values calculated using the polynomial coefficients.
    """
    X = offset + step * np.arange(T)
    # np.polyval expects the highest power first, so reverse the coefficients
    return np.polyval(coeffs[::-1], X)
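
# Example (illustrative): coefficients are ordered from the constant term up,
# so [1.0, 0.5] represents 1 + 0.5*t.
#   >>> make_polynomial_params([1.0, 0.5], T=4)  # [1.0, 1.5, 2.0, 2.5]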


@numba.njit
def jump_to_grid_1D(m_vals, probs, Dist_mGrid):  # pragma: nocover
    """
    Distributes values onto a predefined grid, maintaining the means.

    Parameters
    ----------
    m_vals : np.array
        Market resource values.
    probs : np.array
        Shock probabilities associated with combinations of m_vals.
        Can be thought of as the probability mass function of (m_vals).
    Dist_mGrid : np.array
        Grid over normalized market resources.

    Returns
    -------
    probGrid.flatten() : np.array
        Probabilities of each gridpoint on the grid of market resources.
    """
    probGrid = np.zeros(len(Dist_mGrid))
    mIndex = np.digitize(m_vals, Dist_mGrid) - 1
    mIndex[m_vals <= Dist_mGrid[0]] = -1
    mIndex[m_vals >= Dist_mGrid[-1]] = len(Dist_mGrid) - 1

    for i in range(len(m_vals)):
        if mIndex[i] == -1:
            mlowerIndex = 0
            mupperIndex = 0
            mlowerWeight = 1.0
            mupperWeight = 0.0
        elif mIndex[i] == len(Dist_mGrid) - 1:
            mlowerIndex = -1
            mupperIndex = -1
            mlowerWeight = 1.0
            mupperWeight = 0.0
        else:
            mlowerIndex = mIndex[i]
            mupperIndex = mIndex[i] + 1
            mlowerWeight = (Dist_mGrid[mupperIndex] - m_vals[i]) / (
                Dist_mGrid[mupperIndex] - Dist_mGrid[mlowerIndex]
            )
            mupperWeight = 1.0 - mlowerWeight

        probGrid[mlowerIndex] += probs[i] * mlowerWeight
        probGrid[mupperIndex] += probs[i] * mupperWeight

    return probGrid.flatten()
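
# Example (illustrative): a point mass at 1.5 is split evenly between the
# bracketing gridpoints 1.0 and 2.0, preserving the mean.
#   >>> grid = np.array([0.0, 1.0, 2.0, 3.0])
#   >>> jump_to_grid_1D(np.array([1.5]), np.array([1.0]), grid)  # [0., .5, .5, 0.]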


@numba.njit
def jump_to_grid_2D(
    m_vals, perm_vals, probs, dist_mGrid, dist_pGrid
):  # pragma: nocover
    """
    Distributes values onto a predefined grid, maintaining the means. m_vals and
    perm_vals are realizations of market resources and permanent income, while
    dist_mGrid and dist_pGrid are the predefined grids of market resources and
    permanent income, respectively. That is, m_vals and perm_vals do not
    necessarily lie on their respective grids. Returns probabilities of each
    gridpoint on the combined grid of market resources and permanent income.

    Parameters
    ----------
    m_vals : np.array
        Market resource values.
    perm_vals : np.array
        Permanent income values.
    probs : np.array
        Shock probabilities associated with combinations of m_vals and perm_vals.
        Can be thought of as the probability mass function of (m_vals, perm_vals).
    dist_mGrid : np.array
        Grid over normalized market resources.
    dist_pGrid : np.array
        Grid over permanent income.

    Returns
    -------
    probGrid.flatten() : np.array
        Probabilities of each gridpoint on the combined grid of market resources
        and permanent income.
    """
    probGrid = np.zeros((len(dist_mGrid), len(dist_pGrid)))

    # Maybe use np.searchsorted as opposed to np.digitize
    # Array indicating in which bin each value of m_vals lies relative to
    # dist_mGrid. Bins lie between points of dist_mGrid. For instance, if mval
    # lies between dist_mGrid[4] and dist_mGrid[5], it is in bin 4 (would be 5
    # if 1 was not subtracted in the next line).
    mIndex = np.digitize(m_vals, dist_mGrid) - 1
    # If the value is less than the smallest value on dist_mGrid, assign it an
    # index of -1
    mIndex[m_vals <= dist_mGrid[0]] = -1
    # If the value is greater than the largest value on dist_mGrid, assign it
    # an index of the length of the grid minus 1
    mIndex[m_vals >= dist_mGrid[-1]] = len(dist_mGrid) - 1

    # The following three lines hold the same intuition as above
    pIndex = np.digitize(perm_vals, dist_pGrid) - 1
    pIndex[perm_vals <= dist_pGrid[0]] = -1
    pIndex[perm_vals >= dist_pGrid[-1]] = len(dist_pGrid) - 1

    for i in range(len(m_vals)):
        if mIndex[i] == -1:
            # If mval is below the smallest gridpoint, assign all weight to the
            # first gridpoint
            mlowerIndex = 0
            mupperIndex = 0
            mlowerWeight = 1.0
            mupperWeight = 0.0
        elif mIndex[i] == len(dist_mGrid) - 1:
            # If mval is above the largest gridpoint, assign all weight to the
            # last gridpoint
            mlowerIndex = -1
            mupperIndex = -1
            mlowerWeight = 1.0
            mupperWeight = 0.0
        else:
            # Standard case where mval does not lie past either extreme:
            # identify which two points on the grid mval lies between
            mlowerIndex = mIndex[i]
            mupperIndex = mIndex[i] + 1
            # Assign weight to the indices that bound the m_vals point.
            # Intuitively, an mval perfectly between two points on the mgrid
            # assigns a weight of 0.5 to the gridpoints above and below; mvals
            # close to the gridpoint above get a smaller mlowerWeight.
            mlowerWeight = (dist_mGrid[mupperIndex] - m_vals[i]) / (
                dist_mGrid[mupperIndex] - dist_mGrid[mlowerIndex]
            )
            mupperWeight = 1.0 - mlowerWeight  # Weight of the gridpoint above

        # Same logic as above, except the weights here concern the permanent
        # income grid
        if pIndex[i] == -1:
            plowerIndex = 0
            pupperIndex = 0
            plowerWeight = 1.0
            pupperWeight = 0.0
        elif pIndex[i] == len(dist_pGrid) - 1:
            plowerIndex = -1
            pupperIndex = -1
            plowerWeight = 1.0
            pupperWeight = 0.0
        else:
            plowerIndex = pIndex[i]
            pupperIndex = pIndex[i] + 1
            plowerWeight = (dist_pGrid[pupperIndex] - perm_vals[i]) / (
                dist_pGrid[pupperIndex] - dist_pGrid[plowerIndex]
            )
            pupperWeight = 1.0 - plowerWeight

        # Compute probabilities of each gridpoint on the combined market
        # resources and permanent income grid by assigning probs[i] (the
        # probability of mval AND pval occurring together) to the four
        # surrounding gridpoints according to the weights computed above.
        probGrid[mlowerIndex][plowerIndex] += probs[i] * mlowerWeight * plowerWeight
        probGrid[mlowerIndex][pupperIndex] += probs[i] * mlowerWeight * pupperWeight
        probGrid[mupperIndex][plowerIndex] += probs[i] * mupperWeight * plowerWeight
        probGrid[mupperIndex][pupperIndex] += probs[i] * mupperWeight * pupperWeight

    return probGrid.flatten()


@numba.njit(parallel=True)
def gen_tran_matrix_1D(
    dist_mGrid, bNext, shk_prbs, perm_shks, tran_shks, LivPrb, NewBornDist
):  # pragma: nocover
    """
    Computes the transition matrix across normalized market resources.
    This function is built to non-stochastically simulate the IndShockConsumerType.
    It is used exclusively when the Harmenberg neutral measure is applied and/or
    when permanent income is not a state variable.
    For more information, see https://econ-ark.org/materials/harmenberg-aggregation?launch

    Parameters
    ----------
    dist_mGrid : np.array
        Grid over normalized market resources.
    bNext : np.array
        Grid over bank balances.
    shk_prbs : np.array
        Array of shock probabilities over combinations of permanent and
        transitory shocks.
    perm_shks : np.array
        Array of shocks to permanent income. Shocks should follow the
        Harmenberg neutral measure.
    tran_shks : np.array
        Array of shocks to transitory income.
    LivPrb : float
        Probability of not dying.
    NewBornDist : np.array
        Array representing the distribution of newborns across the grid of
        normalized market resources.

    Returns
    -------
    TranMatrix : np.array
        Transition matrix over the normalized market resources grid.
    """
    TranMatrix = np.zeros((len(dist_mGrid), len(dist_mGrid)))
    for i in numba.prange(len(dist_mGrid)):
        # Compute next period's market resources given today's bank balances bNext[i]
        mNext_ij = bNext[i] / perm_shks + tran_shks
        # Survivors transition according to the realized shocks; the deceased
        # are replaced by newborns drawn from NewBornDist
        TranMatrix[:, i] += (
            LivPrb * jump_to_grid_1D(mNext_ij, shk_prbs, dist_mGrid)
            + (1.0 - LivPrb) * NewBornDist
        )
    return TranMatrix


@numba.njit(parallel=True)
def gen_tran_matrix_2D(
    dist_mGrid, dist_pGrid, bNext, shk_prbs, perm_shks, tran_shks, LivPrb, NewBornDist
):  # pragma: nocover
    """
    Computes the transition matrix over normalized market resources and
    permanent income. This function is built to non-stochastically simulate
    the IndShockConsumerType.

    Parameters
    ----------
    dist_mGrid : np.array
        Grid over normalized market resources.
    dist_pGrid : np.array
        Grid over permanent income.
    bNext : np.array
        Grid over bank balances.
    shk_prbs : np.array
        Array of shock probabilities over combinations of permanent and
        transitory shocks.
    perm_shks : np.array
        Array of shocks to permanent income.
    tran_shks : np.array
        Array of shocks to transitory income.
    LivPrb : float
        Probability of not dying.
    NewBornDist : np.array
        Array representing the distribution of newborns across the grid of
        normalized market resources and the grid of permanent income.

    Returns
    -------
    TranMatrix : np.array
        Transition matrix over the normalized market resources grid and the
        permanent income grid.
    """
    TranMatrix = np.zeros(
        (len(dist_mGrid) * len(dist_pGrid), len(dist_mGrid) * len(dist_pGrid))
    )
    for i in numba.prange(len(dist_mGrid)):
        for j in numba.prange(len(dist_pGrid)):
            # Compute next period's market resources given today's bank balances bNext[i]
            mNext_ij = bNext[i] / perm_shks + tran_shks
            # Compute next period's permanent income level by applying the
            # permanent income shock
            pNext_ij = dist_pGrid[j] * perm_shks
            # Survivors transition according to the realized shocks; the
            # deceased are replaced by newborns drawn from NewBornDist
            TranMatrix[:, i * len(dist_pGrid) + j] += (
                LivPrb
                * jump_to_grid_2D(mNext_ij, pNext_ij, shk_prbs, dist_mGrid, dist_pGrid)
                + (1.0 - LivPrb) * NewBornDist
            )
    return TranMatrix


# ==============================================================================
# ============== Some basic plotting tools ====================================
# ==============================================================================


def plot_funcs(
    functions, bottom, top, N=1000, legend_kwds=None, xlabel=None, ylabel=None
):
    """
    Plots 1D function(s) over a given range.

    Parameters
    ----------
    functions : [function] or function
        A single function, or a list of functions, to be plotted.
    bottom : float
        The lower limit of the domain to be plotted.
    top : float
        The upper limit of the domain to be plotted.
    N : int
        Number of points in the domain to evaluate.
    legend_kwds : None or dictionary
        If not None, the keyword dictionary to pass to plt.legend.
    xlabel : None or str
        Optional horizontal axis label.
    ylabel : None or str
        Optional vertical axis label.

    Returns
    -------
    none
    """
    import matplotlib.pyplot as plt

    plt.ion()

    if isinstance(functions, list):
        function_list = functions
    else:
        function_list = [functions]

    for function in function_list:
        x = np.linspace(bottom, top, N, endpoint=True)
        y = function(x)
        plt.plot(x, y)
    plt.xlim([bottom, top])
    if xlabel is not None:
        plt.xlabel(xlabel)
    if ylabel is not None:
        plt.ylabel(ylabel)
    if legend_kwds is not None:
        plt.legend(**legend_kwds)
    plt.show(block=False)
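
# Example (illustrative): plot two curves on [0.1, 5.0] with a legend.
#   >>> plot_funcs([np.sqrt, np.log], 0.1, 5.0,
#   ...            legend_kwds={"labels": ["sqrt", "log"]})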


def plot_funcs_der(
    functions, bottom, top, N=1000, legend_kwds=None, xlabel=None, ylabel=None
):
    """
    Plots the first derivative of 1D function(s) over a given range.

    Parameters
    ----------
    functions : [function] or function
        A function or list of functions, the derivatives of which are to be plotted.
    bottom : float
        The lower limit of the domain to be plotted.
    top : float
        The upper limit of the domain to be plotted.
    N : int
        Number of points in the domain to evaluate.
    legend_kwds : None or dictionary
        If not None, the keyword dictionary to pass to plt.legend.
    xlabel : None or str
        Optional horizontal axis label.
    ylabel : None or str
        Optional vertical axis label.

    Returns
    -------
    none
    """
    import matplotlib.pyplot as plt

    plt.ion()

    if isinstance(functions, list):
        function_list = functions
    else:
        function_list = [functions]

    step = (top - bottom) / N
    for function in function_list:
        x = np.arange(bottom, top, step)
        y = function.derivative(x)
        plt.plot(x, y)
    plt.xlim([bottom, top])
    if xlabel is not None:
        plt.xlabel(xlabel)
    if ylabel is not None:
        plt.ylabel(ylabel)
    if legend_kwds is not None:
        plt.legend(**legend_kwds)
    plt.show(block=False)


def plot_func_slices(
    func,
    bot,
    top,
    N=1000,
    xdim=0,
    zdim=1,
    zmin=None,
    zmax=None,
    zn=11,
    zorder=1.0,
    Z=None,
    const=None,
    legend_kwds=None,
    xlabel=None,
    ylabel=None,
):
    """
    Plots "slices" of a function with more than one argument. User specifies
    range of the "x" (horizontal) dimension and selects which other dimension
    is "z". Can specify set of z values explicitly with list Z or describe an
    exponentially spaced grid with zmin, zmax, (zn, zorder).

    Parameters
    ----------
    func : callable
        The function whose slices are to be plotted. Must take more than one argument.
    bot : float
        Lowest value in the xdim to plot.
    top : float
        Highest value in the xdim to plot.
    N : int
        Number of x values to plot for each slice (default 1000).
    xdim : int
        Index of the input that serves as "x", the horizontal plot dimension (default 0).
    zdim : int
        Index of the input that serves as "z", which is varied for each slice plotted (default 1).
    zmin : None or float
        If specified, the lowest value of z that will be plotted.
    zmax : None or float
        If specified, the highest value of z that will be plotted.
    zn : int
        The number of slices to plot if zmin and zmax are specified (default 11).
    zorder : float
        The exponential order of the set of z values, if zmin and zmax are specified (default 1.0).
    Z : None or [float]
        A user-specified list (or array) of z values. Cannot be used if zmin and zmax are provided.
    const : None or [float]
        Constant values at which to hold fixed function arguments *other* than x and z.
        E.g. if user wants to plot f(x, 3.0, z, 5.0), they specify xdim=0, zdim=2, const=[3.0, 5.0].
    legend_kwds : None or dictionary
        If not None, the keyword dictionary to pass to plt.legend.
    xlabel : None or str
        Optional horizontal axis label.
    ylabel : None or str
        Optional vertical axis label.

    Returns
    -------
    None
    """
    import matplotlib.pyplot as plt

    plt.ion()

    if xdim == zdim:
        raise ValueError("xdim and zdim cannot refer to the same argument!")

    # Check whether the set for z has been correctly specified
    if (zmin is not None) and (zmax is not None):
        if Z is None:
            if zmax > zmin:
                Z = make_exponential_grid(zmin, zmax, zn, order=zorder)
            else:
                raise ValueError("zmax must be greater than zmin!")
        else:
            raise ValueError(
                "Cannot provide set Z if zmin and zmax are also specified!"
            )
    if Z is None:
        raise ValueError("Must specify set Z or grid of z with zmin and zmax!")

    # Build the vectors of x values and constant values
    X = np.linspace(bot, top, num=N)
    if const is not None:
        Q = [const[j] * np.ones(N) for j in range(len(const))]
    else:
        Q = []
    D = 2 + len(Q)

    # Assemble a list of function arguments in the right order, leaving z blank
    args = []
    i = 0
    for d in range(D):
        if d == xdim:
            args.append(X)
        elif d == zdim:
            args.append(None)
        else:
            args.append(Q[i])
            i += 1

    # Plot a slice for each z in Z
    for j in range(len(Z)):
        z = Z[j]
        args[zdim] = z * np.ones(N)
        plt.plot(X, func(*args))

    # Format and display the figure
    plt.xlim(bot, top)
    if xlabel is not None:
        plt.xlabel(xlabel)
    if ylabel is not None:
        plt.ylabel(ylabel)
    if legend_kwds is not None:
        plt.legend(**legend_kwds)
    plt.show(block=False)


###############################################################################


def determine_platform():  # pragma: nocover
    """
    Utility function to return the platform currently in use.

    Returns
    -------
    pf : str
        'darwin' (MacOS), 'debian' (debian Linux), 'win' (windows), or
        'linux' (other Linux)
    """
    import platform

    pform = platform.system().lower()
    if "darwin" in pform:
        pf = "darwin"  # MacOS
    elif "debian" in pform:
        pf = "debian"  # Probably cloud (MyBinder, CoLab, ...)
    elif "ubuntu" in pform:
        pf = "debian"  # Probably cloud (MyBinder, CoLab, ...)
    elif "win" in pform:
        pf = "win"
    elif "linux" in pform:
        pf = "linux"
    else:
        raise ValueError("Not able to find out the platform.")
    return pf


def test_latex_installation(pf):  # pragma: no cover
    """Test to check if latex is installed on the machine.

    Parameters
    ----------
    pf : str (platform)
        Output of determine_platform()

    Returns
    -------
    bool : Boolean
        True if latex is found (or successfully installed, in the case of
        debian); otherwise an ImportError is raised to direct the user to
        install latex manually
    """
    # Test whether latex is installed (some of the figures require it)
    from shutil import which

    latexExists = False

    if which("latex"):
        latexExists = True
        return True

    if not latexExists:
        print("Some of the figures below require a full installation of LaTeX")
        # If running on Mac or Win, user can be assumed to be able to install
        # any missing packages in response to error messages; but not on cloud
        # so load LaTeX by hand (painfully slowly)
        if "debian" in pf:  # CoLab and MyBinder are both ubuntu
            print("Installing LaTeX now; please wait 3-5 minutes")
            from IPython.utils import io

            with io.capture_output() as captured:  # Hide hideously long output
                os.system("apt-get update")
                os.system(
                    "apt-get install texlive texlive-latex-extra texlive-xetex dvipng"
                )
            latexExists = True
            return True
        else:
            raise ImportError(
                "Please install a full distribution of LaTeX on your computer then rerun. \n \
            A full distribution means texlive, texlive-latex-extra, texlive-xetex, dvipng, and ghostscript"
            )


def in_ipynb():
    """If the ipython process contains 'terminal', assume we are not in a notebook.

    Returns
    -------
    bool : Boolean
        True if called from a jupyter notebook, else False
    """
    try:
        if "terminal" in str(type(get_ipython())):
            return False
        else:
            return True
    except NameError:
        return False


def setup_latex_env_notebook(pf, latexExists):  # pragma: nocover
    """This is needed for use of the latex_envs notebook extension
    which allows the use of environments in Markdown.

    Parameters
    ----------
    pf : str (platform)
        Output of determine_platform()
    latexExists : bool
        True if LaTeX is installed, as returned by test_latex_installation()
    """
    import os

    import matplotlib.pyplot as plt

    plt.rc("font", family="serif")
    plt.rc("text", usetex=latexExists)
    if latexExists:
        latex_preamble = (
            r"\usepackage{amsmath}\usepackage{amsfonts}"
            r"\usepackage[T1]{fontenc}"
            r"\providecommand{\Ex}{\mathbb{E}}"
            r"\providecommand{\StE}{\check}"
            r"\providecommand{\Trg}{\hat}"
            r"\providecommand{\PermGroFac}{\Gamma}"
            r"\providecommand{\cLev}{\pmb{\mathrm{c}}}"
            r"\providecommand{\mLev}{\pmb{\mathrm{m}}}"
            r"\providecommand{\Rfree}{\mathsf{R}}"
            r"\providecommand{\DiscFac}{\beta}"
            r"\providecommand{\CRRA}{\rho}"
            r"\providecommand{\MPC}{\kappa}"
            r"\providecommand{\UnempPrb}{\wp}"
        )
        # LaTeX expects paths to be separated by /. \ might result in pieces
        # being interpreted as commands.
        latexdefs_path = os.getcwd().replace(os.path.sep, "/") + "/latexdefs.tex"
        if os.path.isfile(latexdefs_path):
            latex_preamble = latex_preamble + r"\input{" + latexdefs_path + r"}"
        else:
            # The required latex_envs package needs this file to exist even if
            # it is empty
            from pathlib import Path

            Path(latexdefs_path).touch()
        plt.rcParams["text.latex.preamble"] = latex_preamble


def make_figs(figure_name, saveFigs, drawFigs, target_dir="Figures"):
    """Utility function to save a figure in multiple formats and display the image.

    Parameters
    ----------
    figure_name : str
        Name of the figure
    saveFigs : bool
        True if the figure needs to be written to disk, else False
    drawFigs : bool
        True if the figure should be displayed using plt.draw()
    target_dir : str, default = 'Figures'
        Name of folder to save figures to in the current directory
    """
    import matplotlib.pyplot as plt

    if saveFigs:
        import os

        # Where to put any figures that the user wants to save
        my_file_path = os.getcwd()  # Find pathname to this file
        Figures_dir = os.path.join(
            my_file_path, f"{target_dir}"
        )  # LaTeX document assumes figures will be here
        if not os.path.exists(Figures_dir):
            os.makedirs(Figures_dir)  # If dir does not exist, create it
        # Save the figures in several formats
        print(f"Saving figure {figure_name} in {target_dir}")
        plt.savefig(
            os.path.join(target_dir, f"{figure_name}.jpg"),
            # metadata is not supported for jpg
        )  # For web/html
        plt.savefig(
            os.path.join(target_dir, f"{figure_name}.png"),
            metadata={"CreationDate": None},
        )  # For web/html
        plt.savefig(
            os.path.join(target_dir, f"{figure_name}.pdf"),
            metadata={"CreationDate": None},
        )  # For LaTeX
        plt.savefig(
            os.path.join(target_dir, f"{figure_name}.svg"),
            metadata={"Date": None},
        )  # For html5
    # Make sure it's possible to plot it by checking for GUI
    if drawFigs and find_gui():
        plt.ion()  # Counterintuitively, you want interactive mode on if you don't want to interact
        plt.draw()  # Change to false if you want to pause after the figure
        plt.pause(2)


def find_gui():
    """Quick fix to check if matplotlib is running in a GUI environment.

    Returns
    -------
    bool : Boolean
        True if it's a GUI environment, False if not.
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        return False
    if plt.get_backend() == "Agg":
        return False
    return True


def benchmark(
    agent, sort_by="tottime", max_print=10, filename="restats", return_output=False
):  # pragma: nocover
    """
    Profiling tool for HARK models. Calling `benchmark` on agents calls the solver for
    the agents and provides time to solve as well as the top `max_print` function calls
    in terms of `sort_by`. Optionally allows for saving a text copy of the profile
    as well as returning the `Stats` object for further inspection.

    For more details on the python profilers, see
    https://docs.python.org/3/library/profile.html#the-stats-class

    Parameters
    ----------
    agent : AgentType
        A HARK AgentType with a solve() method.
    sort_by : string
        A string to sort the stats by.
    max_print : int
        Number of lines to print.
    filename : string
        Optional filename to save output.
    return_output : bool
        Boolean to determine whether Stats object should be returned.

    Returns
    -------
    stats : Stats (optional)
        Profiling object with call statistics.
    """
    cProfile.run("agent.solve()", filename)
    stats = pstats.Stats(filename)
    stats.strip_dirs()
    stats.sort_stats(sort_by)
    stats.print_stats(max_print)
    if return_output:
        return stats
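
# Example (sketch; assumes a solvable HARK agent such as IndShockConsumerType).
# Note that cProfile.run() resolves the string "agent.solve()" in __main__, so
# this works when the caller's variable is itself named `agent`.
#   >>> from HARK.ConsIndShockModel import IndShockConsumerType
#   >>> agent = IndShockConsumerType()
#   >>> stats = benchmark(agent, sort_by="cumtime", max_print=5, return_output=True)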


# Regular expression matching decimal numbers with 8 to 20 digits after the point
simpledec = re.compile(r"\d*\.\d{8,20}")


def mround(match):
    """Format a regex match of a long decimal to five decimal places."""
    return f"{float(match.group()):.5f}"


def round_in_file(filename):  # pragma: nocover
    """Round all long decimals in a text file to five decimal places, in place."""
    with open(filename, "r+") as file:
        filetext = file.read()
        filetext = re.sub(simpledec, mround, filetext)
        file.seek(0)
        file.write(filetext)
        file.truncate()
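
# Example (illustrative): the same substitution that round_in_file applies.
#   >>> re.sub(simpledec, mround, "pi is 3.14159265358979")
#   'pi is 3.14159'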


def files_in_dir(mypath):
    """Return a list of full paths to the files (not directories) in mypath."""
    return [
        os.path.join(mypath, f)
        for f in os.listdir(mypath)
        if os.path.isfile(os.path.join(mypath, f))
    ]