1""" 

2General purpose / miscellaneous functions. Includes functions to approximate 

3continuous distributions with discrete ones, utility functions (and their 

4derivatives), manipulation of discrete distributions, and basic plotting tools. 

5""" 

6 

7import cProfile 

8import os 

9import pstats 

10import re 

11 

12import numba 

13import numpy as np # Python's numeric library, abbreviated "np" 

14from scipy.interpolate import interp1d 

15 

16from inspect import signature 

17 

18 

class get_it_from:
    """
    Class whose instances act as a special-case trivial constructor that merely
    grabs an entry (or attribute) from a named parent dictionary (or object).
    This is useful when there are constructed model inputs that are "built
    together". Simply have a constructor that makes a dictionary (or object)
    containing the several inputs, then use get_it_from(that_dict_name) as the
    constructor for each of them.

    Parameters
    ----------
    name : str
        Name of the parent dictionary or object from which to take the object.
    """

    def __init__(self, name):
        self.name = name

    def __call__(self, parent, query):
        # Look up query as a dictionary key or as an object attribute
        if isinstance(parent, dict):
            return parent[query]
        else:
            return getattr(parent, query)
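
# Illustrative sketch (not part of the original module; names are hypothetical):
# if one constructor builds several inputs together in a single dictionary,
# get_it_from can serve as the constructor for each individual input.
#
#     shared = {"IncomeMin": 0.5, "IncomeMax": 1.5}
#     getter = get_it_from("IncomeParams")  # name of the parent input
#     getter(shared, "IncomeMin")  # returns 0.5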

# ==============================================================================
# ============== Some basic function tools ====================================
# ==============================================================================

def get_arg_names(function):
    """
    Returns the names of all of the arguments of the passed function.

    Parameters
    ----------
    function : function
        A function whose argument names are wanted.

    Returns
    -------
    argNames : [string]
        The names of the arguments of function.
    """
    argCount = function.__code__.co_argcount
    argNames = function.__code__.co_varnames[:argCount]
    return argNames
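
# Example (illustrative, kept as a comment):
#
#     def trivial(a, b, c=3):
#         return a + b + c
#
#     get_arg_names(trivial)  # ('a', 'b', 'c')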

class NullFunc:
    """
    A trivial class that acts as a placeholder "do nothing" function.
    """

    def __call__(self, *args):
        """
        Returns meaningless output regardless of the input(s). If there is no
        input, returns None. Otherwise, returns an array of NaNs (or a single
        NaN) of the same size as the first input.
        """
        if len(args) == 0:
            return None
        else:
            arg = args[0]
            if hasattr(arg, "shape"):
                return np.zeros_like(arg) + np.nan
            else:
                return np.nan

    def distance(self, other):
        """
        Trivial distance metric that only cares whether the other object is also
        an instance of NullFunc. Intentionally does not inherit from HARKobject,
        as this might create dependency problems.

        Parameters
        ----------
        other : any
            Any object for comparison to this instance of NullFunc.

        Returns
        -------
        (unnamed) : float
            The distance between self and other. Returns 0 if other is also a
            NullFunc; otherwise returns an arbitrary high number.
        """
        if other.__class__ is self.__class__:
            return 0.0
        else:
            return 1000.0

def apply_fun_to_vals(fun, vals):
    """
    Applies a function to the arguments defined in `vals`.
    This is equivalent to `fun(**vals)`, except that `vals` may contain keys
    that are not named arguments of `fun`.

    Parameters
    ----------
    fun : callable
        The function to apply.
    vals : dict
        Dictionary mapping argument names to values; extra keys are ignored.
    """
    return fun(*[vals[var] for var in signature(fun).parameters])
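
# Example (illustrative): extra keys in vals are simply ignored.
#
#     def f(a, b):
#         return a + b
#
#     apply_fun_to_vals(f, {"a": 1, "b": 2, "c": 99})  # returns 3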

# =======================================================
# ================ Other useful functions ===============
# =======================================================

def make_assets_grid(aXtraMin, aXtraMax, aXtraCount, aXtraExtra, aXtraNestFac):
    """
    Constructs the base grid of post-decision states, representing end-of-period
    assets above the absolute minimum. Has three modes, depending on aXtraNestFac:

    aXtraNestFac = -1 : Uniformly spaced grid.
    aXtraNestFac = 0  : Ordinary exponentially spaced grid.
    aXtraNestFac >= 1 : Multi-exponentially nested grid.

    See :func:`HARK.utilities.make_grid_exp_mult` for more info.

    Parameters
    ----------
    aXtraMin : float
        Minimum value for the assets-above-minimum grid.
    aXtraMax : float
        Maximum value for the assets-above-minimum grid.
    aXtraCount : int
        Number of nodes in the assets-above-minimum grid, not counting extra values.
    aXtraExtra : [float]
        Additional values to insert in the assets-above-minimum grid.
    aXtraNestFac : int
        Level of exponential nesting for the grid. If -1, the grid is linearly spaced.

    Returns
    -------
    aXtraGrid : np.ndarray
        Base array of values for the post-decision-state grid.
    """
    # Set up the post-decision state grid
    if aXtraNestFac == -1:
        aXtraGrid = np.linspace(aXtraMin, aXtraMax, aXtraCount)
    elif (aXtraNestFac >= 0) and type(aXtraNestFac) is int:
        aXtraGrid = make_grid_exp_mult(
            ming=aXtraMin, maxg=aXtraMax, ng=aXtraCount, timestonest=aXtraNestFac
        )
    else:
        raise ValueError(
            "Please ensure aXtraNestFac is either -1 or a non-negative integer."
        )

    # Add in additional points for the grid
    if aXtraExtra is not None:
        temp_list = []
        for i in aXtraExtra:
            if i is not None:
                temp_list.append(i)
        aXtraGrid = np.sort(np.unique(np.concatenate((aXtraGrid, temp_list))))

    return aXtraGrid
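
# Example (illustrative): a 48-point triple-nested grid on [0.001, 20] with two
# extra points spliced in.
#
#     aXtraGrid = make_assets_grid(0.001, 20.0, 48, [0.005, 0.01], 3)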

# ==============================================================================
# ============== Functions for generating state space grids ===================
# ==============================================================================

def make_grid_exp_mult(ming, maxg, ng, timestonest=20):
    r"""
    Makes a multi-exponentially spaced grid.
    If the function :math:`\ln(1+x)` were applied timestonest times, the grid
    would become linearly spaced. If timestonest is 0, the grid is exponentially
    spaced. If timestonest is -1, the grid is linearly spaced.

    NOTE: The bounds of the grid must be non-negative, else this function will
    return an invalid grid with NaNs in it. If you want a non-linearly spaced
    grid that spans negative numbers, use make_exponential_grid; see below.

    Parameters
    ----------
    ming : float
        Minimum value of the grid, which must be non-negative.
    maxg : float
        Maximum value of the grid, which must be greater than ming.
    ng : int
        The number of grid points.
    timestonest : int
        The number of times to nest the exponentiation.

    Returns
    -------
    points : np.array
        A multi-exponentially spaced grid.

    Notes
    -----
    Original Matlab code can be found in Chris Carroll's
    [Solution Methods for Microeconomic Dynamic Optimization Problems]
    (https://www.econ2.jhu.edu/people/ccarroll/solvingmicrodsops/) toolkit.
    Latest update: 01 May 2015
    """
    if timestonest == -1:
        grid = np.linspace(ming, maxg, ng)
        return grid
    if timestonest > 0:
        # Repeatedly apply log(1+x) to the bounds, space the grid linearly in
        # that transformed domain, then invert with exp(x)-1 the same number of times
        Lming = ming
        Lmaxg = maxg
        for j in range(timestonest):
            Lming = np.log(Lming + 1)
            Lmaxg = np.log(Lmaxg + 1)
        Lgrid = np.linspace(Lming, Lmaxg, ng)
        grid = Lgrid
        for j in range(timestonest):
            grid = np.exp(grid) - 1
    else:
        # timestonest == 0: ordinary exponential spacing (linear in logs)
        Lming = np.log(ming)
        Lmaxg = np.log(maxg)
        Lgrid = np.linspace(Lming, Lmaxg, ng)
        grid = np.exp(Lgrid)
    return grid
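
# Example (illustrative): one level of nesting spaces the grid linearly in
# log(1+x), concentrating points near the bottom of the interval.
#
#     make_grid_exp_mult(0.0, 10.0, 5, timestonest=1)
#     # -> roughly array([0.  , 0.82, 2.32, 5.04, 10.  ])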

def make_exponential_grid(ming, maxg, ng, order=1.0):
    """
    Construct an exponentially spaced grid with a chosen exponential order.
    A uniformly spaced grid on [0,1] is raised to the chosen order, then linearly
    remapped to the specified interval. Supports any real-valued grid bounds.

    Parameters
    ----------
    ming : float
        Lower bound of grid.
    maxg : float
        Upper bound of grid.
    ng : int
        Number of points in the grid.
    order : float, optional
        Exponential spacing order for the grid. The default is 1.0, or linear.

    Returns
    -------
    grid : np.array
        Exponentially spaced grid on [ming, maxg] with ng points.
    """
    grid = np.linspace(0.0, 1.0, ng) ** order * (maxg - ming) + ming
    return grid
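
# Example (illustrative): order=2 squeezes points toward the lower bound, and
# negative bounds are fine here.
#
#     make_exponential_grid(-1.0, 1.0, 5, order=2.0)
#     # -> array([-1.   , -0.875, -0.5  ,  0.125,  1.   ])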

# ==============================================================================
# ================== Uncategorized general functions ==========================
# ==============================================================================

def get_percentiles(data, weights=None, percentiles=None, presorted=False):
    """
    Calculates the requested percentiles of (weighted) data. Median by default.

    Parameters
    ----------
    data : numpy.array
        A 1D array of float data.
    weights : np.array
        A weighting vector for the data.
    percentiles : [float]
        A list or numpy.array of percentiles to calculate for the data. Each
        element should be in (0,1).
    presorted : boolean
        Indicator for whether data has already been sorted.

    Returns
    -------
    pctl_out : numpy.array
        The requested percentiles of the data.
    """
    if percentiles is None:
        percentiles = [0.5]
    else:
        if (
            not isinstance(percentiles, (list, np.ndarray))
            or min(percentiles) <= 0
            or max(percentiles) >= 1
        ):
            raise ValueError(
                "Percentiles should be a list or numpy array of floats between 0 and 1"
            )

    if data.size < 2:
        return np.zeros(np.array(percentiles).shape) + np.nan

    if weights is None:  # Set equiprobable weights if none were passed
        weights = np.ones(data.size) / float(data.size)

    if presorted:  # Data have already been sorted
        data_sorted = data
        weights_sorted = weights
    else:  # Sort the data and weights
        order = np.argsort(data)
        data_sorted = data[order]
        weights_sorted = weights[order]

    # Cumulative probability distribution
    cum_dist = np.cumsum(weights_sorted) / np.sum(weights_sorted)

    # Calculate the requested percentiles by interpolating the data over the
    # cumulative distribution, then evaluating at the percentile values
    inv_CDF = interp1d(cum_dist, data_sorted, bounds_error=False, assume_sorted=True)
    pctl_out = inv_CDF(percentiles)
    return pctl_out
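
# Example (illustrative): weighted median, with most of the mass on the last
# observation.
#
#     data = np.array([1.0, 2.0, 3.0, 4.0])
#     weights = np.array([0.1, 0.1, 0.1, 0.7])
#     get_percentiles(data, weights=weights, percentiles=[0.5])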

def get_lorenz_shares(data, weights=None, percentiles=None, presorted=False):
    """
    Calculates the Lorenz curve at the requested percentiles of (weighted) data.
    Median by default.

    Parameters
    ----------
    data : numpy.array
        A 1D array of float data.
    weights : numpy.array
        A weighting vector for the data.
    percentiles : [float]
        A list or numpy.array of percentiles to calculate for the data. Each
        element should be in (0,1).
    presorted : boolean
        Indicator for whether data has already been sorted.

    Returns
    -------
    lorenz_out : numpy.array
        The requested Lorenz curve points of the data.
    """
    if percentiles is None:
        percentiles = [0.5]
    else:
        if (
            not isinstance(percentiles, (list, np.ndarray))
            or min(percentiles) <= 0
            or max(percentiles) >= 1
        ):
            raise ValueError(
                "Percentiles should be a list or numpy array of floats between 0 and 1"
            )
    if weights is None:  # Set equiprobable weights if none were given
        weights = np.ones(data.size)

    if presorted:  # Data have already been sorted
        data_sorted = data
        weights_sorted = weights
    else:  # Sort the data and weights
        order = np.argsort(data)
        data_sorted = data[order]
        weights_sorted = weights[order]

    # Cumulative probability distribution
    cum_dist = np.cumsum(weights_sorted) / np.sum(weights_sorted)
    temp = data_sorted * weights_sorted
    cum_data = np.cumsum(temp) / np.sum(temp)  # Cumulative ownership shares

    # Calculate the requested Lorenz shares by interpolating the cumulative
    # ownership shares over the cumulative distribution, then evaluating at
    # the requested points
    lorenzFunc = interp1d(cum_dist, cum_data, bounds_error=False, assume_sorted=True)
    lorenz_out = lorenzFunc(percentiles)
    return lorenz_out
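
# Example (illustrative): share of total wealth held by the bottom half of the
# (weight) distribution.
#
#     wealth = np.array([1.0, 2.0, 3.0, 10.0])
#     get_lorenz_shares(wealth, percentiles=[0.5])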

def calc_subpop_avg(data, reference, cutoffs, weights=None):
    """
    Calculates the average of (weighted) data between cutoff percentiles of a
    reference variable.

    Parameters
    ----------
    data : numpy.array
        A 1D array of float data.
    reference : numpy.array
        A 1D array of float data of the same length as data.
    cutoffs : [(float, float)]
        A list of (lower, upper) pairs of percentile bounds (each should be
        in [0,1]).
    weights : numpy.array
        A weighting vector for the data.

    Returns
    -------
    slice_avg : [float]
        The (weighted) average of data that falls within each pair of cutoff
        percentiles of reference.
    """
    if weights is None:  # Set equiprobable weights if none were given
        weights = np.ones(data.size)

    # Sort the data by the reference variable and generate a cumulative distribution
    order = np.argsort(reference)
    data_sorted = data[order]
    weights_sorted = weights[order]
    cum_dist = np.cumsum(weights_sorted) / np.sum(weights_sorted)

    # For each set of cutoffs, calculate the average of data that falls within
    # the cutoff percentiles of reference
    slice_avg = []
    for j in range(len(cutoffs)):
        bot = np.searchsorted(cum_dist, cutoffs[j][0])
        top = np.searchsorted(cum_dist, cutoffs[j][1])
        slice_avg.append(
            np.sum(data_sorted[bot:top] * weights_sorted[bot:top])
            / np.sum(weights_sorted[bot:top])
        )
    return slice_avg
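
# Illustrative sketch (data and reference are hypothetical equal-length 1D
# arrays): average of data for observations whose reference value falls in the
# middle quintile, and in the top quintile.
#
#     calc_subpop_avg(data, reference, cutoffs=[(0.4, 0.6), (0.8, 1.0)])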

def epanechnikov_kernel(x, ref_x, h=1.0):
    """
    The Epanechnikov kernel, which has been shown to be the most efficient kernel
    for non-parametric regression.

    Parameters
    ----------
    x : np.array
        Values at which to evaluate the kernel
    ref_x : float
        The reference point
    h : float
        Kernel bandwidth

    Returns
    -------
    out : np.array
        Kernel values at each value of x
    """
    u = (x - ref_x) / h  # Normalize distance by bandwidth
    out = 0.75 * (1.0 - u**2)
    out[np.abs(u) > 1.0] = 0.0  # Kernel = 0 outside [-1,1]
    return out

def triangle_kernel(x, ref_x, h=1.0):
    """
    The triangle or "hat" kernel.

    Parameters
    ----------
    x : np.array
        Values at which to evaluate the kernel
    ref_x : float
        The reference point
    h : float
        Kernel bandwidth

    Returns
    -------
    out : np.array
        Kernel values at each value of x
    """
    u = (x - ref_x) / h  # Normalize distance by bandwidth
    these = np.abs(u) <= 1.0  # Kernel = 0 outside [-1,1]
    out = np.zeros_like(u)  # Initialize kernel output
    out[these] = 1.0 - np.abs(u[these])  # Evaluate kernel
    return out

# Registry of available kernels by name; "hat" is an alias for the triangle kernel
kernel_dict = {
    "epanechnikov": epanechnikov_kernel,
    "triangle": triangle_kernel,
    "hat": triangle_kernel,
}

def kernel_regression(x, y, bot=None, top=None, N=500, h=None, kernel="epanechnikov"):
    """
    Performs a non-parametric Nadaraya-Watson 1D kernel regression on given data
    with optionally specified range, number of points, kernel bandwidth, and kernel.

    Parameters
    ----------
    x : np.array
        The independent variable in the kernel regression.
    y : np.array
        The dependent variable in the kernel regression.
    bot : float
        Minimum value of interest in the regression; defaults to min(x).
    top : float
        Maximum value of interest in the regression; defaults to max(x).
    N : int
        Number of points to compute.
    h : float
        The bandwidth of the kernel.
    kernel : str
        Name of the kernel to use; see kernel_dict for valid options.

    Returns
    -------
    regression : interp1d
        A piecewise linear kernel regression: y = f(x).
    """
    # Fix omitted inputs
    if bot is None:
        bot = np.min(x)
    if top is None:
        top = np.max(x)
    if h is None:
        h = 2.0 * (top - bot) / float(N)  # This is an arbitrary default

    # Get the kernel function if possible
    try:
        kern = kernel_dict[kernel]
    except KeyError:
        raise ValueError("Can't find a kernel named '" + kernel + "'!")

    # Construct a local linear approximation
    x_vec = np.linspace(bot, top, num=N)
    # Evaluate the kernel for all evaluation points at once
    weights = kern(x[:, None], x_vec[None, :], h)
    weight_sums = np.sum(weights, axis=0)
    # Avoid division by zero when weights are extremely small
    weight_sums[weight_sums == 0] = np.nan
    y_vec = np.dot(weights.T, y) / weight_sums
    regression = interp1d(x_vec, y_vec, bounds_error=False, assume_sorted=True)
    return regression
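
# Example (illustrative): recover a noisy sine curve.
#
#     rng = np.random.default_rng(0)
#     x = rng.uniform(0.0, 2.0 * np.pi, 500)
#     y = np.sin(x) + 0.1 * rng.standard_normal(500)
#     f = kernel_regression(x, y, h=0.3)
#     f(np.pi / 2)  # approximately 1.0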

def make_polynomial_params(coeffs, T, offset=0.0, step=1.0):
    """
    Make a T-length array of parameters using polynomial coefficients.

    Parameters
    ----------
    coeffs : [float]
        Arbitrary length list of polynomial coefficients, ordered from the
        constant term up (lowest power first).
    T : int
        Number of elements in the output.
    offset : float, optional
        Value at which the X values should start. The default is 0.0.
    step : float, optional
        Increment of the X values. The default is 1.0.

    Returns
    -------
    param_vals : np.array
        T-length array of parameter values calculated using the polynomial
        coefficients.
    """
    X = offset + step * np.arange(T)
    # np.polyval expects the highest power first, so reverse the coefficients
    return np.polyval(coeffs[::-1], X)
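
# Example (illustrative): the quadratic 1 + 2t + 3t^2 evaluated at t = 0, 1, 2, 3.
#
#     make_polynomial_params([1.0, 2.0, 3.0], T=4)
#     # -> array([ 1.,  6., 17., 34.])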

@numba.njit
def jump_to_grid_1D(m_vals, probs, Dist_mGrid):  # pragma: nocover
    """
    Distributes values onto a predefined grid, maintaining the mean.

    Parameters
    ----------
    m_vals : np.array
        Market resource values

    probs : np.array
        Shock probabilities associated with combinations of m_vals.
        Can be thought of as the probability mass function of (m_vals).

    Dist_mGrid : np.array
        Grid over normalized market resources

    Returns
    -------
    probGrid.flatten() : np.array
        Probabilities of each gridpoint on the grid of market resources
    """
    probGrid = np.zeros(len(Dist_mGrid))
    mIndex = np.digitize(m_vals, Dist_mGrid) - 1
    # Clip values that fall off either end of the grid
    mIndex[m_vals <= Dist_mGrid[0]] = -1
    mIndex[m_vals >= Dist_mGrid[-1]] = len(Dist_mGrid) - 1

    for i in range(len(m_vals)):
        if mIndex[i] == -1:  # Below the grid: all mass on the first gridpoint
            mlowerIndex = 0
            mupperIndex = 0
            mlowerWeight = 1.0
            mupperWeight = 0.0
        elif mIndex[i] == len(Dist_mGrid) - 1:  # Above the grid: all mass on the last gridpoint
            mlowerIndex = -1
            mupperIndex = -1
            mlowerWeight = 1.0
            mupperWeight = 0.0
        else:
            # Split the probability between the two bracketing gridpoints in
            # proportion to proximity, which preserves the mean
            mlowerIndex = mIndex[i]
            mupperIndex = mIndex[i] + 1
            mlowerWeight = (Dist_mGrid[mupperIndex] - m_vals[i]) / (
                Dist_mGrid[mupperIndex] - Dist_mGrid[mlowerIndex]
            )
            mupperWeight = 1.0 - mlowerWeight

        probGrid[mlowerIndex] += probs[i] * mlowerWeight
        probGrid[mupperIndex] += probs[i] * mupperWeight

    return probGrid.flatten()
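
# Example (illustrative): a point at 1.25, between gridpoints 1.0 and 2.0,
# splits its mass 75/25 so that the grid distribution keeps the same mean.
#
#     grid = np.array([0.0, 1.0, 2.0, 3.0])
#     jump_to_grid_1D(np.array([1.25]), np.array([1.0]), grid)
#     # -> array([0.  , 0.75, 0.25, 0.  ])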

@numba.njit
def jump_to_grid_2D(
    m_vals, perm_vals, probs, dist_mGrid, dist_pGrid
):  # pragma: nocover
    """
    Distributes values onto a predefined grid, maintaining the means. m_vals and
    perm_vals are realizations of market resources and permanent income, while
    dist_mGrid and dist_pGrid are the predefined grids of market resources and
    permanent income, respectively. That is, m_vals and perm_vals do not
    necessarily lie on their respective grids. Returns probabilities of each
    gridpoint on the combined grid of market resources and permanent income.

    Parameters
    ----------
    m_vals : np.array
        Market resource values

    perm_vals : np.array
        Permanent income values

    probs : np.array
        Shock probabilities associated with combinations of m_vals and perm_vals.
        Can be thought of as the probability mass function of (m_vals, perm_vals).

    dist_mGrid : np.array
        Grid over normalized market resources

    dist_pGrid : np.array
        Grid over permanent income

    Returns
    -------
    probGrid.flatten() : np.array
        Probabilities of each gridpoint on the combined grid of market resources
        and permanent income
    """
    probGrid = np.zeros((len(dist_mGrid), len(dist_pGrid)))

    # Maybe use np.searchsorted as opposed to np.digitize
    # Array indicating in which bin each value of m_vals lies relative to
    # dist_mGrid. Bins lie between points of dist_mGrid: for instance, if mval
    # lies between dist_mGrid[4] and dist_mGrid[5], it is in bin 4 (it would be
    # 5 if 1 were not subtracted in the next line).
    mIndex = np.digitize(m_vals, dist_mGrid) - 1
    # If the value is less than the smallest value on dist_mGrid, assign it an
    # index of -1
    mIndex[m_vals <= dist_mGrid[0]] = -1
    # If the value is greater than the largest value on dist_mGrid, assign it
    # an index of the length of the grid minus 1
    mIndex[m_vals >= dist_mGrid[-1]] = len(dist_mGrid) - 1

    # The following three lines hold the same intuition as above
    pIndex = np.digitize(perm_vals, dist_pGrid) - 1
    pIndex[perm_vals <= dist_pGrid[0]] = -1
    pIndex[perm_vals >= dist_pGrid[-1]] = len(dist_pGrid) - 1

    for i in range(len(m_vals)):
        if mIndex[i] == -1:
            # If mval is below the smallest gridpoint, assign all of its
            # weight to the lowest gridpoint
            mlowerIndex = 0
            mupperIndex = 0
            mlowerWeight = 1.0
            mupperWeight = 0.0
        elif mIndex[i] == len(dist_mGrid) - 1:
            # If mval is above the largest gridpoint, assign all of its
            # weight to the highest gridpoint
            mlowerIndex = -1
            mupperIndex = -1
            mlowerWeight = 1.0
            mupperWeight = 0.0
        else:  # Standard case where mval does not lie past either extreme
            # Identify which two points on the grid mval lies between
            mlowerIndex = mIndex[i]
            mupperIndex = mIndex[i] + 1
            # Assign weights to the indices that bound the m_vals point.
            # Intuitively, an mval exactly halfway between two gridpoints
            # assigns a weight of 0.5 to each; mvals close to the gridpoint
            # above get a smaller mlowerWeight.
            mlowerWeight = (dist_mGrid[mupperIndex] - m_vals[i]) / (
                dist_mGrid[mupperIndex] - dist_mGrid[mlowerIndex]
            )
            mupperWeight = 1.0 - mlowerWeight  # Weight of the gridpoint above

        # Same logic as above, except the weights here concern the permanent
        # income grid
        if pIndex[i] == -1:
            plowerIndex = 0
            pupperIndex = 0
            plowerWeight = 1.0
            pupperWeight = 0.0
        elif pIndex[i] == len(dist_pGrid) - 1:
            plowerIndex = -1
            pupperIndex = -1
            plowerWeight = 1.0
            pupperWeight = 0.0
        else:
            plowerIndex = pIndex[i]
            pupperIndex = pIndex[i] + 1
            plowerWeight = (dist_pGrid[pupperIndex] - perm_vals[i]) / (
                dist_pGrid[pupperIndex] - dist_pGrid[plowerIndex]
            )
            pupperWeight = 1.0 - plowerWeight

        # Compute probabilities of each gridpoint on the combined market
        # resources and permanent income grid, assigning probability to each
        # gridpoint based on the probability of the surrounding (mval, pval)
        # points and their respective weights on that gridpoint.
        # Note: probs[i] is the probability of mval AND pval occurring together.
        probGrid[mlowerIndex][plowerIndex] += (
            probs[i] * mlowerWeight * plowerWeight
        )  # Gridpoint below mval and below pval
        probGrid[mlowerIndex][pupperIndex] += (
            probs[i] * mlowerWeight * pupperWeight
        )  # Gridpoint below mval and above pval
        probGrid[mupperIndex][plowerIndex] += (
            probs[i] * mupperWeight * plowerWeight
        )  # Gridpoint above mval and below pval
        probGrid[mupperIndex][pupperIndex] += (
            probs[i] * mupperWeight * pupperWeight
        )  # Gridpoint above mval and above pval

    return probGrid.flatten()

@numba.njit(parallel=True)
def gen_tran_matrix_1D(
    dist_mGrid, bNext, shk_prbs, perm_shks, tran_shks, LivPrb, NewBornDist
):  # pragma: nocover
    """
    Computes the transition matrix across normalized market resources.
    This function is built to non-stochastically simulate the IndShockConsumerType.
    It is used exclusively when the Harmenberg neutral measure is applied and/or
    permanent income is not a state variable.
    For more information, see https://econ-ark.org/materials/harmenberg-aggregation?launch

    Parameters
    ----------
    dist_mGrid : np.array
        Grid over normalized market resources

    bNext : np.array
        Grid over bank balances

    shk_prbs : np.array
        Array of shock probabilities over combinations of permanent and
        transitory shocks

    perm_shks : np.array
        Array of shocks to permanent income. Shocks should follow the
        Harmenberg neutral measure

    tran_shks : np.array
        Array of shocks to transitory income

    LivPrb : float
        Probability of not dying

    NewBornDist : np.array
        Array representing the distribution of newborns across the grid of
        normalized market resources.

    Returns
    -------
    TranMatrix : np.array
        Transition matrix over the normalized market resources grid.
    """
    TranMatrix = np.zeros((len(dist_mGrid), len(dist_mGrid)))
    for i in numba.prange(len(dist_mGrid)):
        # Compute next period's market resources given today's bank balances bNext[i]
        mNext_ij = bNext[i] / perm_shks + tran_shks
        # Survivors' probability mass jumps to the grid; the deceased are
        # replaced by newborns drawn from NewBornDist
        TranMatrix[:, i] += (
            LivPrb * jump_to_grid_1D(mNext_ij, shk_prbs, dist_mGrid)
            + (1.0 - LivPrb) * NewBornDist
        )
    return TranMatrix
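
# Illustrative sketch (inputs are hypothetical): once built, the transition
# matrix pushes a distribution over the m-grid forward one period, and its
# unit-eigenvalue eigenvector is the stationary distribution.
#
#     tran_matrix = gen_tran_matrix_1D(
#         dist_mGrid, bNext, shk_prbs, perm_shks, tran_shks, LivPrb, NewBornDist
#     )
#     dstn_next = tran_matrix @ dstn_now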

@numba.njit(parallel=True)
def gen_tran_matrix_2D(
    dist_mGrid, dist_pGrid, bNext, shk_prbs, perm_shks, tran_shks, LivPrb, NewBornDist
):  # pragma: nocover
    """
    Computes the transition matrix over normalized market resources and
    permanent income.
    This function is built to non-stochastically simulate the IndShockConsumerType.

    Parameters
    ----------
    dist_mGrid : np.array
        Grid over normalized market resources

    dist_pGrid : np.array
        Grid over permanent income

    bNext : np.array
        Grid over bank balances

    shk_prbs : np.array
        Array of shock probabilities over combinations of permanent and
        transitory shocks

    perm_shks : np.array
        Array of shocks to permanent income

    tran_shks : np.array
        Array of shocks to transitory income

    LivPrb : float
        Probability of not dying

    NewBornDist : np.array
        Array representing the distribution of newborns across the grid of
        normalized market resources and the grid of permanent income.

    Returns
    -------
    TranMatrix : np.array
        Transition matrix over the normalized market resources grid and
        permanent income grid
    """
    TranMatrix = np.zeros(
        (len(dist_mGrid) * len(dist_pGrid), len(dist_mGrid) * len(dist_pGrid))
    )
    for i in numba.prange(len(dist_mGrid)):
        for j in numba.prange(len(dist_pGrid)):
            # Compute next period's market resources given today's bank balances bNext[i]
            mNext_ij = bNext[i] / perm_shks + tran_shks
            # Compute next period's permanent income level by applying the
            # permanent income shock
            pNext_ij = dist_pGrid[j] * perm_shks
            # Survivors' probability mass jumps to the combined grid; the
            # deceased are replaced by newborns drawn from NewBornDist
            TranMatrix[:, i * len(dist_pGrid) + j] += (
                LivPrb
                * jump_to_grid_2D(mNext_ij, pNext_ij, shk_prbs, dist_mGrid, dist_pGrid)
                + (1.0 - LivPrb) * NewBornDist
            )
    return TranMatrix

# ==============================================================================
# ============== Some basic plotting tools ====================================
# ==============================================================================

def plot_funcs(functions, bottom, top, N=1000, legend_kwds=None):
    """
    Plots 1D function(s) over a given range.

    Parameters
    ----------
    functions : [function] or function
        A single function, or a list of functions, to be plotted.
    bottom : float
        The lower limit of the domain to be plotted.
    top : float
        The upper limit of the domain to be plotted.
    N : int
        Number of points in the domain to evaluate.
    legend_kwds : None, or dictionary
        If not None, the keyword dictionary to pass to plt.legend.

    Returns
    -------
    none
    """
    import matplotlib.pyplot as plt

    plt.ion()

    if isinstance(functions, list):
        function_list = functions
    else:
        function_list = [functions]

    for function in function_list:
        x = np.linspace(bottom, top, N, endpoint=True)
        y = function(x)
        plt.plot(x, y)
    plt.xlim([bottom, top])
    if legend_kwds is not None:
        plt.legend(**legend_kwds)
    plt.show(block=False)
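
# Example (illustrative): plot two simple functions on [0.1, 5.0] with a legend.
#
#     plot_funcs([np.sqrt, np.log], 0.1, 5.0, legend_kwds={"labels": ["sqrt", "log"]})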

def plot_funcs_der(functions, bottom, top, N=1000, legend_kwds=None):
    """
    Plots the first derivative of 1D function(s) over a given range.

    Parameters
    ----------
    functions : [function] or function
        A single function, or a list of functions, whose derivatives are to be
        plotted. Each must have a derivative method.
    bottom : float
        The lower limit of the domain to be plotted.
    top : float
        The upper limit of the domain to be plotted.
    N : int
        Number of points in the domain to evaluate.
    legend_kwds : None, or dictionary
        If not None, the keyword dictionary to pass to plt.legend.

    Returns
    -------
    none
    """
    import matplotlib.pyplot as plt

    plt.ion()

    if isinstance(functions, list):
        function_list = functions
    else:
        function_list = [functions]

    step = (top - bottom) / N
    for function in function_list:
        x = np.arange(bottom, top, step)
        y = function.derivative(x)
        plt.plot(x, y)
    plt.xlim([bottom, top])
    if legend_kwds is not None:
        plt.legend(**legend_kwds)
    plt.show(block=False)

###############################################################################

def determine_platform():  # pragma: nocover
    """
    Utility function to return the platform currently in use.

    Returns
    -------
    pf : str
        'darwin' (MacOS), 'debian' (Debian or Ubuntu Linux, probably cloud),
        'linux' (other Linux), or 'win' (Windows)
    """
    import platform

    pform = platform.system().lower()
    if "darwin" in pform:
        pf = "darwin"  # MacOS
    elif "debian" in pform:
        pf = "debian"  # Probably cloud (MyBinder, CoLab, ...)
    elif "ubuntu" in pform:
        pf = "debian"  # Probably cloud (MyBinder, CoLab, ...)
    elif "win" in pform:
        pf = "win"
    elif "linux" in pform:
        pf = "linux"
    else:
        raise ValueError("Not able to find out the platform.")
    return pf

def test_latex_installation(pf):  # pragma: no cover
    """Test whether LaTeX is installed on the machine.

    Parameters
    ----------
    pf : str (platform)
        Output of determine_platform()

    Returns
    -------
    bool : Boolean
        True if LaTeX is found (or successfully installed, in the Debian case);
        otherwise an ImportError is raised to direct the user to install LaTeX
        manually.
    """
    # Test whether latex is installed (some of the figures require it)
    from shutil import which

    if which("latex") is not None:
        return True

    print("Some of the figures below require a full installation of LaTeX")
    # If running on Mac or Windows, the user can be assumed to be able to
    # install any missing packages in response to error messages; but not on
    # the cloud, so load LaTeX by hand (painfully slowly)
    if "debian" in pf:  # CoLab and MyBinder are both Ubuntu
        print("Installing LaTeX now; please wait 3-5 minutes")
        from IPython.utils import io

        with io.capture_output() as captured:  # Hide hideously long output
            os.system("apt-get update")
            os.system(
                "apt-get install texlive texlive-latex-extra texlive-xetex dvipng"
            )
        return True
    else:
        raise ImportError(
            "Please install a full distribution of LaTeX on your computer then rerun. \n \
            A full distribution means texlive, texlive-latex-extra, texlive-xetex, dvipng, and ghostscript"
        )

def in_ipynb():
    """If the ipython process contains 'terminal', assume we are not in a notebook.

    Returns
    -------
    bool : Boolean
        True if called from a jupyter notebook, else False
    """
    try:
        # get_ipython is defined in the namespace only when running under IPython
        if "terminal" in str(type(get_ipython())):
            return False
        else:
            return True
    except NameError:
        return False

def setup_latex_env_notebook(pf, latexExists):  # pragma: nocover
    """This is needed for use of the latex_envs notebook extension,
    which allows the use of environments in Markdown.

    Parameters
    ----------
    pf : str (platform)
        Output of determine_platform()
    latexExists : bool
        True if LaTeX is installed (e.g., output of test_latex_installation)
    """
    import matplotlib.pyplot as plt

    plt.rc("font", family="serif")
    plt.rc("text", usetex=latexExists)
    if latexExists:
        latex_preamble = (
            r"\usepackage{amsmath}\usepackage{amsfonts}"
            r"\usepackage[T1]{fontenc}"
            r"\providecommand{\Ex}{\mathbb{E}}"
            r"\providecommand{\StE}{\check}"
            r"\providecommand{\Trg}{\hat}"
            r"\providecommand{\PermGroFac}{\Gamma}"
            r"\providecommand{\cLev}{\pmb{\mathrm{c}}}"
            r"\providecommand{\mLev}{\pmb{\mathrm{m}}}"
            r"\providecommand{\Rfree}{\mathsf{R}}"
            r"\providecommand{\DiscFac}{\beta}"
            r"\providecommand{\CRRA}{\rho}"
            r"\providecommand{\MPC}{\kappa}"
            r"\providecommand{\UnempPrb}{\wp}"
        )
        # LaTeX expects paths to be separated by /; \ might result in pieces
        # being interpreted as commands.
        latexdefs_path = os.getcwd().replace(os.path.sep, "/") + "/latexdefs.tex"
        if os.path.isfile(latexdefs_path):
            latex_preamble = latex_preamble + r"\input{" + latexdefs_path + r"}"
        else:  # The required latex_envs package needs this file to exist, even if it is empty
            from pathlib import Path

            Path(latexdefs_path).touch()
        plt.rcParams["text.latex.preamble"] = latex_preamble

def make_figs(figure_name, saveFigs, drawFigs, target_dir="Figures"):
    """Utility function to save a figure in multiple formats and display the image.

    Parameters
    ----------
    figure_name : str
        Name of the figure
    saveFigs : bool
        True if the figure needs to be written to disk, else False
    drawFigs : bool
        True if the figure should be displayed using plt.draw()
    target_dir : str, default = 'Figures'
        Name of the folder in the current directory to save figures to
    """
    import matplotlib.pyplot as plt

    if saveFigs:
        # Where to put any figures that the user wants to save
        my_file_path = os.getcwd()  # Find the current working directory
        Figures_dir = os.path.join(
            my_file_path, f"{target_dir}"
        )  # LaTeX document assumes figures will be here
        if not os.path.exists(Figures_dir):
            os.makedirs(Figures_dir)  # If the directory does not exist, create it
        # Save the figure in several formats
        print(f"Saving figure {figure_name} in {target_dir}")
        plt.savefig(
            os.path.join(target_dir, f"{figure_name}.jpg"),
            # metadata is not supported for jpg
        )  # For web/html
        plt.savefig(
            os.path.join(target_dir, f"{figure_name}.png"),
            metadata={"CreationDate": None},
        )  # For web/html
        plt.savefig(
            os.path.join(target_dir, f"{figure_name}.pdf"),
            metadata={"CreationDate": None},
        )  # For LaTeX
        plt.savefig(
            os.path.join(target_dir, f"{figure_name}.svg"),
            metadata={"Date": None},
        )  # For html5
    # Make sure it's possible to plot it by checking for a GUI
    if drawFigs and find_gui():
        plt.ion()  # Counterintuitively, you want interactive mode on if you don't want to interact
        plt.draw()
        plt.pause(2)  # Pause briefly so the figure actually renders

def find_gui():
    """Quick fix to check if matplotlib is running in a GUI environment.

    Returns
    -------
    bool : Boolean
        True if it's a GUI environment, False if not.
    """
    try:
        import matplotlib.pyplot as plt
    except ImportError:
        return False
    if plt.get_backend() == "Agg":
        return False
    return True

def benchmark(
    agent, sort_by="tottime", max_print=10, filename="restats", return_output=False
):  # pragma: nocover
    """
    Profiling tool for HARK models. Calling `benchmark` on an agent calls the
    solver for the agent and provides time to solve as well as the top
    `max_print` function calls in terms of `sort_by`. Optionally allows for
    saving a text copy of the profile as well as returning the `Stats` object
    for further inspection.

    For more details on the python profilers, see
    https://docs.python.org/3/library/profile.html#the-stats-class

    Parameters
    ----------
    agent : AgentType
        A HARK AgentType with a solve() method.
    sort_by : string
        A string to sort the stats by.
    max_print : int
        Number of lines to print.
    filename : string
        Optional filename to save output.
    return_output : bool
        Boolean to determine whether the Stats object should be returned.

    Returns
    -------
    stats : Stats (optional)
        Profiling object with call statistics.
    """
    cProfile.run("agent.solve()", filename)
    stats = pstats.Stats(filename)
    stats.strip_dirs()
    stats.sort_stats(sort_by)
    stats.print_stats(max_print)
    if return_output:
        return stats
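
# Illustrative sketch: profile solving a HARK consumer model and keep the Stats
# object for further inspection (the model choice here is just an example).
#
#     from HARK.ConsumptionSaving.ConsIndShockModel import IndShockConsumerType
#
#     agent = IndShockConsumerType()
#     stats = benchmark(agent, sort_by="cumtime", max_print=5, return_output=True)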

# Regular expression matching decimal numbers with 8 to 20 digits after the point
simpledec = re.compile(r"\d*\.\d{8,20}")


def mround(match):
    """Round a matched high-precision decimal string to 5 decimal places."""
    return f"{float(match.group()):.5f}"


def round_in_file(filename):  # pragma: nocover
    """Round all high-precision decimals in a file to 5 decimal places, in place."""
    with open(filename, "r+") as file:
        filetext = file.read()
        filetext = re.sub(simpledec, mround, filetext)
        file.seek(0)
        file.write(filetext)
        file.truncate()

def files_in_dir(mypath):
    """Return the full paths of all files (not directories) in mypath."""
    return [
        os.path.join(mypath, f)
        for f in os.listdir(mypath)
        if os.path.isfile(os.path.join(mypath, f))
    ]