Coverage for /var/srv/projects/api.amasfac.comuna18.com/tmp/venv/lib/python3.9/site-packages/numpy/lib/function_base.py: 9%

1212 statements  

« prev     ^ index     » next       coverage.py v6.4.4, created at 2023-07-17 14:22 -0600

1import collections.abc 

2import functools 

3import re 

4import sys 

5import warnings 

6 

7import numpy as np 

8import numpy.core.numeric as _nx 

9from numpy.core import transpose 

10from numpy.core.numeric import ( 

11 ones, zeros_like, arange, concatenate, array, asarray, asanyarray, empty, 

12 ndarray, take, dot, where, intp, integer, isscalar, absolute 

13 ) 

14from numpy.core.umath import ( 

15 pi, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin, 

16 mod, exp, not_equal, subtract 

17 ) 

18from numpy.core.fromnumeric import ( 

19 ravel, nonzero, partition, mean, any, sum 

20 ) 

21from numpy.core.numerictypes import typecodes 

22from numpy.core.overrides import set_module 

23from numpy.core import overrides 

24from numpy.core.function_base import add_newdoc 

25from numpy.lib.twodim_base import diag 

26from numpy.core.multiarray import ( 

27 _insert, add_docstring, bincount, normalize_axis_index, _monotonicity, 

28 interp as compiled_interp, interp_complex as compiled_interp_complex 

29 ) 

30from numpy.core.umath import _add_newdoc_ufunc as add_newdoc_ufunc 

31 

32import builtins 

33 

34# needed in this module for compatibility 

35from numpy.lib.histograms import histogram, histogramdd # noqa: F401 

36 

37 

38array_function_dispatch = functools.partial( 

39 overrides.array_function_dispatch, module='numpy') 

40 

41 

42__all__ = [ 

43 'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile', 

44 'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'disp', 'flip', 

45 'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average', 

46 'bincount', 'digitize', 'cov', 'corrcoef', 

47 'msort', 'median', 'sinc', 'hamming', 'hanning', 'bartlett', 

48 'blackman', 'kaiser', 'trapz', 'i0', 'add_newdoc', 'add_docstring', 

49 'meshgrid', 'delete', 'insert', 'append', 'interp', 'add_newdoc_ufunc', 

50 'quantile' 

51 ] 

52 

53# _QuantileMethods is a dictionary listing all the supported methods to 

54# compute quantile/percentile. 

55# 

56# Below virtual_index refer to the index of the element where the percentile 

57# would be found in the sorted sample. 

58# When the sample contains exactly the percentile wanted, the virtual_index is 

59# an integer to the index of this element. 

60# When the percentile wanted is in between two elements, the virtual_index 

61# is made of a integer part (a.k.a 'i' or 'left') and a fractional part 

62# (a.k.a 'g' or 'gamma') 

63# 

64# Each method in _QuantileMethods has two properties 

65# get_virtual_index : Callable 

66# The function used to compute the virtual_index. 

67# fix_gamma : Callable 

68# A function used for discret methods to force the index to a specific value. 

69_QuantileMethods = dict( 69 ↛ exitline 69 didn't jump to the function exit

70 # --- HYNDMAN and FAN METHODS 

71 # Discrete methods 

72 inverted_cdf=dict( 

73 get_virtual_index=lambda n, quantiles: _inverted_cdf(n, quantiles), 

74 fix_gamma=lambda gamma, _: gamma, # should never be called 

75 ), 

76 averaged_inverted_cdf=dict( 

77 get_virtual_index=lambda n, quantiles: (n * quantiles) - 1, 

78 fix_gamma=lambda gamma, _: _get_gamma_mask( 

79 shape=gamma.shape, 

80 default_value=1., 

81 conditioned_value=0.5, 

82 where=gamma == 0), 

83 ), 

84 closest_observation=dict( 

85 get_virtual_index=lambda n, quantiles: _closest_observation(n, 

86 quantiles), 

87 fix_gamma=lambda gamma, _: gamma, # should never be called 

88 ), 

89 # Continuous methods 

90 interpolated_inverted_cdf=dict( 

91 get_virtual_index=lambda n, quantiles: 

92 _compute_virtual_index(n, quantiles, 0, 1), 

93 fix_gamma=lambda gamma, _: gamma, 

94 ), 

95 hazen=dict( 

96 get_virtual_index=lambda n, quantiles: 

97 _compute_virtual_index(n, quantiles, 0.5, 0.5), 

98 fix_gamma=lambda gamma, _: gamma, 

99 ), 

100 weibull=dict( 

101 get_virtual_index=lambda n, quantiles: 

102 _compute_virtual_index(n, quantiles, 0, 0), 

103 fix_gamma=lambda gamma, _: gamma, 

104 ), 

105 # Default method. 

106 # To avoid some rounding issues, `(n-1) * quantiles` is preferred to 

107 # `_compute_virtual_index(n, quantiles, 1, 1)`. 

108 # They are mathematically equivalent. 

109 linear=dict( 

110 get_virtual_index=lambda n, quantiles: (n - 1) * quantiles, 

111 fix_gamma=lambda gamma, _: gamma, 

112 ), 

113 median_unbiased=dict( 

114 get_virtual_index=lambda n, quantiles: 

115 _compute_virtual_index(n, quantiles, 1 / 3.0, 1 / 3.0), 

116 fix_gamma=lambda gamma, _: gamma, 

117 ), 

118 normal_unbiased=dict( 

119 get_virtual_index=lambda n, quantiles: 

120 _compute_virtual_index(n, quantiles, 3 / 8.0, 3 / 8.0), 

121 fix_gamma=lambda gamma, _: gamma, 

122 ), 

123 # --- OTHER METHODS 

124 lower=dict( 

125 get_virtual_index=lambda n, quantiles: np.floor( 

126 (n - 1) * quantiles).astype(np.intp), 

127 fix_gamma=lambda gamma, _: gamma, 

128 # should never be called, index dtype is int 

129 ), 

130 higher=dict( 

131 get_virtual_index=lambda n, quantiles: np.ceil( 

132 (n - 1) * quantiles).astype(np.intp), 

133 fix_gamma=lambda gamma, _: gamma, 

134 # should never be called, index dtype is int 

135 ), 

136 midpoint=dict( 

137 get_virtual_index=lambda n, quantiles: 0.5 * ( 

138 np.floor((n - 1) * quantiles) 

139 + np.ceil((n - 1) * quantiles)), 

140 fix_gamma=lambda gamma, index: _get_gamma_mask( 

141 shape=gamma.shape, 

142 default_value=0.5, 

143 conditioned_value=0., 

144 where=index % 1 == 0), 

145 ), 

146 nearest=dict( 

147 get_virtual_index=lambda n, quantiles: np.around( 

148 (n - 1) * quantiles).astype(np.intp), 

149 fix_gamma=lambda gamma, _: gamma, 

150 # should never be called, index dtype is int 

151 )) 

152 

153 

154def _rot90_dispatcher(m, k=None, axes=None): 

155 return (m,) 

156 

157 

158@array_function_dispatch(_rot90_dispatcher) 

159def rot90(m, k=1, axes=(0, 1)): 

160 """ 

161 Rotate an array by 90 degrees in the plane specified by axes. 

162 

163 Rotation direction is from the first towards the second axis. 

164 

165 Parameters 

166 ---------- 

167 m : array_like 

168 Array of two or more dimensions. 

169 k : integer 

170 Number of times the array is rotated by 90 degrees. 

171 axes : (2,) array_like 

172 The array is rotated in the plane defined by the axes. 

173 Axes must be different. 

174 

175 .. versionadded:: 1.12.0 

176 

177 Returns 

178 ------- 

179 y : ndarray 

180 A rotated view of `m`. 

181 

182 See Also 

183 -------- 

184 flip : Reverse the order of elements in an array along the given axis. 

185 fliplr : Flip an array horizontally. 

186 flipud : Flip an array vertically. 

187 

188 Notes 

189 ----- 

190 ``rot90(m, k=1, axes=(1,0))`` is the reverse of 

191 ``rot90(m, k=1, axes=(0,1))`` 

192 

193 ``rot90(m, k=1, axes=(1,0))`` is equivalent to 

194 ``rot90(m, k=-1, axes=(0,1))`` 

195 

196 Examples 

197 -------- 

198 >>> m = np.array([[1,2],[3,4]], int) 

199 >>> m 

200 array([[1, 2], 

201 [3, 4]]) 

202 >>> np.rot90(m) 

203 array([[2, 4], 

204 [1, 3]]) 

205 >>> np.rot90(m, 2) 

206 array([[4, 3], 

207 [2, 1]]) 

208 >>> m = np.arange(8).reshape((2,2,2)) 

209 >>> np.rot90(m, 1, (1,2)) 

210 array([[[1, 3], 

211 [0, 2]], 

212 [[5, 7], 

213 [4, 6]]]) 

214 

215 """ 

216 axes = tuple(axes) 

217 if len(axes) != 2: 

218 raise ValueError("len(axes) must be 2.") 

219 

220 m = asanyarray(m) 

221 

222 if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim: 

223 raise ValueError("Axes must be different.") 

224 

225 if (axes[0] >= m.ndim or axes[0] < -m.ndim 

226 or axes[1] >= m.ndim or axes[1] < -m.ndim): 

227 raise ValueError("Axes={} out of range for array of ndim={}." 

228 .format(axes, m.ndim)) 

229 

230 k %= 4 

231 

232 if k == 0: 

233 return m[:] 

234 if k == 2: 

235 return flip(flip(m, axes[0]), axes[1]) 

236 

237 axes_list = arange(0, m.ndim) 

238 (axes_list[axes[0]], axes_list[axes[1]]) = (axes_list[axes[1]], 

239 axes_list[axes[0]]) 

240 

241 if k == 1: 

242 return transpose(flip(m, axes[1]), axes_list) 

243 else: 

244 # k == 3 

245 return flip(transpose(m, axes_list), axes[1]) 

246 

247 

248def _flip_dispatcher(m, axis=None): 

249 return (m,) 

250 

251 

252@array_function_dispatch(_flip_dispatcher) 

253def flip(m, axis=None): 

254 """ 

255 Reverse the order of elements in an array along the given axis. 

256 

257 The shape of the array is preserved, but the elements are reordered. 

258 

259 .. versionadded:: 1.12.0 

260 

261 Parameters 

262 ---------- 

263 m : array_like 

264 Input array. 

265 axis : None or int or tuple of ints, optional 

266 Axis or axes along which to flip over. The default, 

267 axis=None, will flip over all of the axes of the input array. 

268 If axis is negative it counts from the last to the first axis. 

269 

270 If axis is a tuple of ints, flipping is performed on all of the axes 

271 specified in the tuple. 

272 

273 .. versionchanged:: 1.15.0 

274 None and tuples of axes are supported 

275 

276 Returns 

277 ------- 

278 out : array_like 

279 A view of `m` with the entries of axis reversed. Since a view is 

280 returned, this operation is done in constant time. 

281 

282 See Also 

283 -------- 

284 flipud : Flip an array vertically (axis=0). 

285 fliplr : Flip an array horizontally (axis=1). 

286 

287 Notes 

288 ----- 

289 flip(m, 0) is equivalent to flipud(m). 

290 

291 flip(m, 1) is equivalent to fliplr(m). 

292 

293 flip(m, n) corresponds to ``m[...,::-1,...]`` with ``::-1`` at position n. 

294 

295 flip(m) corresponds to ``m[::-1,::-1,...,::-1]`` with ``::-1`` at all 

296 positions. 

297 

298 flip(m, (0, 1)) corresponds to ``m[::-1,::-1,...]`` with ``::-1`` at 

299 position 0 and position 1. 

300 

301 Examples 

302 -------- 

303 >>> A = np.arange(8).reshape((2,2,2)) 

304 >>> A 

305 array([[[0, 1], 

306 [2, 3]], 

307 [[4, 5], 

308 [6, 7]]]) 

309 >>> np.flip(A, 0) 

310 array([[[4, 5], 

311 [6, 7]], 

312 [[0, 1], 

313 [2, 3]]]) 

314 >>> np.flip(A, 1) 

315 array([[[2, 3], 

316 [0, 1]], 

317 [[6, 7], 

318 [4, 5]]]) 

319 >>> np.flip(A) 

320 array([[[7, 6], 

321 [5, 4]], 

322 [[3, 2], 

323 [1, 0]]]) 

324 >>> np.flip(A, (0, 2)) 

325 array([[[5, 4], 

326 [7, 6]], 

327 [[1, 0], 

328 [3, 2]]]) 

329 >>> A = np.random.randn(3,4,5) 

330 >>> np.all(np.flip(A,2) == A[:,:,::-1,...]) 

331 True 

332 """ 

333 if not hasattr(m, 'ndim'): 

334 m = asarray(m) 

335 if axis is None: 

336 indexer = (np.s_[::-1],) * m.ndim 

337 else: 

338 axis = _nx.normalize_axis_tuple(axis, m.ndim) 

339 indexer = [np.s_[:]] * m.ndim 

340 for ax in axis: 

341 indexer[ax] = np.s_[::-1] 

342 indexer = tuple(indexer) 

343 return m[indexer] 

344 

345 

346@set_module('numpy') 

347def iterable(y): 

348 """ 

349 Check whether or not an object can be iterated over. 

350 

351 Parameters 

352 ---------- 

353 y : object 

354 Input object. 

355 

356 Returns 

357 ------- 

358 b : bool 

359 Return ``True`` if the object has an iterator method or is a 

360 sequence and ``False`` otherwise. 

361 

362 

363 Examples 

364 -------- 

365 >>> np.iterable([1, 2, 3]) 

366 True 

367 >>> np.iterable(2) 

368 False 

369 

370 Notes 

371 ----- 

372 In most cases, the results of ``np.iterable(obj)`` are consistent with 

373 ``isinstance(obj, collections.abc.Iterable)``. One notable exception is 

374 the treatment of 0-dimensional arrays:: 

375 

376 >>> from collections.abc import Iterable 

377 >>> a = np.array(1.0) # 0-dimensional numpy array 

378 >>> isinstance(a, Iterable) 

379 True 

380 >>> np.iterable(a) 

381 False 

382 

383 """ 

384 try: 

385 iter(y) 

386 except TypeError: 

387 return False 

388 return True 

389 

390 

391def _average_dispatcher(a, axis=None, weights=None, returned=None, *, 

392 keepdims=None): 

393 return (a, weights) 

394 

395 

396@array_function_dispatch(_average_dispatcher) 

397def average(a, axis=None, weights=None, returned=False, *, 

398 keepdims=np._NoValue): 

399 """ 

400 Compute the weighted average along the specified axis. 

401 

402 Parameters 

403 ---------- 

404 a : array_like 

405 Array containing data to be averaged. If `a` is not an array, a 

406 conversion is attempted. 

407 axis : None or int or tuple of ints, optional 

408 Axis or axes along which to average `a`. The default, 

409 axis=None, will average over all of the elements of the input array. 

410 If axis is negative it counts from the last to the first axis. 

411 

412 .. versionadded:: 1.7.0 

413 

414 If axis is a tuple of ints, averaging is performed on all of the axes 

415 specified in the tuple instead of a single axis or all the axes as 

416 before. 

417 weights : array_like, optional 

418 An array of weights associated with the values in `a`. Each value in 

419 `a` contributes to the average according to its associated weight. 

420 The weights array can either be 1-D (in which case its length must be 

421 the size of `a` along the given axis) or of the same shape as `a`. 

422 If `weights=None`, then all data in `a` are assumed to have a 

423 weight equal to one. The 1-D calculation is:: 

424 

425 avg = sum(a * weights) / sum(weights) 

426 

427 The only constraint on `weights` is that `sum(weights)` must not be 0. 

428 returned : bool, optional 

429 Default is `False`. If `True`, the tuple (`average`, `sum_of_weights`) 

430 is returned, otherwise only the average is returned. 

431 If `weights=None`, `sum_of_weights` is equivalent to the number of 

432 elements over which the average is taken. 

433 keepdims : bool, optional 

434 If this is set to True, the axes which are reduced are left 

435 in the result as dimensions with size one. With this option, 

436 the result will broadcast correctly against the original `a`. 

437 *Note:* `keepdims` will not work with instances of `numpy.matrix` 

438 or other classes whose methods do not support `keepdims`. 

439 

440 .. versionadded:: 1.23.0 

441 

442 Returns 

443 ------- 

444 retval, [sum_of_weights] : array_type or double 

445 Return the average along the specified axis. When `returned` is `True`, 

446 return a tuple with the average as the first element and the sum 

447 of the weights as the second element. `sum_of_weights` is of the 

448 same type as `retval`. The result dtype follows a genereal pattern. 

449 If `weights` is None, the result dtype will be that of `a` , or ``float64`` 

450 if `a` is integral. Otherwise, if `weights` is not None and `a` is non- 

451 integral, the result type will be the type of lowest precision capable of 

452 representing values of both `a` and `weights`. If `a` happens to be 

453 integral, the previous rules still applies but the result dtype will 

454 at least be ``float64``. 

455 

456 Raises 

457 ------ 

458 ZeroDivisionError 

459 When all weights along axis are zero. See `numpy.ma.average` for a 

460 version robust to this type of error. 

461 TypeError 

462 When the length of 1D `weights` is not the same as the shape of `a` 

463 along axis. 

464 

465 See Also 

466 -------- 

467 mean 

468 

469 ma.average : average for masked arrays -- useful if your data contains 

470 "missing" values 

471 numpy.result_type : Returns the type that results from applying the 

472 numpy type promotion rules to the arguments. 

473 

474 Examples 

475 -------- 

476 >>> data = np.arange(1, 5) 

477 >>> data 

478 array([1, 2, 3, 4]) 

479 >>> np.average(data) 

480 2.5 

481 >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1)) 

482 4.0 

483 

484 >>> data = np.arange(6).reshape((3, 2)) 

485 >>> data 

486 array([[0, 1], 

487 [2, 3], 

488 [4, 5]]) 

489 >>> np.average(data, axis=1, weights=[1./4, 3./4]) 

490 array([0.75, 2.75, 4.75]) 

491 >>> np.average(data, weights=[1./4, 3./4]) 

492 Traceback (most recent call last): 

493 ... 

494 TypeError: Axis must be specified when shapes of a and weights differ. 

495 

496 >>> a = np.ones(5, dtype=np.float128) 

497 >>> w = np.ones(5, dtype=np.complex64) 

498 >>> avg = np.average(a, weights=w) 

499 >>> print(avg.dtype) 

500 complex256 

501 

502 With ``keepdims=True``, the following result has shape (3, 1). 

503 

504 >>> np.average(data, axis=1, keepdims=True) 

505 array([[0.5], 

506 [2.5], 

507 [4.5]]) 

508 """ 

509 a = np.asanyarray(a) 

510 

511 if keepdims is np._NoValue: 

512 # Don't pass on the keepdims argument if one wasn't given. 

513 keepdims_kw = {} 

514 else: 

515 keepdims_kw = {'keepdims': keepdims} 

516 

517 if weights is None: 

518 avg = a.mean(axis, **keepdims_kw) 

519 scl = avg.dtype.type(a.size/avg.size) 

520 else: 

521 wgt = np.asanyarray(weights) 

522 

523 if issubclass(a.dtype.type, (np.integer, np.bool_)): 

524 result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8') 

525 else: 

526 result_dtype = np.result_type(a.dtype, wgt.dtype) 

527 

528 # Sanity checks 

529 if a.shape != wgt.shape: 

530 if axis is None: 

531 raise TypeError( 

532 "Axis must be specified when shapes of a and weights " 

533 "differ.") 

534 if wgt.ndim != 1: 

535 raise TypeError( 

536 "1D weights expected when shapes of a and weights differ.") 

537 if wgt.shape[0] != a.shape[axis]: 

538 raise ValueError( 

539 "Length of weights not compatible with specified axis.") 

540 

541 # setup wgt to broadcast along axis 

542 wgt = np.broadcast_to(wgt, (a.ndim-1)*(1,) + wgt.shape) 

543 wgt = wgt.swapaxes(-1, axis) 

544 

545 scl = wgt.sum(axis=axis, dtype=result_dtype, **keepdims_kw) 

546 if np.any(scl == 0.0): 

547 raise ZeroDivisionError( 

548 "Weights sum to zero, can't be normalized") 

549 

550 avg = np.multiply(a, wgt, 

551 dtype=result_dtype).sum(axis, **keepdims_kw) / scl 

552 

553 if returned: 

554 if scl.shape != avg.shape: 

555 scl = np.broadcast_to(scl, avg.shape).copy() 

556 return avg, scl 

557 else: 

558 return avg 

559 

560 

561@set_module('numpy') 

562def asarray_chkfinite(a, dtype=None, order=None): 

563 """Convert the input to an array, checking for NaNs or Infs. 

564 

565 Parameters 

566 ---------- 

567 a : array_like 

568 Input data, in any form that can be converted to an array. This 

569 includes lists, lists of tuples, tuples, tuples of tuples, tuples 

570 of lists and ndarrays. Success requires no NaNs or Infs. 

571 dtype : data-type, optional 

572 By default, the data-type is inferred from the input data. 

573 order : {'C', 'F', 'A', 'K'}, optional 

574 Memory layout. 'A' and 'K' depend on the order of input array a. 

575 'C' row-major (C-style), 

576 'F' column-major (Fortran-style) memory representation. 

577 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise 

578 'K' (keep) preserve input order 

579 Defaults to 'C'. 

580 

581 Returns 

582 ------- 

583 out : ndarray 

584 Array interpretation of `a`. No copy is performed if the input 

585 is already an ndarray. If `a` is a subclass of ndarray, a base 

586 class ndarray is returned. 

587 

588 Raises 

589 ------ 

590 ValueError 

591 Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity). 

592 

593 See Also 

594 -------- 

595 asarray : Create and array. 

596 asanyarray : Similar function which passes through subclasses. 

597 ascontiguousarray : Convert input to a contiguous array. 

598 asfarray : Convert input to a floating point ndarray. 

599 asfortranarray : Convert input to an ndarray with column-major 

600 memory order. 

601 fromiter : Create an array from an iterator. 

602 fromfunction : Construct an array by executing a function on grid 

603 positions. 

604 

605 Examples 

606 -------- 

607 Convert a list into an array. If all elements are finite 

608 ``asarray_chkfinite`` is identical to ``asarray``. 

609 

610 >>> a = [1, 2] 

611 >>> np.asarray_chkfinite(a, dtype=float) 

612 array([1., 2.]) 

613 

614 Raises ValueError if array_like contains Nans or Infs. 

615 

616 >>> a = [1, 2, np.inf] 

617 >>> try: 

618 ... np.asarray_chkfinite(a) 

619 ... except ValueError: 

620 ... print('ValueError') 

621 ... 

622 ValueError 

623 

624 """ 

625 a = asarray(a, dtype=dtype, order=order) 

626 if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all(): 

627 raise ValueError( 

628 "array must not contain infs or NaNs") 

629 return a 

630 

631 

632def _piecewise_dispatcher(x, condlist, funclist, *args, **kw): 

633 yield x 

634 # support the undocumented behavior of allowing scalars 

635 if np.iterable(condlist): 

636 yield from condlist 

637 

638 

639@array_function_dispatch(_piecewise_dispatcher) 

640def piecewise(x, condlist, funclist, *args, **kw): 

641 """ 

642 Evaluate a piecewise-defined function. 

643 

644 Given a set of conditions and corresponding functions, evaluate each 

645 function on the input data wherever its condition is true. 

646 

647 Parameters 

648 ---------- 

649 x : ndarray or scalar 

650 The input domain. 

651 condlist : list of bool arrays or bool scalars 

652 Each boolean array corresponds to a function in `funclist`. Wherever 

653 `condlist[i]` is True, `funclist[i](x)` is used as the output value. 

654 

655 Each boolean array in `condlist` selects a piece of `x`, 

656 and should therefore be of the same shape as `x`. 

657 

658 The length of `condlist` must correspond to that of `funclist`. 

659 If one extra function is given, i.e. if 

660 ``len(funclist) == len(condlist) + 1``, then that extra function 

661 is the default value, used wherever all conditions are false. 

662 funclist : list of callables, f(x,*args,**kw), or scalars 

663 Each function is evaluated over `x` wherever its corresponding 

664 condition is True. It should take a 1d array as input and give an 1d 

665 array or a scalar value as output. If, instead of a callable, 

666 a scalar is provided then a constant function (``lambda x: scalar``) is 

667 assumed. 

668 args : tuple, optional 

669 Any further arguments given to `piecewise` are passed to the functions 

670 upon execution, i.e., if called ``piecewise(..., ..., 1, 'a')``, then 

671 each function is called as ``f(x, 1, 'a')``. 

672 kw : dict, optional 

673 Keyword arguments used in calling `piecewise` are passed to the 

674 functions upon execution, i.e., if called 

675 ``piecewise(..., ..., alpha=1)``, then each function is called as 

676 ``f(x, alpha=1)``. 

677 

678 Returns 

679 ------- 

680 out : ndarray 

681 The output is the same shape and type as x and is found by 

682 calling the functions in `funclist` on the appropriate portions of `x`, 

683 as defined by the boolean arrays in `condlist`. Portions not covered 

684 by any condition have a default value of 0. 

685 

686 

687 See Also 

688 -------- 

689 choose, select, where 

690 

691 Notes 

692 ----- 

693 This is similar to choose or select, except that functions are 

694 evaluated on elements of `x` that satisfy the corresponding condition from 

695 `condlist`. 

696 

697 The result is:: 

698 

699 |-- 

700 |funclist[0](x[condlist[0]]) 

701 out = |funclist[1](x[condlist[1]]) 

702 |... 

703 |funclist[n2](x[condlist[n2]]) 

704 |-- 

705 

706 Examples 

707 -------- 

708 Define the sigma function, which is -1 for ``x < 0`` and +1 for ``x >= 0``. 

709 

710 >>> x = np.linspace(-2.5, 2.5, 6) 

711 >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1]) 

712 array([-1., -1., -1., 1., 1., 1.]) 

713 

714 Define the absolute value, which is ``-x`` for ``x <0`` and ``x`` for 

715 ``x >= 0``. 

716 

717 >>> np.piecewise(x, [x < 0, x >= 0], [lambda x: -x, lambda x: x]) 

718 array([2.5, 1.5, 0.5, 0.5, 1.5, 2.5]) 

719 

720 Apply the same function to a scalar value. 

721 

722 >>> y = -2 

723 >>> np.piecewise(y, [y < 0, y >= 0], [lambda x: -x, lambda x: x]) 

724 array(2) 

725 

726 """ 

727 x = asanyarray(x) 

728 n2 = len(funclist) 

729 

730 # undocumented: single condition is promoted to a list of one condition 

731 if isscalar(condlist) or ( 

732 not isinstance(condlist[0], (list, ndarray)) and x.ndim != 0): 

733 condlist = [condlist] 

734 

735 condlist = asarray(condlist, dtype=bool) 

736 n = len(condlist) 

737 

738 if n == n2 - 1: # compute the "otherwise" condition. 

739 condelse = ~np.any(condlist, axis=0, keepdims=True) 

740 condlist = np.concatenate([condlist, condelse], axis=0) 

741 n += 1 

742 elif n != n2: 

743 raise ValueError( 

744 "with {} condition(s), either {} or {} functions are expected" 

745 .format(n, n, n+1) 

746 ) 

747 

748 y = zeros_like(x) 

749 for cond, func in zip(condlist, funclist): 

750 if not isinstance(func, collections.abc.Callable): 

751 y[cond] = func 

752 else: 

753 vals = x[cond] 

754 if vals.size > 0: 

755 y[cond] = func(vals, *args, **kw) 

756 

757 return y 

758 

759 

760def _select_dispatcher(condlist, choicelist, default=None): 

761 yield from condlist 

762 yield from choicelist 

763 

764 

765@array_function_dispatch(_select_dispatcher) 

766def select(condlist, choicelist, default=0): 

767 """ 

768 Return an array drawn from elements in choicelist, depending on conditions. 

769 

770 Parameters 

771 ---------- 

772 condlist : list of bool ndarrays 

773 The list of conditions which determine from which array in `choicelist` 

774 the output elements are taken. When multiple conditions are satisfied, 

775 the first one encountered in `condlist` is used. 

776 choicelist : list of ndarrays 

777 The list of arrays from which the output elements are taken. It has 

778 to be of the same length as `condlist`. 

779 default : scalar, optional 

780 The element inserted in `output` when all conditions evaluate to False. 

781 

782 Returns 

783 ------- 

784 output : ndarray 

785 The output at position m is the m-th element of the array in 

786 `choicelist` where the m-th element of the corresponding array in 

787 `condlist` is True. 

788 

789 See Also 

790 -------- 

791 where : Return elements from one of two arrays depending on condition. 

792 take, choose, compress, diag, diagonal 

793 

794 Examples 

795 -------- 

796 >>> x = np.arange(6) 

797 >>> condlist = [x<3, x>3] 

798 >>> choicelist = [x, x**2] 

799 >>> np.select(condlist, choicelist, 42) 

800 array([ 0, 1, 2, 42, 16, 25]) 

801 

802 >>> condlist = [x<=4, x>3] 

803 >>> choicelist = [x, x**2] 

804 >>> np.select(condlist, choicelist, 55) 

805 array([ 0, 1, 2, 3, 4, 25]) 

806 

807 """ 

808 # Check the size of condlist and choicelist are the same, or abort. 

809 if len(condlist) != len(choicelist): 

810 raise ValueError( 

811 'list of cases must be same length as list of conditions') 

812 

813 # Now that the dtype is known, handle the deprecated select([], []) case 

814 if len(condlist) == 0: 

815 raise ValueError("select with an empty condition list is not possible") 

816 

817 choicelist = [np.asarray(choice) for choice in choicelist] 

818 

819 try: 

820 intermediate_dtype = np.result_type(*choicelist) 

821 except TypeError as e: 

822 msg = f'Choicelist elements do not have a common dtype: {e}' 

823 raise TypeError(msg) from None 

824 default_array = np.asarray(default) 

825 choicelist.append(default_array) 

826 

827 # need to get the result type before broadcasting for correct scalar 

828 # behaviour 

829 try: 

830 dtype = np.result_type(intermediate_dtype, default_array) 

831 except TypeError as e: 

832 msg = f'Choicelists and default value do not have a common dtype: {e}' 

833 raise TypeError(msg) from None 

834 

835 # Convert conditions to arrays and broadcast conditions and choices 

836 # as the shape is needed for the result. Doing it separately optimizes 

837 # for example when all choices are scalars. 

838 condlist = np.broadcast_arrays(*condlist) 

839 choicelist = np.broadcast_arrays(*choicelist) 

840 

841 # If cond array is not an ndarray in boolean format or scalar bool, abort. 

842 for i, cond in enumerate(condlist): 

843 if cond.dtype.type is not np.bool_: 

844 raise TypeError( 

845 'invalid entry {} in condlist: should be boolean ndarray'.format(i)) 

846 

847 if choicelist[0].ndim == 0: 

848 # This may be common, so avoid the call. 

849 result_shape = condlist[0].shape 

850 else: 

851 result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape 

852 

853 result = np.full(result_shape, choicelist[-1], dtype) 

854 

855 # Use np.copyto to burn each choicelist array onto result, using the 

856 # corresponding condlist as a boolean mask. This is done in reverse 

857 # order since the first choice should take precedence. 

858 choicelist = choicelist[-2::-1] 

859 condlist = condlist[::-1] 

860 for choice, cond in zip(choicelist, condlist): 

861 np.copyto(result, choice, where=cond) 

862 

863 return result 

864 

865 

866def _copy_dispatcher(a, order=None, subok=None): 

867 return (a,) 

868 

869 

870@array_function_dispatch(_copy_dispatcher) 

871def copy(a, order='K', subok=False): 

872 """ 

873 Return an array copy of the given object. 

874 

875 Parameters 

876 ---------- 

877 a : array_like 

878 Input data. 

879 order : {'C', 'F', 'A', 'K'}, optional 

880 Controls the memory layout of the copy. 'C' means C-order, 

881 'F' means F-order, 'A' means 'F' if `a` is Fortran contiguous, 

882 'C' otherwise. 'K' means match the layout of `a` as closely 

883 as possible. (Note that this function and :meth:`ndarray.copy` are very 

884 similar, but have different default values for their order= 

885 arguments.) 

886 subok : bool, optional 

887 If True, then sub-classes will be passed-through, otherwise the 

888 returned array will be forced to be a base-class array (defaults to False). 

889 

890 .. versionadded:: 1.19.0 

891 

892 Returns 

893 ------- 

894 arr : ndarray 

895 Array interpretation of `a`. 

896 

897 See Also 

898 -------- 

899 ndarray.copy : Preferred method for creating an array copy 

900 

901 Notes 

902 ----- 

903 This is equivalent to: 

904 

905 >>> np.array(a, copy=True) #doctest: +SKIP 

906 

907 Examples 

908 -------- 

909 Create an array x, with a reference y and a copy z: 

910 

911 >>> x = np.array([1, 2, 3]) 

912 >>> y = x 

913 >>> z = np.copy(x) 

914 

915 Note that, when we modify x, y changes, but not z: 

916 

917 >>> x[0] = 10 

918 >>> x[0] == y[0] 

919 True 

920 >>> x[0] == z[0] 

921 False 

922 

923 Note that, np.copy clears previously set WRITEABLE=False flag. 

924 

925 >>> a = np.array([1, 2, 3]) 

926 >>> a.flags["WRITEABLE"] = False 

927 >>> b = np.copy(a) 

928 >>> b.flags["WRITEABLE"] 

929 True 

930 >>> b[0] = 3 

931 >>> b 

932 array([3, 2, 3]) 

933 

934 Note that np.copy is a shallow copy and will not copy object 

935 elements within arrays. This is mainly important for arrays 

936 containing Python objects. The new array will contain the 

937 same object which may lead to surprises if that object can 

938 be modified (is mutable): 

939 

940 >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) 

941 >>> b = np.copy(a) 

942 >>> b[2][0] = 10 

943 >>> a 

944 array([1, 'm', list([10, 3, 4])], dtype=object) 

945 

946 To ensure all elements within an ``object`` array are copied, 

947 use `copy.deepcopy`: 

948 

949 >>> import copy 

950 >>> a = np.array([1, 'm', [2, 3, 4]], dtype=object) 

951 >>> c = copy.deepcopy(a) 

952 >>> c[2][0] = 10 

953 >>> c 

954 array([1, 'm', list([10, 3, 4])], dtype=object) 

955 >>> a 

956 array([1, 'm', list([2, 3, 4])], dtype=object) 

957 

958 """ 

959 return array(a, order=order, subok=subok, copy=True) 

960 

961# Basic operations 

962 

963 

964def _gradient_dispatcher(f, *varargs, axis=None, edge_order=None): 

965 yield f 

966 yield from varargs 

967 

968 

969@array_function_dispatch(_gradient_dispatcher) 

970def gradient(f, *varargs, axis=None, edge_order=1): 

971 """ 

972 Return the gradient of an N-dimensional array. 

973 

974 The gradient is computed using second order accurate central differences 

975 in the interior points and either first or second order accurate one-sides 

976 (forward or backwards) differences at the boundaries. 

977 The returned gradient hence has the same shape as the input array. 

978 

979 Parameters 

980 ---------- 

981 f : array_like 

982 An N-dimensional array containing samples of a scalar function. 

983 varargs : list of scalar or array, optional 

984 Spacing between f values. Default unitary spacing for all dimensions. 

985 Spacing can be specified using: 

986 

987 1. single scalar to specify a sample distance for all dimensions. 

988 2. N scalars to specify a constant sample distance for each dimension. 

989 i.e. `dx`, `dy`, `dz`, ... 

990 3. N arrays to specify the coordinates of the values along each 

991 dimension of F. The length of the array must match the size of 

992 the corresponding dimension 

993 4. Any combination of N scalars/arrays with the meaning of 2. and 3. 

994 

995 If `axis` is given, the number of varargs must equal the number of axes. 

996 Default: 1. 

997 

998 edge_order : {1, 2}, optional 

999 Gradient is calculated using N-th order accurate differences 

1000 at the boundaries. Default: 1. 

1001 

1002 .. versionadded:: 1.9.1 

1003 

1004 axis : None or int or tuple of ints, optional 

1005 Gradient is calculated only along the given axis or axes 

1006 The default (axis = None) is to calculate the gradient for all the axes 

1007 of the input array. axis may be negative, in which case it counts from 

1008 the last to the first axis. 

1009 

1010 .. versionadded:: 1.11.0 

1011 

1012 Returns 

1013 ------- 

1014 gradient : ndarray or list of ndarray 

1015 A list of ndarrays (or a single ndarray if there is only one dimension) 

1016 corresponding to the derivatives of f with respect to each dimension. 

1017 Each derivative has the same shape as f. 

1018 

1019 Examples 

1020 -------- 

1021 >>> f = np.array([1, 2, 4, 7, 11, 16], dtype=float) 

1022 >>> np.gradient(f) 

1023 array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) 

1024 >>> np.gradient(f, 2) 

1025 array([0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ]) 

1026 

1027 Spacing can be also specified with an array that represents the coordinates 

1028 of the values F along the dimensions. 

1029 For instance a uniform spacing: 

1030 

1031 >>> x = np.arange(f.size) 

1032 >>> np.gradient(f, x) 

1033 array([1. , 1.5, 2.5, 3.5, 4.5, 5. ]) 

1034 

1035 Or a non uniform one: 

1036 

1037 >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.], dtype=float) 

1038 >>> np.gradient(f, x) 

1039 array([1. , 3. , 3.5, 6.7, 6.9, 2.5]) 

1040 

1041 For two dimensional arrays, the return will be two arrays ordered by 

1042 axis. In this example the first array stands for the gradient in 

1043 rows and the second one in columns direction: 

1044 

1045 >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float)) 

1046 [array([[ 2., 2., -1.], 

1047 [ 2., 2., -1.]]), array([[1. , 2.5, 4. ], 

1048 [1. , 1. , 1. ]])] 

1049 

1050 In this example the spacing is also specified: 

1051 uniform for axis=0 and non uniform for axis=1 

1052 

1053 >>> dx = 2. 

1054 >>> y = [1., 1.5, 3.5] 

1055 >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), dx, y) 

1056 [array([[ 1. , 1. , -0.5], 

1057 [ 1. , 1. , -0.5]]), array([[2. , 2. , 2. ], 

1058 [2. , 1.7, 0.5]])] 

1059 

1060 It is possible to specify how boundaries are treated using `edge_order` 

1061 

1062 >>> x = np.array([0, 1, 2, 3, 4]) 

1063 >>> f = x**2 

1064 >>> np.gradient(f, edge_order=1) 

1065 array([1., 2., 4., 6., 7.]) 

1066 >>> np.gradient(f, edge_order=2) 

1067 array([0., 2., 4., 6., 8.]) 

1068 

1069 The `axis` keyword can be used to specify a subset of axes of which the 

1070 gradient is calculated 

1071 

1072 >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]], dtype=float), axis=0) 

1073 array([[ 2., 2., -1.], 

1074 [ 2., 2., -1.]]) 

1075 

1076 Notes 

1077 ----- 

1078 Assuming that :math:`f\\in C^{3}` (i.e., :math:`f` has at least 3 continuous 

1079 derivatives) and let :math:`h_{*}` be a non-homogeneous stepsize, we 

1080 minimize the "consistency error" :math:`\\eta_{i}` between the true gradient 

1081 and its estimate from a linear combination of the neighboring grid-points: 

1082 

1083 .. math:: 

1084 

1085 \\eta_{i} = f_{i}^{\\left(1\\right)} - 

1086 \\left[ \\alpha f\\left(x_{i}\\right) + 

1087 \\beta f\\left(x_{i} + h_{d}\\right) + 

1088 \\gamma f\\left(x_{i}-h_{s}\\right) 

1089 \\right] 

1090 

1091 By substituting :math:`f(x_{i} + h_{d})` and :math:`f(x_{i} - h_{s})` 

1092 with their Taylor series expansion, this translates into solving 

1093 the following the linear system: 

1094 

1095 .. math:: 

1096 

1097 \\left\\{ 

1098 \\begin{array}{r} 

1099 \\alpha+\\beta+\\gamma=0 \\\\ 

1100 \\beta h_{d}-\\gamma h_{s}=1 \\\\ 

1101 \\beta h_{d}^{2}+\\gamma h_{s}^{2}=0 

1102 \\end{array} 

1103 \\right. 

1104 

1105 The resulting approximation of :math:`f_{i}^{(1)}` is the following: 

1106 

1107 .. math:: 

1108 

1109 \\hat f_{i}^{(1)} = 

1110 \\frac{ 

1111 h_{s}^{2}f\\left(x_{i} + h_{d}\\right) 

1112 + \\left(h_{d}^{2} - h_{s}^{2}\\right)f\\left(x_{i}\\right) 

1113 - h_{d}^{2}f\\left(x_{i}-h_{s}\\right)} 

1114 { h_{s}h_{d}\\left(h_{d} + h_{s}\\right)} 

1115 + \\mathcal{O}\\left(\\frac{h_{d}h_{s}^{2} 

1116 + h_{s}h_{d}^{2}}{h_{d} 

1117 + h_{s}}\\right) 

1118 

1119 It is worth noting that if :math:`h_{s}=h_{d}` 

1120 (i.e., data are evenly spaced) 

1121 we find the standard second order approximation: 

1122 

1123 .. math:: 

1124 

1125 \\hat f_{i}^{(1)}= 

1126 \\frac{f\\left(x_{i+1}\\right) - f\\left(x_{i-1}\\right)}{2h} 

1127 + \\mathcal{O}\\left(h^{2}\\right) 

1128 

1129 With a similar procedure the forward/backward approximations used for 

1130 boundaries can be derived. 

1131 

1132 References 

1133 ---------- 

1134 .. [1] Quarteroni A., Sacco R., Saleri F. (2007) Numerical Mathematics 

1135 (Texts in Applied Mathematics). New York: Springer. 

1136 .. [2] Durran D. R. (1999) Numerical Methods for Wave Equations 

1137 in Geophysical Fluid Dynamics. New York: Springer. 

1138 .. [3] Fornberg B. (1988) Generation of Finite Difference Formulas on 

1139 Arbitrarily Spaced Grids, 

1140 Mathematics of Computation 51, no. 184 : 699-706. 

1141 `PDF <http://www.ams.org/journals/mcom/1988-51-184/ 

1142 S0025-5718-1988-0935077-0/S0025-5718-1988-0935077-0.pdf>`_. 

1143 """ 

1144 f = np.asanyarray(f) 

1145 N = f.ndim # number of dimensions 

1146 

1147 if axis is None: 

1148 axes = tuple(range(N)) 

1149 else: 

1150 axes = _nx.normalize_axis_tuple(axis, N) 

1151 

1152 len_axes = len(axes) 

1153 n = len(varargs) 

1154 if n == 0: 

1155 # no spacing argument - use 1 in all axes 

1156 dx = [1.0] * len_axes 

1157 elif n == 1 and np.ndim(varargs[0]) == 0: 

1158 # single scalar for all axes 

1159 dx = varargs * len_axes 

1160 elif n == len_axes: 

1161 # scalar or 1d array for each axis 

1162 dx = list(varargs) 

1163 for i, distances in enumerate(dx): 

1164 distances = np.asanyarray(distances) 

1165 if distances.ndim == 0: 

1166 continue 

1167 elif distances.ndim != 1: 

1168 raise ValueError("distances must be either scalars or 1d") 

1169 if len(distances) != f.shape[axes[i]]: 

1170 raise ValueError("when 1d, distances must match " 

1171 "the length of the corresponding dimension") 

1172 if np.issubdtype(distances.dtype, np.integer): 

1173 # Convert numpy integer types to float64 to avoid modular 

1174 # arithmetic in np.diff(distances). 

1175 distances = distances.astype(np.float64) 

1176 diffx = np.diff(distances) 

1177 # if distances are constant reduce to the scalar case 

1178 # since it brings a consistent speedup 

1179 if (diffx == diffx[0]).all(): 

1180 diffx = diffx[0] 

1181 dx[i] = diffx 

1182 else: 

1183 raise TypeError("invalid number of arguments") 

1184 

1185 if edge_order > 2: 

1186 raise ValueError("'edge_order' greater than 2 not supported") 

1187 

1188 # use central differences on interior and one-sided differences on the 

1189 # endpoints. This preserves second order-accuracy over the full domain. 

1190 

1191 outvals = [] 

1192 

1193 # create slice objects --- initially all are [:, :, ..., :] 

1194 slice1 = [slice(None)]*N 

1195 slice2 = [slice(None)]*N 

1196 slice3 = [slice(None)]*N 

1197 slice4 = [slice(None)]*N 

1198 

1199 otype = f.dtype 

1200 if otype.type is np.datetime64: 

1201 # the timedelta dtype with the same unit information 

1202 otype = np.dtype(otype.name.replace('datetime', 'timedelta')) 

1203 # view as timedelta to allow addition 

1204 f = f.view(otype) 

1205 elif otype.type is np.timedelta64: 

1206 pass 

1207 elif np.issubdtype(otype, np.inexact): 

1208 pass 

1209 else: 

1210 # All other types convert to floating point. 

1211 # First check if f is a numpy integer type; if so, convert f to float64 

1212 # to avoid modular arithmetic when computing the changes in f. 

1213 if np.issubdtype(otype, np.integer): 

1214 f = f.astype(np.float64) 

1215 otype = np.float64 

1216 

1217 for axis, ax_dx in zip(axes, dx): 

1218 if f.shape[axis] < edge_order + 1: 

1219 raise ValueError( 

1220 "Shape of array too small to calculate a numerical gradient, " 

1221 "at least (edge_order + 1) elements are required.") 

1222 # result allocation 

1223 out = np.empty_like(f, dtype=otype) 

1224 

1225 # spacing for the current axis 

1226 uniform_spacing = np.ndim(ax_dx) == 0 

1227 

1228 # Numerical differentiation: 2nd order interior 

1229 slice1[axis] = slice(1, -1) 

1230 slice2[axis] = slice(None, -2) 

1231 slice3[axis] = slice(1, -1) 

1232 slice4[axis] = slice(2, None) 

1233 

1234 if uniform_spacing: 

1235 out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2. * ax_dx) 

1236 else: 

1237 dx1 = ax_dx[0:-1] 

1238 dx2 = ax_dx[1:] 

1239 a = -(dx2)/(dx1 * (dx1 + dx2)) 

1240 b = (dx2 - dx1) / (dx1 * dx2) 

1241 c = dx1 / (dx2 * (dx1 + dx2)) 

1242 # fix the shape for broadcasting 

1243 shape = np.ones(N, dtype=int) 

1244 shape[axis] = -1 

1245 a.shape = b.shape = c.shape = shape 

1246 # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:] 

1247 out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] 

1248 

1249 # Numerical differentiation: 1st order edges 

1250 if edge_order == 1: 

1251 slice1[axis] = 0 

1252 slice2[axis] = 1 

1253 slice3[axis] = 0 

1254 dx_0 = ax_dx if uniform_spacing else ax_dx[0] 

1255 # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0]) 

1256 out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_0 

1257 

1258 slice1[axis] = -1 

1259 slice2[axis] = -1 

1260 slice3[axis] = -2 

1261 dx_n = ax_dx if uniform_spacing else ax_dx[-1] 

1262 # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2]) 

1263 out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_n 

1264 

1265 # Numerical differentiation: 2nd order edges 

1266 else: 

1267 slice1[axis] = 0 

1268 slice2[axis] = 0 

1269 slice3[axis] = 1 

1270 slice4[axis] = 2 

1271 if uniform_spacing: 

1272 a = -1.5 / ax_dx 

1273 b = 2. / ax_dx 

1274 c = -0.5 / ax_dx 

1275 else: 

1276 dx1 = ax_dx[0] 

1277 dx2 = ax_dx[1] 

1278 a = -(2. * dx1 + dx2)/(dx1 * (dx1 + dx2)) 

1279 b = (dx1 + dx2) / (dx1 * dx2) 

1280 c = - dx1 / (dx2 * (dx1 + dx2)) 

1281 # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2] 

1282 out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] 

1283 

1284 slice1[axis] = -1 

1285 slice2[axis] = -3 

1286 slice3[axis] = -2 

1287 slice4[axis] = -1 

1288 if uniform_spacing: 

1289 a = 0.5 / ax_dx 

1290 b = -2. / ax_dx 

1291 c = 1.5 / ax_dx 

1292 else: 

1293 dx1 = ax_dx[-2] 

1294 dx2 = ax_dx[-1] 

1295 a = (dx2) / (dx1 * (dx1 + dx2)) 

1296 b = - (dx2 + dx1) / (dx1 * dx2) 

1297 c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2)) 

1298 # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1] 

1299 out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)] 

1300 

1301 outvals.append(out) 

1302 

1303 # reset the slice object in this dimension to ":" 

1304 slice1[axis] = slice(None) 

1305 slice2[axis] = slice(None) 

1306 slice3[axis] = slice(None) 

1307 slice4[axis] = slice(None) 

1308 

1309 if len_axes == 1: 

1310 return outvals[0] 

1311 else: 

1312 return outvals 

1313 

1314 

1315def _diff_dispatcher(a, n=None, axis=None, prepend=None, append=None): 

1316 return (a, prepend, append) 

1317 

1318 

1319@array_function_dispatch(_diff_dispatcher) 

1320def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue): 

1321 """ 

1322 Calculate the n-th discrete difference along the given axis. 

1323 

1324 The first difference is given by ``out[i] = a[i+1] - a[i]`` along 

1325 the given axis, higher differences are calculated by using `diff` 

1326 recursively. 

1327 

1328 Parameters 

1329 ---------- 

1330 a : array_like 

1331 Input array 

1332 n : int, optional 

1333 The number of times values are differenced. If zero, the input 

1334 is returned as-is. 

1335 axis : int, optional 

1336 The axis along which the difference is taken, default is the 

1337 last axis. 

1338 prepend, append : array_like, optional 

1339 Values to prepend or append to `a` along axis prior to 

1340 performing the difference. Scalar values are expanded to 

1341 arrays with length 1 in the direction of axis and the shape 

1342 of the input array in along all other axes. Otherwise the 

1343 dimension and shape must match `a` except along axis. 

1344 

1345 .. versionadded:: 1.16.0 

1346 

1347 Returns 

1348 ------- 

1349 diff : ndarray 

1350 The n-th differences. The shape of the output is the same as `a` 

1351 except along `axis` where the dimension is smaller by `n`. The 

1352 type of the output is the same as the type of the difference 

1353 between any two elements of `a`. This is the same as the type of 

1354 `a` in most cases. A notable exception is `datetime64`, which 

1355 results in a `timedelta64` output array. 

1356 

1357 See Also 

1358 -------- 

1359 gradient, ediff1d, cumsum 

1360 

1361 Notes 

1362 ----- 

1363 Type is preserved for boolean arrays, so the result will contain 

1364 `False` when consecutive elements are the same and `True` when they 

1365 differ. 

1366 

1367 For unsigned integer arrays, the results will also be unsigned. This 

1368 should not be surprising, as the result is consistent with 

1369 calculating the difference directly: 

1370 

1371 >>> u8_arr = np.array([1, 0], dtype=np.uint8) 

1372 >>> np.diff(u8_arr) 

1373 array([255], dtype=uint8) 

1374 >>> u8_arr[1,...] - u8_arr[0,...] 

1375 255 

1376 

1377 If this is not desirable, then the array should be cast to a larger 

1378 integer type first: 

1379 

1380 >>> i16_arr = u8_arr.astype(np.int16) 

1381 >>> np.diff(i16_arr) 

1382 array([-1], dtype=int16) 

1383 

1384 Examples 

1385 -------- 

1386 >>> x = np.array([1, 2, 4, 7, 0]) 

1387 >>> np.diff(x) 

1388 array([ 1, 2, 3, -7]) 

1389 >>> np.diff(x, n=2) 

1390 array([ 1, 1, -10]) 

1391 

1392 >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]]) 

1393 >>> np.diff(x) 

1394 array([[2, 3, 4], 

1395 [5, 1, 2]]) 

1396 >>> np.diff(x, axis=0) 

1397 array([[-1, 2, 0, -2]]) 

1398 

1399 >>> x = np.arange('1066-10-13', '1066-10-16', dtype=np.datetime64) 

1400 >>> np.diff(x) 

1401 array([1, 1], dtype='timedelta64[D]') 

1402 

1403 """ 

1404 if n == 0: 

1405 return a 

1406 if n < 0: 

1407 raise ValueError( 

1408 "order must be non-negative but got " + repr(n)) 

1409 

1410 a = asanyarray(a) 

1411 nd = a.ndim 

1412 if nd == 0: 

1413 raise ValueError("diff requires input that is at least one dimensional") 

1414 axis = normalize_axis_index(axis, nd) 

1415 

1416 combined = [] 

1417 if prepend is not np._NoValue: 

1418 prepend = np.asanyarray(prepend) 

1419 if prepend.ndim == 0: 

1420 shape = list(a.shape) 

1421 shape[axis] = 1 

1422 prepend = np.broadcast_to(prepend, tuple(shape)) 

1423 combined.append(prepend) 

1424 

1425 combined.append(a) 

1426 

1427 if append is not np._NoValue: 

1428 append = np.asanyarray(append) 

1429 if append.ndim == 0: 

1430 shape = list(a.shape) 

1431 shape[axis] = 1 

1432 append = np.broadcast_to(append, tuple(shape)) 

1433 combined.append(append) 

1434 

1435 if len(combined) > 1: 

1436 a = np.concatenate(combined, axis) 

1437 

1438 slice1 = [slice(None)] * nd 

1439 slice2 = [slice(None)] * nd 

1440 slice1[axis] = slice(1, None) 

1441 slice2[axis] = slice(None, -1) 

1442 slice1 = tuple(slice1) 

1443 slice2 = tuple(slice2) 

1444 

1445 op = not_equal if a.dtype == np.bool_ else subtract 

1446 for _ in range(n): 

1447 a = op(a[slice1], a[slice2]) 

1448 

1449 return a 

1450 

1451 

1452def _interp_dispatcher(x, xp, fp, left=None, right=None, period=None): 

1453 return (x, xp, fp) 

1454 

1455 

1456@array_function_dispatch(_interp_dispatcher) 

1457def interp(x, xp, fp, left=None, right=None, period=None): 

1458 """ 

1459 One-dimensional linear interpolation for monotonically increasing sample points. 

1460 

1461 Returns the one-dimensional piecewise linear interpolant to a function 

1462 with given discrete data points (`xp`, `fp`), evaluated at `x`. 

1463 

1464 Parameters 

1465 ---------- 

1466 x : array_like 

1467 The x-coordinates at which to evaluate the interpolated values. 

1468 

1469 xp : 1-D sequence of floats 

1470 The x-coordinates of the data points, must be increasing if argument 

1471 `period` is not specified. Otherwise, `xp` is internally sorted after 

1472 normalizing the periodic boundaries with ``xp = xp % period``. 

1473 

1474 fp : 1-D sequence of float or complex 

1475 The y-coordinates of the data points, same length as `xp`. 

1476 

1477 left : optional float or complex corresponding to fp 

1478 Value to return for `x < xp[0]`, default is `fp[0]`. 

1479 

1480 right : optional float or complex corresponding to fp 

1481 Value to return for `x > xp[-1]`, default is `fp[-1]`. 

1482 

1483 period : None or float, optional 

1484 A period for the x-coordinates. This parameter allows the proper 

1485 interpolation of angular x-coordinates. Parameters `left` and `right` 

1486 are ignored if `period` is specified. 

1487 

1488 .. versionadded:: 1.10.0 

1489 

1490 Returns 

1491 ------- 

1492 y : float or complex (corresponding to fp) or ndarray 

1493 The interpolated values, same shape as `x`. 

1494 

1495 Raises 

1496 ------ 

1497 ValueError 

1498 If `xp` and `fp` have different length 

1499 If `xp` or `fp` are not 1-D sequences 

1500 If `period == 0` 

1501 

1502 See Also 

1503 -------- 

1504 scipy.interpolate 

1505 

1506 Warnings 

1507 -------- 

1508 The x-coordinate sequence is expected to be increasing, but this is not 

1509 explicitly enforced. However, if the sequence `xp` is non-increasing, 

1510 interpolation results are meaningless. 

1511 

1512 Note that, since NaN is unsortable, `xp` also cannot contain NaNs. 

1513 

1514 A simple check for `xp` being strictly increasing is:: 

1515 

1516 np.all(np.diff(xp) > 0) 

1517 

1518 Examples 

1519 -------- 

1520 >>> xp = [1, 2, 3] 

1521 >>> fp = [3, 2, 0] 

1522 >>> np.interp(2.5, xp, fp) 

1523 1.0 

1524 >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp) 

1525 array([3. , 3. , 2.5 , 0.56, 0. ]) 

1526 >>> UNDEF = -99.0 

1527 >>> np.interp(3.14, xp, fp, right=UNDEF) 

1528 -99.0 

1529 

1530 Plot an interpolant to the sine function: 

1531 

1532 >>> x = np.linspace(0, 2*np.pi, 10) 

1533 >>> y = np.sin(x) 

1534 >>> xvals = np.linspace(0, 2*np.pi, 50) 

1535 >>> yinterp = np.interp(xvals, x, y) 

1536 >>> import matplotlib.pyplot as plt 

1537 >>> plt.plot(x, y, 'o') 

1538 [<matplotlib.lines.Line2D object at 0x...>] 

1539 >>> plt.plot(xvals, yinterp, '-x') 

1540 [<matplotlib.lines.Line2D object at 0x...>] 

1541 >>> plt.show() 

1542 

1543 Interpolation with periodic x-coordinates: 

1544 

1545 >>> x = [-180, -170, -185, 185, -10, -5, 0, 365] 

1546 >>> xp = [190, -190, 350, -350] 

1547 >>> fp = [5, 10, 3, 4] 

1548 >>> np.interp(x, xp, fp, period=360) 

1549 array([7.5 , 5. , 8.75, 6.25, 3. , 3.25, 3.5 , 3.75]) 

1550 

1551 Complex interpolation: 

1552 

1553 >>> x = [1.5, 4.0] 

1554 >>> xp = [2,3,5] 

1555 >>> fp = [1.0j, 0, 2+3j] 

1556 >>> np.interp(x, xp, fp) 

1557 array([0.+1.j , 1.+1.5j]) 

1558 

1559 """ 

1560 

1561 fp = np.asarray(fp) 

1562 

1563 if np.iscomplexobj(fp): 

1564 interp_func = compiled_interp_complex 

1565 input_dtype = np.complex128 

1566 else: 

1567 interp_func = compiled_interp 

1568 input_dtype = np.float64 

1569 

1570 if period is not None: 

1571 if period == 0: 

1572 raise ValueError("period must be a non-zero value") 

1573 period = abs(period) 

1574 left = None 

1575 right = None 

1576 

1577 x = np.asarray(x, dtype=np.float64) 

1578 xp = np.asarray(xp, dtype=np.float64) 

1579 fp = np.asarray(fp, dtype=input_dtype) 

1580 

1581 if xp.ndim != 1 or fp.ndim != 1: 

1582 raise ValueError("Data points must be 1-D sequences") 

1583 if xp.shape[0] != fp.shape[0]: 

1584 raise ValueError("fp and xp are not of the same length") 

1585 # normalizing periodic boundaries 

1586 x = x % period 

1587 xp = xp % period 

1588 asort_xp = np.argsort(xp) 

1589 xp = xp[asort_xp] 

1590 fp = fp[asort_xp] 

1591 xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period)) 

1592 fp = np.concatenate((fp[-1:], fp, fp[0:1])) 

1593 

1594 return interp_func(x, xp, fp, left, right) 

1595 

1596 

1597def _angle_dispatcher(z, deg=None): 

1598 return (z,) 

1599 

1600 

1601@array_function_dispatch(_angle_dispatcher) 

1602def angle(z, deg=False): 

1603 """ 

1604 Return the angle of the complex argument. 

1605 

1606 Parameters 

1607 ---------- 

1608 z : array_like 

1609 A complex number or sequence of complex numbers. 

1610 deg : bool, optional 

1611 Return angle in degrees if True, radians if False (default). 

1612 

1613 Returns 

1614 ------- 

1615 angle : ndarray or scalar 

1616 The counterclockwise angle from the positive real axis on the complex 

1617 plane in the range ``(-pi, pi]``, with dtype as numpy.float64. 

1618 

1619 .. versionchanged:: 1.16.0 

1620 This function works on subclasses of ndarray like `ma.array`. 

1621 

1622 See Also 

1623 -------- 

1624 arctan2 

1625 absolute 

1626 

1627 Notes 

1628 ----- 

1629 Although the angle of the complex number 0 is undefined, ``numpy.angle(0)`` 

1630 returns the value 0. 

1631 

1632 Examples 

1633 -------- 

1634 >>> np.angle([1.0, 1.0j, 1+1j]) # in radians 

1635 array([ 0. , 1.57079633, 0.78539816]) # may vary 

1636 >>> np.angle(1+1j, deg=True) # in degrees 

1637 45.0 

1638 

1639 """ 

1640 z = asanyarray(z) 

1641 if issubclass(z.dtype.type, _nx.complexfloating): 

1642 zimag = z.imag 

1643 zreal = z.real 

1644 else: 

1645 zimag = 0 

1646 zreal = z 

1647 

1648 a = arctan2(zimag, zreal) 

1649 if deg: 

1650 a *= 180/pi 

1651 return a 

1652 

1653 

1654def _unwrap_dispatcher(p, discont=None, axis=None, *, period=None): 

1655 return (p,) 

1656 

1657 

1658@array_function_dispatch(_unwrap_dispatcher) 

1659def unwrap(p, discont=None, axis=-1, *, period=2*pi): 

1660 r""" 

1661 Unwrap by taking the complement of large deltas with respect to the period. 

1662 

1663 This unwraps a signal `p` by changing elements which have an absolute 

1664 difference from their predecessor of more than ``max(discont, period/2)`` 

1665 to their `period`-complementary values. 

1666 

1667 For the default case where `period` is :math:`2\pi` and `discont` is 

1668 :math:`\pi`, this unwraps a radian phase `p` such that adjacent differences 

1669 are never greater than :math:`\pi` by adding :math:`2k\pi` for some 

1670 integer :math:`k`. 

1671 

1672 Parameters 

1673 ---------- 

1674 p : array_like 

1675 Input array. 

1676 discont : float, optional 

1677 Maximum discontinuity between values, default is ``period/2``. 

1678 Values below ``period/2`` are treated as if they were ``period/2``. 

1679 To have an effect different from the default, `discont` should be 

1680 larger than ``period/2``. 

1681 axis : int, optional 

1682 Axis along which unwrap will operate, default is the last axis. 

1683 period : float, optional 

1684 Size of the range over which the input wraps. By default, it is 

1685 ``2 pi``. 

1686 

1687 .. versionadded:: 1.21.0 

1688 

1689 Returns 

1690 ------- 

1691 out : ndarray 

1692 Output array. 

1693 

1694 See Also 

1695 -------- 

1696 rad2deg, deg2rad 

1697 

1698 Notes 

1699 ----- 

1700 If the discontinuity in `p` is smaller than ``period/2``, 

1701 but larger than `discont`, no unwrapping is done because taking 

1702 the complement would only make the discontinuity larger. 

1703 

1704 Examples 

1705 -------- 

1706 >>> phase = np.linspace(0, np.pi, num=5) 

1707 >>> phase[3:] += np.pi 

1708 >>> phase 

1709 array([ 0. , 0.78539816, 1.57079633, 5.49778714, 6.28318531]) # may vary 

1710 >>> np.unwrap(phase) 

1711 array([ 0. , 0.78539816, 1.57079633, -0.78539816, 0. ]) # may vary 

1712 >>> np.unwrap([0, 1, 2, -1, 0], period=4) 

1713 array([0, 1, 2, 3, 4]) 

1714 >>> np.unwrap([ 1, 2, 3, 4, 5, 6, 1, 2, 3], period=6) 

1715 array([1, 2, 3, 4, 5, 6, 7, 8, 9]) 

1716 >>> np.unwrap([2, 3, 4, 5, 2, 3, 4, 5], period=4) 

1717 array([2, 3, 4, 5, 6, 7, 8, 9]) 

1718 >>> phase_deg = np.mod(np.linspace(0 ,720, 19), 360) - 180 

1719 >>> np.unwrap(phase_deg, period=360) 

1720 array([-180., -140., -100., -60., -20., 20., 60., 100., 140., 

1721 180., 220., 260., 300., 340., 380., 420., 460., 500., 

1722 540.]) 

1723 """ 

1724 p = asarray(p) 

1725 nd = p.ndim 

1726 dd = diff(p, axis=axis) 

1727 if discont is None: 

1728 discont = period/2 

1729 slice1 = [slice(None, None)]*nd # full slices 

1730 slice1[axis] = slice(1, None) 

1731 slice1 = tuple(slice1) 

1732 dtype = np.result_type(dd, period) 

1733 if _nx.issubdtype(dtype, _nx.integer): 

1734 interval_high, rem = divmod(period, 2) 

1735 boundary_ambiguous = rem == 0 

1736 else: 

1737 interval_high = period / 2 

1738 boundary_ambiguous = True 

1739 interval_low = -interval_high 

1740 ddmod = mod(dd - interval_low, period) + interval_low 

1741 if boundary_ambiguous: 

1742 # for `mask = (abs(dd) == period/2)`, the above line made 

1743 # `ddmod[mask] == -period/2`. correct these such that 

1744 # `ddmod[mask] == sign(dd[mask])*period/2`. 

1745 _nx.copyto(ddmod, interval_high, 

1746 where=(ddmod == interval_low) & (dd > 0)) 

1747 ph_correct = ddmod - dd 

1748 _nx.copyto(ph_correct, 0, where=abs(dd) < discont) 

1749 up = array(p, copy=True, dtype=dtype) 

1750 up[slice1] = p[slice1] + ph_correct.cumsum(axis) 

1751 return up 

1752 

1753 

1754def _sort_complex(a): 

1755 return (a,) 

1756 

1757 

1758@array_function_dispatch(_sort_complex) 

1759def sort_complex(a): 

1760 """ 

1761 Sort a complex array using the real part first, then the imaginary part. 

1762 

1763 Parameters 

1764 ---------- 

1765 a : array_like 

1766 Input array 

1767 

1768 Returns 

1769 ------- 

1770 out : complex ndarray 

1771 Always returns a sorted complex array. 

1772 

1773 Examples 

1774 -------- 

1775 >>> np.sort_complex([5, 3, 6, 2, 1]) 

1776 array([1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j]) 

1777 

1778 >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j]) 

1779 array([1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j]) 

1780 

1781 """ 

1782 b = array(a, copy=True) 

1783 b.sort() 

1784 if not issubclass(b.dtype.type, _nx.complexfloating): 

1785 if b.dtype.char in 'bhBH': 

1786 return b.astype('F') 

1787 elif b.dtype.char == 'g': 

1788 return b.astype('G') 

1789 else: 

1790 return b.astype('D') 

1791 else: 

1792 return b 

1793 

1794 

1795def _trim_zeros(filt, trim=None): 

1796 return (filt,) 

1797 

1798 

1799@array_function_dispatch(_trim_zeros) 

1800def trim_zeros(filt, trim='fb'): 

1801 """ 

1802 Trim the leading and/or trailing zeros from a 1-D array or sequence. 

1803 

1804 Parameters 

1805 ---------- 

1806 filt : 1-D array or sequence 

1807 Input array. 

1808 trim : str, optional 

1809 A string with 'f' representing trim from front and 'b' to trim from 

1810 back. Default is 'fb', trim zeros from both front and back of the 

1811 array. 

1812 

1813 Returns 

1814 ------- 

1815 trimmed : 1-D array or sequence 

1816 The result of trimming the input. The input data type is preserved. 

1817 

1818 Examples 

1819 -------- 

1820 >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0)) 

1821 >>> np.trim_zeros(a) 

1822 array([1, 2, 3, 0, 2, 1]) 

1823 

1824 >>> np.trim_zeros(a, 'b') 

1825 array([0, 0, 0, ..., 0, 2, 1]) 

1826 

1827 The input data type is preserved, list/tuple in means list/tuple out. 

1828 

1829 >>> np.trim_zeros([0, 1, 2, 0]) 

1830 [1, 2] 

1831 

1832 """ 

1833 

1834 first = 0 

1835 trim = trim.upper() 

1836 if 'F' in trim: 

1837 for i in filt: 

1838 if i != 0.: 

1839 break 

1840 else: 

1841 first = first + 1 

1842 last = len(filt) 

1843 if 'B' in trim: 

1844 for i in filt[::-1]: 

1845 if i != 0.: 

1846 break 

1847 else: 

1848 last = last - 1 

1849 return filt[first:last] 

1850 

1851 

1852def _extract_dispatcher(condition, arr): 

1853 return (condition, arr) 

1854 

1855 

1856@array_function_dispatch(_extract_dispatcher) 

1857def extract(condition, arr): 

1858 """ 

1859 Return the elements of an array that satisfy some condition. 

1860 

1861 This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If 

1862 `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``. 

1863 

1864 Note that `place` does the exact opposite of `extract`. 

1865 

1866 Parameters 

1867 ---------- 

1868 condition : array_like 

1869 An array whose nonzero or True entries indicate the elements of `arr` 

1870 to extract. 

1871 arr : array_like 

1872 Input array of the same size as `condition`. 

1873 

1874 Returns 

1875 ------- 

1876 extract : ndarray 

1877 Rank 1 array of values from `arr` where `condition` is True. 

1878 

1879 See Also 

1880 -------- 

1881 take, put, copyto, compress, place 

1882 

1883 Examples 

1884 -------- 

1885 >>> arr = np.arange(12).reshape((3, 4)) 

1886 >>> arr 

1887 array([[ 0, 1, 2, 3], 

1888 [ 4, 5, 6, 7], 

1889 [ 8, 9, 10, 11]]) 

1890 >>> condition = np.mod(arr, 3)==0 

1891 >>> condition 

1892 array([[ True, False, False, True], 

1893 [False, False, True, False], 

1894 [False, True, False, False]]) 

1895 >>> np.extract(condition, arr) 

1896 array([0, 3, 6, 9]) 

1897 

1898 

1899 If `condition` is boolean: 

1900 

1901 >>> arr[condition] 

1902 array([0, 3, 6, 9]) 

1903 

1904 """ 

1905 return _nx.take(ravel(arr), nonzero(ravel(condition))[0]) 

1906 

1907 

1908def _place_dispatcher(arr, mask, vals): 

1909 return (arr, mask, vals) 

1910 

1911 

1912@array_function_dispatch(_place_dispatcher) 

1913def place(arr, mask, vals): 

1914 """ 

1915 Change elements of an array based on conditional and input values. 

1916 

1917 Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that 

1918 `place` uses the first N elements of `vals`, where N is the number of 

1919 True values in `mask`, while `copyto` uses the elements where `mask` 

1920 is True. 

1921 

1922 Note that `extract` does the exact opposite of `place`. 

1923 

1924 Parameters 

1925 ---------- 

1926 arr : ndarray 

1927 Array to put data into. 

1928 mask : array_like 

1929 Boolean mask array. Must have the same size as `a`. 

1930 vals : 1-D sequence 

1931 Values to put into `a`. Only the first N elements are used, where 

1932 N is the number of True values in `mask`. If `vals` is smaller 

1933 than N, it will be repeated, and if elements of `a` are to be masked, 

1934 this sequence must be non-empty. 

1935 

1936 See Also 

1937 -------- 

1938 copyto, put, take, extract 

1939 

1940 Examples 

1941 -------- 

1942 >>> arr = np.arange(6).reshape(2, 3) 

1943 >>> np.place(arr, arr>2, [44, 55]) 

1944 >>> arr 

1945 array([[ 0, 1, 2], 

1946 [44, 55, 44]]) 

1947 

1948 """ 

1949 if not isinstance(arr, np.ndarray): 

1950 raise TypeError("argument 1 must be numpy.ndarray, " 

1951 "not {name}".format(name=type(arr).__name__)) 

1952 

1953 return _insert(arr, mask, vals) 

1954 

1955 

1956def disp(mesg, device=None, linefeed=True): 

1957 """ 

1958 Display a message on a device. 

1959 

1960 Parameters 

1961 ---------- 

1962 mesg : str 

1963 Message to display. 

1964 device : object 

1965 Device to write message. If None, defaults to ``sys.stdout`` which is 

1966 very similar to ``print``. `device` needs to have ``write()`` and 

1967 ``flush()`` methods. 

1968 linefeed : bool, optional 

1969 Option whether to print a line feed or not. Defaults to True. 

1970 

1971 Raises 

1972 ------ 

1973 AttributeError 

1974 If `device` does not have a ``write()`` or ``flush()`` method. 

1975 

1976 Examples 

1977 -------- 

1978 Besides ``sys.stdout``, a file-like object can also be used as it has 

1979 both required methods: 

1980 

1981 >>> from io import StringIO 

1982 >>> buf = StringIO() 

1983 >>> np.disp(u'"Display" in a file', device=buf) 

1984 >>> buf.getvalue() 

1985 '"Display" in a file\\n' 

1986 

1987 """ 

1988 if device is None: 

1989 device = sys.stdout 

1990 if linefeed: 

1991 device.write('%s\n' % mesg) 

1992 else: 

1993 device.write('%s' % mesg) 

1994 device.flush() 

1995 return 

1996 

1997 

1998# See https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html 

1999_DIMENSION_NAME = r'\w+' 

2000_CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME) 

2001_ARGUMENT = r'\({}\)'.format(_CORE_DIMENSION_LIST) 

2002_ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_ARGUMENT) 

2003_SIGNATURE = '^{0:}->{0:}$'.format(_ARGUMENT_LIST) 

2004 

2005 

2006def _parse_gufunc_signature(signature): 

2007 """ 

2008 Parse string signatures for a generalized universal function. 

2009 

2010 Arguments 

2011 --------- 

2012 signature : string 

2013 Generalized universal function signature, e.g., ``(m,n),(n,p)->(m,p)`` 

2014 for ``np.matmul``. 

2015 

2016 Returns 

2017 ------- 

2018 Tuple of input and output core dimensions parsed from the signature, each 

2019 of the form List[Tuple[str, ...]]. 

2020 """ 

2021 signature = re.sub(r'\s+', '', signature) 

2022 

2023 if not re.match(_SIGNATURE, signature): 

2024 raise ValueError( 

2025 'not a valid gufunc signature: {}'.format(signature)) 

2026 return tuple([tuple(re.findall(_DIMENSION_NAME, arg)) 

2027 for arg in re.findall(_ARGUMENT, arg_list)] 

2028 for arg_list in signature.split('->')) 

2029 

2030 

2031def _update_dim_sizes(dim_sizes, arg, core_dims): 

2032 """ 

2033 Incrementally check and update core dimension sizes for a single argument. 

2034 

2035 Arguments 

2036 --------- 

2037 dim_sizes : Dict[str, int] 

2038 Sizes of existing core dimensions. Will be updated in-place. 

2039 arg : ndarray 

2040 Argument to examine. 

2041 core_dims : Tuple[str, ...] 

2042 Core dimensions for this argument. 

2043 """ 

2044 if not core_dims: 

2045 return 

2046 

2047 num_core_dims = len(core_dims) 

2048 if arg.ndim < num_core_dims: 

2049 raise ValueError( 

2050 '%d-dimensional argument does not have enough ' 

2051 'dimensions for all core dimensions %r' 

2052 % (arg.ndim, core_dims)) 

2053 

2054 core_shape = arg.shape[-num_core_dims:] 

2055 for dim, size in zip(core_dims, core_shape): 

2056 if dim in dim_sizes: 

2057 if size != dim_sizes[dim]: 

2058 raise ValueError( 

2059 'inconsistent size for core dimension %r: %r vs %r' 

2060 % (dim, size, dim_sizes[dim])) 

2061 else: 

2062 dim_sizes[dim] = size 

2063 

2064 

2065def _parse_input_dimensions(args, input_core_dims): 

2066 """ 

2067 Parse broadcast and core dimensions for vectorize with a signature. 

2068 

2069 Arguments 

2070 --------- 

2071 args : Tuple[ndarray, ...] 

2072 Tuple of input arguments to examine. 

2073 input_core_dims : List[Tuple[str, ...]] 

2074 List of core dimensions corresponding to each input. 

2075 

2076 Returns 

2077 ------- 

2078 broadcast_shape : Tuple[int, ...] 

2079 Common shape to broadcast all non-core dimensions to. 

2080 dim_sizes : Dict[str, int] 

2081 Common sizes for named core dimensions. 

2082 """ 

2083 broadcast_args = [] 

2084 dim_sizes = {} 

2085 for arg, core_dims in zip(args, input_core_dims): 

2086 _update_dim_sizes(dim_sizes, arg, core_dims) 

2087 ndim = arg.ndim - len(core_dims) 

2088 dummy_array = np.lib.stride_tricks.as_strided(0, arg.shape[:ndim]) 

2089 broadcast_args.append(dummy_array) 

2090 broadcast_shape = np.lib.stride_tricks._broadcast_shape(*broadcast_args) 

2091 return broadcast_shape, dim_sizes 

2092 

2093 

2094def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims): 

2095 """Helper for calculating broadcast shapes with core dimensions.""" 

2096 return [broadcast_shape + tuple(dim_sizes[dim] for dim in core_dims) 

2097 for core_dims in list_of_core_dims] 

2098 

2099 

2100def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes, 

2101 results=None): 

2102 """Helper for creating output arrays in vectorize.""" 

2103 shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims) 

2104 if dtypes is None: 

2105 dtypes = [None] * len(shapes) 

2106 if results is None: 

2107 arrays = tuple(np.empty(shape=shape, dtype=dtype) 

2108 for shape, dtype in zip(shapes, dtypes)) 

2109 else: 

2110 arrays = tuple(np.empty_like(result, shape=shape, dtype=dtype) 

2111 for result, shape, dtype 

2112 in zip(results, shapes, dtypes)) 

2113 return arrays 

2114 

2115 

2116@set_module('numpy') 

2117class vectorize: 

2118 """ 

2119 vectorize(pyfunc, otypes=None, doc=None, excluded=None, cache=False, 

2120 signature=None) 

2121 

2122 Generalized function class. 

2123 

2124 Define a vectorized function which takes a nested sequence of objects or 

2125 numpy arrays as inputs and returns a single numpy array or a tuple of numpy 

2126 arrays. The vectorized function evaluates `pyfunc` over successive tuples 

2127 of the input arrays like the python map function, except it uses the 

2128 broadcasting rules of numpy. 

2129 

2130 The data type of the output of `vectorized` is determined by calling 

2131 the function with the first element of the input. This can be avoided 

2132 by specifying the `otypes` argument. 

2133 

2134 Parameters 

2135 ---------- 

2136 pyfunc : callable 

2137 A python function or method. 

2138 otypes : str or list of dtypes, optional 

2139 The output data type. It must be specified as either a string of 

2140 typecode characters or a list of data type specifiers. There should 

2141 be one data type specifier for each output. 

2142 doc : str, optional 

2143 The docstring for the function. If None, the docstring will be the 

2144 ``pyfunc.__doc__``. 

2145 excluded : set, optional 

2146 Set of strings or integers representing the positional or keyword 

2147 arguments for which the function will not be vectorized. These will be 

2148 passed directly to `pyfunc` unmodified. 

2149 

2150 .. versionadded:: 1.7.0 

2151 

2152 cache : bool, optional 

2153 If `True`, then cache the first function call that determines the number 

2154 of outputs if `otypes` is not provided. 

2155 

2156 .. versionadded:: 1.7.0 

2157 

2158 signature : string, optional 

2159 Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for 

2160 vectorized matrix-vector multiplication. If provided, ``pyfunc`` will 

2161 be called with (and expected to return) arrays with shapes given by the 

2162 size of corresponding core dimensions. By default, ``pyfunc`` is 

2163 assumed to take scalars as input and output. 

2164 

2165 .. versionadded:: 1.12.0 

2166 

2167 Returns 

2168 ------- 

2169 vectorized : callable 

2170 Vectorized function. 

2171 

2172 See Also 

2173 -------- 

2174 frompyfunc : Takes an arbitrary Python function and returns a ufunc 

2175 

2176 Notes 

2177 ----- 

2178 The `vectorize` function is provided primarily for convenience, not for 

2179 performance. The implementation is essentially a for loop. 

2180 

2181 If `otypes` is not specified, then a call to the function with the 

2182 first argument will be used to determine the number of outputs. The 

2183 results of this call will be cached if `cache` is `True` to prevent 

2184 calling the function twice. However, to implement the cache, the 

2185 original function must be wrapped which will slow down subsequent 

2186 calls, so only do this if your function is expensive. 

2187 

2188 The new keyword argument interface and `excluded` argument support 

2189 further degrades performance. 

2190 

2191 References 

2192 ---------- 

2193 .. [1] :doc:`/reference/c-api/generalized-ufuncs` 

2194 

2195 Examples 

2196 -------- 

2197 >>> def myfunc(a, b): 

2198 ... "Return a-b if a>b, otherwise return a+b" 

2199 ... if a > b: 

2200 ... return a - b 

2201 ... else: 

2202 ... return a + b 

2203 

2204 >>> vfunc = np.vectorize(myfunc) 

2205 >>> vfunc([1, 2, 3, 4], 2) 

2206 array([3, 4, 1, 2]) 

2207 

2208 The docstring is taken from the input function to `vectorize` unless it 

2209 is specified: 

2210 

2211 >>> vfunc.__doc__ 

2212 'Return a-b if a>b, otherwise return a+b' 

2213 >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`') 

2214 >>> vfunc.__doc__ 

2215 'Vectorized `myfunc`' 

2216 

2217 The output type is determined by evaluating the first element of the input, 

2218 unless it is specified: 

2219 

2220 >>> out = vfunc([1, 2, 3, 4], 2) 

2221 >>> type(out[0]) 

2222 <class 'numpy.int64'> 

2223 >>> vfunc = np.vectorize(myfunc, otypes=[float]) 

2224 >>> out = vfunc([1, 2, 3, 4], 2) 

2225 >>> type(out[0]) 

2226 <class 'numpy.float64'> 

2227 

2228 The `excluded` argument can be used to prevent vectorizing over certain 

2229 arguments. This can be useful for array-like arguments of a fixed length 

2230 such as the coefficients for a polynomial as in `polyval`: 

2231 

2232 >>> def mypolyval(p, x): 

2233 ... _p = list(p) 

2234 ... res = _p.pop(0) 

2235 ... while _p: 

2236 ... res = res*x + _p.pop(0) 

2237 ... return res 

2238 >>> vpolyval = np.vectorize(mypolyval, excluded=['p']) 

2239 >>> vpolyval(p=[1, 2, 3], x=[0, 1]) 

2240 array([3, 6]) 

2241 

2242 Positional arguments may also be excluded by specifying their position: 

2243 

2244 >>> vpolyval.excluded.add(0) 

2245 >>> vpolyval([1, 2, 3], x=[0, 1]) 

2246 array([3, 6]) 

2247 

2248 The `signature` argument allows for vectorizing functions that act on 

2249 non-scalar arrays of fixed length. For example, you can use it for a 

2250 vectorized calculation of Pearson correlation coefficient and its p-value: 

2251 

2252 >>> import scipy.stats 

2253 >>> pearsonr = np.vectorize(scipy.stats.pearsonr, 

2254 ... signature='(n),(n)->(),()') 

2255 >>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]]) 

2256 (array([ 1., -1.]), array([ 0., 0.])) 

2257 

2258 Or for a vectorized convolution: 

2259 

2260 >>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)') 

2261 >>> convolve(np.eye(4), [1, 2, 1]) 

2262 array([[1., 2., 1., 0., 0., 0.], 

2263 [0., 1., 2., 1., 0., 0.], 

2264 [0., 0., 1., 2., 1., 0.], 

2265 [0., 0., 0., 1., 2., 1.]]) 

2266 

2267 """ 

2268 def __init__(self, pyfunc, otypes=None, doc=None, excluded=None, 

2269 cache=False, signature=None): 

2270 self.pyfunc = pyfunc 

2271 self.cache = cache 

2272 self.signature = signature 

2273 self._ufunc = {} # Caching to improve default performance 

2274 

2275 if doc is None: 

2276 self.__doc__ = pyfunc.__doc__ 

2277 else: 

2278 self.__doc__ = doc 

2279 

2280 if isinstance(otypes, str): 

2281 for char in otypes: 

2282 if char not in typecodes['All']: 

2283 raise ValueError("Invalid otype specified: %s" % (char,)) 

2284 elif iterable(otypes): 

2285 otypes = ''.join([_nx.dtype(x).char for x in otypes]) 

2286 elif otypes is not None: 

2287 raise ValueError("Invalid otype specification") 

2288 self.otypes = otypes 

2289 

2290 # Excluded variable support 

2291 if excluded is None: 

2292 excluded = set() 

2293 self.excluded = set(excluded) 

2294 

2295 if signature is not None: 

2296 self._in_and_out_core_dims = _parse_gufunc_signature(signature) 

2297 else: 

2298 self._in_and_out_core_dims = None 

2299 

2300 def __call__(self, *args, **kwargs): 

2301 """ 

2302 Return arrays with the results of `pyfunc` broadcast (vectorized) over 

2303 `args` and `kwargs` not in `excluded`. 

2304 """ 

2305 excluded = self.excluded 

2306 if not kwargs and not excluded: 

2307 func = self.pyfunc 

2308 vargs = args 

2309 else: 

2310 # The wrapper accepts only positional arguments: we use `names` and 

2311 # `inds` to mutate `the_args` and `kwargs` to pass to the original 

2312 # function. 

2313 nargs = len(args) 

2314 

2315 names = [_n for _n in kwargs if _n not in excluded] 

2316 inds = [_i for _i in range(nargs) if _i not in excluded] 

2317 the_args = list(args) 

2318 

2319 def func(*vargs): 

2320 for _n, _i in enumerate(inds): 

2321 the_args[_i] = vargs[_n] 

2322 kwargs.update(zip(names, vargs[len(inds):])) 

2323 return self.pyfunc(*the_args, **kwargs) 

2324 

2325 vargs = [args[_i] for _i in inds] 

2326 vargs.extend([kwargs[_n] for _n in names]) 

2327 

2328 return self._vectorize_call(func=func, args=vargs) 

2329 

2330 def _get_ufunc_and_otypes(self, func, args): 

2331 """Return (ufunc, otypes).""" 

2332 # frompyfunc will fail if args is empty 

2333 if not args: 

2334 raise ValueError('args can not be empty') 

2335 

2336 if self.otypes is not None: 

2337 otypes = self.otypes 

2338 

2339 # self._ufunc is a dictionary whose keys are the number of 

2340 # arguments (i.e. len(args)) and whose values are ufuncs created 

2341 # by frompyfunc. len(args) can be different for different calls if 

2342 # self.pyfunc has parameters with default values. We only use the 

2343 # cache when func is self.pyfunc, which occurs when the call uses 

2344 # only positional arguments and no arguments are excluded. 

2345 

2346 nin = len(args) 

2347 nout = len(self.otypes) 

2348 if func is not self.pyfunc or nin not in self._ufunc: 

2349 ufunc = frompyfunc(func, nin, nout) 

2350 else: 

2351 ufunc = None # We'll get it from self._ufunc 

2352 if func is self.pyfunc: 

2353 ufunc = self._ufunc.setdefault(nin, ufunc) 

2354 else: 

2355 # Get number of outputs and output types by calling the function on 

2356 # the first entries of args. We also cache the result to prevent 

2357 # the subsequent call when the ufunc is evaluated. 

2358 # Assumes that ufunc first evaluates the 0th elements in the input 

2359 # arrays (the input values are not checked to ensure this) 

2360 args = [asarray(arg) for arg in args] 

2361 if builtins.any(arg.size == 0 for arg in args): 

2362 raise ValueError('cannot call `vectorize` on size 0 inputs ' 

2363 'unless `otypes` is set') 

2364 

2365 inputs = [arg.flat[0] for arg in args] 

2366 outputs = func(*inputs) 

2367 

2368 # Performance note: profiling indicates that -- for simple 

2369 # functions at least -- this wrapping can almost double the 

2370 # execution time. 

2371 # Hence we make it optional. 

2372 if self.cache: 

2373 _cache = [outputs] 

2374 

2375 def _func(*vargs): 

2376 if _cache: 

2377 return _cache.pop() 

2378 else: 

2379 return func(*vargs) 

2380 else: 

2381 _func = func 

2382 

2383 if isinstance(outputs, tuple): 

2384 nout = len(outputs) 

2385 else: 

2386 nout = 1 

2387 outputs = (outputs,) 

2388 

2389 otypes = ''.join([asarray(outputs[_k]).dtype.char 

2390 for _k in range(nout)]) 

2391 

2392 # Performance note: profiling indicates that creating the ufunc is 

2393 # not a significant cost compared with wrapping so it seems not 

2394 # worth trying to cache this. 

2395 ufunc = frompyfunc(_func, len(args), nout) 

2396 

2397 return ufunc, otypes 

2398 

2399 def _vectorize_call(self, func, args): 

2400 """Vectorized call to `func` over positional `args`.""" 

2401 if self.signature is not None: 

2402 res = self._vectorize_call_with_signature(func, args) 

2403 elif not args: 

2404 res = func() 

2405 else: 

2406 ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args) 

2407 

2408 # Convert args to object arrays first 

2409 inputs = [asanyarray(a, dtype=object) for a in args] 

2410 

2411 outputs = ufunc(*inputs) 

2412 

2413 if ufunc.nout == 1: 

2414 res = asanyarray(outputs, dtype=otypes[0]) 

2415 else: 

2416 res = tuple([asanyarray(x, dtype=t) 

2417 for x, t in zip(outputs, otypes)]) 

2418 return res 

2419 

2420 def _vectorize_call_with_signature(self, func, args): 

2421 """Vectorized call over positional arguments with a signature.""" 

2422 input_core_dims, output_core_dims = self._in_and_out_core_dims 

2423 

2424 if len(args) != len(input_core_dims): 

2425 raise TypeError('wrong number of positional arguments: ' 

2426 'expected %r, got %r' 

2427 % (len(input_core_dims), len(args))) 

2428 args = tuple(asanyarray(arg) for arg in args) 

2429 

2430 broadcast_shape, dim_sizes = _parse_input_dimensions( 

2431 args, input_core_dims) 

2432 input_shapes = _calculate_shapes(broadcast_shape, dim_sizes, 

2433 input_core_dims) 

2434 args = [np.broadcast_to(arg, shape, subok=True) 

2435 for arg, shape in zip(args, input_shapes)] 

2436 

2437 outputs = None 

2438 otypes = self.otypes 

2439 nout = len(output_core_dims) 

2440 

2441 for index in np.ndindex(*broadcast_shape): 

2442 results = func(*(arg[index] for arg in args)) 

2443 

2444 n_results = len(results) if isinstance(results, tuple) else 1 

2445 

2446 if nout != n_results: 

2447 raise ValueError( 

2448 'wrong number of outputs from pyfunc: expected %r, got %r' 

2449 % (nout, n_results)) 

2450 

2451 if nout == 1: 

2452 results = (results,) 

2453 

2454 if outputs is None: 

2455 for result, core_dims in zip(results, output_core_dims): 

2456 _update_dim_sizes(dim_sizes, result, core_dims) 

2457 

2458 outputs = _create_arrays(broadcast_shape, dim_sizes, 

2459 output_core_dims, otypes, results) 

2460 

2461 for output, result in zip(outputs, results): 

2462 output[index] = result 

2463 

2464 if outputs is None: 

2465 # did not call the function even once 

2466 if otypes is None: 

2467 raise ValueError('cannot call `vectorize` on size 0 inputs ' 

2468 'unless `otypes` is set') 

2469 if builtins.any(dim not in dim_sizes 

2470 for dims in output_core_dims 

2471 for dim in dims): 

2472 raise ValueError('cannot call `vectorize` with a signature ' 

2473 'including new output dimensions on size 0 ' 

2474 'inputs') 

2475 outputs = _create_arrays(broadcast_shape, dim_sizes, 

2476 output_core_dims, otypes) 

2477 

2478 return outputs[0] if nout == 1 else outputs 

2479 

2480 

2481def _cov_dispatcher(m, y=None, rowvar=None, bias=None, ddof=None, 

2482 fweights=None, aweights=None, *, dtype=None): 

2483 return (m, y, fweights, aweights) 

2484 

2485 

2486@array_function_dispatch(_cov_dispatcher) 

2487def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None, 

2488 aweights=None, *, dtype=None): 

2489 """ 

2490 Estimate a covariance matrix, given data and weights. 

2491 

2492 Covariance indicates the level to which two variables vary together. 

2493 If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`, 

2494 then the covariance matrix element :math:`C_{ij}` is the covariance of 

2495 :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance 

2496 of :math:`x_i`. 

2497 

2498 See the notes for an outline of the algorithm. 

2499 

2500 Parameters 

2501 ---------- 

2502 m : array_like 

2503 A 1-D or 2-D array containing multiple variables and observations. 

2504 Each row of `m` represents a variable, and each column a single 

2505 observation of all those variables. Also see `rowvar` below. 

2506 y : array_like, optional 

2507 An additional set of variables and observations. `y` has the same form 

2508 as that of `m`. 

2509 rowvar : bool, optional 

2510 If `rowvar` is True (default), then each row represents a 

2511 variable, with observations in the columns. Otherwise, the relationship 

2512 is transposed: each column represents a variable, while the rows 

2513 contain observations. 

2514 bias : bool, optional 

2515 Default normalization (False) is by ``(N - 1)``, where ``N`` is the 

2516 number of observations given (unbiased estimate). If `bias` is True, 

2517 then normalization is by ``N``. These values can be overridden by using 

2518 the keyword ``ddof`` in numpy versions >= 1.5. 

2519 ddof : int, optional 

2520 If not ``None`` the default value implied by `bias` is overridden. 

2521 Note that ``ddof=1`` will return the unbiased estimate, even if both 

2522 `fweights` and `aweights` are specified, and ``ddof=0`` will return 

2523 the simple average. See the notes for the details. The default value 

2524 is ``None``. 

2525 

2526 .. versionadded:: 1.5 

2527 fweights : array_like, int, optional 

2528 1-D array of integer frequency weights; the number of times each 

2529 observation vector should be repeated. 

2530 

2531 .. versionadded:: 1.10 

2532 aweights : array_like, optional 

2533 1-D array of observation vector weights. These relative weights are 

2534 typically large for observations considered "important" and smaller for 

2535 observations considered less "important". If ``ddof=0`` the array of 

2536 weights can be used to assign probabilities to observation vectors. 

2537 

2538 .. versionadded:: 1.10 

2539 dtype : data-type, optional 

2540 Data-type of the result. By default, the return data-type will have 

2541 at least `numpy.float64` precision. 

2542 

2543 .. versionadded:: 1.20 

2544 

2545 Returns 

2546 ------- 

2547 out : ndarray 

2548 The covariance matrix of the variables. 

2549 

2550 See Also 

2551 -------- 

2552 corrcoef : Normalized covariance matrix 

2553 

2554 Notes 

2555 ----- 

2556 Assume that the observations are in the columns of the observation 

2557 array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The 

2558 steps to compute the weighted covariance are as follows:: 

2559 

2560 >>> m = np.arange(10, dtype=np.float64) 

2561 >>> f = np.arange(10) * 2 

2562 >>> a = np.arange(10) ** 2. 

2563 >>> ddof = 1 

2564 >>> w = f * a 

2565 >>> v1 = np.sum(w) 

2566 >>> v2 = np.sum(w * a) 

2567 >>> m -= np.sum(m * w, axis=None, keepdims=True) / v1 

2568 >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2) 

2569 

2570 Note that when ``a == 1``, the normalization factor 

2571 ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)`` 

2572 as it should. 

2573 

2574 Examples 

2575 -------- 

2576 Consider two variables, :math:`x_0` and :math:`x_1`, which 

2577 correlate perfectly, but in opposite directions: 

2578 

2579 >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T 

2580 >>> x 

2581 array([[0, 1, 2], 

2582 [2, 1, 0]]) 

2583 

2584 Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance 

2585 matrix shows this clearly: 

2586 

2587 >>> np.cov(x) 

2588 array([[ 1., -1.], 

2589 [-1., 1.]]) 

2590 

2591 Note that element :math:`C_{0,1}`, which shows the correlation between 

2592 :math:`x_0` and :math:`x_1`, is negative. 

2593 

2594 Further, note how `x` and `y` are combined: 

2595 

2596 >>> x = [-2.1, -1, 4.3] 

2597 >>> y = [3, 1.1, 0.12] 

2598 >>> X = np.stack((x, y), axis=0) 

2599 >>> np.cov(X) 

2600 array([[11.71 , -4.286 ], # may vary 

2601 [-4.286 , 2.144133]]) 

2602 >>> np.cov(x, y) 

2603 array([[11.71 , -4.286 ], # may vary 

2604 [-4.286 , 2.144133]]) 

2605 >>> np.cov(x) 

2606 array(11.71) 

2607 

2608 """ 

2609 # Check inputs 

2610 if ddof is not None and ddof != int(ddof): 

2611 raise ValueError( 

2612 "ddof must be integer") 

2613 

2614 # Handles complex arrays too 

2615 m = np.asarray(m) 

2616 if m.ndim > 2: 

2617 raise ValueError("m has more than 2 dimensions") 

2618 

2619 if y is not None: 

2620 y = np.asarray(y) 

2621 if y.ndim > 2: 

2622 raise ValueError("y has more than 2 dimensions") 

2623 

2624 if dtype is None: 

2625 if y is None: 

2626 dtype = np.result_type(m, np.float64) 

2627 else: 

2628 dtype = np.result_type(m, y, np.float64) 

2629 

2630 X = array(m, ndmin=2, dtype=dtype) 

2631 if not rowvar and X.shape[0] != 1: 

2632 X = X.T 

2633 if X.shape[0] == 0: 

2634 return np.array([]).reshape(0, 0) 

2635 if y is not None: 

2636 y = array(y, copy=False, ndmin=2, dtype=dtype) 

2637 if not rowvar and y.shape[0] != 1: 

2638 y = y.T 

2639 X = np.concatenate((X, y), axis=0) 

2640 

2641 if ddof is None: 

2642 if bias == 0: 

2643 ddof = 1 

2644 else: 

2645 ddof = 0 

2646 

2647 # Get the product of frequencies and weights 

2648 w = None 

2649 if fweights is not None: 

2650 fweights = np.asarray(fweights, dtype=float) 

2651 if not np.all(fweights == np.around(fweights)): 

2652 raise TypeError( 

2653 "fweights must be integer") 

2654 if fweights.ndim > 1: 

2655 raise RuntimeError( 

2656 "cannot handle multidimensional fweights") 

2657 if fweights.shape[0] != X.shape[1]: 

2658 raise RuntimeError( 

2659 "incompatible numbers of samples and fweights") 

2660 if any(fweights < 0): 

2661 raise ValueError( 

2662 "fweights cannot be negative") 

2663 w = fweights 

2664 if aweights is not None: 

2665 aweights = np.asarray(aweights, dtype=float) 

2666 if aweights.ndim > 1: 

2667 raise RuntimeError( 

2668 "cannot handle multidimensional aweights") 

2669 if aweights.shape[0] != X.shape[1]: 

2670 raise RuntimeError( 

2671 "incompatible numbers of samples and aweights") 

2672 if any(aweights < 0): 

2673 raise ValueError( 

2674 "aweights cannot be negative") 

2675 if w is None: 

2676 w = aweights 

2677 else: 

2678 w *= aweights 

2679 

2680 avg, w_sum = average(X, axis=1, weights=w, returned=True) 

2681 w_sum = w_sum[0] 

2682 

2683 # Determine the normalization 

2684 if w is None: 

2685 fact = X.shape[1] - ddof 

2686 elif ddof == 0: 

2687 fact = w_sum 

2688 elif aweights is None: 

2689 fact = w_sum - ddof 

2690 else: 

2691 fact = w_sum - ddof*sum(w*aweights)/w_sum 

2692 

2693 if fact <= 0: 

2694 warnings.warn("Degrees of freedom <= 0 for slice", 

2695 RuntimeWarning, stacklevel=3) 

2696 fact = 0.0 

2697 

2698 X -= avg[:, None] 

2699 if w is None: 

2700 X_T = X.T 

2701 else: 

2702 X_T = (X*w).T 

2703 c = dot(X, X_T.conj()) 

2704 c *= np.true_divide(1, fact) 

2705 return c.squeeze() 

2706 

2707 

2708def _corrcoef_dispatcher(x, y=None, rowvar=None, bias=None, ddof=None, *, 

2709 dtype=None): 

2710 return (x, y) 

2711 

2712 

2713@array_function_dispatch(_corrcoef_dispatcher) 

2714def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *, 

2715 dtype=None): 

2716 """ 

2717 Return Pearson product-moment correlation coefficients. 

2718 

2719 Please refer to the documentation for `cov` for more detail. The 

2720 relationship between the correlation coefficient matrix, `R`, and the 

2721 covariance matrix, `C`, is 

2722 

2723 .. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} C_{jj} } } 

2724 

2725 The values of `R` are between -1 and 1, inclusive. 

2726 

2727 Parameters 

2728 ---------- 

2729 x : array_like 

2730 A 1-D or 2-D array containing multiple variables and observations. 

2731 Each row of `x` represents a variable, and each column a single 

2732 observation of all those variables. Also see `rowvar` below. 

2733 y : array_like, optional 

2734 An additional set of variables and observations. `y` has the same 

2735 shape as `x`. 

2736 rowvar : bool, optional 

2737 If `rowvar` is True (default), then each row represents a 

2738 variable, with observations in the columns. Otherwise, the relationship 

2739 is transposed: each column represents a variable, while the rows 

2740 contain observations. 

2741 bias : _NoValue, optional 

2742 Has no effect, do not use. 

2743 

2744 .. deprecated:: 1.10.0 

2745 ddof : _NoValue, optional 

2746 Has no effect, do not use. 

2747 

2748 .. deprecated:: 1.10.0 

2749 dtype : data-type, optional 

2750 Data-type of the result. By default, the return data-type will have 

2751 at least `numpy.float64` precision. 

2752 

2753 .. versionadded:: 1.20 

2754 

2755 Returns 

2756 ------- 

2757 R : ndarray 

2758 The correlation coefficient matrix of the variables. 

2759 

2760 See Also 

2761 -------- 

2762 cov : Covariance matrix 

2763 

2764 Notes 

2765 ----- 

2766 Due to floating point rounding the resulting array may not be Hermitian, 

2767 the diagonal elements may not be 1, and the elements may not satisfy the 

2768 inequality abs(a) <= 1. The real and imaginary parts are clipped to the 

2769 interval [-1, 1] in an attempt to improve on that situation but is not 

2770 much help in the complex case. 

2771 

2772 This function accepts but discards arguments `bias` and `ddof`. This is 

2773 for backwards compatibility with previous versions of this function. These 

2774 arguments had no effect on the return values of the function and can be 

2775 safely ignored in this and previous versions of numpy. 

2776 

2777 Examples 

2778 -------- 

2779 In this example we generate two random arrays, ``xarr`` and ``yarr``, and 

2780 compute the row-wise and column-wise Pearson correlation coefficients, 

2781 ``R``. Since ``rowvar`` is true by default, we first find the row-wise 

2782 Pearson correlation coefficients between the variables of ``xarr``. 

2783 

2784 >>> import numpy as np 

2785 >>> rng = np.random.default_rng(seed=42) 

2786 >>> xarr = rng.random((3, 3)) 

2787 >>> xarr 

2788 array([[0.77395605, 0.43887844, 0.85859792], 

2789 [0.69736803, 0.09417735, 0.97562235], 

2790 [0.7611397 , 0.78606431, 0.12811363]]) 

2791 >>> R1 = np.corrcoef(xarr) 

2792 >>> R1 

2793 array([[ 1. , 0.99256089, -0.68080986], 

2794 [ 0.99256089, 1. , -0.76492172], 

2795 [-0.68080986, -0.76492172, 1. ]]) 

2796 

2797 If we add another set of variables and observations ``yarr``, we can 

2798 compute the row-wise Pearson correlation coefficients between the 

2799 variables in ``xarr`` and ``yarr``. 

2800 

2801 >>> yarr = rng.random((3, 3)) 

2802 >>> yarr 

2803 array([[0.45038594, 0.37079802, 0.92676499], 

2804 [0.64386512, 0.82276161, 0.4434142 ], 

2805 [0.22723872, 0.55458479, 0.06381726]]) 

2806 >>> R2 = np.corrcoef(xarr, yarr) 

2807 >>> R2 

2808 array([[ 1. , 0.99256089, -0.68080986, 0.75008178, -0.934284 , 

2809 -0.99004057], 

2810 [ 0.99256089, 1. , -0.76492172, 0.82502011, -0.97074098, 

2811 -0.99981569], 

2812 [-0.68080986, -0.76492172, 1. , -0.99507202, 0.89721355, 

2813 0.77714685], 

2814 [ 0.75008178, 0.82502011, -0.99507202, 1. , -0.93657855, 

2815 -0.83571711], 

2816 [-0.934284 , -0.97074098, 0.89721355, -0.93657855, 1. , 

2817 0.97517215], 

2818 [-0.99004057, -0.99981569, 0.77714685, -0.83571711, 0.97517215, 

2819 1. ]]) 

2820 

2821 Finally if we use the option ``rowvar=False``, the columns are now 

2822 being treated as the variables and we will find the column-wise Pearson 

2823 correlation coefficients between variables in ``xarr`` and ``yarr``. 

2824 

2825 >>> R3 = np.corrcoef(xarr, yarr, rowvar=False) 

2826 >>> R3 

2827 array([[ 1. , 0.77598074, -0.47458546, -0.75078643, -0.9665554 , 

2828 0.22423734], 

2829 [ 0.77598074, 1. , -0.92346708, -0.99923895, -0.58826587, 

2830 -0.44069024], 

2831 [-0.47458546, -0.92346708, 1. , 0.93773029, 0.23297648, 

2832 0.75137473], 

2833 [-0.75078643, -0.99923895, 0.93773029, 1. , 0.55627469, 

2834 0.47536961], 

2835 [-0.9665554 , -0.58826587, 0.23297648, 0.55627469, 1. , 

2836 -0.46666491], 

2837 [ 0.22423734, -0.44069024, 0.75137473, 0.47536961, -0.46666491, 

2838 1. ]]) 

2839 

2840 """ 

2841 if bias is not np._NoValue or ddof is not np._NoValue: 

2842 # 2015-03-15, 1.10 

2843 warnings.warn('bias and ddof have no effect and are deprecated', 

2844 DeprecationWarning, stacklevel=3) 

2845 c = cov(x, y, rowvar, dtype=dtype) 

2846 try: 

2847 d = diag(c) 

2848 except ValueError: 

2849 # scalar covariance 

2850 # nan if incorrect value (nan, inf, 0), 1 otherwise 

2851 return c / c 

2852 stddev = sqrt(d.real) 

2853 c /= stddev[:, None] 

2854 c /= stddev[None, :] 

2855 

2856 # Clip real and imaginary parts to [-1, 1]. This does not guarantee 

2857 # abs(a[i,j]) <= 1 for complex arrays, but is the best we can do without 

2858 # excessive work. 

2859 np.clip(c.real, -1, 1, out=c.real) 

2860 if np.iscomplexobj(c): 

2861 np.clip(c.imag, -1, 1, out=c.imag) 

2862 

2863 return c 

2864 

2865 

2866@set_module('numpy') 

2867def blackman(M): 

2868 """ 

2869 Return the Blackman window. 

2870 

2871 The Blackman window is a taper formed by using the first three 

2872 terms of a summation of cosines. It was designed to have close to the 

2873 minimal leakage possible. It is close to optimal, only slightly worse 

2874 than a Kaiser window. 

2875 

2876 Parameters 

2877 ---------- 

2878 M : int 

2879 Number of points in the output window. If zero or less, an empty 

2880 array is returned. 

2881 

2882 Returns 

2883 ------- 

2884 out : ndarray 

2885 The window, with the maximum value normalized to one (the value one 

2886 appears only if the number of samples is odd). 

2887 

2888 See Also 

2889 -------- 

2890 bartlett, hamming, hanning, kaiser 

2891 

2892 Notes 

2893 ----- 

2894 The Blackman window is defined as 

2895 

2896 .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M) 

2897 

2898 Most references to the Blackman window come from the signal processing 

2899 literature, where it is used as one of many windowing functions for 

2900 smoothing values. It is also known as an apodization (which means 

2901 "removing the foot", i.e. smoothing discontinuities at the beginning 

2902 and end of the sampled signal) or tapering function. It is known as a 

2903 "near optimal" tapering function, almost as good (by some measures) 

2904 as the kaiser window. 

2905 

2906 References 

2907 ---------- 

2908 Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, 

2909 Dover Publications, New York. 

2910 

2911 Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. 

2912 Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. 

2913 

2914 Examples 

2915 -------- 

2916 >>> import matplotlib.pyplot as plt 

2917 >>> np.blackman(12) 

2918 array([-1.38777878e-17, 3.26064346e-02, 1.59903635e-01, # may vary 

2919 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, 

2920 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, 

2921 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) 

2922 

2923 Plot the window and the frequency response: 

2924 

2925 >>> from numpy.fft import fft, fftshift 

2926 >>> window = np.blackman(51) 

2927 >>> plt.plot(window) 

2928 [<matplotlib.lines.Line2D object at 0x...>] 

2929 >>> plt.title("Blackman window") 

2930 Text(0.5, 1.0, 'Blackman window') 

2931 >>> plt.ylabel("Amplitude") 

2932 Text(0, 0.5, 'Amplitude') 

2933 >>> plt.xlabel("Sample") 

2934 Text(0.5, 0, 'Sample') 

2935 >>> plt.show() 

2936 

2937 >>> plt.figure() 

2938 <Figure size 640x480 with 0 Axes> 

2939 >>> A = fft(window, 2048) / 25.5 

2940 >>> mag = np.abs(fftshift(A)) 

2941 >>> freq = np.linspace(-0.5, 0.5, len(A)) 

2942 >>> with np.errstate(divide='ignore', invalid='ignore'): 

2943 ... response = 20 * np.log10(mag) 

2944 ... 

2945 >>> response = np.clip(response, -100, 100) 

2946 >>> plt.plot(freq, response) 

2947 [<matplotlib.lines.Line2D object at 0x...>] 

2948 >>> plt.title("Frequency response of Blackman window") 

2949 Text(0.5, 1.0, 'Frequency response of Blackman window') 

2950 >>> plt.ylabel("Magnitude [dB]") 

2951 Text(0, 0.5, 'Magnitude [dB]') 

2952 >>> plt.xlabel("Normalized frequency [cycles per sample]") 

2953 Text(0.5, 0, 'Normalized frequency [cycles per sample]') 

2954 >>> _ = plt.axis('tight') 

2955 >>> plt.show() 

2956 

2957 """ 

2958 if M < 1: 

2959 return array([], dtype=np.result_type(M, 0.0)) 

2960 if M == 1: 

2961 return ones(1, dtype=np.result_type(M, 0.0)) 

2962 n = arange(1-M, M, 2) 

2963 return 0.42 + 0.5*cos(pi*n/(M-1)) + 0.08*cos(2.0*pi*n/(M-1)) 

2964 

2965 

2966@set_module('numpy') 

2967def bartlett(M): 

2968 """ 

2969 Return the Bartlett window. 

2970 

2971 The Bartlett window is very similar to a triangular window, except 

2972 that the end points are at zero. It is often used in signal 

2973 processing for tapering a signal, without generating too much 

2974 ripple in the frequency domain. 

2975 

2976 Parameters 

2977 ---------- 

2978 M : int 

2979 Number of points in the output window. If zero or less, an 

2980 empty array is returned. 

2981 

2982 Returns 

2983 ------- 

2984 out : array 

2985 The triangular window, with the maximum value normalized to one 

2986 (the value one appears only if the number of samples is odd), with 

2987 the first and last samples equal to zero. 

2988 

2989 See Also 

2990 -------- 

2991 blackman, hamming, hanning, kaiser 

2992 

2993 Notes 

2994 ----- 

2995 The Bartlett window is defined as 

2996 

2997 .. math:: w(n) = \\frac{2}{M-1} \\left( 

2998 \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right| 

2999 \\right) 

3000 

3001 Most references to the Bartlett window come from the signal processing 

3002 literature, where it is used as one of many windowing functions for 

3003 smoothing values. Note that convolution with this window produces linear 

3004 interpolation. It is also known as an apodization (which means "removing 

3005 the foot", i.e. smoothing discontinuities at the beginning and end of the 

3006 sampled signal) or tapering function. The Fourier transform of the 

3007 Bartlett window is the product of two sinc functions. Note the excellent 

3008 discussion in Kanasewich [2]_. 

3009 

3010 References 

3011 ---------- 

3012 .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", 

3013 Biometrika 37, 1-16, 1950. 

3014 .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", 

3015 The University of Alberta Press, 1975, pp. 109-110. 

3016 .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal 

3017 Processing", Prentice-Hall, 1999, pp. 468-471. 

3018 .. [4] Wikipedia, "Window function", 

3019 https://en.wikipedia.org/wiki/Window_function 

3020 .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, 

3021 "Numerical Recipes", Cambridge University Press, 1986, page 429. 

3022 

3023 Examples 

3024 -------- 

3025 >>> import matplotlib.pyplot as plt 

3026 >>> np.bartlett(12) 

3027 array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, # may vary 

3028 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, 

3029 0.18181818, 0. ]) 

3030 

3031 Plot the window and its frequency response (requires SciPy and matplotlib): 

3032 

3033 >>> from numpy.fft import fft, fftshift 

3034 >>> window = np.bartlett(51) 

3035 >>> plt.plot(window) 

3036 [<matplotlib.lines.Line2D object at 0x...>] 

3037 >>> plt.title("Bartlett window") 

3038 Text(0.5, 1.0, 'Bartlett window') 

3039 >>> plt.ylabel("Amplitude") 

3040 Text(0, 0.5, 'Amplitude') 

3041 >>> plt.xlabel("Sample") 

3042 Text(0.5, 0, 'Sample') 

3043 >>> plt.show() 

3044 

3045 >>> plt.figure() 

3046 <Figure size 640x480 with 0 Axes> 

3047 >>> A = fft(window, 2048) / 25.5 

3048 >>> mag = np.abs(fftshift(A)) 

3049 >>> freq = np.linspace(-0.5, 0.5, len(A)) 

3050 >>> with np.errstate(divide='ignore', invalid='ignore'): 

3051 ... response = 20 * np.log10(mag) 

3052 ... 

3053 >>> response = np.clip(response, -100, 100) 

3054 >>> plt.plot(freq, response) 

3055 [<matplotlib.lines.Line2D object at 0x...>] 

3056 >>> plt.title("Frequency response of Bartlett window") 

3057 Text(0.5, 1.0, 'Frequency response of Bartlett window') 

3058 >>> plt.ylabel("Magnitude [dB]") 

3059 Text(0, 0.5, 'Magnitude [dB]') 

3060 >>> plt.xlabel("Normalized frequency [cycles per sample]") 

3061 Text(0.5, 0, 'Normalized frequency [cycles per sample]') 

3062 >>> _ = plt.axis('tight') 

3063 >>> plt.show() 

3064 

3065 """ 

3066 if M < 1: 

3067 return array([], dtype=np.result_type(M, 0.0)) 

3068 if M == 1: 

3069 return ones(1, dtype=np.result_type(M, 0.0)) 

3070 n = arange(1-M, M, 2) 

3071 return where(less_equal(n, 0), 1 + n/(M-1), 1 - n/(M-1)) 

3072 

3073 

3074@set_module('numpy') 

3075def hanning(M): 

3076 """ 

3077 Return the Hanning window. 

3078 

3079 The Hanning window is a taper formed by using a weighted cosine. 

3080 

3081 Parameters 

3082 ---------- 

3083 M : int 

3084 Number of points in the output window. If zero or less, an 

3085 empty array is returned. 

3086 

3087 Returns 

3088 ------- 

3089 out : ndarray, shape(M,) 

3090 The window, with the maximum value normalized to one (the value 

3091 one appears only if `M` is odd). 

3092 

3093 See Also 

3094 -------- 

3095 bartlett, blackman, hamming, kaiser 

3096 

3097 Notes 

3098 ----- 

3099 The Hanning window is defined as 

3100 

3101 .. math:: w(n) = 0.5 - 0.5\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right) 

3102 \\qquad 0 \\leq n \\leq M-1 

3103 

3104 The Hanning was named for Julius von Hann, an Austrian meteorologist. 

3105 It is also known as the Cosine Bell. Some authors prefer that it be 

3106 called a Hann window, to help avoid confusion with the very similar 

3107 Hamming window. 

3108 

3109 Most references to the Hanning window come from the signal processing 

3110 literature, where it is used as one of many windowing functions for 

3111 smoothing values. It is also known as an apodization (which means 

3112 "removing the foot", i.e. smoothing discontinuities at the beginning 

3113 and end of the sampled signal) or tapering function. 

3114 

3115 References 

3116 ---------- 

3117 .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power 

3118 spectra, Dover Publications, New York. 

3119 .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", 

3120 The University of Alberta Press, 1975, pp. 106-108. 

3121 .. [3] Wikipedia, "Window function", 

3122 https://en.wikipedia.org/wiki/Window_function 

3123 .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, 

3124 "Numerical Recipes", Cambridge University Press, 1986, page 425. 

3125 

3126 Examples 

3127 -------- 

3128 >>> np.hanning(12) 

3129 array([0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, 

3130 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, 

3131 0.07937323, 0. ]) 

3132 

3133 Plot the window and its frequency response: 

3134 

3135 >>> import matplotlib.pyplot as plt 

3136 >>> from numpy.fft import fft, fftshift 

3137 >>> window = np.hanning(51) 

3138 >>> plt.plot(window) 

3139 [<matplotlib.lines.Line2D object at 0x...>] 

3140 >>> plt.title("Hann window") 

3141 Text(0.5, 1.0, 'Hann window') 

3142 >>> plt.ylabel("Amplitude") 

3143 Text(0, 0.5, 'Amplitude') 

3144 >>> plt.xlabel("Sample") 

3145 Text(0.5, 0, 'Sample') 

3146 >>> plt.show() 

3147 

3148 >>> plt.figure() 

3149 <Figure size 640x480 with 0 Axes> 

3150 >>> A = fft(window, 2048) / 25.5 

3151 >>> mag = np.abs(fftshift(A)) 

3152 >>> freq = np.linspace(-0.5, 0.5, len(A)) 

3153 >>> with np.errstate(divide='ignore', invalid='ignore'): 

3154 ... response = 20 * np.log10(mag) 

3155 ... 

3156 >>> response = np.clip(response, -100, 100) 

3157 >>> plt.plot(freq, response) 

3158 [<matplotlib.lines.Line2D object at 0x...>] 

3159 >>> plt.title("Frequency response of the Hann window") 

3160 Text(0.5, 1.0, 'Frequency response of the Hann window') 

3161 >>> plt.ylabel("Magnitude [dB]") 

3162 Text(0, 0.5, 'Magnitude [dB]') 

3163 >>> plt.xlabel("Normalized frequency [cycles per sample]") 

3164 Text(0.5, 0, 'Normalized frequency [cycles per sample]') 

3165 >>> plt.axis('tight') 

3166 ... 

3167 >>> plt.show() 

3168 

3169 """ 

3170 if M < 1: 

3171 return array([], dtype=np.result_type(M, 0.0)) 

3172 if M == 1: 

3173 return ones(1, dtype=np.result_type(M, 0.0)) 

3174 n = arange(1-M, M, 2) 

3175 return 0.5 + 0.5*cos(pi*n/(M-1)) 

3176 

3177 

3178@set_module('numpy') 

3179def hamming(M): 

3180 """ 

3181 Return the Hamming window. 

3182 

3183 The Hamming window is a taper formed by using a weighted cosine. 

3184 

3185 Parameters 

3186 ---------- 

3187 M : int 

3188 Number of points in the output window. If zero or less, an 

3189 empty array is returned. 

3190 

3191 Returns 

3192 ------- 

3193 out : ndarray 

3194 The window, with the maximum value normalized to one (the value 

3195 one appears only if the number of samples is odd). 

3196 

3197 See Also 

3198 -------- 

3199 bartlett, blackman, hanning, kaiser 

3200 

3201 Notes 

3202 ----- 

3203 The Hamming window is defined as 

3204 

3205 .. math:: w(n) = 0.54 - 0.46\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right) 

3206 \\qquad 0 \\leq n \\leq M-1 

3207 

3208 The Hamming was named for R. W. Hamming, an associate of J. W. Tukey 

3209 and is described in Blackman and Tukey. It was recommended for 

3210 smoothing the truncated autocovariance function in the time domain. 

3211 Most references to the Hamming window come from the signal processing 

3212 literature, where it is used as one of many windowing functions for 

3213 smoothing values. It is also known as an apodization (which means 

3214 "removing the foot", i.e. smoothing discontinuities at the beginning 

3215 and end of the sampled signal) or tapering function. 

3216 

3217 References 

3218 ---------- 

3219 .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power 

3220 spectra, Dover Publications, New York. 

3221 .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The 

3222 University of Alberta Press, 1975, pp. 109-110. 

3223 .. [3] Wikipedia, "Window function", 

3224 https://en.wikipedia.org/wiki/Window_function 

3225 .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, 

3226 "Numerical Recipes", Cambridge University Press, 1986, page 425. 

3227 

3228 Examples 

3229 -------- 

3230 >>> np.hamming(12) 

3231 array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, # may vary 

3232 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, 

3233 0.15302337, 0.08 ]) 

3234 

3235 Plot the window and the frequency response: 

3236 

3237 >>> import matplotlib.pyplot as plt 

3238 >>> from numpy.fft import fft, fftshift 

3239 >>> window = np.hamming(51) 

3240 >>> plt.plot(window) 

3241 [<matplotlib.lines.Line2D object at 0x...>] 

3242 >>> plt.title("Hamming window") 

3243 Text(0.5, 1.0, 'Hamming window') 

3244 >>> plt.ylabel("Amplitude") 

3245 Text(0, 0.5, 'Amplitude') 

3246 >>> plt.xlabel("Sample") 

3247 Text(0.5, 0, 'Sample') 

3248 >>> plt.show() 

3249 

3250 >>> plt.figure() 

3251 <Figure size 640x480 with 0 Axes> 

3252 >>> A = fft(window, 2048) / 25.5 

3253 >>> mag = np.abs(fftshift(A)) 

3254 >>> freq = np.linspace(-0.5, 0.5, len(A)) 

3255 >>> response = 20 * np.log10(mag) 

3256 >>> response = np.clip(response, -100, 100) 

3257 >>> plt.plot(freq, response) 

3258 [<matplotlib.lines.Line2D object at 0x...>] 

3259 >>> plt.title("Frequency response of Hamming window") 

3260 Text(0.5, 1.0, 'Frequency response of Hamming window') 

3261 >>> plt.ylabel("Magnitude [dB]") 

3262 Text(0, 0.5, 'Magnitude [dB]') 

3263 >>> plt.xlabel("Normalized frequency [cycles per sample]") 

3264 Text(0.5, 0, 'Normalized frequency [cycles per sample]') 

3265 >>> plt.axis('tight') 

3266 ... 

3267 >>> plt.show() 

3268 

3269 """ 

3270 if M < 1: 

3271 return array([], dtype=np.result_type(M, 0.0)) 

3272 if M == 1: 

3273 return ones(1, dtype=np.result_type(M, 0.0)) 

3274 n = arange(1-M, M, 2) 

3275 return 0.54 + 0.46*cos(pi*n/(M-1)) 

3276 

3277 

3278## Code from cephes for i0 

3279 

3280_i0A = [ 

3281 -4.41534164647933937950E-18, 

3282 3.33079451882223809783E-17, 

3283 -2.43127984654795469359E-16, 

3284 1.71539128555513303061E-15, 

3285 -1.16853328779934516808E-14, 

3286 7.67618549860493561688E-14, 

3287 -4.85644678311192946090E-13, 

3288 2.95505266312963983461E-12, 

3289 -1.72682629144155570723E-11, 

3290 9.67580903537323691224E-11, 

3291 -5.18979560163526290666E-10, 

3292 2.65982372468238665035E-9, 

3293 -1.30002500998624804212E-8, 

3294 6.04699502254191894932E-8, 

3295 -2.67079385394061173391E-7, 

3296 1.11738753912010371815E-6, 

3297 -4.41673835845875056359E-6, 

3298 1.64484480707288970893E-5, 

3299 -5.75419501008210370398E-5, 

3300 1.88502885095841655729E-4, 

3301 -5.76375574538582365885E-4, 

3302 1.63947561694133579842E-3, 

3303 -4.32430999505057594430E-3, 

3304 1.05464603945949983183E-2, 

3305 -2.37374148058994688156E-2, 

3306 4.93052842396707084878E-2, 

3307 -9.49010970480476444210E-2, 

3308 1.71620901522208775349E-1, 

3309 -3.04682672343198398683E-1, 

3310 6.76795274409476084995E-1 

3311 ] 

3312 

3313_i0B = [ 

3314 -7.23318048787475395456E-18, 

3315 -4.83050448594418207126E-18, 

3316 4.46562142029675999901E-17, 

3317 3.46122286769746109310E-17, 

3318 -2.82762398051658348494E-16, 

3319 -3.42548561967721913462E-16, 

3320 1.77256013305652638360E-15, 

3321 3.81168066935262242075E-15, 

3322 -9.55484669882830764870E-15, 

3323 -4.15056934728722208663E-14, 

3324 1.54008621752140982691E-14, 

3325 3.85277838274214270114E-13, 

3326 7.18012445138366623367E-13, 

3327 -1.79417853150680611778E-12, 

3328 -1.32158118404477131188E-11, 

3329 -3.14991652796324136454E-11, 

3330 1.18891471078464383424E-11, 

3331 4.94060238822496958910E-10, 

3332 3.39623202570838634515E-9, 

3333 2.26666899049817806459E-8, 

3334 2.04891858946906374183E-7, 

3335 2.89137052083475648297E-6, 

3336 6.88975834691682398426E-5, 

3337 3.36911647825569408990E-3, 

3338 8.04490411014108831608E-1 

3339 ] 

3340 

3341 

3342def _chbevl(x, vals): 

3343 b0 = vals[0] 

3344 b1 = 0.0 

3345 

3346 for i in range(1, len(vals)): 

3347 b2 = b1 

3348 b1 = b0 

3349 b0 = x*b1 - b2 + vals[i] 

3350 

3351 return 0.5*(b0 - b2) 

3352 

3353 

3354def _i0_1(x): 

3355 return exp(x) * _chbevl(x/2.0-2, _i0A) 

3356 

3357 

3358def _i0_2(x): 

3359 return exp(x) * _chbevl(32.0/x - 2.0, _i0B) / sqrt(x) 

3360 

3361 

3362def _i0_dispatcher(x): 

3363 return (x,) 

3364 

3365 

3366@array_function_dispatch(_i0_dispatcher) 

3367def i0(x): 

3368 """ 

3369 Modified Bessel function of the first kind, order 0. 

3370 

3371 Usually denoted :math:`I_0`. 

3372 

3373 Parameters 

3374 ---------- 

3375 x : array_like of float 

3376 Argument of the Bessel function. 

3377 

3378 Returns 

3379 ------- 

3380 out : ndarray, shape = x.shape, dtype = float 

3381 The modified Bessel function evaluated at each of the elements of `x`. 

3382 

3383 See Also 

3384 -------- 

3385 scipy.special.i0, scipy.special.iv, scipy.special.ive 

3386 

3387 Notes 

3388 ----- 

3389 The scipy implementation is recommended over this function: it is a 

3390 proper ufunc written in C, and more than an order of magnitude faster. 

3391 

3392 We use the algorithm published by Clenshaw [1]_ and referenced by 

3393 Abramowitz and Stegun [2]_, for which the function domain is 

3394 partitioned into the two intervals [0,8] and (8,inf), and Chebyshev 

3395 polynomial expansions are employed in each interval. Relative error on 

3396 the domain [0,30] using IEEE arithmetic is documented [3]_ as having a 

3397 peak of 5.8e-16 with an rms of 1.4e-16 (n = 30000). 

3398 

3399 References 

3400 ---------- 

3401 .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions", in 

3402 *National Physical Laboratory Mathematical Tables*, vol. 5, London: 

3403 Her Majesty's Stationery Office, 1962. 

3404 .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical 

3405 Functions*, 10th printing, New York: Dover, 1964, pp. 379. 

3406 https://personal.math.ubc.ca/~cbm/aands/page_379.htm 

3407 .. [3] https://metacpan.org/pod/distribution/Math-Cephes/lib/Math/Cephes.pod#i0:-Modified-Bessel-function-of-order-zero 

3408 

3409 Examples 

3410 -------- 

3411 >>> np.i0(0.) 

3412 array(1.0) 

3413 >>> np.i0([0, 1, 2, 3]) 

3414 array([1. , 1.26606588, 2.2795853 , 4.88079259]) 

3415 

3416 """ 

3417 x = np.asanyarray(x) 

3418 if x.dtype.kind == 'c': 

3419 raise TypeError("i0 not supported for complex values") 

3420 if x.dtype.kind != 'f': 

3421 x = x.astype(float) 

3422 x = np.abs(x) 

3423 return piecewise(x, [x <= 8.0], [_i0_1, _i0_2]) 

3424 

3425## End of cephes code for i0 

3426 

3427 

3428@set_module('numpy') 

3429def kaiser(M, beta): 

3430 """ 

3431 Return the Kaiser window. 

3432 

3433 The Kaiser window is a taper formed by using a Bessel function. 

3434 

3435 Parameters 

3436 ---------- 

3437 M : int 

3438 Number of points in the output window. If zero or less, an 

3439 empty array is returned. 

3440 beta : float 

3441 Shape parameter for window. 

3442 

3443 Returns 

3444 ------- 

3445 out : array 

3446 The window, with the maximum value normalized to one (the value 

3447 one appears only if the number of samples is odd). 

3448 

3449 See Also 

3450 -------- 

3451 bartlett, blackman, hamming, hanning 

3452 

3453 Notes 

3454 ----- 

3455 The Kaiser window is defined as 

3456 

3457 .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}} 

3458 \\right)/I_0(\\beta) 

3459 

3460 with 

3461 

3462 .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}, 

3463 

3464 where :math:`I_0` is the modified zeroth-order Bessel function. 

3465 

3466 The Kaiser was named for Jim Kaiser, who discovered a simple 

3467 approximation to the DPSS window based on Bessel functions. The Kaiser 

3468 window is a very good approximation to the Digital Prolate Spheroidal 

3469 Sequence, or Slepian window, which is the transform which maximizes the 

3470 energy in the main lobe of the window relative to total energy. 

3471 

3472 The Kaiser can approximate many other windows by varying the beta 

3473 parameter. 

3474 

3475 ==== ======================= 

3476 beta Window shape 

3477 ==== ======================= 

3478 0 Rectangular 

3479 5 Similar to a Hamming 

3480 6 Similar to a Hanning 

3481 8.6 Similar to a Blackman 

3482 ==== ======================= 

3483 

3484 A beta value of 14 is probably a good starting point. Note that as beta 

3485 gets large, the window narrows, and so the number of samples needs to be 

3486 large enough to sample the increasingly narrow spike, otherwise NaNs will 

3487 get returned. 

3488 

3489 Most references to the Kaiser window come from the signal processing 

3490 literature, where it is used as one of many windowing functions for 

3491 smoothing values. It is also known as an apodization (which means 

3492 "removing the foot", i.e. smoothing discontinuities at the beginning 

3493 and end of the sampled signal) or tapering function. 

3494 

3495 References 

3496 ---------- 

3497 .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by 

3498 digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. 

3499 John Wiley and Sons, New York, (1966). 

3500 .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The 

3501 University of Alberta Press, 1975, pp. 177-178. 

3502 .. [3] Wikipedia, "Window function", 

3503 https://en.wikipedia.org/wiki/Window_function 

3504 

3505 Examples 

3506 -------- 

3507 >>> import matplotlib.pyplot as plt 

3508 >>> np.kaiser(12, 14) 

3509 array([7.72686684e-06, 3.46009194e-03, 4.65200189e-02, # may vary 

3510 2.29737120e-01, 5.99885316e-01, 9.45674898e-01, 

3511 9.45674898e-01, 5.99885316e-01, 2.29737120e-01, 

3512 4.65200189e-02, 3.46009194e-03, 7.72686684e-06]) 

3513 

3514 

3515 Plot the window and the frequency response: 

3516 

3517 >>> from numpy.fft import fft, fftshift 

3518 >>> window = np.kaiser(51, 14) 

3519 >>> plt.plot(window) 

3520 [<matplotlib.lines.Line2D object at 0x...>] 

3521 >>> plt.title("Kaiser window") 

3522 Text(0.5, 1.0, 'Kaiser window') 

3523 >>> plt.ylabel("Amplitude") 

3524 Text(0, 0.5, 'Amplitude') 

3525 >>> plt.xlabel("Sample") 

3526 Text(0.5, 0, 'Sample') 

3527 >>> plt.show() 

3528 

3529 >>> plt.figure() 

3530 <Figure size 640x480 with 0 Axes> 

3531 >>> A = fft(window, 2048) / 25.5 

3532 >>> mag = np.abs(fftshift(A)) 

3533 >>> freq = np.linspace(-0.5, 0.5, len(A)) 

3534 >>> response = 20 * np.log10(mag) 

3535 >>> response = np.clip(response, -100, 100) 

3536 >>> plt.plot(freq, response) 

3537 [<matplotlib.lines.Line2D object at 0x...>] 

3538 >>> plt.title("Frequency response of Kaiser window") 

3539 Text(0.5, 1.0, 'Frequency response of Kaiser window') 

3540 >>> plt.ylabel("Magnitude [dB]") 

3541 Text(0, 0.5, 'Magnitude [dB]') 

3542 >>> plt.xlabel("Normalized frequency [cycles per sample]") 

3543 Text(0.5, 0, 'Normalized frequency [cycles per sample]') 

3544 >>> plt.axis('tight') 

3545 (-0.5, 0.5, -100.0, ...) # may vary 

3546 >>> plt.show() 

3547 

3548 """ 

3549 if M == 1: 

3550 return np.ones(1, dtype=np.result_type(M, 0.0)) 

3551 n = arange(0, M) 

3552 alpha = (M-1)/2.0 

3553 return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(float(beta)) 

3554 

3555 

3556def _sinc_dispatcher(x): 

3557 return (x,) 

3558 

3559 

3560@array_function_dispatch(_sinc_dispatcher) 

3561def sinc(x): 

3562 r""" 

3563 Return the normalized sinc function. 

3564 

3565 The sinc function is equal to :math:`\sin(\pi x)/(\pi x)` for any argument 

3566 :math:`x\ne 0`. ``sinc(0)`` takes the limit value 1, making ``sinc`` not 

3567 only everywhere continuous but also infinitely differentiable. 

3568 

3569 .. note:: 

3570 

3571 Note the normalization factor of ``pi`` used in the definition. 

3572 This is the most commonly used definition in signal processing. 

3573 Use ``sinc(x / np.pi)`` to obtain the unnormalized sinc function 

3574 :math:`\sin(x)/x` that is more common in mathematics. 

3575 

3576 Parameters 

3577 ---------- 

3578 x : ndarray 

3579 Array (possibly multi-dimensional) of values for which to calculate 

3580 ``sinc(x)``. 

3581 

3582 Returns 

3583 ------- 

3584 out : ndarray 

3585 ``sinc(x)``, which has the same shape as the input. 

3586 

3587 Notes 

3588 ----- 

3589 The name sinc is short for "sine cardinal" or "sinus cardinalis". 

3590 

3591 The sinc function is used in various signal processing applications, 

3592 including in anti-aliasing, in the construction of a Lanczos resampling 

3593 filter, and in interpolation. 

3594 

3595 For bandlimited interpolation of discrete-time signals, the ideal 

3596 interpolation kernel is proportional to the sinc function. 

3597 

3598 References 

3599 ---------- 

3600 .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web 

3601 Resource. http://mathworld.wolfram.com/SincFunction.html 

3602 .. [2] Wikipedia, "Sinc function", 

3603 https://en.wikipedia.org/wiki/Sinc_function 

3604 

3605 Examples 

3606 -------- 

3607 >>> import matplotlib.pyplot as plt 

3608 >>> x = np.linspace(-4, 4, 41) 

3609 >>> np.sinc(x) 

3610 array([-3.89804309e-17, -4.92362781e-02, -8.40918587e-02, # may vary 

3611 -8.90384387e-02, -5.84680802e-02, 3.89804309e-17, 

3612 6.68206631e-02, 1.16434881e-01, 1.26137788e-01, 

3613 8.50444803e-02, -3.89804309e-17, -1.03943254e-01, 

3614 -1.89206682e-01, -2.16236208e-01, -1.55914881e-01, 

3615 3.89804309e-17, 2.33872321e-01, 5.04551152e-01, 

3616 7.56826729e-01, 9.35489284e-01, 1.00000000e+00, 

3617 9.35489284e-01, 7.56826729e-01, 5.04551152e-01, 

3618 2.33872321e-01, 3.89804309e-17, -1.55914881e-01, 

3619 -2.16236208e-01, -1.89206682e-01, -1.03943254e-01, 

3620 -3.89804309e-17, 8.50444803e-02, 1.26137788e-01, 

3621 1.16434881e-01, 6.68206631e-02, 3.89804309e-17, 

3622 -5.84680802e-02, -8.90384387e-02, -8.40918587e-02, 

3623 -4.92362781e-02, -3.89804309e-17]) 

3624 

3625 >>> plt.plot(x, np.sinc(x)) 

3626 [<matplotlib.lines.Line2D object at 0x...>] 

3627 >>> plt.title("Sinc Function") 

3628 Text(0.5, 1.0, 'Sinc Function') 

3629 >>> plt.ylabel("Amplitude") 

3630 Text(0, 0.5, 'Amplitude') 

3631 >>> plt.xlabel("X") 

3632 Text(0.5, 0, 'X') 

3633 >>> plt.show() 

3634 

3635 """ 

3636 x = np.asanyarray(x) 

3637 y = pi * where(x == 0, 1.0e-20, x) 

3638 return sin(y)/y 

3639 

3640 

3641def _msort_dispatcher(a): 

3642 return (a,) 

3643 

3644 

3645@array_function_dispatch(_msort_dispatcher) 

3646def msort(a): 

3647 """ 

3648 Return a copy of an array sorted along the first axis. 

3649 

3650 Parameters 

3651 ---------- 

3652 a : array_like 

3653 Array to be sorted. 

3654 

3655 Returns 

3656 ------- 

3657 sorted_array : ndarray 

3658 Array of the same type and shape as `a`. 

3659 

3660 See Also 

3661 -------- 

3662 sort 

3663 

3664 Notes 

3665 ----- 

3666 ``np.msort(a)`` is equivalent to ``np.sort(a, axis=0)``. 

3667 

3668 """ 

3669 b = array(a, subok=True, copy=True) 

3670 b.sort(0) 

3671 return b 

3672 

3673 

3674def _ureduce(a, func, **kwargs): 

3675 """ 

3676 Internal Function. 

3677 Call `func` with `a` as first argument swapping the axes to use extended 

3678 axis on functions that don't support it natively. 

3679 

3680 Returns result and a.shape with axis dims set to 1. 

3681 

3682 Parameters 

3683 ---------- 

3684 a : array_like 

3685 Input array or object that can be converted to an array. 

3686 func : callable 

3687 Reduction function capable of receiving a single axis argument. 

3688 It is called with `a` as first argument followed by `kwargs`. 

3689 kwargs : keyword arguments 

3690 additional keyword arguments to pass to `func`. 

3691 

3692 Returns 

3693 ------- 

3694 result : tuple 

3695 Result of func(a, **kwargs) and a.shape with axis dims set to 1 

3696 which can be used to reshape the result to the same shape a ufunc with 

3697 keepdims=True would produce. 

3698 

3699 """ 

3700 a = np.asanyarray(a) 

3701 axis = kwargs.get('axis', None) 

3702 if axis is not None: 

3703 keepdim = list(a.shape) 

3704 nd = a.ndim 

3705 axis = _nx.normalize_axis_tuple(axis, nd) 

3706 

3707 for ax in axis: 

3708 keepdim[ax] = 1 

3709 

3710 if len(axis) == 1: 

3711 kwargs['axis'] = axis[0] 

3712 else: 

3713 keep = set(range(nd)) - set(axis) 

3714 nkeep = len(keep) 

3715 # swap axis that should not be reduced to front 

3716 for i, s in enumerate(sorted(keep)): 

3717 a = a.swapaxes(i, s) 

3718 # merge reduced axis 

3719 a = a.reshape(a.shape[:nkeep] + (-1,)) 

3720 kwargs['axis'] = -1 

3721 keepdim = tuple(keepdim) 

3722 else: 

3723 keepdim = (1,) * a.ndim 

3724 

3725 r = func(a, **kwargs) 

3726 return r, keepdim 

3727 

3728 

3729def _median_dispatcher( 

3730 a, axis=None, out=None, overwrite_input=None, keepdims=None): 

3731 return (a, out) 

3732 

3733 

3734@array_function_dispatch(_median_dispatcher) 

3735def median(a, axis=None, out=None, overwrite_input=False, keepdims=False): 

3736 """ 

3737 Compute the median along the specified axis. 

3738 

3739 Returns the median of the array elements. 

3740 

3741 Parameters 

3742 ---------- 

3743 a : array_like 

3744 Input array or object that can be converted to an array. 

3745 axis : {int, sequence of int, None}, optional 

3746 Axis or axes along which the medians are computed. The default 

3747 is to compute the median along a flattened version of the array. 

3748 A sequence of axes is supported since version 1.9.0. 

3749 out : ndarray, optional 

3750 Alternative output array in which to place the result. It must 

3751 have the same shape and buffer length as the expected output, 

3752 but the type (of the output) will be cast if necessary. 

3753 overwrite_input : bool, optional 

3754 If True, then allow use of memory of input array `a` for 

3755 calculations. The input array will be modified by the call to 

3756 `median`. This will save memory when you do not need to preserve 

3757 the contents of the input array. Treat the input as undefined, 

3758 but it will probably be fully or partially sorted. Default is 

3759 False. If `overwrite_input` is ``True`` and `a` is not already an 

3760 `ndarray`, an error will be raised. 

3761 keepdims : bool, optional 

3762 If this is set to True, the axes which are reduced are left 

3763 in the result as dimensions with size one. With this option, 

3764 the result will broadcast correctly against the original `arr`. 

3765 

3766 .. versionadded:: 1.9.0 

3767 

3768 Returns 

3769 ------- 

3770 median : ndarray 

3771 A new array holding the result. If the input contains integers 

3772 or floats smaller than ``float64``, then the output data-type is 

3773 ``np.float64``. Otherwise, the data-type of the output is the 

3774 same as that of the input. If `out` is specified, that array is 

3775 returned instead. 

3776 

3777 See Also 

3778 -------- 

3779 mean, percentile 

3780 

3781 Notes 

3782 ----- 

3783 Given a vector ``V`` of length ``N``, the median of ``V`` is the 

3784 middle value of a sorted copy of ``V``, ``V_sorted`` - i 

3785 e., ``V_sorted[(N-1)/2]``, when ``N`` is odd, and the average of the 

3786 two middle values of ``V_sorted`` when ``N`` is even. 

3787 

3788 Examples 

3789 -------- 

3790 >>> a = np.array([[10, 7, 4], [3, 2, 1]]) 

3791 >>> a 

3792 array([[10, 7, 4], 

3793 [ 3, 2, 1]]) 

3794 >>> np.median(a) 

3795 3.5 

3796 >>> np.median(a, axis=0) 

3797 array([6.5, 4.5, 2.5]) 

3798 >>> np.median(a, axis=1) 

3799 array([7., 2.]) 

3800 >>> m = np.median(a, axis=0) 

3801 >>> out = np.zeros_like(m) 

3802 >>> np.median(a, axis=0, out=m) 

3803 array([6.5, 4.5, 2.5]) 

3804 >>> m 

3805 array([6.5, 4.5, 2.5]) 

3806 >>> b = a.copy() 

3807 >>> np.median(b, axis=1, overwrite_input=True) 

3808 array([7., 2.]) 

3809 >>> assert not np.all(a==b) 

3810 >>> b = a.copy() 

3811 >>> np.median(b, axis=None, overwrite_input=True) 

3812 3.5 

3813 >>> assert not np.all(a==b) 

3814 

3815 """ 

3816 r, k = _ureduce(a, func=_median, axis=axis, out=out, 

3817 overwrite_input=overwrite_input) 

3818 if keepdims: 

3819 return r.reshape(k) 

3820 else: 

3821 return r 

3822 

3823 

3824def _median(a, axis=None, out=None, overwrite_input=False): 

3825 # can't be reasonably be implemented in terms of percentile as we have to 

3826 # call mean to not break astropy 

3827 a = np.asanyarray(a) 

3828 

3829 # Set the partition indexes 

3830 if axis is None: 

3831 sz = a.size 

3832 else: 

3833 sz = a.shape[axis] 

3834 if sz % 2 == 0: 

3835 szh = sz // 2 

3836 kth = [szh - 1, szh] 

3837 else: 

3838 kth = [(sz - 1) // 2] 

3839 # Check if the array contains any nan's 

3840 if np.issubdtype(a.dtype, np.inexact): 

3841 kth.append(-1) 

3842 

3843 if overwrite_input: 

3844 if axis is None: 

3845 part = a.ravel() 

3846 part.partition(kth) 

3847 else: 

3848 a.partition(kth, axis=axis) 

3849 part = a 

3850 else: 

3851 part = partition(a, kth, axis=axis) 

3852 

3853 if part.shape == (): 

3854 # make 0-D arrays work 

3855 return part.item() 

3856 if axis is None: 

3857 axis = 0 

3858 

3859 indexer = [slice(None)] * part.ndim 

3860 index = part.shape[axis] // 2 

3861 if part.shape[axis] % 2 == 1: 

3862 # index with slice to allow mean (below) to work 

3863 indexer[axis] = slice(index, index+1) 

3864 else: 

3865 indexer[axis] = slice(index-1, index+1) 

3866 indexer = tuple(indexer) 

3867 

3868 # Use mean in both odd and even case to coerce data type, 

3869 # using out array if needed. 

3870 rout = mean(part[indexer], axis=axis, out=out) 

3871 # Check if the array contains any nan's 

3872 if np.issubdtype(a.dtype, np.inexact) and sz > 0: 

3873 # If nans are possible, warn and replace by nans like mean would. 

3874 rout = np.lib.utils._median_nancheck(part, rout, axis) 

3875 

3876 return rout 

3877 

3878 

3879def _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, 

3880 method=None, keepdims=None, *, interpolation=None): 

3881 return (a, q, out) 

3882 

3883 

3884@array_function_dispatch(_percentile_dispatcher) 

3885def percentile(a, 

3886 q, 

3887 axis=None, 

3888 out=None, 

3889 overwrite_input=False, 

3890 method="linear", 

3891 keepdims=False, 

3892 *, 

3893 interpolation=None): 

3894 """ 

3895 Compute the q-th percentile of the data along the specified axis. 

3896 

3897 Returns the q-th percentile(s) of the array elements. 

3898 

3899 Parameters 

3900 ---------- 

3901 a : array_like 

3902 Input array or object that can be converted to an array. 

3903 q : array_like of float 

3904 Percentile or sequence of percentiles to compute, which must be between 

3905 0 and 100 inclusive. 

3906 axis : {int, tuple of int, None}, optional 

3907 Axis or axes along which the percentiles are computed. The 

3908 default is to compute the percentile(s) along a flattened 

3909 version of the array. 

3910 

3911 .. versionchanged:: 1.9.0 

3912 A tuple of axes is supported 

3913 out : ndarray, optional 

3914 Alternative output array in which to place the result. It must 

3915 have the same shape and buffer length as the expected output, 

3916 but the type (of the output) will be cast if necessary. 

3917 overwrite_input : bool, optional 

3918 If True, then allow the input array `a` to be modified by intermediate 

3919 calculations, to save memory. In this case, the contents of the input 

3920 `a` after this function completes is undefined. 

3921 method : str, optional 

3922 This parameter specifies the method to use for estimating the 

3923 percentile. There are many different methods, some unique to NumPy. 

3924 See the notes for explanation. The options sorted by their R type 

3925 as summarized in the H&F paper [1]_ are: 

3926 

3927 1. 'inverted_cdf' 

3928 2. 'averaged_inverted_cdf' 

3929 3. 'closest_observation' 

3930 4. 'interpolated_inverted_cdf' 

3931 5. 'hazen' 

3932 6. 'weibull' 

3933 7. 'linear' (default) 

3934 8. 'median_unbiased' 

3935 9. 'normal_unbiased' 

3936 

3937 The first three methods are discontiuous. NumPy further defines the 

3938 following discontinuous variations of the default 'linear' (7.) option: 

3939 

3940 * 'lower' 

3941 * 'higher', 

3942 * 'midpoint' 

3943 * 'nearest' 

3944 

3945 .. versionchanged:: 1.22.0 

3946 This argument was previously called "interpolation" and only 

3947 offered the "linear" default and last four options. 

3948 

3949 keepdims : bool, optional 

3950 If this is set to True, the axes which are reduced are left in 

3951 the result as dimensions with size one. With this option, the 

3952 result will broadcast correctly against the original array `a`. 

3953 

3954 .. versionadded:: 1.9.0 

3955 

3956 interpolation : str, optional 

3957 Deprecated name for the method keyword argument. 

3958 

3959 .. deprecated:: 1.22.0 

3960 

3961 Returns 

3962 ------- 

3963 percentile : scalar or ndarray 

3964 If `q` is a single percentile and `axis=None`, then the result 

3965 is a scalar. If multiple percentiles are given, first axis of 

3966 the result corresponds to the percentiles. The other axes are 

3967 the axes that remain after the reduction of `a`. If the input 

3968 contains integers or floats smaller than ``float64``, the output 

3969 data-type is ``float64``. Otherwise, the output data-type is the 

3970 same as that of the input. If `out` is specified, that array is 

3971 returned instead. 

3972 

3973 See Also 

3974 -------- 

3975 mean 

3976 median : equivalent to ``percentile(..., 50)`` 

3977 nanpercentile 

3978 quantile : equivalent to percentile, except q in the range [0, 1]. 

3979 

3980 Notes 

3981 ----- 

3982 Given a vector ``V`` of length ``N``, the q-th percentile of ``V`` is 

3983 the value ``q/100`` of the way from the minimum to the maximum in a 

3984 sorted copy of ``V``. The values and distances of the two nearest 

3985 neighbors as well as the `method` parameter will determine the 

3986 percentile if the normalized ranking does not match the location of 

3987 ``q`` exactly. This function is the same as the median if ``q=50``, the 

3988 same as the minimum if ``q=0`` and the same as the maximum if 

3989 ``q=100``. 

3990 

3991 This optional `method` parameter specifies the method to use when the 

3992 desired quantile lies between two data points ``i < j``. 

3993 If ``g`` is the fractional part of the index surrounded by ``i`` and 

3994 alpha and beta are correction constants modifying i and j. 

3995 

3996 Below, 'q' is the quantile value, 'n' is the sample size and 

3997 alpha and beta are constants. 

3998 The following formula gives an interpolation "i + g" of where the quantile 

3999 would be in the sorted sample. 

4000 With 'i' being the floor and 'g' the fractional part of the result. 

4001 

4002 .. math:: 

4003 i + g = (q - alpha) / ( n - alpha - beta + 1 ) 

4004 

4005 The different methods then work as follows 

4006 

4007 inverted_cdf: 

4008 method 1 of H&F [1]_. 

4009 This method gives discontinuous results: 

4010 

4011 * if g > 0 ; then take j 

4012 * if g = 0 ; then take i 

4013 

4014 averaged_inverted_cdf: 

4015 method 2 of H&F [1]_. 

4016 This method give discontinuous results: 

4017 

4018 * if g > 0 ; then take j 

4019 * if g = 0 ; then average between bounds 

4020 

4021 closest_observation: 

4022 method 3 of H&F [1]_. 

4023 This method give discontinuous results: 

4024 

4025 * if g > 0 ; then take j 

4026 * if g = 0 and index is odd ; then take j 

4027 * if g = 0 and index is even ; then take i 

4028 

4029 interpolated_inverted_cdf: 

4030 method 4 of H&F [1]_. 

4031 This method give continuous results using: 

4032 

4033 * alpha = 0 

4034 * beta = 1 

4035 

4036 hazen: 

4037 method 5 of H&F [1]_. 

4038 This method give continuous results using: 

4039 

4040 * alpha = 1/2 

4041 * beta = 1/2 

4042 

4043 weibull: 

4044 method 6 of H&F [1]_. 

4045 This method give continuous results using: 

4046 

4047 * alpha = 0 

4048 * beta = 0 

4049 

4050 linear: 

4051 method 7 of H&F [1]_. 

4052 This method give continuous results using: 

4053 

4054 * alpha = 1 

4055 * beta = 1 

4056 

4057 median_unbiased: 

4058 method 8 of H&F [1]_. 

4059 This method is probably the best method if the sample 

4060 distribution function is unknown (see reference). 

4061 This method give continuous results using: 

4062 

4063 * alpha = 1/3 

4064 * beta = 1/3 

4065 

4066 normal_unbiased: 

4067 method 9 of H&F [1]_. 

4068 This method is probably the best method if the sample 

4069 distribution function is known to be normal. 

4070 This method give continuous results using: 

4071 

4072 * alpha = 3/8 

4073 * beta = 3/8 

4074 

4075 lower: 

4076 NumPy method kept for backwards compatibility. 

4077 Takes ``i`` as the interpolation point. 

4078 

4079 higher: 

4080 NumPy method kept for backwards compatibility. 

4081 Takes ``j`` as the interpolation point. 

4082 

4083 nearest: 

4084 NumPy method kept for backwards compatibility. 

4085 Takes ``i`` or ``j``, whichever is nearest. 

4086 

4087 midpoint: 

4088 NumPy method kept for backwards compatibility. 

4089 Uses ``(i + j) / 2``. 

4090 

4091 Examples 

4092 -------- 

4093 >>> a = np.array([[10, 7, 4], [3, 2, 1]]) 

4094 >>> a 

4095 array([[10, 7, 4], 

4096 [ 3, 2, 1]]) 

4097 >>> np.percentile(a, 50) 

4098 3.5 

4099 >>> np.percentile(a, 50, axis=0) 

4100 array([6.5, 4.5, 2.5]) 

4101 >>> np.percentile(a, 50, axis=1) 

4102 array([7., 2.]) 

4103 >>> np.percentile(a, 50, axis=1, keepdims=True) 

4104 array([[7.], 

4105 [2.]]) 

4106 

4107 >>> m = np.percentile(a, 50, axis=0) 

4108 >>> out = np.zeros_like(m) 

4109 >>> np.percentile(a, 50, axis=0, out=out) 

4110 array([6.5, 4.5, 2.5]) 

4111 >>> m 

4112 array([6.5, 4.5, 2.5]) 

4113 

4114 >>> b = a.copy() 

4115 >>> np.percentile(b, 50, axis=1, overwrite_input=True) 

4116 array([7., 2.]) 

4117 >>> assert not np.all(a == b) 

4118 

4119 The different methods can be visualized graphically: 

4120 

4121 .. plot:: 

4122 

4123 import matplotlib.pyplot as plt 

4124 

4125 a = np.arange(4) 

4126 p = np.linspace(0, 100, 6001) 

4127 ax = plt.gca() 

4128 lines = [ 

4129 ('linear', '-', 'C0'), 

4130 ('inverted_cdf', ':', 'C1'), 

4131 # Almost the same as `inverted_cdf`: 

4132 ('averaged_inverted_cdf', '-.', 'C1'), 

4133 ('closest_observation', ':', 'C2'), 

4134 ('interpolated_inverted_cdf', '--', 'C1'), 

4135 ('hazen', '--', 'C3'), 

4136 ('weibull', '-.', 'C4'), 

4137 ('median_unbiased', '--', 'C5'), 

4138 ('normal_unbiased', '-.', 'C6'), 

4139 ] 

4140 for method, style, color in lines: 

4141 ax.plot( 

4142 p, np.percentile(a, p, method=method), 

4143 label=method, linestyle=style, color=color) 

4144 ax.set( 

4145 title='Percentiles for different methods and data: ' + str(a), 

4146 xlabel='Percentile', 

4147 ylabel='Estimated percentile value', 

4148 yticks=a) 

4149 ax.legend() 

4150 plt.show() 

4151 

4152 References 

4153 ---------- 

4154 .. [1] R. J. Hyndman and Y. Fan, 

4155 "Sample quantiles in statistical packages," 

4156 The American Statistician, 50(4), pp. 361-365, 1996 

4157 

4158 """ 

4159 if interpolation is not None: 

4160 method = _check_interpolation_as_method( 

4161 method, interpolation, "percentile") 

4162 q = np.true_divide(q, 100) 

4163 q = asanyarray(q) # undo any decay that the ufunc performed (see gh-13105) 

4164 if not _quantile_is_valid(q): 

4165 raise ValueError("Percentiles must be in the range [0, 100]") 

4166 return _quantile_unchecked( 

4167 a, q, axis, out, overwrite_input, method, keepdims) 

4168 

4169 

4170def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, 

4171 method=None, keepdims=None, *, interpolation=None): 

4172 return (a, q, out) 

4173 

4174 

4175@array_function_dispatch(_quantile_dispatcher) 

4176def quantile(a, 

4177 q, 

4178 axis=None, 

4179 out=None, 

4180 overwrite_input=False, 

4181 method="linear", 

4182 keepdims=False, 

4183 *, 

4184 interpolation=None): 

4185 """ 

4186 Compute the q-th quantile of the data along the specified axis. 

4187 

4188 .. versionadded:: 1.15.0 

4189 

4190 Parameters 

4191 ---------- 

4192 a : array_like 

4193 Input array or object that can be converted to an array. 

4194 q : array_like of float 

4195 Quantile or sequence of quantiles to compute, which must be between 

4196 0 and 1 inclusive. 

4197 axis : {int, tuple of int, None}, optional 

4198 Axis or axes along which the quantiles are computed. The default is 

4199 to compute the quantile(s) along a flattened version of the array. 

4200 out : ndarray, optional 

4201 Alternative output array in which to place the result. It must have 

4202 the same shape and buffer length as the expected output, but the 

4203 type (of the output) will be cast if necessary. 

4204 overwrite_input : bool, optional 

4205 If True, then allow the input array `a` to be modified by 

4206 intermediate calculations, to save memory. In this case, the 

4207 contents of the input `a` after this function completes is 

4208 undefined. 

4209 method : str, optional 

4210 This parameter specifies the method to use for estimating the 

4211 quantile. There are many different methods, some unique to NumPy. 

4212 See the notes for explanation. The options sorted by their R type 

4213 as summarized in the H&F paper [1]_ are: 

4214 

4215 1. 'inverted_cdf' 

4216 2. 'averaged_inverted_cdf' 

4217 3. 'closest_observation' 

4218 4. 'interpolated_inverted_cdf' 

4219 5. 'hazen' 

4220 6. 'weibull' 

4221 7. 'linear' (default) 

4222 8. 'median_unbiased' 

4223 9. 'normal_unbiased' 

4224 

4225 The first three methods are discontinuous. NumPy further defines the 

4226 following discontinuous variations of the default 'linear' (7.) option: 

4227 

4228 * 'lower' 

4229 * 'higher', 

4230 * 'midpoint' 

4231 * 'nearest' 

4232 

4233 .. versionchanged:: 1.22.0 

4234 This argument was previously called "interpolation" and only 

4235 offered the "linear" default and last four options. 

4236 

4237 keepdims : bool, optional 

4238 If this is set to True, the axes which are reduced are left in 

4239 the result as dimensions with size one. With this option, the 

4240 result will broadcast correctly against the original array `a`. 

4241 

4242 interpolation : str, optional 

4243 Deprecated name for the method keyword argument. 

4244 

4245 .. deprecated:: 1.22.0 

4246 

4247 Returns 

4248 ------- 

4249 quantile : scalar or ndarray 

4250 If `q` is a single quantile and `axis=None`, then the result 

4251 is a scalar. If multiple quantiles are given, first axis of 

4252 the result corresponds to the quantiles. The other axes are 

4253 the axes that remain after the reduction of `a`. If the input 

4254 contains integers or floats smaller than ``float64``, the output 

4255 data-type is ``float64``. Otherwise, the output data-type is the 

4256 same as that of the input. If `out` is specified, that array is 

4257 returned instead. 

4258 

4259 See Also 

4260 -------- 

4261 mean 

4262 percentile : equivalent to quantile, but with q in the range [0, 100]. 

4263 median : equivalent to ``quantile(..., 0.5)`` 

4264 nanquantile 

4265 

4266 Notes 

4267 ----- 

4268 Given a vector ``V`` of length ``N``, the q-th quantile of ``V`` is the 

4269 value ``q`` of the way from the minimum to the maximum in a sorted copy of 

4270 ``V``. The values and distances of the two nearest neighbors as well as the 

4271 `method` parameter will determine the quantile if the normalized 

4272 ranking does not match the location of ``q`` exactly. This function is the 

4273 same as the median if ``q=0.5``, the same as the minimum if ``q=0.0`` and 

4274 the same as the maximum if ``q=1.0``. 

4275 

4276 The optional `method` parameter specifies the method to use when the 

4277 desired quantile lies between two data points ``i < j``. 

4278 If ``g`` is the fractional part of the index surrounded by ``i`` and ``j``, 

4279 and alpha and beta are correction constants modifying i and j: 

4280 

4281 .. math:: 

4282 i + g = (q - alpha) / ( n - alpha - beta + 1 ) 

4283 

4284 The different methods then work as follows 

4285 

4286 inverted_cdf: 

4287 method 1 of H&F [1]_. 

4288 This method gives discontinuous results: 

4289 

4290 * if g > 0 ; then take j 

4291 * if g = 0 ; then take i 

4292 

4293 averaged_inverted_cdf: 

4294 method 2 of H&F [1]_. 

4295 This method gives discontinuous results: 

4296 

4297 * if g > 0 ; then take j 

4298 * if g = 0 ; then average between bounds 

4299 

4300 closest_observation: 

4301 method 3 of H&F [1]_. 

4302 This method gives discontinuous results: 

4303 

4304 * if g > 0 ; then take j 

4305 * if g = 0 and index is odd ; then take j 

4306 * if g = 0 and index is even ; then take i 

4307 

4308 interpolated_inverted_cdf: 

4309 method 4 of H&F [1]_. 

4310 This method gives continuous results using: 

4311 

4312 * alpha = 0 

4313 * beta = 1 

4314 

4315 hazen: 

4316 method 5 of H&F [1]_. 

4317 This method gives continuous results using: 

4318 

4319 * alpha = 1/2 

4320 * beta = 1/2 

4321 

4322 weibull: 

4323 method 6 of H&F [1]_. 

4324 This method gives continuous results using: 

4325 

4326 * alpha = 0 

4327 * beta = 0 

4328 

4329 linear: 

4330 method 7 of H&F [1]_. 

4331 This method gives continuous results using: 

4332 

4333 * alpha = 1 

4334 * beta = 1 

4335 

4336 median_unbiased: 

4337 method 8 of H&F [1]_. 

4338 This method is probably the best method if the sample 

4339 distribution function is unknown (see reference). 

4340 This method gives continuous results using: 

4341 

4342 * alpha = 1/3 

4343 * beta = 1/3 

4344 

4345 normal_unbiased: 

4346 method 9 of H&F [1]_. 

4347 This method is probably the best method if the sample 

4348 distribution function is known to be normal. 

4349 This method gives continuous results using: 

4350 

4351 * alpha = 3/8 

4352 * beta = 3/8 

4353 

4354 lower: 

4355 NumPy method kept for backwards compatibility. 

4356 Takes ``i`` as the interpolation point. 

4357 

4358 higher: 

4359 NumPy method kept for backwards compatibility. 

4360 Takes ``j`` as the interpolation point. 

4361 

4362 nearest: 

4363 NumPy method kept for backwards compatibility. 

4364 Takes ``i`` or ``j``, whichever is nearest. 

4365 

4366 midpoint: 

4367 NumPy method kept for backwards compatibility. 

4368 Uses ``(i + j) / 2``. 

4369 

4370 Examples 

4371 -------- 

4372 >>> a = np.array([[10, 7, 4], [3, 2, 1]]) 

4373 >>> a 

4374 array([[10, 7, 4], 

4375 [ 3, 2, 1]]) 

4376 >>> np.quantile(a, 0.5) 

4377 3.5 

4378 >>> np.quantile(a, 0.5, axis=0) 

4379 array([6.5, 4.5, 2.5]) 

4380 >>> np.quantile(a, 0.5, axis=1) 

4381 array([7., 2.]) 

4382 >>> np.quantile(a, 0.5, axis=1, keepdims=True) 

4383 array([[7.], 

4384 [2.]]) 

4385 >>> m = np.quantile(a, 0.5, axis=0) 

4386 >>> out = np.zeros_like(m) 

4387 >>> np.quantile(a, 0.5, axis=0, out=out) 

4388 array([6.5, 4.5, 2.5]) 

4389 >>> m 

4390 array([6.5, 4.5, 2.5]) 

4391 >>> b = a.copy() 

4392 >>> np.quantile(b, 0.5, axis=1, overwrite_input=True) 

4393 array([7., 2.]) 

4394 >>> assert not np.all(a == b) 

4395 

4396 See also `numpy.percentile` for a visualization of most methods. 

4397 

4398 References 

4399 ---------- 

4400 .. [1] R. J. Hyndman and Y. Fan, 

4401 "Sample quantiles in statistical packages," 

4402 The American Statistician, 50(4), pp. 361-365, 1996 

4403 

4404 """ 

4405 if interpolation is not None: 

4406 method = _check_interpolation_as_method( 

4407 method, interpolation, "quantile") 

4408 

4409 q = np.asanyarray(q) 

4410 if not _quantile_is_valid(q): 

4411 raise ValueError("Quantiles must be in the range [0, 1]") 

4412 return _quantile_unchecked( 

4413 a, q, axis, out, overwrite_input, method, keepdims) 

4414 

4415 

4416def _quantile_unchecked(a, 

4417 q, 

4418 axis=None, 

4419 out=None, 

4420 overwrite_input=False, 

4421 method="linear", 

4422 keepdims=False): 

4423 """Assumes that q is in [0, 1], and is an ndarray""" 

4424 r, k = _ureduce(a, 

4425 func=_quantile_ureduce_func, 

4426 q=q, 

4427 axis=axis, 

4428 out=out, 

4429 overwrite_input=overwrite_input, 

4430 method=method) 

4431 if keepdims: 

4432 return r.reshape(q.shape + k) 

4433 else: 

4434 return r 

4435 

4436 

4437def _quantile_is_valid(q): 

4438 # avoid expensive reductions, relevant for arrays with < O(1000) elements 

4439 if q.ndim == 1 and q.size < 10: 

4440 for i in range(q.size): 

4441 if not (0.0 <= q[i] <= 1.0): 

4442 return False 

4443 else: 

4444 if not (np.all(0 <= q) and np.all(q <= 1)): 

4445 return False 

4446 return True 

4447 

4448 

4449def _check_interpolation_as_method(method, interpolation, fname): 

4450 # Deprecated NumPy 1.22, 2021-11-08 

4451 warnings.warn( 

4452 f"the `interpolation=` argument to {fname} was renamed to " 

4453 "`method=`, which has additional options.\n" 

4454 "Users of the modes 'nearest', 'lower', 'higher', or " 

4455 "'midpoint' are encouraged to review the method they used. " 

4456 "(Deprecated NumPy 1.22)", 

4457 DeprecationWarning, stacklevel=4) 

4458 if method != "linear": 

4459 # sanity check, we assume this basically never happens 

4460 raise TypeError( 

4461 "You shall not pass both `method` and `interpolation`!\n" 

4462 "(`interpolation` is Deprecated in favor of `method`)") 

4463 return interpolation 

4464 

4465 

4466def _compute_virtual_index(n, quantiles, alpha: float, beta: float): 

4467 """ 

4468 Compute the floating point indexes of an array for the linear 

4469 interpolation of quantiles. 

4470 n : array_like 

4471 The sample sizes. 

4472 quantiles : array_like 

4473 The quantiles values. 

4474 alpha : float 

4475 A constant used to correct the index computed. 

4476 beta : float 

4477 A constant used to correct the index computed. 

4478 

4479 alpha and beta values depend on the chosen method 

4480 (see quantile documentation) 

4481 

4482 Reference: 

4483 Hyndman&Fan paper "Sample Quantiles in Statistical Packages", 

4484 DOI: 10.1080/00031305.1996.10473566 

4485 """ 

4486 return n * quantiles + ( 

4487 alpha + quantiles * (1 - alpha - beta) 

4488 ) - 1 

4489 

4490 

4491def _get_gamma(virtual_indexes, previous_indexes, method): 

4492 """ 

4493 Compute gamma (a.k.a 'm' or 'weight') for the linear interpolation 

4494 of quantiles. 

4495 

4496 virtual_indexes : array_like 

4497 The indexes where the percentile is supposed to be found in the sorted 

4498 sample. 

4499 previous_indexes : array_like 

4500 The floor values of virtual_indexes. 

4501 interpolation : dict 

4502 The interpolation method chosen, which may have a specific rule 

4503 modifying gamma. 

4504 

4505 gamma is usually the fractional part of virtual_indexes but can be modified 

4506 by the interpolation method. 

4507 """ 

4508 gamma = np.asanyarray(virtual_indexes - previous_indexes) 

4509 gamma = method["fix_gamma"](gamma, virtual_indexes) 

4510 return np.asanyarray(gamma) 

4511 

4512 

4513def _lerp(a, b, t, out=None): 

4514 """ 

4515 Compute the linear interpolation weighted by gamma on each point of 

4516 two same shape array. 

4517 

4518 a : array_like 

4519 Left bound. 

4520 b : array_like 

4521 Right bound. 

4522 t : array_like 

4523 The interpolation weight. 

4524 out : array_like 

4525 Output array. 

4526 """ 

4527 diff_b_a = subtract(b, a) 

4528 # asanyarray is a stop-gap until gh-13105 

4529 lerp_interpolation = asanyarray(add(a, diff_b_a * t, out=out)) 

4530 subtract(b, diff_b_a * (1 - t), out=lerp_interpolation, where=t >= 0.5) 

4531 if lerp_interpolation.ndim == 0 and out is None: 

4532 lerp_interpolation = lerp_interpolation[()] # unpack 0d arrays 

4533 return lerp_interpolation 

4534 

4535 

4536def _get_gamma_mask(shape, default_value, conditioned_value, where): 

4537 out = np.full(shape, default_value) 

4538 np.copyto(out, conditioned_value, where=where, casting="unsafe") 

4539 return out 

4540 

4541 

4542def _discret_interpolation_to_boundaries(index, gamma_condition_fun): 

4543 previous = np.floor(index) 

4544 next = previous + 1 

4545 gamma = index - previous 

4546 res = _get_gamma_mask(shape=index.shape, 

4547 default_value=next, 

4548 conditioned_value=previous, 

4549 where=gamma_condition_fun(gamma, index) 

4550 ).astype(np.intp) 

4551 # Some methods can lead to out-of-bound integers, clip them: 

4552 res[res < 0] = 0 

4553 return res 

4554 

4555 

4556def _closest_observation(n, quantiles): 

4557 gamma_fun = lambda gamma, index: (gamma == 0) & (np.floor(index) % 2 == 0) 

4558 return _discret_interpolation_to_boundaries((n * quantiles) - 1 - 0.5, 

4559 gamma_fun) 

4560 

4561 

4562def _inverted_cdf(n, quantiles): 

4563 gamma_fun = lambda gamma, _: (gamma == 0) 

4564 return _discret_interpolation_to_boundaries((n * quantiles) - 1, 

4565 gamma_fun) 

4566 

4567 

4568def _quantile_ureduce_func( 

4569 a: np.array, 

4570 q: np.array, 

4571 axis: int = None, 

4572 out=None, 

4573 overwrite_input: bool = False, 

4574 method="linear", 

4575) -> np.array: 

4576 if q.ndim > 2: 

4577 # The code below works fine for nd, but it might not have useful 

4578 # semantics. For now, keep the supported dimensions the same as it was 

4579 # before. 

4580 raise ValueError("q must be a scalar or 1d") 

4581 if overwrite_input: 

4582 if axis is None: 

4583 axis = 0 

4584 arr = a.ravel() 

4585 else: 

4586 arr = a 

4587 else: 

4588 if axis is None: 

4589 axis = 0 

4590 arr = a.flatten() 

4591 else: 

4592 arr = a.copy() 

4593 result = _quantile(arr, 

4594 quantiles=q, 

4595 axis=axis, 

4596 method=method, 

4597 out=out) 

4598 return result 

4599 

4600 

4601def _get_indexes(arr, virtual_indexes, valid_values_count): 

4602 """ 

4603 Get the valid indexes of arr neighbouring virtual_indexes. 

4604 Note 

4605 This is a companion function to linear interpolation of 

4606 Quantiles 

4607 

4608 Returns 

4609 ------- 

4610 (previous_indexes, next_indexes): Tuple 

4611 A Tuple of virtual_indexes neighbouring indexes 

4612 """ 

4613 previous_indexes = np.asanyarray(np.floor(virtual_indexes)) 

4614 next_indexes = np.asanyarray(previous_indexes + 1) 

4615 indexes_above_bounds = virtual_indexes >= valid_values_count - 1 

4616 # When indexes is above max index, take the max value of the array 

4617 if indexes_above_bounds.any(): 

4618 previous_indexes[indexes_above_bounds] = -1 

4619 next_indexes[indexes_above_bounds] = -1 

4620 # When indexes is below min index, take the min value of the array 

4621 indexes_below_bounds = virtual_indexes < 0 

4622 if indexes_below_bounds.any(): 

4623 previous_indexes[indexes_below_bounds] = 0 

4624 next_indexes[indexes_below_bounds] = 0 

4625 if np.issubdtype(arr.dtype, np.inexact): 

4626 # After the sort, slices having NaNs will have for last element a NaN 

4627 virtual_indexes_nans = np.isnan(virtual_indexes) 

4628 if virtual_indexes_nans.any(): 

4629 previous_indexes[virtual_indexes_nans] = -1 

4630 next_indexes[virtual_indexes_nans] = -1 

4631 previous_indexes = previous_indexes.astype(np.intp) 

4632 next_indexes = next_indexes.astype(np.intp) 

4633 return previous_indexes, next_indexes 

4634 

4635 

4636def _quantile( 

4637 arr: np.array, 

4638 quantiles: np.array, 

4639 axis: int = -1, 

4640 method="linear", 

4641 out=None, 

4642): 

4643 """ 

4644 Private function that doesn't support extended axis or keepdims. 

4645 These methods are extended to this function using _ureduce 

4646 See nanpercentile for parameter usage 

4647 It computes the quantiles of the array for the given axis. 

4648 A linear interpolation is performed based on the `interpolation`. 

4649 

4650 By default, the method is "linear" where alpha == beta == 1 which 

4651 performs the 7th method of Hyndman&Fan. 

4652 With "median_unbiased" we get alpha == beta == 1/3 

4653 thus the 8th method of Hyndman&Fan. 

4654 """ 

4655 # --- Setup 

4656 arr = np.asanyarray(arr) 

4657 values_count = arr.shape[axis] 

4658 # The dimensions of `q` are prepended to the output shape, so we need the 

4659 # axis being sampled from `arr` to be last. 

4660 DATA_AXIS = 0 

4661 if axis != DATA_AXIS: # But moveaxis is slow, so only call it if axis!=0. 

4662 arr = np.moveaxis(arr, axis, destination=DATA_AXIS) 

4663 # --- Computation of indexes 

4664 # Index where to find the value in the sorted array. 

4665 # Virtual because it is a floating point value, not an valid index. 

4666 # The nearest neighbours are used for interpolation 

4667 try: 

4668 method = _QuantileMethods[method] 

4669 except KeyError: 

4670 raise ValueError( 

4671 f"{method!r} is not a valid method. Use one of: " 

4672 f"{_QuantileMethods.keys()}") from None 

4673 virtual_indexes = method["get_virtual_index"](values_count, quantiles) 

4674 virtual_indexes = np.asanyarray(virtual_indexes) 

4675 if np.issubdtype(virtual_indexes.dtype, np.integer): 

4676 # No interpolation needed, take the points along axis 

4677 if np.issubdtype(arr.dtype, np.inexact): 

4678 # may contain nan, which would sort to the end 

4679 arr.partition(concatenate((virtual_indexes.ravel(), [-1])), axis=0) 

4680 slices_having_nans = np.isnan(arr[-1]) 

4681 else: 

4682 # cannot contain nan 

4683 arr.partition(virtual_indexes.ravel(), axis=0) 

4684 slices_having_nans = np.array(False, dtype=bool) 

4685 result = take(arr, virtual_indexes, axis=0, out=out) 

4686 else: 

4687 previous_indexes, next_indexes = _get_indexes(arr, 

4688 virtual_indexes, 

4689 values_count) 

4690 # --- Sorting 

4691 arr.partition( 

4692 np.unique(np.concatenate(([0, -1], 

4693 previous_indexes.ravel(), 

4694 next_indexes.ravel(), 

4695 ))), 

4696 axis=DATA_AXIS) 

4697 if np.issubdtype(arr.dtype, np.inexact): 

4698 slices_having_nans = np.isnan( 

4699 take(arr, indices=-1, axis=DATA_AXIS) 

4700 ) 

4701 else: 

4702 slices_having_nans = None 

4703 # --- Get values from indexes 

4704 previous = np.take(arr, previous_indexes, axis=DATA_AXIS) 

4705 next = np.take(arr, next_indexes, axis=DATA_AXIS) 

4706 # --- Linear interpolation 

4707 gamma = _get_gamma(virtual_indexes, previous_indexes, method) 

4708 result_shape = virtual_indexes.shape + (1,) * (arr.ndim - 1) 

4709 gamma = gamma.reshape(result_shape) 

4710 result = _lerp(previous, 

4711 next, 

4712 gamma, 

4713 out=out) 

4714 if np.any(slices_having_nans): 

4715 if result.ndim == 0 and out is None: 

4716 # can't write to a scalar 

4717 result = arr.dtype.type(np.nan) 

4718 else: 

4719 result[..., slices_having_nans] = np.nan 

4720 return result 

4721 

4722 

4723def _trapz_dispatcher(y, x=None, dx=None, axis=None): 

4724 return (y, x) 

4725 

4726 

4727@array_function_dispatch(_trapz_dispatcher) 

4728def trapz(y, x=None, dx=1.0, axis=-1): 

4729 r""" 

4730 Integrate along the given axis using the composite trapezoidal rule. 

4731 

4732 If `x` is provided, the integration happens in sequence along its 

4733 elements - they are not sorted. 

4734 

4735 Integrate `y` (`x`) along each 1d slice on the given axis, compute 

4736 :math:`\int y(x) dx`. 

4737 When `x` is specified, this integrates along the parametric curve, 

4738 computing :math:`\int_t y(t) dt = 

4739 \int_t y(t) \left.\frac{dx}{dt}\right|_{x=x(t)} dt`. 

4740 

4741 Parameters 

4742 ---------- 

4743 y : array_like 

4744 Input array to integrate. 

4745 x : array_like, optional 

4746 The sample points corresponding to the `y` values. If `x` is None, 

4747 the sample points are assumed to be evenly spaced `dx` apart. The 

4748 default is None. 

4749 dx : scalar, optional 

4750 The spacing between sample points when `x` is None. The default is 1. 

4751 axis : int, optional 

4752 The axis along which to integrate. 

4753 

4754 Returns 

4755 ------- 

4756 trapz : float or ndarray 

4757 Definite integral of `y` = n-dimensional array as approximated along 

4758 a single axis by the trapezoidal rule. If `y` is a 1-dimensional array, 

4759 then the result is a float. If `n` is greater than 1, then the result 

4760 is an `n`-1 dimensional array. 

4761 

4762 See Also 

4763 -------- 

4764 sum, cumsum 

4765 

4766 Notes 

4767 ----- 

4768 Image [2]_ illustrates trapezoidal rule -- y-axis locations of points 

4769 will be taken from `y` array, by default x-axis distances between 

4770 points will be 1.0, alternatively they can be provided with `x` array 

4771 or with `dx` scalar. Return value will be equal to combined area under 

4772 the red lines. 

4773 

4774 

4775 References 

4776 ---------- 

4777 .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule 

4778 

4779 .. [2] Illustration image: 

4780 https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png 

4781 

4782 Examples 

4783 -------- 

4784 >>> np.trapz([1,2,3]) 

4785 4.0 

4786 >>> np.trapz([1,2,3], x=[4,6,8]) 

4787 8.0 

4788 >>> np.trapz([1,2,3], dx=2) 

4789 8.0 

4790 

4791 Using a decreasing `x` corresponds to integrating in reverse: 

4792 

4793 >>> np.trapz([1,2,3], x=[8,6,4]) 

4794 -8.0 

4795 

4796 More generally `x` is used to integrate along a parametric curve. 

4797 This finds the area of a circle, noting we repeat the sample which closes 

4798 the curve: 

4799 

4800 >>> theta = np.linspace(0, 2 * np.pi, num=1000, endpoint=True) 

4801 >>> np.trapz(np.cos(theta), x=np.sin(theta)) 

4802 3.141571941375841 

4803 

4804 >>> a = np.arange(6).reshape(2, 3) 

4805 >>> a 

4806 array([[0, 1, 2], 

4807 [3, 4, 5]]) 

4808 >>> np.trapz(a, axis=0) 

4809 array([1.5, 2.5, 3.5]) 

4810 >>> np.trapz(a, axis=1) 

4811 array([2., 8.]) 

4812 """ 

4813 y = asanyarray(y) 

4814 if x is None: 

4815 d = dx 

4816 else: 

4817 x = asanyarray(x) 

4818 if x.ndim == 1: 

4819 d = diff(x) 

4820 # reshape to correct shape 

4821 shape = [1]*y.ndim 

4822 shape[axis] = d.shape[0] 

4823 d = d.reshape(shape) 

4824 else: 

4825 d = diff(x, axis=axis) 

4826 nd = y.ndim 

4827 slice1 = [slice(None)]*nd 

4828 slice2 = [slice(None)]*nd 

4829 slice1[axis] = slice(1, None) 

4830 slice2[axis] = slice(None, -1) 

4831 try: 

4832 ret = (d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0).sum(axis) 

4833 except ValueError: 

4834 # Operations didn't work, cast to ndarray 

4835 d = np.asarray(d) 

4836 y = np.asarray(y) 

4837 ret = add.reduce(d * (y[tuple(slice1)]+y[tuple(slice2)])/2.0, axis) 

4838 return ret 

4839 

4840 

4841def _meshgrid_dispatcher(*xi, copy=None, sparse=None, indexing=None): 

4842 return xi 

4843 

4844 

4845# Based on scitools meshgrid 

4846@array_function_dispatch(_meshgrid_dispatcher) 

4847def meshgrid(*xi, copy=True, sparse=False, indexing='xy'): 

4848 """ 

4849 Return coordinate matrices from coordinate vectors. 

4850 

4851 Make N-D coordinate arrays for vectorized evaluations of 

4852 N-D scalar/vector fields over N-D grids, given 

4853 one-dimensional coordinate arrays x1, x2,..., xn. 

4854 

4855 .. versionchanged:: 1.9 

4856 1-D and 0-D cases are allowed. 

4857 

4858 Parameters 

4859 ---------- 

4860 x1, x2,..., xn : array_like 

4861 1-D arrays representing the coordinates of a grid. 

4862 indexing : {'xy', 'ij'}, optional 

4863 Cartesian ('xy', default) or matrix ('ij') indexing of output. 

4864 See Notes for more details. 

4865 

4866 .. versionadded:: 1.7.0 

4867 sparse : bool, optional 

4868 If True the shape of the returned coordinate array for dimension *i* 

4869 is reduced from ``(N1, ..., Ni, ... Nn)`` to 

4870 ``(1, ..., 1, Ni, 1, ..., 1)``. These sparse coordinate grids are 

4871 intended to be use with :ref:`basics.broadcasting`. When all 

4872 coordinates are used in an expression, broadcasting still leads to a 

4873 fully-dimensonal result array. 

4874 

4875 Default is False. 

4876 

4877 .. versionadded:: 1.7.0 

4878 copy : bool, optional 

4879 If False, a view into the original arrays are returned in order to 

4880 conserve memory. Default is True. Please note that 

4881 ``sparse=False, copy=False`` will likely return non-contiguous 

4882 arrays. Furthermore, more than one element of a broadcast array 

4883 may refer to a single memory location. If you need to write to the 

4884 arrays, make copies first. 

4885 

4886 .. versionadded:: 1.7.0 

4887 

4888 Returns 

4889 ------- 

4890 X1, X2,..., XN : ndarray 

4891 For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``, 

4892 returns ``(N1, N2, N3,..., Nn)`` shaped arrays if indexing='ij' 

4893 or ``(N2, N1, N3,..., Nn)`` shaped arrays if indexing='xy' 

4894 with the elements of `xi` repeated to fill the matrix along 

4895 the first dimension for `x1`, the second for `x2` and so on. 

4896 

4897 Notes 

4898 ----- 

4899 This function supports both indexing conventions through the indexing 

4900 keyword argument. Giving the string 'ij' returns a meshgrid with 

4901 matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing. 

4902 In the 2-D case with inputs of length M and N, the outputs are of shape 

4903 (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case 

4904 with inputs of length M, N and P, outputs are of shape (N, M, P) for 

4905 'xy' indexing and (M, N, P) for 'ij' indexing. The difference is 

4906 illustrated by the following code snippet:: 

4907 

4908 xv, yv = np.meshgrid(x, y, indexing='ij') 

4909 for i in range(nx): 

4910 for j in range(ny): 

4911 # treat xv[i,j], yv[i,j] 

4912 

4913 xv, yv = np.meshgrid(x, y, indexing='xy') 

4914 for i in range(nx): 

4915 for j in range(ny): 

4916 # treat xv[j,i], yv[j,i] 

4917 

4918 In the 1-D and 0-D case, the indexing and sparse keywords have no effect. 

4919 

4920 See Also 

4921 -------- 

4922 mgrid : Construct a multi-dimensional "meshgrid" using indexing notation. 

4923 ogrid : Construct an open multi-dimensional "meshgrid" using indexing 

4924 notation. 

4925 

4926 Examples 

4927 -------- 

4928 >>> nx, ny = (3, 2) 

4929 >>> x = np.linspace(0, 1, nx) 

4930 >>> y = np.linspace(0, 1, ny) 

4931 >>> xv, yv = np.meshgrid(x, y) 

4932 >>> xv 

4933 array([[0. , 0.5, 1. ], 

4934 [0. , 0.5, 1. ]]) 

4935 >>> yv 

4936 array([[0., 0., 0.], 

4937 [1., 1., 1.]]) 

4938 >>> xv, yv = np.meshgrid(x, y, sparse=True) # make sparse output arrays 

4939 >>> xv 

4940 array([[0. , 0.5, 1. ]]) 

4941 >>> yv 

4942 array([[0.], 

4943 [1.]]) 

4944 

4945 `meshgrid` is very useful to evaluate functions on a grid. If the 

4946 function depends on all coordinates, you can use the parameter 

4947 ``sparse=True`` to save memory and computation time. 

4948 

4949 >>> x = np.linspace(-5, 5, 101) 

4950 >>> y = np.linspace(-5, 5, 101) 

4951 >>> # full coordinate arrays 

4952 >>> xx, yy = np.meshgrid(x, y) 

4953 >>> zz = np.sqrt(xx**2 + yy**2) 

4954 >>> xx.shape, yy.shape, zz.shape 

4955 ((101, 101), (101, 101), (101, 101)) 

4956 >>> # sparse coordinate arrays 

4957 >>> xs, ys = np.meshgrid(x, y, sparse=True) 

4958 >>> zs = np.sqrt(xs**2 + ys**2) 

4959 >>> xs.shape, ys.shape, zs.shape 

4960 ((1, 101), (101, 1), (101, 101)) 

4961 >>> np.array_equal(zz, zs) 

4962 True 

4963 

4964 >>> import matplotlib.pyplot as plt 

4965 >>> h = plt.contourf(x, y, zs) 

4966 >>> plt.axis('scaled') 

4967 >>> plt.colorbar() 

4968 >>> plt.show() 

4969 """ 

4970 ndim = len(xi) 

4971 

4972 if indexing not in ['xy', 'ij']: 

4973 raise ValueError( 

4974 "Valid values for `indexing` are 'xy' and 'ij'.") 

4975 

4976 s0 = (1,) * ndim 

4977 output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1:]) 

4978 for i, x in enumerate(xi)] 

4979 

4980 if indexing == 'xy' and ndim > 1: 

4981 # switch first and second axis 

4982 output[0].shape = (1, -1) + s0[2:] 

4983 output[1].shape = (-1, 1) + s0[2:] 

4984 

4985 if not sparse: 

4986 # Return the full N-D matrix (not only the 1-D vector) 

4987 output = np.broadcast_arrays(*output, subok=True) 

4988 

4989 if copy: 

4990 output = [x.copy() for x in output] 

4991 

4992 return output 

4993 

4994 

4995def _delete_dispatcher(arr, obj, axis=None): 

4996 return (arr, obj) 

4997 

4998 

4999@array_function_dispatch(_delete_dispatcher) 

5000def delete(arr, obj, axis=None): 

5001 """ 

5002 Return a new array with sub-arrays along an axis deleted. For a one 

5003 dimensional array, this returns those entries not returned by 

5004 `arr[obj]`. 

5005 

5006 Parameters 

5007 ---------- 

5008 arr : array_like 

5009 Input array. 

5010 obj : slice, int or array of ints 

5011 Indicate indices of sub-arrays to remove along the specified axis. 

5012 

5013 .. versionchanged:: 1.19.0 

5014 Boolean indices are now treated as a mask of elements to remove, 

5015 rather than being cast to the integers 0 and 1. 

5016 

5017 axis : int, optional 

5018 The axis along which to delete the subarray defined by `obj`. 

5019 If `axis` is None, `obj` is applied to the flattened array. 

5020 

5021 Returns 

5022 ------- 

5023 out : ndarray 

5024 A copy of `arr` with the elements specified by `obj` removed. Note 

5025 that `delete` does not occur in-place. If `axis` is None, `out` is 

5026 a flattened array. 

5027 

5028 See Also 

5029 -------- 

5030 insert : Insert elements into an array. 

5031 append : Append elements at the end of an array. 

5032 

5033 Notes 

5034 ----- 

5035 Often it is preferable to use a boolean mask. For example: 

5036 

5037 >>> arr = np.arange(12) + 1 

5038 >>> mask = np.ones(len(arr), dtype=bool) 

5039 >>> mask[[0,2,4]] = False 

5040 >>> result = arr[mask,...] 

5041 

5042 Is equivalent to ``np.delete(arr, [0,2,4], axis=0)``, but allows further 

5043 use of `mask`. 

5044 

5045 Examples 

5046 -------- 

5047 >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]]) 

5048 >>> arr 

5049 array([[ 1, 2, 3, 4], 

5050 [ 5, 6, 7, 8], 

5051 [ 9, 10, 11, 12]]) 

5052 >>> np.delete(arr, 1, 0) 

5053 array([[ 1, 2, 3, 4], 

5054 [ 9, 10, 11, 12]]) 

5055 

5056 >>> np.delete(arr, np.s_[::2], 1) 

5057 array([[ 2, 4], 

5058 [ 6, 8], 

5059 [10, 12]]) 

5060 >>> np.delete(arr, [1,3,5], None) 

5061 array([ 1, 3, 5, 7, 8, 9, 10, 11, 12]) 

5062 

5063 """ 

5064 wrap = None 

5065 if type(arr) is not ndarray: 

5066 try: 

5067 wrap = arr.__array_wrap__ 

5068 except AttributeError: 

5069 pass 

5070 

5071 arr = asarray(arr) 

5072 ndim = arr.ndim 

5073 arrorder = 'F' if arr.flags.fnc else 'C' 

5074 if axis is None: 

5075 if ndim != 1: 

5076 arr = arr.ravel() 

5077 # needed for np.matrix, which is still not 1d after being ravelled 

5078 ndim = arr.ndim 

5079 axis = ndim - 1 

5080 else: 

5081 axis = normalize_axis_index(axis, ndim) 

5082 

5083 slobj = [slice(None)]*ndim 

5084 N = arr.shape[axis] 

5085 newshape = list(arr.shape) 

5086 

5087 if isinstance(obj, slice): 

5088 start, stop, step = obj.indices(N) 

5089 xr = range(start, stop, step) 

5090 numtodel = len(xr) 

5091 

5092 if numtodel <= 0: 

5093 if wrap: 

5094 return wrap(arr.copy(order=arrorder)) 

5095 else: 

5096 return arr.copy(order=arrorder) 

5097 

5098 # Invert if step is negative: 

5099 if step < 0: 

5100 step = -step 

5101 start = xr[-1] 

5102 stop = xr[0] + 1 

5103 

5104 newshape[axis] -= numtodel 

5105 new = empty(newshape, arr.dtype, arrorder) 

5106 # copy initial chunk 

5107 if start == 0: 

5108 pass 

5109 else: 

5110 slobj[axis] = slice(None, start) 

5111 new[tuple(slobj)] = arr[tuple(slobj)] 

5112 # copy end chunk 

5113 if stop == N: 

5114 pass 

5115 else: 

5116 slobj[axis] = slice(stop-numtodel, None) 

5117 slobj2 = [slice(None)]*ndim 

5118 slobj2[axis] = slice(stop, None) 

5119 new[tuple(slobj)] = arr[tuple(slobj2)] 

5120 # copy middle pieces 

5121 if step == 1: 

5122 pass 

5123 else: # use array indexing. 

5124 keep = ones(stop-start, dtype=bool) 

5125 keep[:stop-start:step] = False 

5126 slobj[axis] = slice(start, stop-numtodel) 

5127 slobj2 = [slice(None)]*ndim 

5128 slobj2[axis] = slice(start, stop) 

5129 arr = arr[tuple(slobj2)] 

5130 slobj2[axis] = keep 

5131 new[tuple(slobj)] = arr[tuple(slobj2)] 

5132 if wrap: 

5133 return wrap(new) 

5134 else: 

5135 return new 

5136 

5137 if isinstance(obj, (int, integer)) and not isinstance(obj, bool): 

5138 single_value = True 

5139 else: 

5140 single_value = False 

5141 _obj = obj 

5142 obj = np.asarray(obj) 

5143 # `size == 0` to allow empty lists similar to indexing, but (as there) 

5144 # is really too generic: 

5145 if obj.size == 0 and not isinstance(_obj, np.ndarray): 

5146 obj = obj.astype(intp) 

5147 elif obj.size == 1 and obj.dtype.kind in "ui": 

5148 # For a size 1 integer array we can use the single-value path 

5149 # (most dtypes, except boolean, should just fail later). 

5150 obj = obj.item() 

5151 single_value = True 

5152 

5153 if single_value: 

5154 # optimization for a single value 

5155 if (obj < -N or obj >= N): 

5156 raise IndexError( 

5157 "index %i is out of bounds for axis %i with " 

5158 "size %i" % (obj, axis, N)) 

5159 if (obj < 0): 

5160 obj += N 

5161 newshape[axis] -= 1 

5162 new = empty(newshape, arr.dtype, arrorder) 

5163 slobj[axis] = slice(None, obj) 

5164 new[tuple(slobj)] = arr[tuple(slobj)] 

5165 slobj[axis] = slice(obj, None) 

5166 slobj2 = [slice(None)]*ndim 

5167 slobj2[axis] = slice(obj+1, None) 

5168 new[tuple(slobj)] = arr[tuple(slobj2)] 

5169 else: 

5170 if obj.dtype == bool: 

5171 if obj.shape != (N,): 

5172 raise ValueError('boolean array argument obj to delete ' 

5173 'must be one dimensional and match the axis ' 

5174 'length of {}'.format(N)) 

5175 

5176 # optimization, the other branch is slower 

5177 keep = ~obj 

5178 else: 

5179 keep = ones(N, dtype=bool) 

5180 keep[obj,] = False 

5181 

5182 slobj[axis] = keep 

5183 new = arr[tuple(slobj)] 

5184 

5185 if wrap: 

5186 return wrap(new) 

5187 else: 

5188 return new 

5189 

5190 

5191def _insert_dispatcher(arr, obj, values, axis=None): 

5192 return (arr, obj, values) 

5193 

5194 

5195@array_function_dispatch(_insert_dispatcher) 

5196def insert(arr, obj, values, axis=None): 

5197 """ 

5198 Insert values along the given axis before the given indices. 

5199 

5200 Parameters 

5201 ---------- 

5202 arr : array_like 

5203 Input array. 

5204 obj : int, slice or sequence of ints 

5205 Object that defines the index or indices before which `values` is 

5206 inserted. 

5207 

5208 .. versionadded:: 1.8.0 

5209 

5210 Support for multiple insertions when `obj` is a single scalar or a 

5211 sequence with one element (similar to calling insert multiple 

5212 times). 

5213 values : array_like 

5214 Values to insert into `arr`. If the type of `values` is different 

5215 from that of `arr`, `values` is converted to the type of `arr`. 

5216 `values` should be shaped so that ``arr[...,obj,...] = values`` 

5217 is legal. 

5218 axis : int, optional 

5219 Axis along which to insert `values`. If `axis` is None then `arr` 

5220 is flattened first. 

5221 

5222 Returns 

5223 ------- 

5224 out : ndarray 

5225 A copy of `arr` with `values` inserted. Note that `insert` 

5226 does not occur in-place: a new array is returned. If 

5227 `axis` is None, `out` is a flattened array. 

5228 

5229 See Also 

5230 -------- 

5231 append : Append elements at the end of an array. 

5232 concatenate : Join a sequence of arrays along an existing axis. 

5233 delete : Delete elements from an array. 

5234 

5235 Notes 

5236 ----- 

5237 Note that for higher dimensional inserts ``obj=0`` behaves very different 

5238 from ``obj=[0]`` just like ``arr[:,0,:] = values`` is different from 

5239 ``arr[:,[0],:] = values``. 

5240 

5241 Examples 

5242 -------- 

5243 >>> a = np.array([[1, 1], [2, 2], [3, 3]]) 

5244 >>> a 

5245 array([[1, 1], 

5246 [2, 2], 

5247 [3, 3]]) 

5248 >>> np.insert(a, 1, 5) 

5249 array([1, 5, 1, ..., 2, 3, 3]) 

5250 >>> np.insert(a, 1, 5, axis=1) 

5251 array([[1, 5, 1], 

5252 [2, 5, 2], 

5253 [3, 5, 3]]) 

5254 

5255 Difference between sequence and scalars: 

5256 

5257 >>> np.insert(a, [1], [[1],[2],[3]], axis=1) 

5258 array([[1, 1, 1], 

5259 [2, 2, 2], 

5260 [3, 3, 3]]) 

5261 >>> np.array_equal(np.insert(a, 1, [1, 2, 3], axis=1), 

5262 ... np.insert(a, [1], [[1],[2],[3]], axis=1)) 

5263 True 

5264 

5265 >>> b = a.flatten() 

5266 >>> b 

5267 array([1, 1, 2, 2, 3, 3]) 

5268 >>> np.insert(b, [2, 2], [5, 6]) 

5269 array([1, 1, 5, ..., 2, 3, 3]) 

5270 

5271 >>> np.insert(b, slice(2, 4), [5, 6]) 

5272 array([1, 1, 5, ..., 2, 3, 3]) 

5273 

5274 >>> np.insert(b, [2, 2], [7.13, False]) # type casting 

5275 array([1, 1, 7, ..., 2, 3, 3]) 

5276 

5277 >>> x = np.arange(8).reshape(2, 4) 

5278 >>> idx = (1, 3) 

5279 >>> np.insert(x, idx, 999, axis=1) 

5280 array([[ 0, 999, 1, 2, 999, 3], 

5281 [ 4, 999, 5, 6, 999, 7]]) 

5282 

5283 """ 

5284 wrap = None 

5285 if type(arr) is not ndarray: 

5286 try: 

5287 wrap = arr.__array_wrap__ 

5288 except AttributeError: 

5289 pass 

5290 

5291 arr = asarray(arr) 

5292 ndim = arr.ndim 

5293 arrorder = 'F' if arr.flags.fnc else 'C' 

5294 if axis is None: 

5295 if ndim != 1: 

5296 arr = arr.ravel() 

5297 # needed for np.matrix, which is still not 1d after being ravelled 

5298 ndim = arr.ndim 

5299 axis = ndim - 1 

5300 else: 

5301 axis = normalize_axis_index(axis, ndim) 

5302 slobj = [slice(None)]*ndim 

5303 N = arr.shape[axis] 

5304 newshape = list(arr.shape) 

5305 

5306 if isinstance(obj, slice): 

5307 # turn it into a range object 

5308 indices = arange(*obj.indices(N), dtype=intp) 

5309 else: 

5310 # need to copy obj, because indices will be changed in-place 

5311 indices = np.array(obj) 

5312 if indices.dtype == bool: 

5313 # See also delete 

5314 # 2012-10-11, NumPy 1.8 

5315 warnings.warn( 

5316 "in the future insert will treat boolean arrays and " 

5317 "array-likes as a boolean index instead of casting it to " 

5318 "integer", FutureWarning, stacklevel=3) 

5319 indices = indices.astype(intp) 

5320 # Code after warning period: 

5321 #if obj.ndim != 1: 

5322 # raise ValueError('boolean array argument obj to insert ' 

5323 # 'must be one dimensional') 

5324 #indices = np.flatnonzero(obj) 

5325 elif indices.ndim > 1: 

5326 raise ValueError( 

5327 "index array argument obj to insert must be one dimensional " 

5328 "or scalar") 

5329 if indices.size == 1: 

5330 index = indices.item() 

5331 if index < -N or index > N: 

5332 raise IndexError(f"index {obj} is out of bounds for axis {axis} " 

5333 f"with size {N}") 

5334 if (index < 0): 

5335 index += N 

5336 

5337 # There are some object array corner cases here, but we cannot avoid 

5338 # that: 

5339 values = array(values, copy=False, ndmin=arr.ndim, dtype=arr.dtype) 

5340 if indices.ndim == 0: 

5341 # broadcasting is very different here, since a[:,0,:] = ... behaves 

5342 # very different from a[:,[0],:] = ...! This changes values so that 

5343 # it works likes the second case. (here a[:,0:1,:]) 

5344 values = np.moveaxis(values, 0, axis) 

5345 numnew = values.shape[axis] 

5346 newshape[axis] += numnew 

5347 new = empty(newshape, arr.dtype, arrorder) 

5348 slobj[axis] = slice(None, index) 

5349 new[tuple(slobj)] = arr[tuple(slobj)] 

5350 slobj[axis] = slice(index, index+numnew) 

5351 new[tuple(slobj)] = values 

5352 slobj[axis] = slice(index+numnew, None) 

5353 slobj2 = [slice(None)] * ndim 

5354 slobj2[axis] = slice(index, None) 

5355 new[tuple(slobj)] = arr[tuple(slobj2)] 

5356 if wrap: 

5357 return wrap(new) 

5358 return new 

5359 elif indices.size == 0 and not isinstance(obj, np.ndarray): 

5360 # Can safely cast the empty list to intp 

5361 indices = indices.astype(intp) 

5362 

5363 indices[indices < 0] += N 

5364 

5365 numnew = len(indices) 

5366 order = indices.argsort(kind='mergesort') # stable sort 

5367 indices[order] += np.arange(numnew) 

5368 

5369 newshape[axis] += numnew 

5370 old_mask = ones(newshape[axis], dtype=bool) 

5371 old_mask[indices] = False 

5372 

5373 new = empty(newshape, arr.dtype, arrorder) 

5374 slobj2 = [slice(None)]*ndim 

5375 slobj[axis] = indices 

5376 slobj2[axis] = old_mask 

5377 new[tuple(slobj)] = values 

5378 new[tuple(slobj2)] = arr 

5379 

5380 if wrap: 

5381 return wrap(new) 

5382 return new 

5383 

5384 

5385def _append_dispatcher(arr, values, axis=None): 

5386 return (arr, values) 

5387 

5388 

5389@array_function_dispatch(_append_dispatcher) 

5390def append(arr, values, axis=None): 

5391 """ 

5392 Append values to the end of an array. 

5393 

5394 Parameters 

5395 ---------- 

5396 arr : array_like 

5397 Values are appended to a copy of this array. 

5398 values : array_like 

5399 These values are appended to a copy of `arr`. It must be of the 

5400 correct shape (the same shape as `arr`, excluding `axis`). If 

5401 `axis` is not specified, `values` can be any shape and will be 

5402 flattened before use. 

5403 axis : int, optional 

5404 The axis along which `values` are appended. If `axis` is not 

5405 given, both `arr` and `values` are flattened before use. 

5406 

5407 Returns 

5408 ------- 

5409 append : ndarray 

5410 A copy of `arr` with `values` appended to `axis`. Note that 

5411 `append` does not occur in-place: a new array is allocated and 

5412 filled. If `axis` is None, `out` is a flattened array. 

5413 

5414 See Also 

5415 -------- 

5416 insert : Insert elements into an array. 

5417 delete : Delete elements from an array. 

5418 

5419 Examples 

5420 -------- 

5421 >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) 

5422 array([1, 2, 3, ..., 7, 8, 9]) 

5423 

5424 When `axis` is specified, `values` must have the correct shape. 

5425 

5426 >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0) 

5427 array([[1, 2, 3], 

5428 [4, 5, 6], 

5429 [7, 8, 9]]) 

5430 >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0) 

5431 Traceback (most recent call last): 

5432 ... 

5433 ValueError: all the input arrays must have same number of dimensions, but 

5434 the array at index 0 has 2 dimension(s) and the array at index 1 has 1 

5435 dimension(s) 

5436 

5437 """ 

5438 arr = asanyarray(arr) 

5439 if axis is None: 

5440 if arr.ndim != 1: 

5441 arr = arr.ravel() 

5442 values = ravel(values) 

5443 axis = arr.ndim-1 

5444 return concatenate((arr, values), axis=axis) 

5445 

5446 

5447def _digitize_dispatcher(x, bins, right=None): 

5448 return (x, bins) 

5449 

5450 

5451@array_function_dispatch(_digitize_dispatcher) 

5452def digitize(x, bins, right=False): 

5453 """ 

5454 Return the indices of the bins to which each value in input array belongs. 

5455 

5456 ========= ============= ============================ 

5457 `right` order of bins returned index `i` satisfies 

5458 ========= ============= ============================ 

5459 ``False`` increasing ``bins[i-1] <= x < bins[i]`` 

5460 ``True`` increasing ``bins[i-1] < x <= bins[i]`` 

5461 ``False`` decreasing ``bins[i-1] > x >= bins[i]`` 

5462 ``True`` decreasing ``bins[i-1] >= x > bins[i]`` 

5463 ========= ============= ============================ 

5464 

5465 If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is 

5466 returned as appropriate. 

5467 

5468 Parameters 

5469 ---------- 

5470 x : array_like 

5471 Input array to be binned. Prior to NumPy 1.10.0, this array had to 

5472 be 1-dimensional, but can now have any shape. 

5473 bins : array_like 

5474 Array of bins. It has to be 1-dimensional and monotonic. 

5475 right : bool, optional 

5476 Indicating whether the intervals include the right or the left bin 

5477 edge. Default behavior is (right==False) indicating that the interval 

5478 does not include the right edge. The left bin end is open in this 

5479 case, i.e., bins[i-1] <= x < bins[i] is the default behavior for 

5480 monotonically increasing bins. 

5481 

5482 Returns 

5483 ------- 

5484 indices : ndarray of ints 

5485 Output array of indices, of same shape as `x`. 

5486 

5487 Raises 

5488 ------ 

5489 ValueError 

5490 If `bins` is not monotonic. 

5491 TypeError 

5492 If the type of the input is complex. 

5493 

5494 See Also 

5495 -------- 

5496 bincount, histogram, unique, searchsorted 

5497 

5498 Notes 

5499 ----- 

5500 If values in `x` are such that they fall outside the bin range, 

5501 attempting to index `bins` with the indices that `digitize` returns 

5502 will result in an IndexError. 

5503 

5504 .. versionadded:: 1.10.0 

5505 

5506 `np.digitize` is implemented in terms of `np.searchsorted`. This means 

5507 that a binary search is used to bin the values, which scales much better 

5508 for larger number of bins than the previous linear search. It also removes 

5509 the requirement for the input array to be 1-dimensional. 

5510 

5511 For monotonically _increasing_ `bins`, the following are equivalent:: 

5512 

5513 np.digitize(x, bins, right=True) 

5514 np.searchsorted(bins, x, side='left') 

5515 

5516 Note that as the order of the arguments are reversed, the side must be too. 

5517 The `searchsorted` call is marginally faster, as it does not do any 

5518 monotonicity checks. Perhaps more importantly, it supports all dtypes. 

5519 

5520 Examples 

5521 -------- 

5522 >>> x = np.array([0.2, 6.4, 3.0, 1.6]) 

5523 >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0]) 

5524 >>> inds = np.digitize(x, bins) 

5525 >>> inds 

5526 array([1, 4, 3, 2]) 

5527 >>> for n in range(x.size): 

5528 ... print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]]) 

5529 ... 

5530 0.0 <= 0.2 < 1.0 

5531 4.0 <= 6.4 < 10.0 

5532 2.5 <= 3.0 < 4.0 

5533 1.0 <= 1.6 < 2.5 

5534 

5535 >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.]) 

5536 >>> bins = np.array([0, 5, 10, 15, 20]) 

5537 >>> np.digitize(x,bins,right=True) 

5538 array([1, 2, 3, 4, 4]) 

5539 >>> np.digitize(x,bins,right=False) 

5540 array([1, 3, 3, 4, 5]) 

5541 """ 

5542 x = _nx.asarray(x) 

5543 bins = _nx.asarray(bins) 

5544 

5545 # here for compatibility, searchsorted below is happy to take this 

5546 if np.issubdtype(x.dtype, _nx.complexfloating): 

5547 raise TypeError("x may not be complex") 

5548 

5549 mono = _monotonicity(bins) 

5550 if mono == 0: 

5551 raise ValueError("bins must be monotonically increasing or decreasing") 

5552 

5553 # this is backwards because the arguments below are swapped 

5554 side = 'left' if right else 'right' 

5555 if mono == -1: 

5556 # reverse the bins, and invert the results 

5557 return len(bins) - _nx.searchsorted(bins[::-1], x, side=side) 

5558 else: 

5559 return _nx.searchsorted(bins, x, side=side)