Coverage for /var/srv/projects/api.amasfac.comuna18.com/tmp/venv/lib/python3.9/site-packages/pandas/core/series.py: 25%

1115 statements  

« prev     ^ index     » next       coverage.py v6.4.4, created at 2023-07-17 14:22 -0600

1""" 

2Data structure for 1-dimensional cross-sectional and time series data 

3""" 

4from __future__ import annotations 

5 

6from textwrap import dedent 

7from typing import ( 

8 IO, 

9 TYPE_CHECKING, 

10 Any, 

11 Callable, 

12 Hashable, 

13 Iterable, 

14 Literal, 

15 Mapping, 

16 Sequence, 

17 Union, 

18 cast, 

19 overload, 

20) 

21import warnings 

22import weakref 

23 

24import numpy as np 

25 

26from pandas._config import get_option 

27 

28from pandas._libs import ( 

29 lib, 

30 properties, 

31 reshape, 

32 tslibs, 

33) 

34from pandas._libs.lib import no_default 

35from pandas._typing import ( 

36 AggFuncType, 

37 AnyArrayLike, 

38 ArrayLike, 

39 Axis, 

40 Dtype, 

41 DtypeObj, 

42 FilePath, 

43 FillnaOptions, 

44 Frequency, 

45 IgnoreRaise, 

46 IndexKeyFunc, 

47 IndexLabel, 

48 Level, 

49 NaPosition, 

50 QuantileInterpolation, 

51 Renamer, 

52 SingleManager, 

53 SortKind, 

54 StorageOptions, 

55 TimedeltaConvertibleTypes, 

56 TimestampConvertibleTypes, 

57 ValueKeyFunc, 

58 WriteBuffer, 

59 npt, 

60) 

61from pandas.compat.numpy import function as nv 

62from pandas.errors import InvalidIndexError 

63from pandas.util._decorators import ( 

64 Appender, 

65 Substitution, 

66 deprecate_kwarg, 

67 deprecate_nonkeyword_arguments, 

68 doc, 

69) 

70from pandas.util._exceptions import find_stack_level 

71from pandas.util._validators import ( 

72 validate_ascending, 

73 validate_bool_kwarg, 

74 validate_percentile, 

75) 

76 

77from pandas.core.dtypes.cast import ( 

78 LossySetitemError, 

79 convert_dtypes, 

80 maybe_box_native, 

81 maybe_cast_pointwise_result, 

82) 

83from pandas.core.dtypes.common import ( 

84 ensure_platform_int, 

85 is_dict_like, 

86 is_integer, 

87 is_iterator, 

88 is_list_like, 

89 is_numeric_dtype, 

90 is_object_dtype, 

91 is_scalar, 

92 pandas_dtype, 

93 validate_all_hashable, 

94) 

95from pandas.core.dtypes.generic import ABCDataFrame 

96from pandas.core.dtypes.inference import is_hashable 

97from pandas.core.dtypes.missing import ( 

98 isna, 

99 na_value_for_dtype, 

100 notna, 

101 remove_na_arraylike, 

102) 

103 

104from pandas.core import ( 

105 algorithms, 

106 base, 

107 common as com, 

108 missing, 

109 nanops, 

110 ops, 

111) 

112from pandas.core.accessor import CachedAccessor 

113from pandas.core.apply import SeriesApply 

114from pandas.core.arrays import ExtensionArray 

115from pandas.core.arrays.categorical import CategoricalAccessor 

116from pandas.core.arrays.sparse import SparseAccessor 

117from pandas.core.construction import ( 

118 create_series_with_explicit_dtype, 

119 extract_array, 

120 is_empty_data, 

121 sanitize_array, 

122) 

123from pandas.core.generic import NDFrame 

124from pandas.core.indexers import ( 

125 deprecate_ndim_indexing, 

126 unpack_1tuple, 

127) 

128from pandas.core.indexes.accessors import CombinedDatetimelikeProperties 

129from pandas.core.indexes.api import ( 

130 CategoricalIndex, 

131 DatetimeIndex, 

132 Float64Index, 

133 Index, 

134 MultiIndex, 

135 PeriodIndex, 

136 TimedeltaIndex, 

137 default_index, 

138 ensure_index, 

139) 

140import pandas.core.indexes.base as ibase 

141from pandas.core.indexing import ( 

142 check_bool_indexer, 

143 check_deprecated_indexers, 

144) 

145from pandas.core.internals import ( 

146 SingleArrayManager, 

147 SingleBlockManager, 

148) 

149from pandas.core.shared_docs import _shared_docs 

150from pandas.core.sorting import ( 

151 ensure_key_mapped, 

152 nargsort, 

153) 

154from pandas.core.strings import StringMethods 

155from pandas.core.tools.datetimes import to_datetime 

156 

157import pandas.io.formats.format as fmt 

158from pandas.io.formats.info import ( 

159 INFO_DOCSTRING, 

160 SeriesInfo, 

161 series_sub_kwargs, 

162) 

163import pandas.plotting 

164 

165if TYPE_CHECKING: 165 ↛ 166line 165 didn't jump to line 166, because the condition on line 165 was never true

166 from pandas._typing import ( 

167 NumpySorter, 

168 NumpyValueArrayLike, 

169 Suffixes, 

170 ) 

171 

172 from pandas.core.frame import DataFrame 

173 from pandas.core.groupby.generic import SeriesGroupBy 

174 from pandas.core.resample import Resampler 

175 

# Public API of this module.
__all__ = ["Series"]

# Keyword fragments interpolated into docstrings via the @Substitution/@doc
# decorators; keys must match the placeholders used in pandas.core.shared_docs.
_shared_doc_kwargs = {
    "axes": "index",
    "klass": "Series",
    "axes_single_arg": "{0 or 'index'}",
    "axis": """axis : {0 or 'index'}
        Unused. Parameter needed for compatibility with DataFrame.""",
    "inplace": """inplace : bool, default False
        If True, performs operation inplace and returns None.""",
    "unique": "np.ndarray",
    "duplicated": "Series",
    "optional_by": "",
    "optional_mapper": "",
    "optional_labels": "",
    "optional_axis": "",
    "replace_iloc": """
    This differs from updating with ``.loc`` or ``.iloc``, which require
    you to specify a location to update with some value.""",
}

196 

197 

198def _coerce_method(converter): 

199 """ 

200 Install the scalar coercion methods. 

201 """ 

202 

203 def wrapper(self): 

204 if len(self) == 1: 

205 return converter(self.iloc[0]) 

206 raise TypeError(f"cannot convert the series to {converter}") 

207 

208 wrapper.__name__ = f"__{converter.__name__}__" 

209 return wrapper 

210 

211 

212# ---------------------------------------------------------------------- 

213# Series class 

214 

215 

class Series(base.IndexOpsMixin, NDFrame):
    """
    One-dimensional ndarray with axis labels (including time series).

    Labels need not be unique but must be a hashable type. The object
    supports both integer- and label-based indexing and provides a host of
    methods for performing operations involving the index. Statistical
    methods from ndarray have been overridden to automatically exclude
    missing data (currently represented as NaN).

    Operations between Series (+, -, /, \\*, \\*\\*) align values based on their
    associated index values-- they need not be the same length. The result
    index will be the sorted union of the two indexes.

    Parameters
    ----------
    data : array-like, Iterable, dict, or scalar value
        Contains data stored in Series. If data is a dict, argument order is
        maintained.
    index : array-like or Index (1d)
        Values must be hashable and have the same length as `data`.
        Non-unique index values are allowed. Will default to
        RangeIndex (0, 1, 2, ..., n) if not provided. If data is dict-like
        and index is None, then the keys in the data are used as the index. If the
        index is not None, the resulting Series is reindexed with the index values.
    dtype : str, numpy.dtype, or ExtensionDtype, optional
        Data type for the output Series. If not specified, this will be
        inferred from `data`.
        See the :ref:`user guide <basics.dtypes>` for more usages.
    name : str, optional
        The name to give to the Series.
    copy : bool, default False
        Copy input data. Only affects Series or 1d ndarray input. See examples.

    Notes
    -----
    Please reference the :ref:`User Guide <basics.series>` for more information.

    Examples
    --------
    Constructing Series from a dictionary with an Index specified

    >>> d = {'a': 1, 'b': 2, 'c': 3}
    >>> ser = pd.Series(data=d, index=['a', 'b', 'c'])
    >>> ser
    a   1
    b   2
    c   3
    dtype: int64

    The keys of the dictionary match with the Index values, hence the Index
    values have no effect.

    >>> d = {'a': 1, 'b': 2, 'c': 3}
    >>> ser = pd.Series(data=d, index=['x', 'y', 'z'])
    >>> ser
    x   NaN
    y   NaN
    z   NaN
    dtype: float64

    Note that the Index is first built with the keys from the dictionary.
    After this the Series is reindexed with the given Index values, hence we
    get all NaN as a result.

    Constructing Series from a list with `copy=False`.

    >>> r = [1, 2]
    >>> ser = pd.Series(r, copy=False)
    >>> ser.iloc[0] = 999
    >>> r
    [1, 2]
    >>> ser
    0    999
    1      2
    dtype: int64

    Due to input data type the Series has a `copy` of
    the original data even though `copy=False`, so
    the data is unchanged.

    Constructing Series from a 1d ndarray with `copy=False`.

    >>> r = np.array([1, 2])
    >>> ser = pd.Series(r, copy=False)
    >>> ser.iloc[0] = 999
    >>> r
    array([999,   2])
    >>> ser
    0    999
    1      2
    dtype: int64

    Due to input data type the Series has a `view` on
    the original data, so
    the data is changed as well.
    """

    # Internal type tag used by the ABC* generic-type checks.
    _typ = "series"
    # Types accepted alongside Series in __array_ufunc__-style dispatch.
    _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray)

    # Backing attribute for the ``name`` property; set via object.__setattr__.
    _name: Hashable
    # Attributes propagated by __finalize__ to results of operations.
    _metadata: list[str] = ["name"]
    # "index" is handled by NDFrame machinery, not stored as a regular attribute.
    _internal_names_set = {"index"} | NDFrame._internal_names_set
    # Namespaces exposed as CachedAccessor attributes (registered below).
    _accessors = {"dt", "cat", "str", "sparse"}
    # Names excluded from dir()/tab-completion; "compress"/"ptp" are removed
    # legacy ndarray-compat methods.
    _hidden_attrs = (
        base.IndexOpsMixin._hidden_attrs
        | NDFrame._hidden_attrs
        | frozenset(["compress", "ptp"])
    )

    # Override cache_readonly bc Series is mutable
    # error: Incompatible types in assignment (expression has type "property",
    # base class "IndexOpsMixin" defined the type as "Callable[[IndexOpsMixin], bool]")
    hasnans = property(  # type: ignore[assignment]
        # error: "Callable[[IndexOpsMixin], bool]" has no attribute "fget"
        base.IndexOpsMixin.hasnans.fget,  # type: ignore[attr-defined]
        doc=base.IndexOpsMixin.hasnans.__doc__,
    )
    # Single-column block/array manager holding the data.
    _mgr: SingleManager
    # Arithmetic methods attached dynamically elsewhere; declared for typing.
    div: Callable[[Series, Any], Series]
    rdiv: Callable[[Series, Any], Series]

338 

339 # ---------------------------------------------------------------------- 

340 # Constructors 

341 

    def __init__(
        self,
        data=None,
        index=None,
        dtype: Dtype | None = None,
        name=None,
        copy: bool = False,
        fastpath: bool = False,
    ) -> None:
        # Construction dispatches on the type of ``data``; each branch
        # normalizes ``data`` into a Single{Block,Array}Manager before the
        # final NDFrame.__init__ call at the bottom.

        # Fastest path: already a manager and no reindex/astype/copy requested.
        if (
            isinstance(data, (SingleBlockManager, SingleArrayManager))
            and index is None
            and dtype is None
            and copy is False
        ):
            # GH#33357 called with just the SingleBlockManager
            NDFrame.__init__(self, data)
            if fastpath:
                # e.g. from _box_col_values, skip validation of name
                object.__setattr__(self, "_name", name)
            else:
                self.name = name
            return

        # we are called internally, so short-circuit
        if fastpath:

            # data is an ndarray, index is defined
            if not isinstance(data, (SingleBlockManager, SingleArrayManager)):
                manager = get_option("mode.data_manager")
                if manager == "block":
                    data = SingleBlockManager.from_array(data, index)
                elif manager == "array":
                    data = SingleArrayManager.from_array(data, index)
            if copy:
                data = data.copy()
            if index is None:
                index = data.index

        else:
            # Full validation path for user-supplied inputs.

            name = ibase.maybe_extract_name(name, data, type(self))

            if is_empty_data(data) and dtype is None:
                # gh-17261
                warnings.warn(
                    "The default dtype for empty Series will be 'object' instead "
                    "of 'float64' in a future version. Specify a dtype explicitly "
                    "to silence this warning.",
                    FutureWarning,
                    stacklevel=find_stack_level(),
                )
                # uncomment the line below when removing the FutureWarning
                # dtype = np.dtype(object)

            if index is not None:
                index = ensure_index(index)

            if data is None:
                data = {}
            if dtype is not None:
                dtype = self._validate_dtype(dtype)

            # Per-type normalization of ``data``.
            if isinstance(data, MultiIndex):
                raise NotImplementedError(
                    "initializing a Series from a MultiIndex is not supported"
                )
            elif isinstance(data, Index):

                if dtype is not None:
                    # astype copies
                    data = data.astype(dtype)
                else:
                    # GH#24096 we need to ensure the index remains immutable
                    data = data._values.copy()
                copy = False

            elif isinstance(data, np.ndarray):
                if len(data.dtype):
                    # GH#13296 we are dealing with a compound dtype, which
                    # should be treated as 2D
                    raise ValueError(
                        "Cannot construct a Series from an ndarray with "
                        "compound dtype. Use DataFrame instead."
                    )
            elif isinstance(data, Series):
                if index is None:
                    index = data.index
                else:
                    # Align the incoming Series to the requested index.
                    data = data.reindex(index, copy=copy)
                    copy = False
                data = data._mgr
            elif is_dict_like(data):
                data, index = self._init_dict(data, index, dtype)
                # _init_dict already applied dtype and copied; reset both flags.
                dtype = None
                copy = False
            elif isinstance(data, (SingleBlockManager, SingleArrayManager)):
                if index is None:
                    index = data.index
                elif not data.index.equals(index) or copy:
                    # GH#19275 SingleBlockManager input should only be called
                    # internally
                    raise AssertionError(
                        "Cannot pass both SingleBlockManager "
                        "`data` argument and a different "
                        "`index` argument. `copy` must be False."
                    )

            elif isinstance(data, ExtensionArray):
                pass
            else:
                # e.g. generators, sets; materialize so we can take len().
                data = com.maybe_iterable_to_list(data)

            if index is None:
                if not is_list_like(data):
                    # Scalar: wrap so a length-1 default index can be built.
                    data = [data]
                index = default_index(len(data))
            elif is_list_like(data):
                com.require_length_match(data, index)

        # create/copy the manager
        if isinstance(data, (SingleBlockManager, SingleArrayManager)):
            if dtype is not None:
                data = data.astype(dtype=dtype, errors="ignore", copy=copy)
            elif copy:
                data = data.copy()
        else:
            data = sanitize_array(data, index, dtype, copy)

            manager = get_option("mode.data_manager")
            if manager == "block":
                data = SingleBlockManager.from_array(data, index)
            elif manager == "array":
                data = SingleArrayManager.from_array(data, index)

        NDFrame.__init__(self, data)
        if fastpath:
            # skips validation of the name
            object.__setattr__(self, "_name", name)
        else:
            self.name = name
        self._set_axis(0, index)

485 

    def _init_dict(
        self, data, index: Index | None = None, dtype: DtypeObj | None = None
    ):
        """
        Derive the "_mgr" and "index" attributes of a new Series from a
        dictionary input.

        Parameters
        ----------
        data : dict or dict-like
            Data used to populate the new Series.
        index : Index or None, default None
            Index for the new Series: if None, use dict keys.
        dtype : np.dtype, ExtensionDtype, or None, default None
            The dtype for the new Series: if None, infer from data.

        Returns
        -------
        _data : BlockManager for the new Series
        index : index for the new Series
        """
        keys: Index | tuple

        # Looking for NaN in dict doesn't work ({np.nan : 1}[float('nan')]
        # raises KeyError), so we iterate the entire dict, and align
        if data:
            # GH#34717: extract keys and values directly rather than via
            # zip()/generators, which measurably hurt performance here.
            keys = tuple(data.keys())
            values = list(data.values())  # list() of values is the fast path
        elif index is not None:
            # fastpath for Series(data=None). Just use broadcasting a scalar
            # instead of reindexing.
            values = na_value_for_dtype(pandas_dtype(dtype), compat=False)
            keys = index
        else:
            keys, values = (), []

        # Input is now list-like, so rely on "standard" construction:

        # TODO: passing np.float64 to not break anything yet. See GH-17261
        s = create_series_with_explicit_dtype(
            # error: Argument "index" to "create_series_with_explicit_dtype" has
            # incompatible type "Tuple[Any, ...]"; expected "Union[ExtensionArray,
            # ndarray, Index, None]"
            values,
            index=keys,  # type: ignore[arg-type]
            dtype=dtype,
            dtype_if_empty=np.float64,
        )

        # Now we just make sure the order is respected, if any
        if data and index is not None:
            s = s.reindex(index, copy=False)
        return s._mgr, s.index

543 

544 # ---------------------------------------------------------------------- 

545 

    @property
    def _constructor(self) -> Callable[..., Series]:
        """
        Callable used internally to build a result with the same
        dimensionality as this object (always the Series class itself).
        """
        return Series

549 

    @property
    def _constructor_expanddim(self) -> Callable[..., DataFrame]:
        """
        Used when a manipulation result has one higher dimension than the
        original, such as Series.to_frame()
        """
        # Imported here rather than at module level, presumably to avoid a
        # circular import (frame.py imports from this module) — the top-level
        # DataFrame import is guarded by TYPE_CHECKING.
        from pandas.core.frame import DataFrame

        return DataFrame

559 

560 # types 

    @property
    def _can_hold_na(self) -> bool:
        """Whether the backing manager's dtype can hold missing values."""
        # Delegated to the single-column manager.
        return self._mgr._can_hold_na

564 

    def _set_axis(self, axis: int, labels: AnyArrayLike | list) -> None:
        """
        Override generic, we want to set the _typ here.

        This is called from the cython code when we set the `index` attribute
        directly, e.g. `series.index = [1, 2, 3]`.
        """
        labels = ensure_index(labels)

        # If every label looks like a date, try upgrading the index to a
        # DatetimeIndex — but skip non-numpy (extension) dtypes on a plain
        # Index, e.g. timestamp[ns][pyarrow], which must not be cast.
        if labels._is_all_dates and not (
            type(labels) is Index and not isinstance(labels.dtype, np.dtype)
        ):
            # exclude e.g. timestamp[ns][pyarrow] dtype from this casting
            deep_labels = labels
            if isinstance(labels, CategoricalIndex):
                # Inspect the categories, not the codes.
                deep_labels = labels.categories

            if not isinstance(
                deep_labels, (DatetimeIndex, PeriodIndex, TimedeltaIndex)
            ):
                try:
                    labels = DatetimeIndex(labels)
                except (tslibs.OutOfBoundsDatetime, ValueError):
                    # labels may exceed datetime bounds,
                    # or not be a DatetimeIndex
                    pass

        # The ensure_index call above ensures we have an Index object
        self._mgr.set_axis(axis, labels)

594 

595 # ndarray compatibility 

    @property
    def dtype(self) -> DtypeObj:
        """
        Return the dtype object of the underlying data.

        See Also
        --------
        Series.dtypes : Alias of this attribute for DataFrame compatibility.
        """
        # Delegated to the single-column manager.
        return self._mgr.dtype

602 

    @property
    def dtypes(self) -> DtypeObj:
        """
        Return the dtype object of the underlying data.

        Alias of :attr:`Series.dtype`, provided so Series and DataFrame code
        can be written uniformly.
        """
        # DataFrame compatibility
        return self.dtype

610 

    @property
    def name(self) -> Hashable:
        """
        Return the name of the Series.

        The name of a Series becomes its index or column name if it is used
        to form a DataFrame. It is also used whenever displaying the Series
        using the interpreter.

        Returns
        -------
        label (hashable object)
            The name of the Series, also the column name if part of a DataFrame.

        See Also
        --------
        Series.rename : Sets the Series name when given a scalar input.
        Index.name : Corresponding Index property.

        Examples
        --------
        The Series name can be set initially when calling the constructor.

        >>> s = pd.Series([1, 2, 3], dtype=np.int64, name='Numbers')
        >>> s
        0    1
        1    2
        2    3
        Name: Numbers, dtype: int64
        >>> s.name = "Integers"
        >>> s
        0    1
        1    2
        2    3
        Name: Integers, dtype: int64

        The name of a Series within a DataFrame is its column name.

        >>> df = pd.DataFrame([[1, 2], [3, 4], [5, 6]],
        ...                   columns=["Odd Numbers", "Even Numbers"])
        >>> df
           Odd Numbers  Even Numbers
        0            1             2
        1            3             4
        2            5             6
        >>> df["Even Numbers"].name
        'Even Numbers'
        """
        # _name is assigned (after hashability validation) by the setter below.
        return self._name

660 

661 @name.setter 

662 def name(self, value: Hashable) -> None: 

663 validate_all_hashable(value, error_name=f"{type(self).__name__}.name") 

664 object.__setattr__(self, "_name", value) 

665 

    @property
    def values(self):
        """
        Return Series as ndarray or ndarray-like depending on the dtype.

        .. warning::

           We recommend using :attr:`Series.array` or
           :meth:`Series.to_numpy`, depending on whether you need
           a reference to the underlying data or a NumPy array.

        Returns
        -------
        numpy.ndarray or ndarray-like

        See Also
        --------
        Series.array : Reference to the underlying data.
        Series.to_numpy : A NumPy array representing the underlying data.

        Examples
        --------
        >>> pd.Series([1, 2, 3]).values
        array([1, 2, 3])

        >>> pd.Series(list('aabc')).values
        array(['a', 'a', 'b', 'c'], dtype=object)

        >>> pd.Series(list('aabc')).astype('category').values
        ['a', 'a', 'b', 'c']
        Categories (3, object): ['a', 'b', 'c']

        Timezone aware datetime data is converted to UTC:

        >>> pd.Series(pd.date_range('20130101', periods=3,
        ...                         tz='US/Eastern')).values
        array(['2013-01-01T05:00:00.000000000',
               '2013-01-02T05:00:00.000000000',
               '2013-01-03T05:00:00.000000000'], dtype='datetime64[ns]')
        """
        # The manager decides the exact return type; see the table in the
        # _values docstring for how this differs from _values/.array.
        return self._mgr.external_values()

707 

    @property
    def _values(self):
        """
        Return the internal repr of this data (defined by Block.interval_values).
        These are the values as stored in the Block (ndarray or ExtensionArray
        depending on the Block class), with datetime64[ns] and timedelta64[ns]
        wrapped in ExtensionArrays to match Index._values behavior.

        Differs from the public ``.values`` for certain data types, because of
        historical backwards compatibility of the public attribute (e.g. period
        returns object ndarray and datetimetz a datetime64[ns] ndarray for
        ``.values`` while it returns an ExtensionArray for ``._values`` in those
        cases).

        Differs from ``.array`` in that this still returns the numpy array if
        the Block is backed by a numpy array (except for datetime64 and
        timedelta64 dtypes), while ``.array`` ensures to always return an
        ExtensionArray.

        Overview:

        dtype       | values        | _values       | array         |
        ----------- | ------------- | ------------- | ------------- |
        Numeric     | ndarray       | ndarray       | PandasArray   |
        Category    | Categorical   | Categorical   | Categorical   |
        dt64[ns]    | ndarray[M8ns] | DatetimeArray | DatetimeArray |
        dt64[ns tz] | ndarray[M8ns] | DatetimeArray | DatetimeArray |
        td64[ns]    | ndarray[m8ns] | TimedeltaArray| ndarray[m8ns] |
        Period      | ndarray[obj]  | PeriodArray   | PeriodArray   |
        Nullable    | EA            | EA            | EA            |

        """
        return self._mgr.internal_values()

741 

    # error: Decorated property not supported
    @Appender(base.IndexOpsMixin.array.__doc__)  # type: ignore[misc]
    @property
    def array(self) -> ExtensionArray:
        # Always an ExtensionArray, even for numpy-backed data (see the
        # overview table in the _values docstring).
        return self._mgr.array_values()

747 

748 # ops 

749 def ravel(self, order: str = "C") -> np.ndarray: 

750 """ 

751 Return the flattened underlying data as an ndarray. 

752 

753 Returns 

754 ------- 

755 numpy.ndarray or ndarray-like 

756 Flattened data of the Series. 

757 

758 See Also 

759 -------- 

760 numpy.ndarray.ravel : Return a flattened array. 

761 """ 

762 return self._values.ravel(order=order) 

763 

    def __len__(self) -> int:
        """
        Return the length of the Series.
        """
        # Delegated to the single-column manager.
        return len(self._mgr)

769 

770 def view(self, dtype: Dtype | None = None) -> Series: 

771 """ 

772 Create a new view of the Series. 

773 

774 This function will return a new Series with a view of the same 

775 underlying values in memory, optionally reinterpreted with a new data 

776 type. The new data type must preserve the same size in bytes as to not 

777 cause index misalignment. 

778 

779 Parameters 

780 ---------- 

781 dtype : data type 

782 Data type object or one of their string representations. 

783 

784 Returns 

785 ------- 

786 Series 

787 A new Series object as a view of the same data in memory. 

788 

789 See Also 

790 -------- 

791 numpy.ndarray.view : Equivalent numpy function to create a new view of 

792 the same data in memory. 

793 

794 Notes 

795 ----- 

796 Series are instantiated with ``dtype=float64`` by default. While 

797 ``numpy.ndarray.view()`` will return a view with the same data type as 

798 the original array, ``Series.view()`` (without specified dtype) 

799 will try using ``float64`` and may fail if the original data type size 

800 in bytes is not the same. 

801 

802 Examples 

803 -------- 

804 >>> s = pd.Series([-2, -1, 0, 1, 2], dtype='int8') 

805 >>> s 

806 0 -2 

807 1 -1 

808 2 0 

809 3 1 

810 4 2 

811 dtype: int8 

812 

813 The 8 bit signed integer representation of `-1` is `0b11111111`, but 

814 the same bytes represent 255 if read as an 8 bit unsigned integer: 

815 

816 >>> us = s.view('uint8') 

817 >>> us 

818 0 254 

819 1 255 

820 2 0 

821 3 1 

822 4 2 

823 dtype: uint8 

824 

825 The views share the same underlying values: 

826 

827 >>> us[0] = 128 

828 >>> s 

829 0 -128 

830 1 -1 

831 2 0 

832 3 1 

833 4 2 

834 dtype: int8 

835 """ 

836 # self.array instead of self._values so we piggyback on PandasArray 

837 # implementation 

838 res_values = self.array.view(dtype) 

839 res_ser = self._constructor(res_values, index=self.index) 

840 return res_ser.__finalize__(self, method="view") 

841 

    # ----------------------------------------------------------------------
    # NDArray Compat
    # NOTE(review): this re-assigns _HANDLED_TYPES with the identical value
    # already declared near the top of the class; redundant but harmless.
    _HANDLED_TYPES = (Index, ExtensionArray, np.ndarray)

845 

    def __array__(self, dtype: npt.DTypeLike | None = None) -> np.ndarray:
        """
        Return the values as a NumPy array.

        Users should not call this directly. Rather, it is invoked by
        :func:`numpy.array` and :func:`numpy.asarray`.

        Parameters
        ----------
        dtype : str or numpy.dtype, optional
            The dtype to use for the resulting NumPy array. By default,
            the dtype is inferred from the data.

        Returns
        -------
        numpy.ndarray
            The values in the series converted to a :class:`numpy.ndarray`
            with the specified `dtype`.

        See Also
        --------
        array : Create a new array from data.
        Series.array : Zero-copy view to the array backing the Series.
        Series.to_numpy : Series method for similar behavior.

        Examples
        --------
        >>> ser = pd.Series([1, 2, 3])
        >>> np.asarray(ser)
        array([1, 2, 3])

        For timezone-aware data, the timezones may be retained with
        ``dtype='object'``

        >>> tzser = pd.Series(pd.date_range('2000', periods=2, tz="CET"))
        >>> np.asarray(tzser, dtype="object")
        array([Timestamp('2000-01-01 00:00:00+0100', tz='CET'),
               Timestamp('2000-01-02 00:00:00+0100', tz='CET')],
              dtype=object)

        Or the values may be localized to UTC and the tzinfo discarded with
        ``dtype='datetime64[ns]'``

        >>> np.asarray(tzser, dtype="datetime64[ns]")  # doctest: +ELLIPSIS
        array(['1999-12-31T23:00:00.000000000', ...],
              dtype='datetime64[ns]')
        """
        # With dtype=None, numpy infers the result dtype from _values.
        return np.asarray(self._values, dtype)

894 

895 # ---------------------------------------------------------------------- 

896 # Unary Methods 

897 

    # coercion: scalar-conversion dunders, valid only for length-1 Series.
    __float__ = _coerce_method(float)
    # NOTE(review): __long__ is not a special method in Python 3; presumably
    # retained from Python 2 compatibility. It is a harmless alias of __int__.
    __long__ = _coerce_method(int)
    __int__ = _coerce_method(int)

902 

903 # ---------------------------------------------------------------------- 

904 

905 # indexers 

    @property
    def axes(self) -> list[Index]:
        """
        Return a list of the row axis labels.
        """
        # A Series has exactly one axis: its index.
        return [self.index]

912 

913 # ---------------------------------------------------------------------- 

914 # Indexing Methods 

915 

916 @Appender(NDFrame.take.__doc__) 

917 def take( 

918 self, indices, axis: Axis = 0, is_copy: bool | None = None, **kwargs 

919 ) -> Series: 

920 if is_copy is not None: 

921 warnings.warn( 

922 "is_copy is deprecated and will be removed in a future version. " 

923 "'take' always returns a copy, so there is no need to specify this.", 

924 FutureWarning, 

925 stacklevel=find_stack_level(), 

926 ) 

927 nv.validate_take((), kwargs) 

928 

929 indices = ensure_platform_int(indices) 

930 new_index = self.index.take(indices) 

931 new_values = self._values.take(indices) 

932 

933 result = self._constructor(new_values, index=new_index, fastpath=True) 

934 return result.__finalize__(self, method="take") 

935 

    def _take_with_is_copy(self, indices, axis=0) -> Series:
        """
        Internal version of the `take` method that sets the `_is_copy`
        attribute to keep track of the parent dataframe (using in indexing
        for the SettingWithCopyWarning). For Series this does the same
        as the public take (it never sets `_is_copy`).

        See the docstring of `take` for full explanation of the parameters.
        """
        # Plain delegation; the _is_copy bookkeeping only matters for DataFrame.
        return self.take(indices=indices, axis=axis)

946 

    def _ixs(self, i: int, axis: int = 0) -> Any:
        """
        Return the i-th value or values in the Series by location.

        Parameters
        ----------
        i : int
            Positional index into the underlying values.
        axis : int, default 0
            Unused; kept for signature compatibility with DataFrame._ixs.

        Returns
        -------
        scalar (int) or Series (slice, sequence)
        """
        # Purely positional lookup on the internal values.
        return self._values[i]

960 

    def _slice(self, slobj: slice, axis: int = 0) -> Series:
        """Return a positional slice of the Series as a new Series."""
        # axis kwarg is retained for compat with NDFrame method
        # _slice is *always* positional
        return self._get_values(slobj)

965 

    def __getitem__(self, key):
        """
        Return item(s) for ``self[key]``.

        Resolution order matters here: scalar/label lookup is attempted
        first, then MultiIndex tuple handling, boolean masks, and finally
        the list-like/slice fallback in _get_with.
        """
        check_deprecated_indexers(key)
        key = com.apply_if_callable(key, self)

        if key is Ellipsis:
            # ser[...] returns the Series itself.
            return self

        key_is_scalar = is_scalar(key)
        if isinstance(key, (list, tuple)):
            # A length-1 tuple/list wrapping a slice/scalar is unwrapped.
            key = unpack_1tuple(key)

        if is_integer(key) and self.index._should_fallback_to_positional:
            # Integer key on a non-integer index: treat positionally.
            return self._values[key]

        elif key_is_scalar:
            return self._get_value(key)

        if is_hashable(key):
            # Otherwise index.get_value will raise InvalidIndexError
            try:
                # For labels that don't resolve as scalars like tuples and frozensets
                result = self._get_value(key)

                return result

            except (KeyError, TypeError, InvalidIndexError):
                # InvalidIndexError for e.g. generator
                # see test_series_getitem_corner_generator
                if isinstance(key, tuple) and isinstance(self.index, MultiIndex):
                    # We still have the corner case where a tuple is a key
                    # in the first level of our MultiIndex
                    return self._get_values_tuple(key)

        if is_iterator(key):
            # Materialize so the key can be inspected more than once.
            key = list(key)

        if com.is_bool_indexer(key):
            key = check_bool_indexer(self.index, key)
            key = np.asarray(key, dtype=bool)
            return self._get_values(key)

        return self._get_with(key)

1008 

    def _get_with(self, key):
        """
        Fallback for __getitem__ with a non-scalar, non-boolean key:
        slices, tuples, and list-like indexers.
        """
        # other: fancy integer or otherwise
        if isinstance(key, slice):
            # _convert_slice_indexer to determine if this slice is positional
            # or label based, and if the latter, convert to positional
            slobj = self.index._convert_slice_indexer(key, kind="getitem")
            return self._slice(slobj)
        elif isinstance(key, ABCDataFrame):
            raise TypeError(
                "Indexing a Series with DataFrame is not "
                "supported, use the appropriate DataFrame column"
            )
        elif isinstance(key, tuple):
            return self._get_values_tuple(key)

        elif not is_list_like(key):
            # e.g. scalars that aren't recognized by lib.is_scalar, GH#32684
            return self.loc[key]

        if not isinstance(key, (list, np.ndarray, ExtensionArray, Series, Index)):
            # Other iterables (e.g. dict_keys) are normalized to a list.
            key = list(key)

        if isinstance(key, Index):
            key_type = key.inferred_type
        else:
            key_type = lib.infer_dtype(key, skipna=False)

        # Note: The key_type == "boolean" case should be caught by the
        # com.is_bool_indexer check in __getitem__
        if key_type == "integer":
            # We need to decide whether to treat this as a positional indexer
            # (i.e. self.iloc) or label-based (i.e. self.loc)
            if not self.index._should_fallback_to_positional:
                return self.loc[key]
            else:
                return self.iloc[key]

        # handle the dup indexing case GH#4246
        return self.loc[key]

1048 

    def _get_values_tuple(self, key: tuple):
        """
        __getitem__ with a tuple key: a MultiIndex lookup, plus a matplotlib
        compatibility shim for tuples containing None
        (e.g. ``ser[:, np.newaxis]``).
        """
        # mpl hackaround
        if com.any_none(*key):
            # mpl compat if we look up e.g. ser[:, np.newaxis];
            # see tests.series.timeseries.test_mpl_compat_hack
            # the asarray is needed to avoid returning a 2D DatetimeArray
            result = np.asarray(self._values[key])
            deprecate_ndim_indexing(result, stacklevel=find_stack_level())
            return result

        if not isinstance(self.index, MultiIndex):
            raise KeyError("key of type tuple not found and not a MultiIndex")

        # If key is contained, would have returned by now
        indexer, new_index = self.index.get_loc_level(key)
        return self._constructor(self._values[indexer], index=new_index).__finalize__(
            self
        )

1067 

1068 def _get_values(self, indexer: slice | npt.NDArray[np.bool_]) -> Series: 

1069 new_mgr = self._mgr.getitem_mgr(indexer) 

1070 return self._constructor(new_mgr).__finalize__(self) 

1071 

1072 def _get_value(self, label, takeable: bool = False): 

1073 """ 

1074 Quickly retrieve single value at passed index label. 

1075 

1076 Parameters 

1077 ---------- 

1078 label : object 

1079 takeable : interpret the index as indexers, default False 

1080 

1081 Returns 

1082 ------- 

1083 scalar value 

1084 """ 

1085 if takeable: 

1086 return self._values[label] 

1087 

1088 # Similar to Index.get_value, but we do not fall back to positional 

1089 loc = self.index.get_loc(label) 

1090 return self.index._get_values_for_loc(self, loc, label) 

1091 

    def __setitem__(self, key, value) -> None:
        """
        Set ``self[key] = value``.

        Dispatch order matters: slices are set positionally, scalar labels go
        through the index engine, and the exception raised by the engine
        determines the fallback (enlargement, lossy-set, boolean mask, or
        list-like keys via ``_set_with``).
        """
        check_deprecated_indexers(key)
        key = com.apply_if_callable(key, self)
        cacher_needs_updating = self._check_is_chained_assignment_possible()

        if key is Ellipsis:
            key = slice(None)

        if isinstance(key, slice):
            indexer = self.index._convert_slice_indexer(key, kind="getitem")
            return self._set_values(indexer, value)

        try:
            self._set_with_engine(key, value)
        except KeyError:
            # We have a scalar (or for MultiIndex or object-dtype, scalar-like)
            # key that is not present in self.index.
            if is_integer(key) and self.index.inferred_type != "integer":
                # positional setter
                if not self.index._should_fallback_to_positional:
                    # GH#33469
                    warnings.warn(
                        "Treating integers as positional in Series.__setitem__ "
                        "with a Float64Index is deprecated. In a future version, "
                        "`series[an_int] = val` will insert a new key into the "
                        "Series. Use `series.iloc[an_int] = val` to treat the "
                        "key as positional.",
                        FutureWarning,
                        stacklevel=find_stack_level(),
                    )
                # can't use _mgr.setitem_inplace yet bc could have *both*
                # KeyError and then ValueError, xref GH#45070
                self._set_values(key, value)
            else:
                # GH#12862 adding a new key to the Series
                self.loc[key] = value

        except (TypeError, ValueError, LossySetitemError):
            # The key was OK, but we cannot set the value losslessly
            indexer = self.index.get_loc(key)
            self._set_values(indexer, value)

        except InvalidIndexError as err:
            if isinstance(key, tuple) and not isinstance(self.index, MultiIndex):
                # cases with MultiIndex don't get here bc they raise KeyError
                # e.g. test_basic_getitem_setitem_corner
                raise KeyError(
                    "key of type tuple not found and not a MultiIndex"
                ) from err

            if com.is_bool_indexer(key):
                key = check_bool_indexer(self.index, key)
                key = np.asarray(key, dtype=bool)

                if (
                    is_list_like(value)
                    and len(value) != len(self)
                    and not isinstance(value, Series)
                    and not is_object_dtype(self.dtype)
                ):
                    # Series will be reindexed to have matching length inside
                    # _where call below
                    # GH#44265
                    indexer = key.nonzero()[0]
                    self._set_values(indexer, value)
                    return

                # otherwise with listlike other we interpret series[mask] = other
                # as series[mask] = other[mask]
                try:
                    self._where(~key, value, inplace=True)
                except InvalidIndexError:
                    # test_where_dups
                    self.iloc[key] = value
                return

            else:
                self._set_with(key, value)

        if cacher_needs_updating:
            self._maybe_update_cacher(inplace=True)

1173 

1174 def _set_with_engine(self, key, value) -> None: 

1175 loc = self.index.get_loc(key) 

1176 

1177 # this is equivalent to self._values[key] = value 

1178 self._mgr.setitem_inplace(loc, value) 

1179 

1180 def _set_with(self, key, value): 

1181 # We got here via exception-handling off of InvalidIndexError, so 

1182 # key should always be listlike at this point. 

1183 assert not isinstance(key, tuple) 

1184 

1185 if is_iterator(key): 

1186 # Without this, the call to infer_dtype will consume the generator 

1187 key = list(key) 

1188 

1189 if not self.index._should_fallback_to_positional: 

1190 # Regardless of the key type, we're treating it as labels 

1191 self._set_labels(key, value) 

1192 

1193 else: 

1194 # Note: key_type == "boolean" should not occur because that 

1195 # should be caught by the is_bool_indexer check in __setitem__ 

1196 key_type = lib.infer_dtype(key, skipna=False) 

1197 

1198 if key_type == "integer": 

1199 self._set_values(key, value) 

1200 else: 

1201 self._set_labels(key, value) 

1202 

1203 def _set_labels(self, key, value) -> None: 

1204 key = com.asarray_tuplesafe(key) 

1205 indexer: np.ndarray = self.index.get_indexer(key) 

1206 mask = indexer == -1 

1207 if mask.any(): 

1208 raise KeyError(f"{key[mask]} not in index") 

1209 self._set_values(indexer, value) 

1210 

1211 def _set_values(self, key, value) -> None: 

1212 if isinstance(key, (Index, Series)): 

1213 key = key._values 

1214 

1215 self._mgr = self._mgr.setitem(indexer=key, value=value) 

1216 self._maybe_update_cacher() 

1217 

1218 def _set_value(self, label, value, takeable: bool = False): 

1219 """ 

1220 Quickly set single value at passed label. 

1221 

1222 If label is not contained, a new object is created with the label 

1223 placed at the end of the result index. 

1224 

1225 Parameters 

1226 ---------- 

1227 label : object 

1228 Partial indexing with MultiIndex not allowed. 

1229 value : object 

1230 Scalar value. 

1231 takeable : interpret the index as indexers, default False 

1232 """ 

1233 if not takeable: 

1234 try: 

1235 loc = self.index.get_loc(label) 

1236 except KeyError: 

1237 # set using a non-recursive method 

1238 self.loc[label] = value 

1239 return 

1240 else: 

1241 loc = label 

1242 

1243 self._set_values(loc, value) 

1244 

1245 # ---------------------------------------------------------------------- 

1246 # Lookup Caching 

1247 

1248 @property 

1249 def _is_cached(self) -> bool: 

1250 """Return boolean indicating if self is cached or not.""" 

1251 return getattr(self, "_cacher", None) is not None 

1252 

1253 def _get_cacher(self): 

1254 """return my cacher or None""" 

1255 cacher = getattr(self, "_cacher", None) 

1256 if cacher is not None: 

1257 cacher = cacher[1]() 

1258 return cacher 

1259 

1260 def _reset_cacher(self) -> None: 

1261 """ 

1262 Reset the cacher. 

1263 """ 

1264 if hasattr(self, "_cacher"): 

1265 del self._cacher 

1266 

1267 def _set_as_cached(self, item, cacher) -> None: 

1268 """ 

1269 Set the _cacher attribute on the calling object with a weakref to 

1270 cacher. 

1271 """ 

1272 self._cacher = (item, weakref.ref(cacher)) 

1273 

1274 def _clear_item_cache(self) -> None: 

1275 # no-op for Series 

1276 pass 

1277 

1278 def _check_is_chained_assignment_possible(self) -> bool: 

1279 """ 

1280 See NDFrame._check_is_chained_assignment_possible.__doc__ 

1281 """ 

1282 if self._is_view and self._is_cached: 

1283 ref = self._get_cacher() 

1284 if ref is not None and ref._is_mixed_type: 

1285 self._check_setitem_copy(t="referent", force=True) 

1286 return True 

1287 return super()._check_is_chained_assignment_possible() 

1288 

    def _maybe_update_cacher(
        self, clear: bool = False, verify_is_copy: bool = True, inplace: bool = False
    ) -> None:
        """
        See NDFrame._maybe_update_cacher.__doc__
        """
        # _cacher, when set, is (column_label, weakref-to-parent).
        cacher = getattr(self, "_cacher", None)
        if cacher is not None:
            assert self.ndim == 1
            ref: DataFrame = cacher[1]()

            # we are trying to reference a dead referent, hence
            # a copy
            if ref is None:
                del self._cacher
            # for CoW, we never want to update the parent DataFrame cache
            # if the Series changed, and always pop the cached item
            elif (
                not (
                    get_option("mode.copy_on_write")
                    and get_option("mode.data_manager") == "block"
                )
                and len(self) == len(ref)
                and self.name in ref.columns
            ):
                # GH#42530 self.name must be in ref.columns
                # to ensure column still in dataframe
                # otherwise, either self or ref has swapped in new arrays
                ref._maybe_cache_changed(cacher[0], self, inplace=inplace)
            else:
                # GH#33675 we have swapped in a new array, so parent
                # reference to self is now invalid
                ref._item_cache.pop(cacher[0], None)

        super()._maybe_update_cacher(
            clear=clear, verify_is_copy=verify_is_copy, inplace=inplace
        )

1326 

1327 # ---------------------------------------------------------------------- 

1328 # Unsorted 

1329 

1330 @property 

1331 def _is_mixed_type(self): 

1332 return False 

1333 

1334 def repeat(self, repeats: int | Sequence[int], axis: None = None) -> Series: 

1335 """ 

1336 Repeat elements of a Series. 

1337 

1338 Returns a new Series where each element of the current Series 

1339 is repeated consecutively a given number of times. 

1340 

1341 Parameters 

1342 ---------- 

1343 repeats : int or array of ints 

1344 The number of repetitions for each element. This should be a 

1345 non-negative integer. Repeating 0 times will return an empty 

1346 Series. 

1347 axis : None 

1348 Unused. Parameter needed for compatibility with DataFrame. 

1349 

1350 Returns 

1351 ------- 

1352 Series 

1353 Newly created Series with repeated elements. 

1354 

1355 See Also 

1356 -------- 

1357 Index.repeat : Equivalent function for Index. 

1358 numpy.repeat : Similar method for :class:`numpy.ndarray`. 

1359 

1360 Examples 

1361 -------- 

1362 >>> s = pd.Series(['a', 'b', 'c']) 

1363 >>> s 

1364 0 a 

1365 1 b 

1366 2 c 

1367 dtype: object 

1368 >>> s.repeat(2) 

1369 0 a 

1370 0 a 

1371 1 b 

1372 1 b 

1373 2 c 

1374 2 c 

1375 dtype: object 

1376 >>> s.repeat([1, 2, 3]) 

1377 0 a 

1378 1 b 

1379 1 b 

1380 2 c 

1381 2 c 

1382 2 c 

1383 dtype: object 

1384 """ 

1385 nv.validate_repeat((), {"axis": axis}) 

1386 new_index = self.index.repeat(repeats) 

1387 new_values = self._values.repeat(repeats) 

1388 return self._constructor(new_values, index=new_index).__finalize__( 

1389 self, method="repeat" 

1390 ) 

1391 

    # Overloads encode reset_index's return type: DataFrame when drop=False,
    # Series when drop=True, and None when inplace=True.
    @overload
    def reset_index(
        self,
        level: IndexLabel = ...,
        *,
        drop: Literal[False] = ...,
        name: Level = ...,
        inplace: Literal[False] = ...,
        allow_duplicates: bool = ...,
    ) -> DataFrame:
        ...

    @overload
    def reset_index(
        self,
        level: IndexLabel = ...,
        *,
        drop: Literal[True],
        name: Level = ...,
        inplace: Literal[False] = ...,
        allow_duplicates: bool = ...,
    ) -> Series:
        ...

    @overload
    def reset_index(
        self,
        level: IndexLabel = ...,
        *,
        drop: bool = ...,
        name: Level = ...,
        inplace: Literal[True],
        allow_duplicates: bool = ...,
    ) -> None:
        ...

1427 

    @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "level"])
    def reset_index(
        self,
        level: IndexLabel = None,
        drop: bool = False,
        name: Level = lib.no_default,
        inplace: bool = False,
        allow_duplicates: bool = False,
    ) -> DataFrame | Series | None:
        """
        Generate a new DataFrame or Series with the index reset.

        This is useful when the index needs to be treated as a column, or
        when the index is meaningless and needs to be reset to the default
        before another operation.

        Parameters
        ----------
        level : int, str, tuple, or list, default optional
            For a Series with a MultiIndex, only remove the specified levels
            from the index. Removes all levels by default.
        drop : bool, default False
            Just reset the index, without inserting it as a column in
            the new DataFrame.
        name : object, optional
            The name to use for the column containing the original Series
            values. Uses ``self.name`` by default. This argument is ignored
            when `drop` is True.
        inplace : bool, default False
            Modify the Series in place (do not create a new object).
        allow_duplicates : bool, default False
            Allow duplicate column labels to be created.

            .. versionadded:: 1.5.0

        Returns
        -------
        Series or DataFrame or None
            When `drop` is False (the default), a DataFrame is returned.
            The newly created columns will come first in the DataFrame,
            followed by the original Series values.
            When `drop` is True, a `Series` is returned.
            In either case, if ``inplace=True``, no value is returned.

        See Also
        --------
        DataFrame.reset_index: Analogous function for DataFrame.

        Examples
        --------
        >>> s = pd.Series([1, 2, 3, 4], name='foo',
        ...               index=pd.Index(['a', 'b', 'c', 'd'], name='idx'))

        Generate a DataFrame with default index.

        >>> s.reset_index()
          idx  foo
        0   a    1
        1   b    2
        2   c    3
        3   d    4

        To specify the name of the new column use `name`.

        >>> s.reset_index(name='values')
          idx  values
        0   a       1
        1   b       2
        2   c       3
        3   d       4

        To generate a new Series with the default set `drop` to True.

        >>> s.reset_index(drop=True)
        0    1
        1    2
        2    3
        3    4
        Name: foo, dtype: int64

        To update the Series in place, without generating a new one
        set `inplace` to True. Note that it also requires ``drop=True``.

        >>> s.reset_index(inplace=True, drop=True)
        >>> s
        0    1
        1    2
        2    3
        3    4
        Name: foo, dtype: int64

        The `level` parameter is interesting for Series with a multi-level
        index.

        >>> arrays = [np.array(['bar', 'bar', 'baz', 'baz']),
        ...           np.array(['one', 'two', 'one', 'two'])]
        >>> s2 = pd.Series(
        ...     range(4), name='foo',
        ...     index=pd.MultiIndex.from_arrays(arrays,
        ...                                     names=['a', 'b']))

        To remove a specific level from the Index, use `level`.

        >>> s2.reset_index(level='a')
               a  foo
        b
        one  bar    0
        two  bar    1
        one  baz    2
        two  baz    3

        If `level` is not set, all levels are removed from the Index.

        >>> s2.reset_index()
             a    b  foo
        0  bar  one    0
        1  bar  two    1
        2  baz  one    2
        3  baz  two    3
        """
        inplace = validate_bool_kwarg(inplace, "inplace")
        if drop:
            # drop=True keeps the result a Series, replacing the index.
            new_index = default_index(len(self))
            if level is not None:
                level_list: Sequence[Hashable]
                if not isinstance(level, (tuple, list)):
                    level_list = [level]
                else:
                    level_list = level
                level_list = [self.index._get_level_number(lev) for lev in level_list]
                # Dropping only some MultiIndex levels keeps the remainder;
                # dropping all levels falls through to the default RangeIndex.
                if len(level_list) < self.index.nlevels:
                    new_index = self.index.droplevel(level_list)

            if inplace:
                self.index = new_index
            else:
                return self._constructor(
                    self._values.copy(), index=new_index
                ).__finalize__(self, method="reset_index")
        elif inplace:
            raise TypeError(
                "Cannot reset_index inplace on a Series to create a DataFrame"
            )
        else:
            # drop=False: delegate to DataFrame.reset_index on a 1-col frame.
            if name is lib.no_default:
                # For backwards compatibility, keep columns as [0] instead of
                # [None] when self.name is None
                if self.name is None:
                    name = 0
                else:
                    name = self.name

            df = self.to_frame(name)
            return df.reset_index(
                level=level, drop=drop, allow_duplicates=allow_duplicates
            )
        return None

1585 

1586 # ---------------------------------------------------------------------- 

1587 # Rendering Methods 

1588 

1589 def __repr__(self) -> str: 

1590 """ 

1591 Return a string representation for a particular Series. 

1592 """ 

1593 repr_params = fmt.get_series_repr_params() 

1594 return self.to_string(**repr_params) 

1595 

    # Overloads encode to_string's return type: a str when buf is None,
    # None when writing to a path/buffer.
    @overload
    def to_string(
        self,
        buf: None = ...,
        na_rep: str = ...,
        float_format: str | None = ...,
        header: bool = ...,
        index: bool = ...,
        length=...,
        dtype=...,
        name=...,
        max_rows: int | None = ...,
        min_rows: int | None = ...,
    ) -> str:
        ...

    @overload
    def to_string(
        self,
        buf: FilePath | WriteBuffer[str],
        na_rep: str = ...,
        float_format: str | None = ...,
        header: bool = ...,
        index: bool = ...,
        length=...,
        dtype=...,
        name=...,
        max_rows: int | None = ...,
        min_rows: int | None = ...,
    ) -> None:
        ...

1627 

    def to_string(
        self,
        buf: FilePath | WriteBuffer[str] | None = None,
        na_rep: str = "NaN",
        float_format: str | None = None,
        header: bool = True,
        index: bool = True,
        length=False,
        dtype=False,
        name=False,
        max_rows: int | None = None,
        min_rows: int | None = None,
    ) -> str | None:
        """
        Render a string representation of the Series.

        Parameters
        ----------
        buf : StringIO-like, optional
            Buffer to write to.
        na_rep : str, optional
            String representation of NaN to use, default 'NaN'.
        float_format : one-parameter function, optional
            Formatter function to apply to columns' elements if they are
            floats, default None.
        header : bool, default True
            Add the Series header (index name).
        index : bool, optional
            Add index (row) labels, default True.
        length : bool, default False
            Add the Series length.
        dtype : bool, default False
            Add the Series dtype.
        name : bool, default False
            Add the Series name if not None.
        max_rows : int, optional
            Maximum number of rows to show before truncating. If None, show
            all.
        min_rows : int, optional
            The number of rows to display in a truncated repr (when number
            of rows is above `max_rows`).

        Returns
        -------
        str or None
            String representation of Series if ``buf=None``, otherwise None.
        """
        # All rendering is delegated to the formatter; this method only
        # validates the result and routes it to buf/file/return value.
        formatter = fmt.SeriesFormatter(
            self,
            name=name,
            length=length,
            header=header,
            index=index,
            dtype=dtype,
            na_rep=na_rep,
            float_format=float_format,
            min_rows=min_rows,
            max_rows=max_rows,
        )
        result = formatter.to_string()

        # catch contract violations
        if not isinstance(result, str):
            raise AssertionError(
                "result must be of type str, type "
                f"of result is {repr(type(result).__name__)}"
            )

        if buf is None:
            return result
        else:
            if hasattr(buf, "write"):
                # error: Item "str" of "Union[str, PathLike[str], WriteBuffer
                # [str]]" has no attribute "write"
                buf.write(result)  # type: ignore[union-attr]
            else:
                # error: Argument 1 to "open" has incompatible type "Union[str,
                # PathLike[str], WriteBuffer[str]]"; expected "Union[Union[str,
                # bytes, PathLike[str], PathLike[bytes]], int]"
                # NOTE(review): open() uses the platform default encoding here,
                # not utf-8 — confirm that is intended for path-like buf.
                with open(buf, "w") as f:  # type: ignore[arg-type]
                    f.write(result)
        return None

1710 

1711 @doc( 

1712 klass=_shared_doc_kwargs["klass"], 

1713 storage_options=_shared_docs["storage_options"], 

1714 examples=dedent( 

1715 """Examples 

1716 -------- 

1717 >>> s = pd.Series(["elk", "pig", "dog", "quetzal"], name="animal") 

1718 >>> print(s.to_markdown()) 

1719 | | animal | 

1720 |---:|:---------| 

1721 | 0 | elk | 

1722 | 1 | pig | 

1723 | 2 | dog | 

1724 | 3 | quetzal | 

1725 

1726 Output markdown with a tabulate option. 

1727 

1728 >>> print(s.to_markdown(tablefmt="grid")) 

1729 +----+----------+ 

1730 | | animal | 

1731 +====+==========+ 

1732 | 0 | elk | 

1733 +----+----------+ 

1734 | 1 | pig | 

1735 +----+----------+ 

1736 | 2 | dog | 

1737 +----+----------+ 

1738 | 3 | quetzal | 

1739 +----+----------+""" 

1740 ), 

1741 ) 

    def to_markdown(
        self,
        buf: IO[str] | None = None,
        mode: str = "wt",
        index: bool = True,
        storage_options: StorageOptions = None,
        **kwargs,
    ) -> str | None:
        """
        Print {klass} in Markdown-friendly format.

        .. versionadded:: 1.0.0

        Parameters
        ----------
        buf : str, Path or StringIO-like, optional, default None
            Buffer to write to. If None, the output is returned as a string.
        mode : str, optional
            Mode in which file is opened, "wt" by default.
        index : bool, optional, default True
            Add index (row) labels.

            .. versionadded:: 1.1.0
        {storage_options}

            .. versionadded:: 1.2.0

        **kwargs
            These parameters will be passed to `tabulate \
                <https://pypi.org/project/tabulate>`_.

        Returns
        -------
        str
            {klass} in Markdown-friendly format.

        Notes
        -----
        Requires the `tabulate <https://pypi.org/project/tabulate>`_ package.

        {examples}
        """
        # Delegate to DataFrame.to_markdown on a one-column frame.
        return self.to_frame().to_markdown(
            buf, mode, index, storage_options=storage_options, **kwargs
        )

1787 

1788 # ---------------------------------------------------------------------- 

1789 

1790 def items(self) -> Iterable[tuple[Hashable, Any]]: 

1791 """ 

1792 Lazily iterate over (index, value) tuples. 

1793 

1794 This method returns an iterable tuple (index, value). This is 

1795 convenient if you want to create a lazy iterator. 

1796 

1797 Returns 

1798 ------- 

1799 iterable 

1800 Iterable of tuples containing the (index, value) pairs from a 

1801 Series. 

1802 

1803 See Also 

1804 -------- 

1805 DataFrame.items : Iterate over (column name, Series) pairs. 

1806 DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. 

1807 

1808 Examples 

1809 -------- 

1810 >>> s = pd.Series(['A', 'B', 'C']) 

1811 >>> for index, value in s.items(): 

1812 ... print(f"Index : {index}, Value : {value}") 

1813 Index : 0, Value : A 

1814 Index : 1, Value : B 

1815 Index : 2, Value : C 

1816 """ 

1817 return zip(iter(self.index), iter(self)) 

1818 

1819 def iteritems(self) -> Iterable[tuple[Hashable, Any]]: 

1820 """ 

1821 Lazily iterate over (index, value) tuples. 

1822 

1823 .. deprecated:: 1.5.0 

1824 iteritems is deprecated and will be removed in a future version. 

1825 Use .items instead. 

1826 

1827 This method returns an iterable tuple (index, value). This is 

1828 convenient if you want to create a lazy iterator. 

1829 

1830 Returns 

1831 ------- 

1832 iterable 

1833 Iterable of tuples containing the (index, value) pairs from a 

1834 Series. 

1835 

1836 See Also 

1837 -------- 

1838 Series.items : Recommended alternative. 

1839 DataFrame.items : Iterate over (column name, Series) pairs. 

1840 DataFrame.iterrows : Iterate over DataFrame rows as (index, Series) pairs. 

1841 """ 

1842 warnings.warn( 

1843 "iteritems is deprecated and will be removed in a future version. " 

1844 "Use .items instead.", 

1845 FutureWarning, 

1846 stacklevel=find_stack_level(), 

1847 ) 

1848 return self.items() 

1849 

1850 # ---------------------------------------------------------------------- 

1851 # Misc public methods 

1852 

1853 def keys(self) -> Index: 

1854 """ 

1855 Return alias for index. 

1856 

1857 Returns 

1858 ------- 

1859 Index 

1860 Index of the Series. 

1861 """ 

1862 return self.index 

1863 

1864 def to_dict(self, into: type[dict] = dict) -> dict: 

1865 """ 

1866 Convert Series to {label -> value} dict or dict-like object. 

1867 

1868 Parameters 

1869 ---------- 

1870 into : class, default dict 

1871 The collections.abc.Mapping subclass to use as the return 

1872 object. Can be the actual class or an empty 

1873 instance of the mapping type you want. If you want a 

1874 collections.defaultdict, you must pass it initialized. 

1875 

1876 Returns 

1877 ------- 

1878 collections.abc.Mapping 

1879 Key-value representation of Series. 

1880 

1881 Examples 

1882 -------- 

1883 >>> s = pd.Series([1, 2, 3, 4]) 

1884 >>> s.to_dict() 

1885 {0: 1, 1: 2, 2: 3, 3: 4} 

1886 >>> from collections import OrderedDict, defaultdict 

1887 >>> s.to_dict(OrderedDict) 

1888 OrderedDict([(0, 1), (1, 2), (2, 3), (3, 4)]) 

1889 >>> dd = defaultdict(list) 

1890 >>> s.to_dict(dd) 

1891 defaultdict(<class 'list'>, {0: 1, 1: 2, 2: 3, 3: 4}) 

1892 """ 

1893 # GH16122 

1894 into_c = com.standardize_mapping(into) 

1895 return into_c((k, maybe_box_native(v)) for k, v in self.items()) 

1896 

    def to_frame(self, name: Hashable = lib.no_default) -> DataFrame:
        """
        Convert Series to DataFrame.

        Parameters
        ----------
        name : object, optional
            The passed name should substitute for the series name (if it has
            one).

        Returns
        -------
        DataFrame
            DataFrame representation of Series.

        Examples
        --------
        >>> s = pd.Series(["a", "b", "c"],
        ...               name="vals")
        >>> s.to_frame()
          vals
        0    a
        1    b
        2    c
        """
        # Explicit name=None is deprecated: today it behaves like "no name
        # passed"; in the future it will mean a literal None column label.
        if name is None:
            warnings.warn(
                "Explicitly passing `name=None` currently preserves the Series' name "
                "or uses a default name of 0. This behaviour is deprecated, and in "
                "the future `None` will be used as the name of the resulting "
                "DataFrame column.",
                FutureWarning,
                stacklevel=find_stack_level(),
            )
            name = lib.no_default

        columns: Index
        if name is lib.no_default:
            name = self.name
            if name is None:
                # default to [0], same as we would get with DataFrame(self)
                columns = default_index(1)
            else:
                columns = Index([name])
        else:
            columns = Index([name])

        # Build a 2D manager directly rather than going through the
        # DataFrame constructor.
        mgr = self._mgr.to_2d_mgr(columns)
        df = self._constructor_expanddim(mgr)
        return df.__finalize__(self, method="to_frame")

1947 

1948 def _set_name(self, name, inplace=False) -> Series: 

1949 """ 

1950 Set the Series name. 

1951 

1952 Parameters 

1953 ---------- 

1954 name : str 

1955 inplace : bool 

1956 Whether to modify `self` directly or return a copy. 

1957 """ 

1958 inplace = validate_bool_kwarg(inplace, "inplace") 

1959 ser = self if inplace else self.copy() 

1960 ser.name = name 

1961 return ser 

1962 

1963 @Appender( 

1964 """ 

1965Examples 

1966-------- 

1967>>> ser = pd.Series([390., 350., 30., 20.], 

1968... index=['Falcon', 'Falcon', 'Parrot', 'Parrot'], name="Max Speed") 

1969>>> ser 

1970Falcon 390.0 

1971Falcon 350.0 

1972Parrot 30.0 

1973Parrot 20.0 

1974Name: Max Speed, dtype: float64 

1975>>> ser.groupby(["a", "b", "a", "b"]).mean() 

1976a 210.0 

1977b 185.0 

1978Name: Max Speed, dtype: float64 

1979>>> ser.groupby(level=0).mean() 

1980Falcon 370.0 

1981Parrot 25.0 

1982Name: Max Speed, dtype: float64 

1983>>> ser.groupby(ser > 100).mean() 

1984Max Speed 

1985False 25.0 

1986True 370.0 

1987Name: Max Speed, dtype: float64 

1988 

1989**Grouping by Indexes** 

1990 

1991We can groupby different levels of a hierarchical index 

1992using the `level` parameter: 

1993 

1994>>> arrays = [['Falcon', 'Falcon', 'Parrot', 'Parrot'], 

1995... ['Captive', 'Wild', 'Captive', 'Wild']] 

1996>>> index = pd.MultiIndex.from_arrays(arrays, names=('Animal', 'Type')) 

1997>>> ser = pd.Series([390., 350., 30., 20.], index=index, name="Max Speed") 

1998>>> ser 

1999Animal Type 

2000Falcon Captive 390.0 

2001 Wild 350.0 

2002Parrot Captive 30.0 

2003 Wild 20.0 

2004Name: Max Speed, dtype: float64 

2005>>> ser.groupby(level=0).mean() 

2006Animal 

2007Falcon 370.0 

2008Parrot 25.0 

2009Name: Max Speed, dtype: float64 

2010>>> ser.groupby(level="Type").mean() 

2011Type 

2012Captive 210.0 

2013Wild 185.0 

2014Name: Max Speed, dtype: float64 

2015 

2016We can also choose to include `NA` in group keys or not by defining 

2017`dropna` parameter, the default setting is `True`. 

2018 

2019>>> ser = pd.Series([1, 2, 3, 3], index=["a", 'a', 'b', np.nan]) 

2020>>> ser.groupby(level=0).sum() 

2021a 3 

2022b 3 

2023dtype: int64 

2024 

2025>>> ser.groupby(level=0, dropna=False).sum() 

2026a 3 

2027b 3 

2028NaN 3 

2029dtype: int64 

2030 

2031>>> arrays = ['Falcon', 'Falcon', 'Parrot', 'Parrot'] 

2032>>> ser = pd.Series([390., 350., 30., 20.], index=arrays, name="Max Speed") 

2033>>> ser.groupby(["a", "b", "a", np.nan]).mean() 

2034a 210.0 

2035b 350.0 

2036Name: Max Speed, dtype: float64 

2037 

2038>>> ser.groupby(["a", "b", "a", np.nan], dropna=False).mean() 

2039a 210.0 

2040b 350.0 

2041NaN 20.0 

2042Name: Max Speed, dtype: float64 

2043""" 

2044 ) 

    @Appender(_shared_docs["groupby"] % _shared_doc_kwargs)
    def groupby(
        self,
        by=None,
        axis: Axis = 0,
        level: Level = None,
        as_index: bool = True,
        sort: bool = True,
        group_keys: bool | lib.NoDefault = no_default,
        squeeze: bool | lib.NoDefault = no_default,
        observed: bool = False,
        dropna: bool = True,
    ) -> SeriesGroupBy:
        # Imported locally to avoid a circular import at module load time.
        from pandas.core.groupby.generic import SeriesGroupBy

        # `squeeze` is deprecated; warn only when the caller passed it.
        if squeeze is not no_default:
            warnings.warn(
                (
                    "The `squeeze` parameter is deprecated and "
                    "will be removed in a future version."
                ),
                FutureWarning,
                stacklevel=find_stack_level(),
            )
        else:
            squeeze = False

        if level is None and by is None:
            raise TypeError("You have to supply one of 'by' and 'level'")
        axis = self._get_axis_number(axis)

        return SeriesGroupBy(
            obj=self,
            keys=by,
            axis=axis,
            level=level,
            as_index=as_index,
            sort=sort,
            group_keys=group_keys,
            squeeze=squeeze,
            observed=observed,
            dropna=dropna,
        )

2088 

2089 # ---------------------------------------------------------------------- 

2090 # Statistics, overridden ndarray methods 

2091 

2092 # TODO: integrate bottleneck 

    def count(self, level: Level = None):
        """
        Return number of non-NA/null observations in the Series.

        Parameters
        ----------
        level : int or level name, default None
            If the axis is a MultiIndex (hierarchical), count along a
            particular level, collapsing into a smaller Series.

        Returns
        -------
        int or Series (if level specified)
            Number of non-null values in the Series.

        See Also
        --------
        DataFrame.count : Count non-NA cells for each column or row.

        Examples
        --------
        >>> s = pd.Series([0.0, 1.0, np.nan])
        >>> s.count()
        2
        """
        if level is None:
            return notna(self._values).sum().astype("int64")
        else:
            warnings.warn(
                "Using the level keyword in DataFrame and Series aggregations is "
                "deprecated and will be removed in a future version. Use groupby "
                "instead. ser.count(level=1) should use ser.groupby(level=1).count().",
                FutureWarning,
                stacklevel=find_stack_level(),
            )
            if not isinstance(self.index, MultiIndex):
                raise ValueError("Series.count level is only valid with a MultiIndex")

            index = self.index
            assert isinstance(index, MultiIndex)  # for mypy

            if isinstance(level, str):
                level = index._get_level_number(level)

            lev = index.levels[level]
            level_codes = np.array(index.codes[level], subok=False, copy=True)

            # Codes of -1 mark NA labels; give them their own synthetic
            # category at the end so bincount counts them too.
            mask = level_codes == -1
            if mask.any():
                level_codes[mask] = cnt = len(lev)
                lev = lev.insert(cnt, lev._na_value)

            # Keep only codes whose corresponding value is non-NA, then count
            # occurrences per level category.
            obs = level_codes[notna(self._values)]
            # error: Argument "minlength" to "bincount" has incompatible type
            # "Optional[int]"; expected "SupportsIndex"
            out = np.bincount(obs, minlength=len(lev) or None)  # type: ignore[arg-type]
            return self._constructor(out, index=lev, dtype="int64").__finalize__(
                self, method="count"
            )

2152 

2153 def mode(self, dropna: bool = True) -> Series: 

2154 """ 

2155 Return the mode(s) of the Series. 

2156 

2157 The mode is the value that appears most often. There can be multiple modes. 

2158 

2159 Always returns Series even if only one value is returned. 

2160 

2161 Parameters 

2162 ---------- 

2163 dropna : bool, default True 

2164 Don't consider counts of NaN/NaT. 

2165 

2166 Returns 

2167 ------- 

2168 Series 

2169 Modes of the Series in sorted order. 

2170 """ 

2171 # TODO: Add option for bins like value_counts() 

2172 values = self._values 

2173 if isinstance(values, np.ndarray): 

2174 res_values = algorithms.mode(values, dropna=dropna) 

2175 else: 

2176 res_values = values._mode(dropna=dropna) 

2177 

2178 # Ensure index is type stable (should always use int index) 

2179 return self._constructor( 

2180 res_values, index=range(len(res_values)), name=self.name 

2181 ) 

2182 

2183 def unique(self) -> ArrayLike: 

2184 """ 

2185 Return unique values of Series object. 

2186 

2187 Uniques are returned in order of appearance. Hash table-based unique, 

2188 therefore does NOT sort. 

2189 

2190 Returns 

2191 ------- 

2192 ndarray or ExtensionArray 

2193 The unique values returned as a NumPy array. See Notes. 

2194 

2195 See Also 

2196 -------- 

2197 Series.drop_duplicates : Return Series with duplicate values removed. 

2198 unique : Top-level unique method for any 1-d array-like object. 

2199 Index.unique : Return Index with unique values from an Index object. 

2200 

2201 Notes 

2202 ----- 

2203 Returns the unique values as a NumPy array. In case of an 

2204 extension-array backed Series, a new 

2205 :class:`~api.extensions.ExtensionArray` of that type with just 

2206 the unique values is returned. This includes 

2207 

2208 * Categorical 

2209 * Period 

2210 * Datetime with Timezone 

2211 * Interval 

2212 * Sparse 

2213 * IntegerNA 

2214 

2215 See Examples section. 

2216 

2217 Examples 

2218 -------- 

2219 >>> pd.Series([2, 1, 3, 3], name='A').unique() 

2220 array([2, 1, 3]) 

2221 

2222 >>> pd.Series([pd.Timestamp('2016-01-01') for _ in range(3)]).unique() 

2223 array(['2016-01-01T00:00:00.000000000'], dtype='datetime64[ns]') 

2224 

2225 >>> pd.Series([pd.Timestamp('2016-01-01', tz='US/Eastern') 

2226 ... for _ in range(3)]).unique() 

2227 <DatetimeArray> 

2228 ['2016-01-01 00:00:00-05:00'] 

2229 Length: 1, dtype: datetime64[ns, US/Eastern] 

2230 

2231 An Categorical will return categories in the order of 

2232 appearance and with the same dtype. 

2233 

2234 >>> pd.Series(pd.Categorical(list('baabc'))).unique() 

2235 ['b', 'a', 'c'] 

2236 Categories (3, object): ['a', 'b', 'c'] 

2237 >>> pd.Series(pd.Categorical(list('baabc'), categories=list('abc'), 

2238 ... ordered=True)).unique() 

2239 ['b', 'a', 'c'] 

2240 Categories (3, object): ['a' < 'b' < 'c'] 

2241 """ 

2242 return super().unique() 

2243 

    # Overloads encode the inplace contract for type checkers:
    # inplace=False (default) returns a Series, inplace=True returns None.
    @overload
    def drop_duplicates(
        self,
        keep: Literal["first", "last", False] = ...,
        *,
        inplace: Literal[False] = ...,
    ) -> Series:
        ...

    @overload
    def drop_duplicates(
        self, keep: Literal["first", "last", False] = ..., *, inplace: Literal[True]
    ) -> None:
        ...

    @overload
    def drop_duplicates(
        self, keep: Literal["first", "last", False] = ..., *, inplace: bool = ...
    ) -> Series | None:
        ...

2264 

2265 @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"]) 

2266 def drop_duplicates( 

2267 self, keep: Literal["first", "last", False] = "first", inplace=False 

2268 ) -> Series | None: 

2269 """ 

2270 Return Series with duplicate values removed. 

2271 

2272 Parameters 

2273 ---------- 

2274 keep : {'first', 'last', ``False``}, default 'first' 

2275 Method to handle dropping duplicates: 

2276 

2277 - 'first' : Drop duplicates except for the first occurrence. 

2278 - 'last' : Drop duplicates except for the last occurrence. 

2279 - ``False`` : Drop all duplicates. 

2280 

2281 inplace : bool, default ``False`` 

2282 If ``True``, performs operation inplace and returns None. 

2283 

2284 Returns 

2285 ------- 

2286 Series or None 

2287 Series with duplicates dropped or None if ``inplace=True``. 

2288 

2289 See Also 

2290 -------- 

2291 Index.drop_duplicates : Equivalent method on Index. 

2292 DataFrame.drop_duplicates : Equivalent method on DataFrame. 

2293 Series.duplicated : Related method on Series, indicating duplicate 

2294 Series values. 

2295 Series.unique : Return unique values as an array. 

2296 

2297 Examples 

2298 -------- 

2299 Generate a Series with duplicated entries. 

2300 

2301 >>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 'hippo'], 

2302 ... name='animal') 

2303 >>> s 

2304 0 lama 

2305 1 cow 

2306 2 lama 

2307 3 beetle 

2308 4 lama 

2309 5 hippo 

2310 Name: animal, dtype: object 

2311 

2312 With the 'keep' parameter, the selection behaviour of duplicated values 

2313 can be changed. The value 'first' keeps the first occurrence for each 

2314 set of duplicated entries. The default value of keep is 'first'. 

2315 

2316 >>> s.drop_duplicates() 

2317 0 lama 

2318 1 cow 

2319 3 beetle 

2320 5 hippo 

2321 Name: animal, dtype: object 

2322 

2323 The value 'last' for parameter 'keep' keeps the last occurrence for 

2324 each set of duplicated entries. 

2325 

2326 >>> s.drop_duplicates(keep='last') 

2327 1 cow 

2328 3 beetle 

2329 4 lama 

2330 5 hippo 

2331 Name: animal, dtype: object 

2332 

2333 The value ``False`` for parameter 'keep' discards all sets of 

2334 duplicated entries. Setting the value of 'inplace' to ``True`` performs 

2335 the operation inplace and returns ``None``. 

2336 

2337 >>> s.drop_duplicates(keep=False, inplace=True) 

2338 >>> s 

2339 1 cow 

2340 3 beetle 

2341 5 hippo 

2342 Name: animal, dtype: object 

2343 """ 

2344 inplace = validate_bool_kwarg(inplace, "inplace") 

2345 result = super().drop_duplicates(keep=keep) 

2346 if inplace: 

2347 self._update_inplace(result) 

2348 return None 

2349 else: 

2350 return result 

2351 

2352 def duplicated(self, keep: Literal["first", "last", False] = "first") -> Series: 

2353 """ 

2354 Indicate duplicate Series values. 

2355 

2356 Duplicated values are indicated as ``True`` values in the resulting 

2357 Series. Either all duplicates, all except the first or all except the 

2358 last occurrence of duplicates can be indicated. 

2359 

2360 Parameters 

2361 ---------- 

2362 keep : {'first', 'last', False}, default 'first' 

2363 Method to handle dropping duplicates: 

2364 

2365 - 'first' : Mark duplicates as ``True`` except for the first 

2366 occurrence. 

2367 - 'last' : Mark duplicates as ``True`` except for the last 

2368 occurrence. 

2369 - ``False`` : Mark all duplicates as ``True``. 

2370 

2371 Returns 

2372 ------- 

2373 Series[bool] 

2374 Series indicating whether each value has occurred in the 

2375 preceding values. 

2376 

2377 See Also 

2378 -------- 

2379 Index.duplicated : Equivalent method on pandas.Index. 

2380 DataFrame.duplicated : Equivalent method on pandas.DataFrame. 

2381 Series.drop_duplicates : Remove duplicate values from Series. 

2382 

2383 Examples 

2384 -------- 

2385 By default, for each set of duplicated values, the first occurrence is 

2386 set on False and all others on True: 

2387 

2388 >>> animals = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama']) 

2389 >>> animals.duplicated() 

2390 0 False 

2391 1 False 

2392 2 True 

2393 3 False 

2394 4 True 

2395 dtype: bool 

2396 

2397 which is equivalent to 

2398 

2399 >>> animals.duplicated(keep='first') 

2400 0 False 

2401 1 False 

2402 2 True 

2403 3 False 

2404 4 True 

2405 dtype: bool 

2406 

2407 By using 'last', the last occurrence of each set of duplicated values 

2408 is set on False and all others on True: 

2409 

2410 >>> animals.duplicated(keep='last') 

2411 0 True 

2412 1 False 

2413 2 True 

2414 3 False 

2415 4 False 

2416 dtype: bool 

2417 

2418 By setting keep on ``False``, all duplicates are True: 

2419 

2420 >>> animals.duplicated(keep=False) 

2421 0 True 

2422 1 False 

2423 2 True 

2424 3 False 

2425 4 True 

2426 dtype: bool 

2427 """ 

2428 res = self._duplicated(keep=keep) 

2429 result = self._constructor(res, index=self.index) 

2430 return result.__finalize__(self, method="duplicated") 

2431 

2432 def idxmin(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: 

2433 """ 

2434 Return the row label of the minimum value. 

2435 

2436 If multiple values equal the minimum, the first row label with that 

2437 value is returned. 

2438 

2439 Parameters 

2440 ---------- 

2441 axis : {0 or 'index'} 

2442 Unused. Parameter needed for compatibility with DataFrame. 

2443 skipna : bool, default True 

2444 Exclude NA/null values. If the entire Series is NA, the result 

2445 will be NA. 

2446 *args, **kwargs 

2447 Additional arguments and keywords have no effect but might be 

2448 accepted for compatibility with NumPy. 

2449 

2450 Returns 

2451 ------- 

2452 Index 

2453 Label of the minimum value. 

2454 

2455 Raises 

2456 ------ 

2457 ValueError 

2458 If the Series is empty. 

2459 

2460 See Also 

2461 -------- 

2462 numpy.argmin : Return indices of the minimum values 

2463 along the given axis. 

2464 DataFrame.idxmin : Return index of first occurrence of minimum 

2465 over requested axis. 

2466 Series.idxmax : Return index *label* of the first occurrence 

2467 of maximum of values. 

2468 

2469 Notes 

2470 ----- 

2471 This method is the Series version of ``ndarray.argmin``. This method 

2472 returns the label of the minimum, while ``ndarray.argmin`` returns 

2473 the position. To get the position, use ``series.values.argmin()``. 

2474 

2475 Examples 

2476 -------- 

2477 >>> s = pd.Series(data=[1, None, 4, 1], 

2478 ... index=['A', 'B', 'C', 'D']) 

2479 >>> s 

2480 A 1.0 

2481 B NaN 

2482 C 4.0 

2483 D 1.0 

2484 dtype: float64 

2485 

2486 >>> s.idxmin() 

2487 'A' 

2488 

2489 If `skipna` is False and there is an NA value in the data, 

2490 the function returns ``nan``. 

2491 

2492 >>> s.idxmin(skipna=False) 

2493 nan 

2494 """ 

2495 i = self.argmin(axis, skipna, *args, **kwargs) 

2496 if i == -1: 

2497 return np.nan 

2498 return self.index[i] 

2499 

2500 def idxmax(self, axis: Axis = 0, skipna: bool = True, *args, **kwargs) -> Hashable: 

2501 """ 

2502 Return the row label of the maximum value. 

2503 

2504 If multiple values equal the maximum, the first row label with that 

2505 value is returned. 

2506 

2507 Parameters 

2508 ---------- 

2509 axis : {0 or 'index'} 

2510 Unused. Parameter needed for compatibility with DataFrame. 

2511 skipna : bool, default True 

2512 Exclude NA/null values. If the entire Series is NA, the result 

2513 will be NA. 

2514 *args, **kwargs 

2515 Additional arguments and keywords have no effect but might be 

2516 accepted for compatibility with NumPy. 

2517 

2518 Returns 

2519 ------- 

2520 Index 

2521 Label of the maximum value. 

2522 

2523 Raises 

2524 ------ 

2525 ValueError 

2526 If the Series is empty. 

2527 

2528 See Also 

2529 -------- 

2530 numpy.argmax : Return indices of the maximum values 

2531 along the given axis. 

2532 DataFrame.idxmax : Return index of first occurrence of maximum 

2533 over requested axis. 

2534 Series.idxmin : Return index *label* of the first occurrence 

2535 of minimum of values. 

2536 

2537 Notes 

2538 ----- 

2539 This method is the Series version of ``ndarray.argmax``. This method 

2540 returns the label of the maximum, while ``ndarray.argmax`` returns 

2541 the position. To get the position, use ``series.values.argmax()``. 

2542 

2543 Examples 

2544 -------- 

2545 >>> s = pd.Series(data=[1, None, 4, 3, 4], 

2546 ... index=['A', 'B', 'C', 'D', 'E']) 

2547 >>> s 

2548 A 1.0 

2549 B NaN 

2550 C 4.0 

2551 D 3.0 

2552 E 4.0 

2553 dtype: float64 

2554 

2555 >>> s.idxmax() 

2556 'C' 

2557 

2558 If `skipna` is False and there is an NA value in the data, 

2559 the function returns ``nan``. 

2560 

2561 >>> s.idxmax(skipna=False) 

2562 nan 

2563 """ 

2564 i = self.argmax(axis, skipna, *args, **kwargs) 

2565 if i == -1: 

2566 return np.nan 

2567 return self.index[i] 

2568 

2569 def round(self, decimals: int = 0, *args, **kwargs) -> Series: 

2570 """ 

2571 Round each value in a Series to the given number of decimals. 

2572 

2573 Parameters 

2574 ---------- 

2575 decimals : int, default 0 

2576 Number of decimal places to round to. If decimals is negative, 

2577 it specifies the number of positions to the left of the decimal point. 

2578 *args, **kwargs 

2579 Additional arguments and keywords have no effect but might be 

2580 accepted for compatibility with NumPy. 

2581 

2582 Returns 

2583 ------- 

2584 Series 

2585 Rounded values of the Series. 

2586 

2587 See Also 

2588 -------- 

2589 numpy.around : Round values of an np.array. 

2590 DataFrame.round : Round values of a DataFrame. 

2591 

2592 Examples 

2593 -------- 

2594 >>> s = pd.Series([0.1, 1.3, 2.7]) 

2595 >>> s.round() 

2596 0 0.0 

2597 1 1.0 

2598 2 3.0 

2599 dtype: float64 

2600 """ 

2601 nv.validate_round(args, kwargs) 

2602 result = self._values.round(decimals) 

2603 result = self._constructor(result, index=self.index).__finalize__( 

2604 self, method="round" 

2605 ) 

2606 

2607 return result 

2608 

    # Overloads encode the return contract for type checkers:
    # scalar q -> float, list-like q -> Series.
    @overload
    def quantile(
        self, q: float = ..., interpolation: QuantileInterpolation = ...
    ) -> float:
        ...

    @overload
    def quantile(
        self,
        q: Sequence[float] | AnyArrayLike,
        interpolation: QuantileInterpolation = ...,
    ) -> Series:
        ...

    @overload
    def quantile(
        self,
        q: float | Sequence[float] | AnyArrayLike = ...,
        interpolation: QuantileInterpolation = ...,
    ) -> float | Series:
        ...

2630 

2631 def quantile( 

2632 self, 

2633 q: float | Sequence[float] | AnyArrayLike = 0.5, 

2634 interpolation: QuantileInterpolation = "linear", 

2635 ) -> float | Series: 

2636 """ 

2637 Return value at the given quantile. 

2638 

2639 Parameters 

2640 ---------- 

2641 q : float or array-like, default 0.5 (50% quantile) 

2642 The quantile(s) to compute, which can lie in range: 0 <= q <= 1. 

2643 interpolation : {'linear', 'lower', 'higher', 'midpoint', 'nearest'} 

2644 This optional parameter specifies the interpolation method to use, 

2645 when the desired quantile lies between two data points `i` and `j`: 

2646 

2647 * linear: `i + (j - i) * fraction`, where `fraction` is the 

2648 fractional part of the index surrounded by `i` and `j`. 

2649 * lower: `i`. 

2650 * higher: `j`. 

2651 * nearest: `i` or `j` whichever is nearest. 

2652 * midpoint: (`i` + `j`) / 2. 

2653 

2654 Returns 

2655 ------- 

2656 float or Series 

2657 If ``q`` is an array, a Series will be returned where the 

2658 index is ``q`` and the values are the quantiles, otherwise 

2659 a float will be returned. 

2660 

2661 See Also 

2662 -------- 

2663 core.window.Rolling.quantile : Calculate the rolling quantile. 

2664 numpy.percentile : Returns the q-th percentile(s) of the array elements. 

2665 

2666 Examples 

2667 -------- 

2668 >>> s = pd.Series([1, 2, 3, 4]) 

2669 >>> s.quantile(.5) 

2670 2.5 

2671 >>> s.quantile([.25, .5, .75]) 

2672 0.25 1.75 

2673 0.50 2.50 

2674 0.75 3.25 

2675 dtype: float64 

2676 """ 

2677 validate_percentile(q) 

2678 

2679 # We dispatch to DataFrame so that core.internals only has to worry 

2680 # about 2D cases. 

2681 df = self.to_frame() 

2682 

2683 result = df.quantile(q=q, interpolation=interpolation, numeric_only=False) 

2684 if result.ndim == 2: 

2685 result = result.iloc[:, 0] 

2686 

2687 if is_list_like(q): 

2688 result.name = self.name 

2689 return self._constructor(result, index=Float64Index(q), name=self.name) 

2690 else: 

2691 # scalar 

2692 return result.iloc[0] 

2693 

2694 def corr( 

2695 self, 

2696 other: Series, 

2697 method: Literal["pearson", "kendall", "spearman"] 

2698 | Callable[[np.ndarray, np.ndarray], float] = "pearson", 

2699 min_periods: int | None = None, 

2700 ) -> float: 

2701 """ 

2702 Compute correlation with `other` Series, excluding missing values. 

2703 

2704 The two `Series` objects are not required to be the same length and will be 

2705 aligned internally before the correlation function is applied. 

2706 

2707 Parameters 

2708 ---------- 

2709 other : Series 

2710 Series with which to compute the correlation. 

2711 method : {'pearson', 'kendall', 'spearman'} or callable 

2712 Method used to compute correlation: 

2713 

2714 - pearson : Standard correlation coefficient 

2715 - kendall : Kendall Tau correlation coefficient 

2716 - spearman : Spearman rank correlation 

2717 - callable: Callable with input two 1d ndarrays and returning a float. 

2718 

2719 .. warning:: 

2720 Note that the returned matrix from corr will have 1 along the 

2721 diagonals and will be symmetric regardless of the callable's 

2722 behavior. 

2723 min_periods : int, optional 

2724 Minimum number of observations needed to have a valid result. 

2725 

2726 Returns 

2727 ------- 

2728 float 

2729 Correlation with other. 

2730 

2731 See Also 

2732 -------- 

2733 DataFrame.corr : Compute pairwise correlation between columns. 

2734 DataFrame.corrwith : Compute pairwise correlation with another 

2735 DataFrame or Series. 

2736 

2737 Notes 

2738 ----- 

2739 Pearson, Kendall and Spearman correlation are currently computed using pairwise complete observations. 

2740 

2741 * `Pearson correlation coefficient <https://en.wikipedia.org/wiki/Pearson_correlation_coefficient>`_ 

2742 * `Kendall rank correlation coefficient <https://en.wikipedia.org/wiki/Kendall_rank_correlation_coefficient>`_ 

2743 * `Spearman's rank correlation coefficient <https://en.wikipedia.org/wiki/Spearman%27s_rank_correlation_coefficient>`_ 

2744 

2745 Examples 

2746 -------- 

2747 >>> def histogram_intersection(a, b): 

2748 ... v = np.minimum(a, b).sum().round(decimals=1) 

2749 ... return v 

2750 >>> s1 = pd.Series([.2, .0, .6, .2]) 

2751 >>> s2 = pd.Series([.3, .6, .0, .1]) 

2752 >>> s1.corr(s2, method=histogram_intersection) 

2753 0.3 

2754 """ # noqa:E501 

2755 this, other = self.align(other, join="inner", copy=False) 

2756 if len(this) == 0: 

2757 return np.nan 

2758 

2759 if method in ["pearson", "spearman", "kendall"] or callable(method): 

2760 return nanops.nancorr( 

2761 this.values, other.values, method=method, min_periods=min_periods 

2762 ) 

2763 

2764 raise ValueError( 

2765 "method must be either 'pearson', " 

2766 "'spearman', 'kendall', or a callable, " 

2767 f"'{method}' was supplied" 

2768 ) 

2769 

2770 def cov( 

2771 self, 

2772 other: Series, 

2773 min_periods: int | None = None, 

2774 ddof: int | None = 1, 

2775 ) -> float: 

2776 """ 

2777 Compute covariance with Series, excluding missing values. 

2778 

2779 The two `Series` objects are not required to be the same length and 

2780 will be aligned internally before the covariance is calculated. 

2781 

2782 Parameters 

2783 ---------- 

2784 other : Series 

2785 Series with which to compute the covariance. 

2786 min_periods : int, optional 

2787 Minimum number of observations needed to have a valid result. 

2788 ddof : int, default 1 

2789 Delta degrees of freedom. The divisor used in calculations 

2790 is ``N - ddof``, where ``N`` represents the number of elements. 

2791 

2792 .. versionadded:: 1.1.0 

2793 

2794 Returns 

2795 ------- 

2796 float 

2797 Covariance between Series and other normalized by N-1 

2798 (unbiased estimator). 

2799 

2800 See Also 

2801 -------- 

2802 DataFrame.cov : Compute pairwise covariance of columns. 

2803 

2804 Examples 

2805 -------- 

2806 >>> s1 = pd.Series([0.90010907, 0.13484424, 0.62036035]) 

2807 >>> s2 = pd.Series([0.12528585, 0.26962463, 0.51111198]) 

2808 >>> s1.cov(s2) 

2809 -0.01685762652715874 

2810 """ 

2811 this, other = self.align(other, join="inner", copy=False) 

2812 if len(this) == 0: 

2813 return np.nan 

2814 return nanops.nancov( 

2815 this.values, other.values, min_periods=min_periods, ddof=ddof 

2816 ) 

2817 

    # The docstring below is a template: {klass}, {extra_params}, {other_klass}
    # and {examples} are substituted by the @doc decorator at import time.
    @doc(
        klass="Series",
        extra_params="",
        other_klass="DataFrame",
        examples=dedent(
            """
        Difference with previous row

        >>> s = pd.Series([1, 1, 2, 3, 5, 8])
        >>> s.diff()
        0    NaN
        1    0.0
        2    1.0
        3    1.0
        4    2.0
        5    3.0
        dtype: float64

        Difference with 3rd previous row

        >>> s.diff(periods=3)
        0    NaN
        1    NaN
        2    NaN
        3    2.0
        4    4.0
        5    6.0
        dtype: float64

        Difference with following row

        >>> s.diff(periods=-1)
        0    0.0
        1   -1.0
        2   -1.0
        3   -2.0
        4   -3.0
        5    NaN
        dtype: float64

        Overflow in input dtype

        >>> s = pd.Series([1, 0], dtype=np.uint8)
        >>> s.diff()
        0      NaN
        1    255.0
        dtype: float64"""
        ),
    )
    def diff(self, periods: int = 1) -> Series:
        """
        First discrete difference of element.

        Calculates the difference of a {klass} element compared with another
        element in the {klass} (default is element in previous row).

        Parameters
        ----------
        periods : int, default 1
            Periods to shift for calculating difference, accepts negative
            values.
        {extra_params}
        Returns
        -------
        {klass}
            First differences of the Series.

        See Also
        --------
        {klass}.pct_change: Percent change over given number of periods.
        {klass}.shift: Shift index by desired number of periods with an
            optional time freq.
        {other_klass}.diff: First discrete difference of object.

        Notes
        -----
        For boolean dtypes, this uses :meth:`operator.xor` rather than
        :meth:`operator.sub`.
        The result is calculated according to current dtype in {klass},
        however dtype of the result is always float64.

        Examples
        --------
        {examples}
        """
        # The actual differencing (including the bool-xor special case noted
        # above) lives in the shared algorithms module.
        result = algorithms.diff(self._values, periods)
        return self._constructor(result, index=self.index).__finalize__(
            self, method="diff"
        )

2907 

2908 def autocorr(self, lag: int = 1) -> float: 

2909 """ 

2910 Compute the lag-N autocorrelation. 

2911 

2912 This method computes the Pearson correlation between 

2913 the Series and its shifted self. 

2914 

2915 Parameters 

2916 ---------- 

2917 lag : int, default 1 

2918 Number of lags to apply before performing autocorrelation. 

2919 

2920 Returns 

2921 ------- 

2922 float 

2923 The Pearson correlation between self and self.shift(lag). 

2924 

2925 See Also 

2926 -------- 

2927 Series.corr : Compute the correlation between two Series. 

2928 Series.shift : Shift index by desired number of periods. 

2929 DataFrame.corr : Compute pairwise correlation of columns. 

2930 DataFrame.corrwith : Compute pairwise correlation between rows or 

2931 columns of two DataFrame objects. 

2932 

2933 Notes 

2934 ----- 

2935 If the Pearson correlation is not well defined return 'NaN'. 

2936 

2937 Examples 

2938 -------- 

2939 >>> s = pd.Series([0.25, 0.5, 0.2, -0.05]) 

2940 >>> s.autocorr() # doctest: +ELLIPSIS 

2941 0.10355... 

2942 >>> s.autocorr(lag=2) # doctest: +ELLIPSIS 

2943 -0.99999... 

2944 

2945 If the Pearson correlation is not well defined, then 'NaN' is returned. 

2946 

2947 >>> s = pd.Series([1, 0, 0, 0]) 

2948 >>> s.autocorr() 

2949 nan 

2950 """ 

2951 return self.corr(self.shift(lag)) 

2952 

2953 def dot(self, other: AnyArrayLike) -> Series | np.ndarray: 

2954 """ 

2955 Compute the dot product between the Series and the columns of other. 

2956 

2957 This method computes the dot product between the Series and another 

2958 one, or the Series and each columns of a DataFrame, or the Series and 

2959 each columns of an array. 

2960 

2961 It can also be called using `self @ other` in Python >= 3.5. 

2962 

2963 Parameters 

2964 ---------- 

2965 other : Series, DataFrame or array-like 

2966 The other object to compute the dot product with its columns. 

2967 

2968 Returns 

2969 ------- 

2970 scalar, Series or numpy.ndarray 

2971 Return the dot product of the Series and other if other is a 

2972 Series, the Series of the dot product of Series and each rows of 

2973 other if other is a DataFrame or a numpy.ndarray between the Series 

2974 and each columns of the numpy array. 

2975 

2976 See Also 

2977 -------- 

2978 DataFrame.dot: Compute the matrix product with the DataFrame. 

2979 Series.mul: Multiplication of series and other, element-wise. 

2980 

2981 Notes 

2982 ----- 

2983 The Series and other has to share the same index if other is a Series 

2984 or a DataFrame. 

2985 

2986 Examples 

2987 -------- 

2988 >>> s = pd.Series([0, 1, 2, 3]) 

2989 >>> other = pd.Series([-1, 2, -3, 4]) 

2990 >>> s.dot(other) 

2991 8 

2992 >>> s @ other 

2993 8 

2994 >>> df = pd.DataFrame([[0, 1], [-2, 3], [4, -5], [6, 7]]) 

2995 >>> s.dot(df) 

2996 0 24 

2997 1 14 

2998 dtype: int64 

2999 >>> arr = np.array([[0, 1], [-2, 3], [4, -5], [6, 7]]) 

3000 >>> s.dot(arr) 

3001 array([24, 14]) 

3002 """ 

3003 if isinstance(other, (Series, ABCDataFrame)): 

3004 common = self.index.union(other.index) 

3005 if len(common) > len(self.index) or len(common) > len(other.index): 

3006 raise ValueError("matrices are not aligned") 

3007 

3008 left = self.reindex(index=common, copy=False) 

3009 right = other.reindex(index=common, copy=False) 

3010 lvals = left.values 

3011 rvals = right.values 

3012 else: 

3013 lvals = self.values 

3014 rvals = np.asarray(other) 

3015 if lvals.shape[0] != rvals.shape[0]: 

3016 raise Exception( 

3017 f"Dot product shape mismatch, {lvals.shape} vs {rvals.shape}" 

3018 ) 

3019 

3020 if isinstance(other, ABCDataFrame): 

3021 return self._constructor( 

3022 np.dot(lvals, rvals), index=other.columns 

3023 ).__finalize__(self, method="dot") 

3024 elif isinstance(other, Series): 

3025 return np.dot(lvals, rvals) 

3026 elif isinstance(rvals, np.ndarray): 

3027 return np.dot(lvals, rvals) 

3028 else: # pragma: no cover 

3029 raise TypeError(f"unsupported type: {type(other)}") 

3030 

3031 def __matmul__(self, other): 

3032 """ 

3033 Matrix multiplication using binary `@` operator in Python>=3.5. 

3034 """ 

3035 return self.dot(other) 

3036 

3037 def __rmatmul__(self, other): 

3038 """ 

3039 Matrix multiplication using binary `@` operator in Python>=3.5. 

3040 """ 

3041 return self.dot(np.transpose(other)) 

3042 

    # Thin wrapper: the @doc decorator borrows IndexOpsMixin.searchsorted's
    # docstring with klass="Series"; the work is delegated unchanged.
    @doc(base.IndexOpsMixin.searchsorted, klass="Series")
    # Signature of "searchsorted" incompatible with supertype "IndexOpsMixin"
    def searchsorted(  # type: ignore[override]
        self,
        value: NumpyValueArrayLike | ExtensionArray,
        side: Literal["left", "right"] = "left",
        sorter: NumpySorter = None,
    ) -> npt.NDArray[np.intp] | np.intp:
        return base.IndexOpsMixin.searchsorted(self, value, side=side, sorter=sorter)

3052 

3053 # ------------------------------------------------------------------- 

3054 # Combination 

3055 

3056 def append( 

3057 self, to_append, ignore_index: bool = False, verify_integrity: bool = False 

3058 ) -> Series: 

3059 """ 

3060 Concatenate two or more Series. 

3061 

3062 .. deprecated:: 1.4.0 

3063 Use :func:`concat` instead. For further details see 

3064 :ref:`whatsnew_140.deprecations.frame_series_append` 

3065 

3066 Parameters 

3067 ---------- 

3068 to_append : Series or list/tuple of Series 

3069 Series to append with self. 

3070 ignore_index : bool, default False 

3071 If True, the resulting axis will be labeled 0, 1, …, n - 1. 

3072 verify_integrity : bool, default False 

3073 If True, raise Exception on creating index with duplicates. 

3074 

3075 Returns 

3076 ------- 

3077 Series 

3078 Concatenated Series. 

3079 

3080 See Also 

3081 -------- 

3082 concat : General function to concatenate DataFrame or Series objects. 

3083 

3084 Notes 

3085 ----- 

3086 Iteratively appending to a Series can be more computationally intensive 

3087 than a single concatenate. A better solution is to append values to a 

3088 list and then concatenate the list with the original Series all at 

3089 once. 

3090 

3091 Examples 

3092 -------- 

3093 >>> s1 = pd.Series([1, 2, 3]) 

3094 >>> s2 = pd.Series([4, 5, 6]) 

3095 >>> s3 = pd.Series([4, 5, 6], index=[3, 4, 5]) 

3096 >>> s1.append(s2) 

3097 0 1 

3098 1 2 

3099 2 3 

3100 0 4 

3101 1 5 

3102 2 6 

3103 dtype: int64 

3104 

3105 >>> s1.append(s3) 

3106 0 1 

3107 1 2 

3108 2 3 

3109 3 4 

3110 4 5 

3111 5 6 

3112 dtype: int64 

3113 

3114 With `ignore_index` set to True: 

3115 

3116 >>> s1.append(s2, ignore_index=True) 

3117 0 1 

3118 1 2 

3119 2 3 

3120 3 4 

3121 4 5 

3122 5 6 

3123 dtype: int64 

3124 

3125 With `verify_integrity` set to True: 

3126 

3127 >>> s1.append(s2, verify_integrity=True) 

3128 Traceback (most recent call last): 

3129 ... 

3130 ValueError: Indexes have overlapping values: [0, 1, 2] 

3131 """ 

3132 warnings.warn( 

3133 "The series.append method is deprecated " 

3134 "and will be removed from pandas in a future version. " 

3135 "Use pandas.concat instead.", 

3136 FutureWarning, 

3137 stacklevel=find_stack_level(), 

3138 ) 

3139 

3140 return self._append(to_append, ignore_index, verify_integrity) 

3141 

3142 def _append( 

3143 self, to_append, ignore_index: bool = False, verify_integrity: bool = False 

3144 ): 

3145 from pandas.core.reshape.concat import concat 

3146 

3147 if isinstance(to_append, (list, tuple)): 

3148 to_concat = [self] 

3149 to_concat.extend(to_append) 

3150 else: 

3151 to_concat = [self, to_append] 

3152 if any(isinstance(x, (ABCDataFrame,)) for x in to_concat[1:]): 

3153 msg = "to_append should be a Series or list/tuple of Series, got DataFrame" 

3154 raise TypeError(msg) 

3155 return concat( 

3156 to_concat, ignore_index=ignore_index, verify_integrity=verify_integrity 

3157 ) 

3158 

3159 def _binop(self, other: Series, func, level=None, fill_value=None): 

3160 """ 

3161 Perform generic binary operation with optional fill value. 

3162 

3163 Parameters 

3164 ---------- 

3165 other : Series 

3166 func : binary operator 

3167 fill_value : float or object 

3168 Value to substitute for NA/null values. If both Series are NA in a 

3169 location, the result will be NA regardless of the passed fill value. 

3170 level : int or level name, default None 

3171 Broadcast across a level, matching Index values on the 

3172 passed MultiIndex level. 

3173 

3174 Returns 

3175 ------- 

3176 Series 

3177 """ 

3178 if not isinstance(other, Series): 

3179 raise AssertionError("Other operand must be Series") 

3180 

3181 this = self 

3182 

3183 if not self.index.equals(other.index): 

3184 this, other = self.align(other, level=level, join="outer", copy=False) 

3185 

3186 this_vals, other_vals = ops.fill_binop(this._values, other._values, fill_value) 

3187 

3188 with np.errstate(all="ignore"): 

3189 result = func(this_vals, other_vals) 

3190 

3191 name = ops.get_op_result_name(self, other) 

3192 return this._construct_result(result, name) 

3193 

3194 def _construct_result( 

3195 self, result: ArrayLike | tuple[ArrayLike, ArrayLike], name: Hashable 

3196 ) -> Series | tuple[Series, Series]: 

3197 """ 

3198 Construct an appropriately-labelled Series from the result of an op. 

3199 

3200 Parameters 

3201 ---------- 

3202 result : ndarray or ExtensionArray 

3203 name : Label 

3204 

3205 Returns 

3206 ------- 

3207 Series 

3208 In the case of __divmod__ or __rdivmod__, a 2-tuple of Series. 

3209 """ 

3210 if isinstance(result, tuple): 

3211 # produced by divmod or rdivmod 

3212 

3213 res1 = self._construct_result(result[0], name=name) 

3214 res2 = self._construct_result(result[1], name=name) 

3215 

3216 # GH#33427 assertions to keep mypy happy 

3217 assert isinstance(res1, Series) 

3218 assert isinstance(res2, Series) 

3219 return (res1, res2) 

3220 

3221 # We do not pass dtype to ensure that the Series constructor 

3222 # does inference in the case where `result` has object-dtype. 

3223 out = self._constructor(result, index=self.index) 

3224 out = out.__finalize__(self) 

3225 

3226 # Set the result's name after __finalize__ is called because __finalize__ 

3227 # would set it back to self.name 

3228 out.name = name 

3229 return out 

3230 

    @doc(
        _shared_docs["compare"],
        """
Returns
-------
Series or DataFrame
    If axis is 0 or 'index' the result will be a Series.
    The resulting index will be a MultiIndex with 'self' and 'other'
    stacked alternately at the inner level.

    If axis is 1 or 'columns' the result will be a DataFrame.
    It will have two columns namely 'self' and 'other'.

See Also
--------
DataFrame.compare : Compare with another DataFrame and show differences.

Notes
-----
Matching NaNs will not appear as a difference.

Examples
--------
>>> s1 = pd.Series(["a", "b", "c", "d", "e"])
>>> s2 = pd.Series(["a", "a", "c", "b", "e"])

Align the differences on columns

>>> s1.compare(s2)
  self other
1    b     a
3    d     b

Stack the differences on indices

>>> s1.compare(s2, align_axis=0)
1  self     b
   other    a
3  self     d
   other    b
dtype: object

Keep all original rows

>>> s1.compare(s2, keep_shape=True)
  self other
0  NaN   NaN
1    b     a
2  NaN   NaN
3    d     b
4  NaN   NaN

Keep all original rows and also all original values

>>> s1.compare(s2, keep_shape=True, keep_equal=True)
  self other
0    a     a
1    b     a
2    c     c
3    d     b
4    e     e
""",
        klass=_shared_doc_kwargs["klass"],
    )
    # Thin wrapper: the comparison logic lives in the superclass (NDFrame);
    # the public docstring is injected by the @doc decorator above.
    def compare(
        self,
        other: Series,
        align_axis: Axis = 1,
        keep_shape: bool = False,
        keep_equal: bool = False,
        result_names: Suffixes = ("self", "other"),
    ) -> DataFrame | Series:
        return super().compare(
            other=other,
            align_axis=align_axis,
            keep_shape=keep_shape,
            keep_equal=keep_equal,
            result_names=result_names,
        )

3310 

3311 def combine( 

3312 self, 

3313 other: Series | Hashable, 

3314 func: Callable[[Hashable, Hashable], Hashable], 

3315 fill_value: Hashable = None, 

3316 ) -> Series: 

3317 """ 

3318 Combine the Series with a Series or scalar according to `func`. 

3319 

3320 Combine the Series and `other` using `func` to perform elementwise 

3321 selection for combined Series. 

3322 `fill_value` is assumed when value is missing at some index 

3323 from one of the two objects being combined. 

3324 

3325 Parameters 

3326 ---------- 

3327 other : Series or scalar 

3328 The value(s) to be combined with the `Series`. 

3329 func : function 

3330 Function that takes two scalars as inputs and returns an element. 

3331 fill_value : scalar, optional 

3332 The value to assume when an index is missing from 

3333 one Series or the other. The default specifies to use the 

3334 appropriate NaN value for the underlying dtype of the Series. 

3335 

3336 Returns 

3337 ------- 

3338 Series 

3339 The result of combining the Series with the other object. 

3340 

3341 See Also 

3342 -------- 

3343 Series.combine_first : Combine Series values, choosing the calling 

3344 Series' values first. 

3345 

3346 Examples 

3347 -------- 

3348 Consider 2 Datasets ``s1`` and ``s2`` containing 

3349 highest clocked speeds of different birds. 

3350 

3351 >>> s1 = pd.Series({'falcon': 330.0, 'eagle': 160.0}) 

3352 >>> s1 

3353 falcon 330.0 

3354 eagle 160.0 

3355 dtype: float64 

3356 >>> s2 = pd.Series({'falcon': 345.0, 'eagle': 200.0, 'duck': 30.0}) 

3357 >>> s2 

3358 falcon 345.0 

3359 eagle 200.0 

3360 duck 30.0 

3361 dtype: float64 

3362 

3363 Now, to combine the two datasets and view the highest speeds 

3364 of the birds across the two datasets 

3365 

3366 >>> s1.combine(s2, max) 

3367 duck NaN 

3368 eagle 200.0 

3369 falcon 345.0 

3370 dtype: float64 

3371 

3372 In the previous example, the resulting value for duck is missing, 

3373 because the maximum of a NaN and a float is a NaN. 

3374 So, in the example, we set ``fill_value=0``, 

3375 so the maximum value returned will be the value from some dataset. 

3376 

3377 >>> s1.combine(s2, max, fill_value=0) 

3378 duck 30.0 

3379 eagle 200.0 

3380 falcon 345.0 

3381 dtype: float64 

3382 """ 

3383 if fill_value is None: 

3384 fill_value = na_value_for_dtype(self.dtype, compat=False) 

3385 

3386 if isinstance(other, Series): 

3387 # If other is a Series, result is based on union of Series, 

3388 # so do this element by element 

3389 new_index = self.index.union(other.index) 

3390 new_name = ops.get_op_result_name(self, other) 

3391 new_values = np.empty(len(new_index), dtype=object) 

3392 for i, idx in enumerate(new_index): 

3393 lv = self.get(idx, fill_value) 

3394 rv = other.get(idx, fill_value) 

3395 with np.errstate(all="ignore"): 

3396 new_values[i] = func(lv, rv) 

3397 else: 

3398 # Assume that other is a scalar, so apply the function for 

3399 # each element in the Series 

3400 new_index = self.index 

3401 new_values = np.empty(len(new_index), dtype=object) 

3402 with np.errstate(all="ignore"): 

3403 new_values[:] = [func(lv, other) for lv in self._values] 

3404 new_name = self.name 

3405 

3406 # try_float=False is to match agg_series 

3407 npvalues = lib.maybe_convert_objects(new_values, try_float=False) 

3408 res_values = maybe_cast_pointwise_result(npvalues, self.dtype, same_dtype=False) 

3409 return self._constructor(res_values, index=new_index, name=new_name) 

3410 

3411 def combine_first(self, other) -> Series: 

3412 """ 

3413 Update null elements with value in the same location in 'other'. 

3414 

3415 Combine two Series objects by filling null values in one Series with 

3416 non-null values from the other Series. Result index will be the union 

3417 of the two indexes. 

3418 

3419 Parameters 

3420 ---------- 

3421 other : Series 

3422 The value(s) to be used for filling null values. 

3423 

3424 Returns 

3425 ------- 

3426 Series 

3427 The result of combining the provided Series with the other object. 

3428 

3429 See Also 

3430 -------- 

3431 Series.combine : Perform element-wise operation on two Series 

3432 using a given function. 

3433 

3434 Examples 

3435 -------- 

3436 >>> s1 = pd.Series([1, np.nan]) 

3437 >>> s2 = pd.Series([3, 4, 5]) 

3438 >>> s1.combine_first(s2) 

3439 0 1.0 

3440 1 4.0 

3441 2 5.0 

3442 dtype: float64 

3443 

3444 Null values still persist if the location of that null value 

3445 does not exist in `other` 

3446 

3447 >>> s1 = pd.Series({'falcon': np.nan, 'eagle': 160.0}) 

3448 >>> s2 = pd.Series({'eagle': 200.0, 'duck': 30.0}) 

3449 >>> s1.combine_first(s2) 

3450 duck 30.0 

3451 eagle 160.0 

3452 falcon NaN 

3453 dtype: float64 

3454 """ 

3455 new_index = self.index.union(other.index) 

3456 this = self.reindex(new_index, copy=False) 

3457 other = other.reindex(new_index, copy=False) 

3458 if this.dtype.kind == "M" and other.dtype.kind != "M": 

3459 other = to_datetime(other) 

3460 

3461 return this.where(notna(this), other) 

3462 

3463 def update(self, other: Series | Sequence | Mapping) -> None: 

3464 """ 

3465 Modify Series in place using values from passed Series. 

3466 

3467 Uses non-NA values from passed Series to make updates. Aligns 

3468 on index. 

3469 

3470 Parameters 

3471 ---------- 

3472 other : Series, or object coercible into Series 

3473 

3474 Examples 

3475 -------- 

3476 >>> s = pd.Series([1, 2, 3]) 

3477 >>> s.update(pd.Series([4, 5, 6])) 

3478 >>> s 

3479 0 4 

3480 1 5 

3481 2 6 

3482 dtype: int64 

3483 

3484 >>> s = pd.Series(['a', 'b', 'c']) 

3485 >>> s.update(pd.Series(['d', 'e'], index=[0, 2])) 

3486 >>> s 

3487 0 d 

3488 1 b 

3489 2 e 

3490 dtype: object 

3491 

3492 >>> s = pd.Series([1, 2, 3]) 

3493 >>> s.update(pd.Series([4, 5, 6, 7, 8])) 

3494 >>> s 

3495 0 4 

3496 1 5 

3497 2 6 

3498 dtype: int64 

3499 

3500 If ``other`` contains NaNs the corresponding values are not updated 

3501 in the original Series. 

3502 

3503 >>> s = pd.Series([1, 2, 3]) 

3504 >>> s.update(pd.Series([4, np.nan, 6])) 

3505 >>> s 

3506 0 4 

3507 1 2 

3508 2 6 

3509 dtype: int64 

3510 

3511 ``other`` can also be a non-Series object type 

3512 that is coercible into a Series 

3513 

3514 >>> s = pd.Series([1, 2, 3]) 

3515 >>> s.update([4, np.nan, 6]) 

3516 >>> s 

3517 0 4 

3518 1 2 

3519 2 6 

3520 dtype: int64 

3521 

3522 >>> s = pd.Series([1, 2, 3]) 

3523 >>> s.update({1: 9}) 

3524 >>> s 

3525 0 1 

3526 1 9 

3527 2 3 

3528 dtype: int64 

3529 """ 

3530 

3531 if not isinstance(other, Series): 

3532 other = Series(other) 

3533 

3534 other = other.reindex_like(self) 

3535 mask = notna(other) 

3536 

3537 self._mgr = self._mgr.putmask(mask=mask, new=other) 

3538 self._maybe_update_cacher() 

3539 

3540 # ---------------------------------------------------------------------- 

3541 # Reindexing, sorting 

3542 

    # error: Signature of "sort_values" incompatible with supertype "NDFrame"
    @overload  # type: ignore[override]
    def sort_values(
        self,
        *,
        axis: Axis = ...,
        ascending: bool | int | Sequence[bool] | Sequence[int] = ...,
        inplace: Literal[False] = ...,
        kind: str = ...,
        na_position: str = ...,
        ignore_index: bool = ...,
        key: ValueKeyFunc = ...,
    ) -> Series:
        # Overload: with inplace=False (the default) a new sorted Series
        # is returned.
        ...

3557 

    @overload
    def sort_values(
        self,
        *,
        axis: Axis = ...,
        ascending: bool | int | Sequence[bool] | Sequence[int] = ...,
        inplace: Literal[True],
        kind: str = ...,
        na_position: str = ...,
        ignore_index: bool = ...,
        key: ValueKeyFunc = ...,
    ) -> None:
        # Overload: with inplace=True the Series is sorted in place and
        # None is returned.
        ...

3571 

3572 # error: Signature of "sort_values" incompatible with supertype "NDFrame" 

3573 @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"]) 

3574 def sort_values( # type: ignore[override] 

3575 self, 

3576 axis: Axis = 0, 

3577 ascending: bool | int | Sequence[bool] | Sequence[int] = True, 

3578 inplace: bool = False, 

3579 kind: str = "quicksort", 

3580 na_position: str = "last", 

3581 ignore_index: bool = False, 

3582 key: ValueKeyFunc = None, 

3583 ) -> Series | None: 

3584 """ 

3585 Sort by the values. 

3586 

3587 Sort a Series in ascending or descending order by some 

3588 criterion. 

3589 

3590 Parameters 

3591 ---------- 

3592 axis : {0 or 'index'} 

3593 Unused. Parameter needed for compatibility with DataFrame. 

3594 ascending : bool or list of bools, default True 

3595 If True, sort values in ascending order, otherwise descending. 

3596 inplace : bool, default False 

3597 If True, perform operation in-place. 

3598 kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' 

3599 Choice of sorting algorithm. See also :func:`numpy.sort` for more 

3600 information. 'mergesort' and 'stable' are the only stable algorithms. 

3601 na_position : {'first' or 'last'}, default 'last' 

3602 Argument 'first' puts NaNs at the beginning, 'last' puts NaNs at 

3603 the end. 

3604 ignore_index : bool, default False 

3605 If True, the resulting axis will be labeled 0, 1, …, n - 1. 

3606 

3607 .. versionadded:: 1.0.0 

3608 

3609 key : callable, optional 

3610 If not None, apply the key function to the series values 

3611 before sorting. This is similar to the `key` argument in the 

3612 builtin :meth:`sorted` function, with the notable difference that 

3613 this `key` function should be *vectorized*. It should expect a 

3614 ``Series`` and return an array-like. 

3615 

3616 .. versionadded:: 1.1.0 

3617 

3618 Returns 

3619 ------- 

3620 Series or None 

3621 Series ordered by values or None if ``inplace=True``. 

3622 

3623 See Also 

3624 -------- 

3625 Series.sort_index : Sort by the Series indices. 

3626 DataFrame.sort_values : Sort DataFrame by the values along either axis. 

3627 DataFrame.sort_index : Sort DataFrame by indices. 

3628 

3629 Examples 

3630 -------- 

3631 >>> s = pd.Series([np.nan, 1, 3, 10, 5]) 

3632 >>> s 

3633 0 NaN 

3634 1 1.0 

3635 2 3.0 

3636 3 10.0 

3637 4 5.0 

3638 dtype: float64 

3639 

3640 Sort values ascending order (default behaviour) 

3641 

3642 >>> s.sort_values(ascending=True) 

3643 1 1.0 

3644 2 3.0 

3645 4 5.0 

3646 3 10.0 

3647 0 NaN 

3648 dtype: float64 

3649 

3650 Sort values descending order 

3651 

3652 >>> s.sort_values(ascending=False) 

3653 3 10.0 

3654 4 5.0 

3655 2 3.0 

3656 1 1.0 

3657 0 NaN 

3658 dtype: float64 

3659 

3660 Sort values inplace 

3661 

3662 >>> s.sort_values(ascending=False, inplace=True) 

3663 >>> s 

3664 3 10.0 

3665 4 5.0 

3666 2 3.0 

3667 1 1.0 

3668 0 NaN 

3669 dtype: float64 

3670 

3671 Sort values putting NAs first 

3672 

3673 >>> s.sort_values(na_position='first') 

3674 0 NaN 

3675 1 1.0 

3676 2 3.0 

3677 4 5.0 

3678 3 10.0 

3679 dtype: float64 

3680 

3681 Sort a series of strings 

3682 

3683 >>> s = pd.Series(['z', 'b', 'd', 'a', 'c']) 

3684 >>> s 

3685 0 z 

3686 1 b 

3687 2 d 

3688 3 a 

3689 4 c 

3690 dtype: object 

3691 

3692 >>> s.sort_values() 

3693 3 a 

3694 1 b 

3695 4 c 

3696 2 d 

3697 0 z 

3698 dtype: object 

3699 

3700 Sort using a key function. Your `key` function will be 

3701 given the ``Series`` of values and should return an array-like. 

3702 

3703 >>> s = pd.Series(['a', 'B', 'c', 'D', 'e']) 

3704 >>> s.sort_values() 

3705 1 B 

3706 3 D 

3707 0 a 

3708 2 c 

3709 4 e 

3710 dtype: object 

3711 >>> s.sort_values(key=lambda x: x.str.lower()) 

3712 0 a 

3713 1 B 

3714 2 c 

3715 3 D 

3716 4 e 

3717 dtype: object 

3718 

3719 NumPy ufuncs work well here. For example, we can 

3720 sort by the ``sin`` of the value 

3721 

3722 >>> s = pd.Series([-4, -2, 0, 2, 4]) 

3723 >>> s.sort_values(key=np.sin) 

3724 1 -2 

3725 4 4 

3726 2 0 

3727 0 -4 

3728 3 2 

3729 dtype: int64 

3730 

3731 More complicated user-defined functions can be used, 

3732 as long as they expect a Series and return an array-like 

3733 

3734 >>> s.sort_values(key=lambda x: (np.tan(x.cumsum()))) 

3735 0 -4 

3736 3 2 

3737 4 4 

3738 1 -2 

3739 2 0 

3740 dtype: int64 

3741 """ 

3742 inplace = validate_bool_kwarg(inplace, "inplace") 

3743 # Validate the axis parameter 

3744 self._get_axis_number(axis) 

3745 

3746 # GH 5856/5853 

3747 if inplace and self._is_cached: 

3748 raise ValueError( 

3749 "This Series is a view of some other array, to " 

3750 "sort in-place you must create a copy" 

3751 ) 

3752 

3753 if is_list_like(ascending): 

3754 ascending = cast(Sequence[Union[bool, int]], ascending) 

3755 if len(ascending) != 1: 

3756 raise ValueError( 

3757 f"Length of ascending ({len(ascending)}) must be 1 for Series" 

3758 ) 

3759 ascending = ascending[0] 

3760 

3761 ascending = validate_ascending(ascending) 

3762 

3763 if na_position not in ["first", "last"]: 

3764 raise ValueError(f"invalid na_position: {na_position}") 

3765 

3766 # GH 35922. Make sorting stable by leveraging nargsort 

3767 values_to_sort = ensure_key_mapped(self, key)._values if key else self._values 

3768 sorted_index = nargsort(values_to_sort, kind, bool(ascending), na_position) 

3769 

3770 result = self._constructor( 

3771 self._values[sorted_index], index=self.index[sorted_index] 

3772 ) 

3773 

3774 if ignore_index: 

3775 result.index = default_index(len(sorted_index)) 

3776 

3777 if not inplace: 

3778 return result.__finalize__(self, method="sort_values") 

3779 self._update_inplace(result) 

3780 return None 

3781 

    @overload
    def sort_index(
        self,
        *,
        axis: Axis = ...,
        level: IndexLabel = ...,
        ascending: bool | Sequence[bool] = ...,
        inplace: Literal[True],
        kind: SortKind = ...,
        na_position: NaPosition = ...,
        sort_remaining: bool = ...,
        ignore_index: bool = ...,
        key: IndexKeyFunc = ...,
    ) -> None:
        # Overload: with inplace=True the Series is sorted in place and
        # None is returned.
        ...

3797 

    @overload
    def sort_index(
        self,
        *,
        axis: Axis = ...,
        level: IndexLabel = ...,
        ascending: bool | Sequence[bool] = ...,
        inplace: Literal[False] = ...,
        kind: SortKind = ...,
        na_position: NaPosition = ...,
        sort_remaining: bool = ...,
        ignore_index: bool = ...,
        key: IndexKeyFunc = ...,
    ) -> Series:
        # Overload: with inplace=False (the default) a new sorted Series
        # is returned.
        ...

3813 

    @overload
    def sort_index(
        self,
        *,
        axis: Axis = ...,
        level: IndexLabel = ...,
        ascending: bool | Sequence[bool] = ...,
        inplace: bool = ...,
        kind: SortKind = ...,
        na_position: NaPosition = ...,
        sort_remaining: bool = ...,
        ignore_index: bool = ...,
        key: IndexKeyFunc = ...,
    ) -> Series | None:
        # Overload: catch-all when inplace is a plain (non-literal) bool.
        ...

3829 

3830 # error: Signature of "sort_index" incompatible with supertype "NDFrame" 

3831 @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"]) 

3832 def sort_index( # type: ignore[override] 

3833 self, 

3834 axis: Axis = 0, 

3835 level: IndexLabel = None, 

3836 ascending: bool | Sequence[bool] = True, 

3837 inplace: bool = False, 

3838 kind: SortKind = "quicksort", 

3839 na_position: NaPosition = "last", 

3840 sort_remaining: bool = True, 

3841 ignore_index: bool = False, 

3842 key: IndexKeyFunc = None, 

3843 ) -> Series | None: 

3844 """ 

3845 Sort Series by index labels. 

3846 

3847 Returns a new Series sorted by label if `inplace` argument is 

3848 ``False``, otherwise updates the original series and returns None. 

3849 

3850 Parameters 

3851 ---------- 

3852 axis : {0 or 'index'} 

3853 Unused. Parameter needed for compatibility with DataFrame. 

3854 level : int, optional 

3855 If not None, sort on values in specified index level(s). 

3856 ascending : bool or list-like of bools, default True 

3857 Sort ascending vs. descending. When the index is a MultiIndex the 

3858 sort direction can be controlled for each level individually. 

3859 inplace : bool, default False 

3860 If True, perform operation in-place. 

3861 kind : {'quicksort', 'mergesort', 'heapsort', 'stable'}, default 'quicksort' 

3862 Choice of sorting algorithm. See also :func:`numpy.sort` for more 

3863 information. 'mergesort' and 'stable' are the only stable algorithms. For 

3864 DataFrames, this option is only applied when sorting on a single 

3865 column or label. 

3866 na_position : {'first', 'last'}, default 'last' 

3867 If 'first' puts NaNs at the beginning, 'last' puts NaNs at the end. 

3868 Not implemented for MultiIndex. 

3869 sort_remaining : bool, default True 

3870 If True and sorting by level and index is multilevel, sort by other 

3871 levels too (in order) after sorting by specified level. 

3872 ignore_index : bool, default False 

3873 If True, the resulting axis will be labeled 0, 1, …, n - 1. 

3874 

3875 .. versionadded:: 1.0.0 

3876 

3877 key : callable, optional 

3878 If not None, apply the key function to the index values 

3879 before sorting. This is similar to the `key` argument in the 

3880 builtin :meth:`sorted` function, with the notable difference that 

3881 this `key` function should be *vectorized*. It should expect an 

3882 ``Index`` and return an ``Index`` of the same shape. 

3883 

3884 .. versionadded:: 1.1.0 

3885 

3886 Returns 

3887 ------- 

3888 Series or None 

3889 The original Series sorted by the labels or None if ``inplace=True``. 

3890 

3891 See Also 

3892 -------- 

3893 DataFrame.sort_index: Sort DataFrame by the index. 

3894 DataFrame.sort_values: Sort DataFrame by the value. 

3895 Series.sort_values : Sort Series by the value. 

3896 

3897 Examples 

3898 -------- 

3899 >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, 4]) 

3900 >>> s.sort_index() 

3901 1 c 

3902 2 b 

3903 3 a 

3904 4 d 

3905 dtype: object 

3906 

3907 Sort Descending 

3908 

3909 >>> s.sort_index(ascending=False) 

3910 4 d 

3911 3 a 

3912 2 b 

3913 1 c 

3914 dtype: object 

3915 

3916 Sort Inplace 

3917 

3918 >>> s.sort_index(inplace=True) 

3919 >>> s 

3920 1 c 

3921 2 b 

3922 3 a 

3923 4 d 

3924 dtype: object 

3925 

3926 By default NaNs are put at the end, but use `na_position` to place 

3927 them at the beginning 

3928 

3929 >>> s = pd.Series(['a', 'b', 'c', 'd'], index=[3, 2, 1, np.nan]) 

3930 >>> s.sort_index(na_position='first') 

3931 NaN d 

3932 1.0 c 

3933 2.0 b 

3934 3.0 a 

3935 dtype: object 

3936 

3937 Specify index level to sort 

3938 

3939 >>> arrays = [np.array(['qux', 'qux', 'foo', 'foo', 

3940 ... 'baz', 'baz', 'bar', 'bar']), 

3941 ... np.array(['two', 'one', 'two', 'one', 

3942 ... 'two', 'one', 'two', 'one'])] 

3943 >>> s = pd.Series([1, 2, 3, 4, 5, 6, 7, 8], index=arrays) 

3944 >>> s.sort_index(level=1) 

3945 bar one 8 

3946 baz one 6 

3947 foo one 4 

3948 qux one 2 

3949 bar two 7 

3950 baz two 5 

3951 foo two 3 

3952 qux two 1 

3953 dtype: int64 

3954 

3955 Does not sort by remaining levels when sorting by levels 

3956 

3957 >>> s.sort_index(level=1, sort_remaining=False) 

3958 qux one 2 

3959 foo one 4 

3960 baz one 6 

3961 bar one 8 

3962 qux two 1 

3963 foo two 3 

3964 baz two 5 

3965 bar two 7 

3966 dtype: int64 

3967 

3968 Apply a key function before sorting 

3969 

3970 >>> s = pd.Series([1, 2, 3, 4], index=['A', 'b', 'C', 'd']) 

3971 >>> s.sort_index(key=lambda x : x.str.lower()) 

3972 A 1 

3973 b 2 

3974 C 3 

3975 d 4 

3976 dtype: int64 

3977 """ 

3978 

3979 return super().sort_index( 

3980 axis=axis, 

3981 level=level, 

3982 ascending=ascending, 

3983 inplace=inplace, 

3984 kind=kind, 

3985 na_position=na_position, 

3986 sort_remaining=sort_remaining, 

3987 ignore_index=ignore_index, 

3988 key=key, 

3989 ) 

3990 

3991 def argsort( 

3992 self, 

3993 axis: Axis = 0, 

3994 kind: SortKind = "quicksort", 

3995 order: None = None, 

3996 ) -> Series: 

3997 """ 

3998 Return the integer indices that would sort the Series values. 

3999 

4000 Override ndarray.argsort. Argsorts the value, omitting NA/null values, 

4001 and places the result in the same locations as the non-NA values. 

4002 

4003 Parameters 

4004 ---------- 

4005 axis : {0 or 'index'} 

4006 Unused. Parameter needed for compatibility with DataFrame. 

4007 kind : {'mergesort', 'quicksort', 'heapsort', 'stable'}, default 'quicksort' 

4008 Choice of sorting algorithm. See :func:`numpy.sort` for more 

4009 information. 'mergesort' and 'stable' are the only stable algorithms. 

4010 order : None 

4011 Has no effect but is accepted for compatibility with numpy. 

4012 

4013 Returns 

4014 ------- 

4015 Series[np.intp] 

4016 Positions of values within the sort order with -1 indicating 

4017 nan values. 

4018 

4019 See Also 

4020 -------- 

4021 numpy.ndarray.argsort : Returns the indices that would sort this array. 

4022 """ 

4023 values = self._values 

4024 mask = isna(values) 

4025 

4026 if mask.any(): 

4027 result = np.full(len(self), -1, dtype=np.intp) 

4028 notmask = ~mask 

4029 result[notmask] = np.argsort(values[notmask], kind=kind) 

4030 else: 

4031 result = np.argsort(values, kind=kind) 

4032 

4033 res = self._constructor(result, index=self.index, name=self.name, dtype=np.intp) 

4034 return res.__finalize__(self, method="argsort") 

4035 

4036 def nlargest( 

4037 self, n: int = 5, keep: Literal["first", "last", "all"] = "first" 

4038 ) -> Series: 

4039 """ 

4040 Return the largest `n` elements. 

4041 

4042 Parameters 

4043 ---------- 

4044 n : int, default 5 

4045 Return this many descending sorted values. 

4046 keep : {'first', 'last', 'all'}, default 'first' 

4047 When there are duplicate values that cannot all fit in a 

4048 Series of `n` elements: 

4049 

4050 - ``first`` : return the first `n` occurrences in order 

4051 of appearance. 

4052 - ``last`` : return the last `n` occurrences in reverse 

4053 order of appearance. 

4054 - ``all`` : keep all occurrences. This can result in a Series of 

4055 size larger than `n`. 

4056 

4057 Returns 

4058 ------- 

4059 Series 

4060 The `n` largest values in the Series, sorted in decreasing order. 

4061 

4062 See Also 

4063 -------- 

4064 Series.nsmallest: Get the `n` smallest elements. 

4065 Series.sort_values: Sort Series by values. 

4066 Series.head: Return the first `n` rows. 

4067 

4068 Notes 

4069 ----- 

4070 Faster than ``.sort_values(ascending=False).head(n)`` for small `n` 

4071 relative to the size of the ``Series`` object. 

4072 

4073 Examples 

4074 -------- 

4075 >>> countries_population = {"Italy": 59000000, "France": 65000000, 

4076 ... "Malta": 434000, "Maldives": 434000, 

4077 ... "Brunei": 434000, "Iceland": 337000, 

4078 ... "Nauru": 11300, "Tuvalu": 11300, 

4079 ... "Anguilla": 11300, "Montserrat": 5200} 

4080 >>> s = pd.Series(countries_population) 

4081 >>> s 

4082 Italy 59000000 

4083 France 65000000 

4084 Malta 434000 

4085 Maldives 434000 

4086 Brunei 434000 

4087 Iceland 337000 

4088 Nauru 11300 

4089 Tuvalu 11300 

4090 Anguilla 11300 

4091 Montserrat 5200 

4092 dtype: int64 

4093 

4094 The `n` largest elements where ``n=5`` by default. 

4095 

4096 >>> s.nlargest() 

4097 France 65000000 

4098 Italy 59000000 

4099 Malta 434000 

4100 Maldives 434000 

4101 Brunei 434000 

4102 dtype: int64 

4103 

4104 The `n` largest elements where ``n=3``. Default `keep` value is 'first' 

4105 so Malta will be kept. 

4106 

4107 >>> s.nlargest(3) 

4108 France 65000000 

4109 Italy 59000000 

4110 Malta 434000 

4111 dtype: int64 

4112 

4113 The `n` largest elements where ``n=3`` and keeping the last duplicates. 

4114 Brunei will be kept since it is the last with value 434000 based on 

4115 the index order. 

4116 

4117 >>> s.nlargest(3, keep='last') 

4118 France 65000000 

4119 Italy 59000000 

4120 Brunei 434000 

4121 dtype: int64 

4122 

4123 The `n` largest elements where ``n=3`` with all duplicates kept. Note 

4124 that the returned Series has five elements due to the three duplicates. 

4125 

4126 >>> s.nlargest(3, keep='all') 

4127 France 65000000 

4128 Italy 59000000 

4129 Malta 434000 

4130 Maldives 434000 

4131 Brunei 434000 

4132 dtype: int64 

4133 """ 

4134 return algorithms.SelectNSeries(self, n=n, keep=keep).nlargest() 

4135 

4136 def nsmallest(self, n: int = 5, keep: str = "first") -> Series: 

4137 """ 

4138 Return the smallest `n` elements. 

4139 

4140 Parameters 

4141 ---------- 

4142 n : int, default 5 

4143 Return this many ascending sorted values. 

4144 keep : {'first', 'last', 'all'}, default 'first' 

4145 When there are duplicate values that cannot all fit in a 

4146 Series of `n` elements: 

4147 

4148 - ``first`` : return the first `n` occurrences in order 

4149 of appearance. 

4150 - ``last`` : return the last `n` occurrences in reverse 

4151 order of appearance. 

4152 - ``all`` : keep all occurrences. This can result in a Series of 

4153 size larger than `n`. 

4154 

4155 Returns 

4156 ------- 

4157 Series 

4158 The `n` smallest values in the Series, sorted in increasing order. 

4159 

4160 See Also 

4161 -------- 

4162 Series.nlargest: Get the `n` largest elements. 

4163 Series.sort_values: Sort Series by values. 

4164 Series.head: Return the first `n` rows. 

4165 

4166 Notes 

4167 ----- 

4168 Faster than ``.sort_values().head(n)`` for small `n` relative to 

4169 the size of the ``Series`` object. 

4170 

4171 Examples 

4172 -------- 

4173 >>> countries_population = {"Italy": 59000000, "France": 65000000, 

4174 ... "Brunei": 434000, "Malta": 434000, 

4175 ... "Maldives": 434000, "Iceland": 337000, 

4176 ... "Nauru": 11300, "Tuvalu": 11300, 

4177 ... "Anguilla": 11300, "Montserrat": 5200} 

4178 >>> s = pd.Series(countries_population) 

4179 >>> s 

4180 Italy 59000000 

4181 France 65000000 

4182 Brunei 434000 

4183 Malta 434000 

4184 Maldives 434000 

4185 Iceland 337000 

4186 Nauru 11300 

4187 Tuvalu 11300 

4188 Anguilla 11300 

4189 Montserrat 5200 

4190 dtype: int64 

4191 

4192 The `n` smallest elements where ``n=5`` by default. 

4193 

4194 >>> s.nsmallest() 

4195 Montserrat 5200 

4196 Nauru 11300 

4197 Tuvalu 11300 

4198 Anguilla 11300 

4199 Iceland 337000 

4200 dtype: int64 

4201 

4202 The `n` smallest elements where ``n=3``. Default `keep` value is 

4203 'first' so Nauru and Tuvalu will be kept. 

4204 

4205 >>> s.nsmallest(3) 

4206 Montserrat 5200 

4207 Nauru 11300 

4208 Tuvalu 11300 

4209 dtype: int64 

4210 

4211 The `n` smallest elements where ``n=3`` and keeping the last 

4212 duplicates. Anguilla and Tuvalu will be kept since they are the last 

4213 with value 11300 based on the index order. 

4214 

4215 >>> s.nsmallest(3, keep='last') 

4216 Montserrat 5200 

4217 Anguilla 11300 

4218 Tuvalu 11300 

4219 dtype: int64 

4220 

4221 The `n` smallest elements where ``n=3`` with all duplicates kept. Note 

4222 that the returned Series has four elements due to the three duplicates. 

4223 

4224 >>> s.nsmallest(3, keep='all') 

4225 Montserrat 5200 

4226 Nauru 11300 

4227 Tuvalu 11300 

4228 Anguilla 11300 

4229 dtype: int64 

4230 """ 

4231 return algorithms.SelectNSeries(self, n=n, keep=keep).nsmallest() 

4232 

4233 @doc( 

4234 klass=_shared_doc_kwargs["klass"], 

4235 extra_params=dedent( 

4236 """copy : bool, default True 

4237 Whether to copy underlying data.""" 

4238 ), 

4239 examples=dedent( 

4240 """\ 

4241 Examples 

4242 -------- 

4243 >>> s = pd.Series( 

4244 ... ["A", "B", "A", "C"], 

4245 ... index=[ 

4246 ... ["Final exam", "Final exam", "Coursework", "Coursework"], 

4247 ... ["History", "Geography", "History", "Geography"], 

4248 ... ["January", "February", "March", "April"], 

4249 ... ], 

4250 ... ) 

4251 >>> s 

4252 Final exam History January A 

4253 Geography February B 

4254 Coursework History March A 

4255 Geography April C 

4256 dtype: object 

4257 

4258 In the following example, we will swap the levels of the indices. 

4259 Here, we will swap the levels column-wise, but levels can be swapped row-wise 

4260 in a similar manner. Note that column-wise is the default behaviour. 

4261 By not supplying any arguments for i and j, we swap the last and second to 

4262 last indices. 

4263 

4264 >>> s.swaplevel() 

4265 Final exam January History A 

4266 February Geography B 

4267 Coursework March History A 

4268 April Geography C 

4269 dtype: object 

4270 

4271 By supplying one argument, we can choose which index to swap the last 

4272 index with. We can for example swap the first index with the last one as 

4273 follows. 

4274 

4275 >>> s.swaplevel(0) 

4276 January History Final exam A 

4277 February Geography Final exam B 

4278 March History Coursework A 

4279 April Geography Coursework C 

4280 dtype: object 

4281 

4282 We can also define explicitly which indices we want to swap by supplying values 

4283 for both i and j. Here, we for example swap the first and second indices. 

4284 

4285 >>> s.swaplevel(0, 1) 

4286 History Final exam January A 

4287 Geography Final exam February B 

4288 History Coursework March A 

4289 Geography Coursework April C 

4290 dtype: object""" 

4291 ), 

4292 ) 

    def swaplevel(self, i: Level = -2, j: Level = -1, copy: bool = True) -> Series:
        """
        Swap levels i and j in a :class:`MultiIndex`.

        Default is to swap the two innermost levels of the index.

        Parameters
        ----------
        i, j : int or str
            Levels of the indices to be swapped. Can pass level name as string.
        {extra_params}

        Returns
        -------
        {klass}
            {klass} with levels swapped in MultiIndex.

        {examples}
        """
        # Only meaningful for a MultiIndex; a flat index has no levels to swap.
        assert isinstance(self.index, MultiIndex)
        new_index = self.index.swaplevel(i, j)
        # Rebuild on the same values with the swapped index, propagating metadata.
        return self._constructor(self._values, index=new_index, copy=copy).__finalize__(
            self, method="swaplevel"
        )

4317 

4318 def reorder_levels(self, order: Sequence[Level]) -> Series: 

4319 """ 

4320 Rearrange index levels using input order. 

4321 

4322 May not drop or duplicate levels. 

4323 

4324 Parameters 

4325 ---------- 

4326 order : list of int representing new level order 

4327 Reference level by number or key. 

4328 

4329 Returns 

4330 ------- 

4331 type of caller (new object) 

4332 """ 

4333 if not isinstance(self.index, MultiIndex): # pragma: no cover 

4334 raise Exception("Can only reorder levels on a hierarchical axis.") 

4335 

4336 result = self.copy() 

4337 assert isinstance(result.index, MultiIndex) 

4338 result.index = result.index.reorder_levels(order) 

4339 return result 

4340 

4341 def explode(self, ignore_index: bool = False) -> Series: 

4342 """ 

4343 Transform each element of a list-like to a row. 

4344 

4345 .. versionadded:: 0.25.0 

4346 

4347 Parameters 

4348 ---------- 

4349 ignore_index : bool, default False 

4350 If True, the resulting index will be labeled 0, 1, …, n - 1. 

4351 

4352 .. versionadded:: 1.1.0 

4353 

4354 Returns 

4355 ------- 

4356 Series 

4357 Exploded lists to rows; index will be duplicated for these rows. 

4358 

4359 See Also 

4360 -------- 

4361 Series.str.split : Split string values on specified separator. 

4362 Series.unstack : Unstack, a.k.a. pivot, Series with MultiIndex 

4363 to produce DataFrame. 

4364 DataFrame.melt : Unpivot a DataFrame from wide format to long format. 

4365 DataFrame.explode : Explode a DataFrame from list-like 

4366 columns to long format. 

4367 

4368 Notes 

4369 ----- 

4370 This routine will explode list-likes including lists, tuples, sets, 

4371 Series, and np.ndarray. The result dtype of the subset rows will 

4372 be object. Scalars will be returned unchanged, and empty list-likes will 

4373 result in a np.nan for that row. In addition, the ordering of elements in 

4374 the output will be non-deterministic when exploding sets. 

4375 

4376 Reference :ref:`the user guide <reshaping.explode>` for more examples. 

4377 

4378 Examples 

4379 -------- 

4380 >>> s = pd.Series([[1, 2, 3], 'foo', [], [3, 4]]) 

4381 >>> s 

4382 0 [1, 2, 3] 

4383 1 foo 

4384 2 [] 

4385 3 [3, 4] 

4386 dtype: object 

4387 

4388 >>> s.explode() 

4389 0 1 

4390 0 2 

4391 0 3 

4392 1 foo 

4393 2 NaN 

4394 3 3 

4395 3 4 

4396 dtype: object 

4397 """ 

4398 if not len(self) or not is_object_dtype(self): 

4399 result = self.copy() 

4400 return result.reset_index(drop=True) if ignore_index else result 

4401 

4402 values, counts = reshape.explode(np.asarray(self._values)) 

4403 

4404 if ignore_index: 

4405 index = default_index(len(values)) 

4406 else: 

4407 index = self.index.repeat(counts) 

4408 

4409 return self._constructor(values, index=index, name=self.name) 

4410 

4411 def unstack(self, level: IndexLabel = -1, fill_value: Hashable = None) -> DataFrame: 

4412 """ 

4413 Unstack, also known as pivot, Series with MultiIndex to produce DataFrame. 

4414 

4415 Parameters 

4416 ---------- 

4417 level : int, str, or list of these, default last level 

4418 Level(s) to unstack, can pass level name. 

4419 fill_value : scalar value, default None 

4420 Value to use when replacing NaN values. 

4421 

4422 Returns 

4423 ------- 

4424 DataFrame 

4425 Unstacked Series. 

4426 

4427 Notes 

4428 ----- 

4429 Reference :ref:`the user guide <reshaping.stacking>` for more examples. 

4430 

4431 Examples 

4432 -------- 

4433 >>> s = pd.Series([1, 2, 3, 4], 

4434 ... index=pd.MultiIndex.from_product([['one', 'two'], 

4435 ... ['a', 'b']])) 

4436 >>> s 

4437 one a 1 

4438 b 2 

4439 two a 3 

4440 b 4 

4441 dtype: int64 

4442 

4443 >>> s.unstack(level=-1) 

4444 a b 

4445 one 1 2 

4446 two 3 4 

4447 

4448 >>> s.unstack(level=0) 

4449 one two 

4450 a 1 3 

4451 b 2 4 

4452 """ 

4453 from pandas.core.reshape.reshape import unstack 

4454 

4455 return unstack(self, level, fill_value) 

4456 

4457 # ---------------------------------------------------------------------- 

4458 # function application 

4459 

4460 def map( 

4461 self, 

4462 arg: Callable | Mapping | Series, 

4463 na_action: Literal["ignore"] | None = None, 

4464 ) -> Series: 

4465 """ 

4466 Map values of Series according to an input mapping or function. 

4467 

4468 Used for substituting each value in a Series with another value, 

4469 that may be derived from a function, a ``dict`` or 

4470 a :class:`Series`. 

4471 

4472 Parameters 

4473 ---------- 

4474 arg : function, collections.abc.Mapping subclass or Series 

4475 Mapping correspondence. 

4476 na_action : {None, 'ignore'}, default None 

4477 If 'ignore', propagate NaN values, without passing them to the 

4478 mapping correspondence. 

4479 

4480 Returns 

4481 ------- 

4482 Series 

4483 Same index as caller. 

4484 

4485 See Also 

4486 -------- 

4487 Series.apply : For applying more complex functions on a Series. 

4488 DataFrame.apply : Apply a function row-/column-wise. 

4489 DataFrame.applymap : Apply a function elementwise on a whole DataFrame. 

4490 

4491 Notes 

4492 ----- 

4493 When ``arg`` is a dictionary, values in Series that are not in the 

4494 dictionary (as keys) are converted to ``NaN``. However, if the 

4495 dictionary is a ``dict`` subclass that defines ``__missing__`` (i.e. 

4496 provides a method for default values), then this default is used 

4497 rather than ``NaN``. 

4498 

4499 Examples 

4500 -------- 

4501 >>> s = pd.Series(['cat', 'dog', np.nan, 'rabbit']) 

4502 >>> s 

4503 0 cat 

4504 1 dog 

4505 2 NaN 

4506 3 rabbit 

4507 dtype: object 

4508 

4509 ``map`` accepts a ``dict`` or a ``Series``. Values that are not found 

4510 in the ``dict`` are converted to ``NaN``, unless the dict has a default 

4511 value (e.g. ``defaultdict``): 

4512 

4513 >>> s.map({'cat': 'kitten', 'dog': 'puppy'}) 

4514 0 kitten 

4515 1 puppy 

4516 2 NaN 

4517 3 NaN 

4518 dtype: object 

4519 

4520 It also accepts a function: 

4521 

4522 >>> s.map('I am a {}'.format) 

4523 0 I am a cat 

4524 1 I am a dog 

4525 2 I am a nan 

4526 3 I am a rabbit 

4527 dtype: object 

4528 

4529 To avoid applying the function to missing values (and keep them as 

4530 ``NaN``) ``na_action='ignore'`` can be used: 

4531 

4532 >>> s.map('I am a {}'.format, na_action='ignore') 

4533 0 I am a cat 

4534 1 I am a dog 

4535 2 NaN 

4536 3 I am a rabbit 

4537 dtype: object 

4538 """ 

4539 new_values = self._map_values(arg, na_action=na_action) 

4540 return self._constructor(new_values, index=self.index).__finalize__( 

4541 self, method="map" 

4542 ) 

4543 

4544 def _gotitem(self, key, ndim, subset=None) -> Series: 

4545 """ 

4546 Sub-classes to define. Return a sliced object. 

4547 

4548 Parameters 

4549 ---------- 

4550 key : string / list of selections 

4551 ndim : {1, 2} 

4552 Requested ndim of result. 

4553 subset : object, default None 

4554 Subset to act on. 

4555 """ 

4556 return self 

4557 

    # Docstring fragments injected into ``aggregate`` through the ``@doc``
    # decorator below; they are runtime strings, not comments.
    _agg_see_also_doc = dedent(
        """
    See Also
    --------
    Series.apply : Invoke function on a Series.
    Series.transform : Transform function producing a Series with like indexes.
    """
    )

    _agg_examples_doc = dedent(
        """
    Examples
    --------
    >>> s = pd.Series([1, 2, 3, 4])
    >>> s
    0    1
    1    2
    2    3
    3    4
    dtype: int64

    >>> s.agg('min')
    1

    >>> s.agg(['min', 'max'])
    min   1
    max   4
    dtype: int64
    """
    )

4588 

4589 @doc( 

4590 _shared_docs["aggregate"], 

4591 klass=_shared_doc_kwargs["klass"], 

4592 axis=_shared_doc_kwargs["axis"], 

4593 see_also=_agg_see_also_doc, 

4594 examples=_agg_examples_doc, 

4595 ) 

4596 def aggregate(self, func=None, axis: Axis = 0, *args, **kwargs): 

4597 # Validate the axis parameter 

4598 self._get_axis_number(axis) 

4599 

4600 # if func is None, will switch to user-provided "named aggregation" kwargs 

4601 if func is None: 

4602 func = dict(kwargs.items()) 

4603 

4604 op = SeriesApply(self, func, convert_dtype=False, args=args, kwargs=kwargs) 

4605 result = op.agg() 

4606 return result 

4607 

4608 agg = aggregate 

4609 

4610 # error: Signature of "any" incompatible with supertype "NDFrame" [override] 

4611 @overload # type: ignore[override] 

    def any(
        self,
        *,
        axis: Axis = ...,
        bool_only: bool | None = ...,
        skipna: bool = ...,
        level: None = ...,
        **kwargs,
    ) -> bool:
        # typing-only overload: without ``level`` the reduction is a scalar
        ...

    @overload
    def any(
        self,
        *,
        axis: Axis = ...,
        bool_only: bool | None = ...,
        skipna: bool = ...,
        level: Level,
        **kwargs,
    ) -> Series | bool:
        # typing-only overload: with ``level`` the result may stay a Series
        ...

    @doc(NDFrame.any, **_shared_doc_kwargs)
    def any(
        self,
        axis: Axis = 0,
        bool_only: bool | None = None,
        skipna: bool = True,
        level: Level | None = None,
        **kwargs,
    ) -> Series | bool:
        # NOTE(review): body is a stub — the working implementation appears to
        # be attached elsewhere (e.g. via NDFrame machinery); confirm before
        # relying on this definition.
        ...

4645 

4646 @doc( 

4647 _shared_docs["transform"], 

4648 klass=_shared_doc_kwargs["klass"], 

4649 axis=_shared_doc_kwargs["axis"], 

4650 ) 

4651 def transform( 

4652 self, func: AggFuncType, axis: Axis = 0, *args, **kwargs 

4653 ) -> DataFrame | Series: 

4654 # Validate axis argument 

4655 self._get_axis_number(axis) 

4656 result = SeriesApply( 

4657 self, func=func, convert_dtype=True, args=args, kwargs=kwargs 

4658 ).transform() 

4659 return result 

4660 

4661 def apply( 

4662 self, 

4663 func: AggFuncType, 

4664 convert_dtype: bool = True, 

4665 args: tuple[Any, ...] = (), 

4666 **kwargs, 

4667 ) -> DataFrame | Series: 

4668 """ 

4669 Invoke function on values of Series. 

4670 

4671 Can be ufunc (a NumPy function that applies to the entire Series) 

4672 or a Python function that only works on single values. 

4673 

4674 Parameters 

4675 ---------- 

4676 func : function 

4677 Python function or NumPy ufunc to apply. 

4678 convert_dtype : bool, default True 

4679 Try to find better dtype for elementwise function results. If 

4680 False, leave as dtype=object. Note that the dtype is always 

4681 preserved for some extension array dtypes, such as Categorical. 

4682 args : tuple 

4683 Positional arguments passed to func after the series value. 

4684 **kwargs 

4685 Additional keyword arguments passed to func. 

4686 

4687 Returns 

4688 ------- 

4689 Series or DataFrame 

4690 If func returns a Series object the result will be a DataFrame. 

4691 

4692 See Also 

4693 -------- 

4694 Series.map: For element-wise operations. 

4695 Series.agg: Only perform aggregating type operations. 

4696 Series.transform: Only perform transforming type operations. 

4697 

4698 Notes 

4699 ----- 

4700 Functions that mutate the passed object can produce unexpected 

4701 behavior or errors and are not supported. See :ref:`gotchas.udf-mutation` 

4702 for more details. 

4703 

4704 Examples 

4705 -------- 

4706 Create a series with typical summer temperatures for each city. 

4707 

4708 >>> s = pd.Series([20, 21, 12], 

4709 ... index=['London', 'New York', 'Helsinki']) 

4710 >>> s 

4711 London 20 

4712 New York 21 

4713 Helsinki 12 

4714 dtype: int64 

4715 

4716 Square the values by defining a function and passing it as an 

4717 argument to ``apply()``. 

4718 

4719 >>> def square(x): 

4720 ... return x ** 2 

4721 >>> s.apply(square) 

4722 London 400 

4723 New York 441 

4724 Helsinki 144 

4725 dtype: int64 

4726 

4727 Square the values by passing an anonymous function as an 

4728 argument to ``apply()``. 

4729 

4730 >>> s.apply(lambda x: x ** 2) 

4731 London 400 

4732 New York 441 

4733 Helsinki 144 

4734 dtype: int64 

4735 

4736 Define a custom function that needs additional positional 

4737 arguments and pass these additional arguments using the 

4738 ``args`` keyword. 

4739 

4740 >>> def subtract_custom_value(x, custom_value): 

4741 ... return x - custom_value 

4742 

4743 >>> s.apply(subtract_custom_value, args=(5,)) 

4744 London 15 

4745 New York 16 

4746 Helsinki 7 

4747 dtype: int64 

4748 

4749 Define a custom function that takes keyword arguments 

4750 and pass these arguments to ``apply``. 

4751 

4752 >>> def add_custom_values(x, **kwargs): 

4753 ... for month in kwargs: 

4754 ... x += kwargs[month] 

4755 ... return x 

4756 

4757 >>> s.apply(add_custom_values, june=30, july=20, august=25) 

4758 London 95 

4759 New York 96 

4760 Helsinki 87 

4761 dtype: int64 

4762 

4763 Use a function from the Numpy library. 

4764 

4765 >>> s.apply(np.log) 

4766 London 2.995732 

4767 New York 3.044522 

4768 Helsinki 2.484907 

4769 dtype: float64 

4770 """ 

4771 return SeriesApply(self, func, convert_dtype, args, kwargs).apply() 

4772 

4773 def _reduce( 

4774 self, 

4775 op, 

4776 name: str, 

4777 *, 

4778 axis=0, 

4779 skipna=True, 

4780 numeric_only=None, 

4781 filter_type=None, 

4782 **kwds, 

4783 ): 

4784 """ 

4785 Perform a reduction operation. 

4786 

4787 If we have an ndarray as a value, then simply perform the operation, 

4788 otherwise delegate to the object. 

4789 """ 

4790 delegate = self._values 

4791 

4792 if axis is not None: 

4793 self._get_axis_number(axis) 

4794 

4795 if isinstance(delegate, ExtensionArray): 

4796 # dispatch to ExtensionArray interface 

4797 return delegate._reduce(name, skipna=skipna, **kwds) 

4798 

4799 else: 

4800 # dispatch to numpy arrays 

4801 if numeric_only and not is_numeric_dtype(self.dtype): 

4802 kwd_name = "numeric_only" 

4803 if name in ["any", "all"]: 

4804 kwd_name = "bool_only" 

4805 # GH#47500 - change to TypeError to match other methods 

4806 warnings.warn( 

4807 f"Calling Series.{name} with {kwd_name}={numeric_only} and " 

4808 f"dtype {self.dtype} will raise a TypeError in the future", 

4809 FutureWarning, 

4810 stacklevel=find_stack_level(), 

4811 ) 

4812 raise NotImplementedError( 

4813 f"Series.{name} does not implement {kwd_name}." 

4814 ) 

4815 with np.errstate(all="ignore"): 

4816 return op(delegate, skipna=skipna, **kwds) 

4817 

4818 def _reindex_indexer( 

4819 self, new_index: Index | None, indexer: npt.NDArray[np.intp] | None, copy: bool 

4820 ) -> Series: 

4821 # Note: new_index is None iff indexer is None 

4822 # if not None, indexer is np.intp 

4823 if indexer is None and ( 

4824 new_index is None or new_index.names == self.index.names 

4825 ): 

4826 if copy: 

4827 return self.copy() 

4828 return self 

4829 

4830 new_values = algorithms.take_nd( 

4831 self._values, indexer, allow_fill=True, fill_value=None 

4832 ) 

4833 return self._constructor(new_values, index=new_index) 

4834 

4835 def _needs_reindex_multi(self, axes, method, level) -> bool: 

4836 """ 

4837 Check if we do need a multi reindex; this is for compat with 

4838 higher dims. 

4839 """ 

4840 return False 

4841 

4842 # error: Cannot determine type of 'align' 

4843 @doc( 

4844 NDFrame.align, # type: ignore[has-type] 

4845 klass=_shared_doc_kwargs["klass"], 

4846 axes_single_arg=_shared_doc_kwargs["axes_single_arg"], 

4847 ) 

    def align(
        self,
        other: Series,
        join: Literal["outer", "inner", "left", "right"] = "outer",
        axis: Axis | None = None,
        level: Level = None,
        copy: bool = True,
        fill_value: Hashable = None,
        method: FillnaOptions | None = None,
        limit: int | None = None,
        fill_axis: Axis = 0,
        broadcast_axis: Axis | None = None,
    ) -> Series:
        # Thin wrapper: exists so the @doc decorator can attach the shared
        # NDFrame docstring; all behavior lives in NDFrame.align.
        return super().align(
            other,
            join=join,
            axis=axis,
            level=level,
            copy=copy,
            fill_value=fill_value,
            method=method,
            limit=limit,
            fill_axis=fill_axis,
            broadcast_axis=broadcast_axis,
        )

4873 

4874 @overload 

4875 def rename( 

4876 self, 

4877 index: Renamer | Hashable | None = ..., 

4878 *, 

4879 axis: Axis | None = ..., 

4880 copy: bool = ..., 

4881 inplace: Literal[True], 

4882 level: Level | None = ..., 

4883 errors: IgnoreRaise = ..., 

4884 ) -> None: 

4885 ... 

4886 

4887 @overload 

4888 def rename( 

4889 self, 

4890 index: Renamer | Hashable | None = ..., 

4891 *, 

4892 axis: Axis | None = ..., 

4893 copy: bool = ..., 

4894 inplace: Literal[False] = ..., 

4895 level: Level | None = ..., 

4896 errors: IgnoreRaise = ..., 

4897 ) -> Series: 

4898 ... 

4899 

4900 @overload 

4901 def rename( 

4902 self, 

4903 index: Renamer | Hashable | None = ..., 

4904 *, 

4905 axis: Axis | None = ..., 

4906 copy: bool = ..., 

4907 inplace: bool = ..., 

4908 level: Level | None = ..., 

4909 errors: IgnoreRaise = ..., 

4910 ) -> Series | None: 

4911 ... 

4912 

4913 def rename( 

4914 self, 

4915 index: Renamer | Hashable | None = None, 

4916 *, 

4917 axis: Axis | None = None, 

4918 copy: bool = True, 

4919 inplace: bool = False, 

4920 level: Level | None = None, 

4921 errors: IgnoreRaise = "ignore", 

4922 ) -> Series | None: 

4923 """ 

4924 Alter Series index labels or name. 

4925 

4926 Function / dict values must be unique (1-to-1). Labels not contained in 

4927 a dict / Series will be left as-is. Extra labels listed don't throw an 

4928 error. 

4929 

4930 Alternatively, change ``Series.name`` with a scalar value. 

4931 

4932 See the :ref:`user guide <basics.rename>` for more. 

4933 

4934 Parameters 

4935 ---------- 

4936 index : scalar, hashable sequence, dict-like or function optional 

4937 Functions or dict-like are transformations to apply to 

4938 the index. 

4939 Scalar or hashable sequence-like will alter the ``Series.name`` 

4940 attribute. 

4941 axis : {0 or 'index'} 

4942 Unused. Parameter needed for compatibility with DataFrame. 

4943 copy : bool, default True 

4944 Also copy underlying data. 

4945 inplace : bool, default False 

4946 Whether to return a new Series. If True the value of copy is ignored. 

4947 level : int or level name, default None 

4948 In case of MultiIndex, only rename labels in the specified level. 

4949 errors : {'ignore', 'raise'}, default 'ignore' 

4950 If 'raise', raise `KeyError` when a `dict-like mapper` or 

4951 `index` contains labels that are not present in the index being transformed. 

4952 If 'ignore', existing keys will be renamed and extra keys will be ignored. 

4953 

4954 Returns 

4955 ------- 

4956 Series or None 

4957 Series with index labels or name altered or None if ``inplace=True``. 

4958 

4959 See Also 

4960 -------- 

4961 DataFrame.rename : Corresponding DataFrame method. 

4962 Series.rename_axis : Set the name of the axis. 

4963 

4964 Examples 

4965 -------- 

4966 >>> s = pd.Series([1, 2, 3]) 

4967 >>> s 

4968 0 1 

4969 1 2 

4970 2 3 

4971 dtype: int64 

4972 >>> s.rename("my_name") # scalar, changes Series.name 

4973 0 1 

4974 1 2 

4975 2 3 

4976 Name: my_name, dtype: int64 

4977 >>> s.rename(lambda x: x ** 2) # function, changes labels 

4978 0 1 

4979 1 2 

4980 4 3 

4981 dtype: int64 

4982 >>> s.rename({1: 3, 2: 5}) # mapping, changes labels 

4983 0 1 

4984 3 2 

4985 5 3 

4986 dtype: int64 

4987 """ 

4988 if axis is not None: 

4989 # Make sure we raise if an invalid 'axis' is passed. 

4990 axis = self._get_axis_number(axis) 

4991 

4992 if callable(index) or is_dict_like(index): 

4993 # error: Argument 1 to "_rename" of "NDFrame" has incompatible 

4994 # type "Union[Union[Mapping[Any, Hashable], Callable[[Any], 

4995 # Hashable]], Hashable, None]"; expected "Union[Mapping[Any, 

4996 # Hashable], Callable[[Any], Hashable], None]" 

4997 return super()._rename( 

4998 index, # type: ignore[arg-type] 

4999 copy=copy, 

5000 inplace=inplace, 

5001 level=level, 

5002 errors=errors, 

5003 ) 

5004 else: 

5005 return self._set_name(index, inplace=inplace) 

5006 

5007 @overload 

    def set_axis(
        self,
        labels,
        *,
        axis: Axis = ...,
        inplace: Literal[False] | lib.NoDefault = ...,
        copy: bool | lib.NoDefault = ...,
    ) -> Series:
        # typing-only overload: inplace=False returns a new Series
        ...

    @overload
    def set_axis(
        self,
        labels,
        *,
        axis: Axis = ...,
        inplace: Literal[True],
        copy: bool | lib.NoDefault = ...,
    ) -> None:
        # typing-only overload: inplace=True returns None
        ...

    @overload
    def set_axis(
        self,
        labels,
        *,
        axis: Axis = ...,
        inplace: bool | lib.NoDefault = ...,
        copy: bool | lib.NoDefault = ...,
    ) -> Series | None:
        # typing-only overload: unknown inplace — either result possible
        ...

    # error: Signature of "set_axis" incompatible with supertype "NDFrame"
    @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "labels"])
    @Appender(
        """
        Examples
        --------
        >>> s = pd.Series([1, 2, 3])
        >>> s
        0    1
        1    2
        2    3
        dtype: int64

        >>> s.set_axis(['a', 'b', 'c'], axis=0)
        a    1
        b    2
        c    3
        dtype: int64
        """
    )
    @Substitution(
        **_shared_doc_kwargs,
        extended_summary_sub="",
        axis_description_sub="",
        see_also_sub="",
    )
    @Appender(NDFrame.set_axis.__doc__)
    def set_axis(  # type: ignore[override]
        self,
        labels,
        axis: Axis = 0,
        inplace: bool | lib.NoDefault = lib.no_default,
        copy: bool | lib.NoDefault = lib.no_default,
    ) -> Series | None:
        # Pure delegation to NDFrame.set_axis; the docstring is assembled by
        # the decorator stack above.
        return super().set_axis(labels, axis=axis, inplace=inplace, copy=copy)

5075 

5076 # error: Cannot determine type of 'reindex' 

5077 @doc( 

5078 NDFrame.reindex, # type: ignore[has-type] 

5079 klass=_shared_doc_kwargs["klass"], 

5080 axes=_shared_doc_kwargs["axes"], 

5081 optional_labels=_shared_doc_kwargs["optional_labels"], 

5082 optional_axis=_shared_doc_kwargs["optional_axis"], 

5083 ) 

5084 def reindex(self, *args, **kwargs) -> Series: 

5085 if len(args) > 1: 

5086 raise TypeError("Only one positional argument ('index') is allowed") 

5087 if args: 

5088 (index,) = args 

5089 if "index" in kwargs: 

5090 raise TypeError( 

5091 "'index' passed as both positional and keyword argument" 

5092 ) 

5093 kwargs.update({"index": index}) 

5094 return super().reindex(**kwargs) 

5095 

5096 @overload 

5097 def drop( 

5098 self, 

5099 labels: IndexLabel = ..., 

5100 *, 

5101 axis: Axis = ..., 

5102 index: IndexLabel = ..., 

5103 columns: IndexLabel = ..., 

5104 level: Level | None = ..., 

5105 inplace: Literal[True], 

5106 errors: IgnoreRaise = ..., 

5107 ) -> None: 

5108 ... 

5109 

5110 @overload 

5111 def drop( 

5112 self, 

5113 labels: IndexLabel = ..., 

5114 *, 

5115 axis: Axis = ..., 

5116 index: IndexLabel = ..., 

5117 columns: IndexLabel = ..., 

5118 level: Level | None = ..., 

5119 inplace: Literal[False] = ..., 

5120 errors: IgnoreRaise = ..., 

5121 ) -> Series: 

5122 ... 

5123 

5124 @overload 

5125 def drop( 

5126 self, 

5127 labels: IndexLabel = ..., 

5128 *, 

5129 axis: Axis = ..., 

5130 index: IndexLabel = ..., 

5131 columns: IndexLabel = ..., 

5132 level: Level | None = ..., 

5133 inplace: bool = ..., 

5134 errors: IgnoreRaise = ..., 

5135 ) -> Series | None: 

5136 ... 

5137 

5138 # error: Signature of "drop" incompatible with supertype "NDFrame" 

5139 # github.com/python/mypy/issues/12387 

5140 @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "labels"]) 

5141 def drop( # type: ignore[override] 

5142 self, 

5143 labels: IndexLabel = None, 

5144 axis: Axis = 0, 

5145 index: IndexLabel = None, 

5146 columns: IndexLabel = None, 

5147 level: Level | None = None, 

5148 inplace: bool = False, 

5149 errors: IgnoreRaise = "raise", 

5150 ) -> Series | None: 

5151 """ 

5152 Return Series with specified index labels removed. 

5153 

5154 Remove elements of a Series based on specifying the index labels. 

5155 When using a multi-index, labels on different levels can be removed 

5156 by specifying the level. 

5157 

5158 Parameters 

5159 ---------- 

5160 labels : single label or list-like 

5161 Index labels to drop. 

5162 axis : {0 or 'index'} 

5163 Unused. Parameter needed for compatibility with DataFrame. 

5164 index : single label or list-like 

5165 Redundant for application on Series, but 'index' can be used instead 

5166 of 'labels'. 

5167 columns : single label or list-like 

5168 No change is made to the Series; use 'index' or 'labels' instead. 

5169 level : int or level name, optional 

5170 For MultiIndex, level for which the labels will be removed. 

5171 inplace : bool, default False 

5172 If True, do operation inplace and return None. 

5173 errors : {'ignore', 'raise'}, default 'raise' 

5174 If 'ignore', suppress error and only existing labels are dropped. 

5175 

5176 Returns 

5177 ------- 

5178 Series or None 

5179 Series with specified index labels removed or None if ``inplace=True``. 

5180 

5181 Raises 

5182 ------ 

5183 KeyError 

5184 If none of the labels are found in the index. 

5185 

5186 See Also 

5187 -------- 

5188 Series.reindex : Return only specified index labels of Series. 

5189 Series.dropna : Return series without null values. 

5190 Series.drop_duplicates : Return Series with duplicate values removed. 

5191 DataFrame.drop : Drop specified labels from rows or columns. 

5192 

5193 Examples 

5194 -------- 

5195 >>> s = pd.Series(data=np.arange(3), index=['A', 'B', 'C']) 

5196 >>> s 

5197 A 0 

5198 B 1 

5199 C 2 

5200 dtype: int64 

5201 

5202 Drop labels B en C 

5203 

5204 >>> s.drop(labels=['B', 'C']) 

5205 A 0 

5206 dtype: int64 

5207 

5208 Drop 2nd level label in MultiIndex Series 

5209 

5210 >>> midx = pd.MultiIndex(levels=[['lama', 'cow', 'falcon'], 

5211 ... ['speed', 'weight', 'length']], 

5212 ... codes=[[0, 0, 0, 1, 1, 1, 2, 2, 2], 

5213 ... [0, 1, 2, 0, 1, 2, 0, 1, 2]]) 

5214 >>> s = pd.Series([45, 200, 1.2, 30, 250, 1.5, 320, 1, 0.3], 

5215 ... index=midx) 

5216 >>> s 

5217 lama speed 45.0 

5218 weight 200.0 

5219 length 1.2 

5220 cow speed 30.0 

5221 weight 250.0 

5222 length 1.5 

5223 falcon speed 320.0 

5224 weight 1.0 

5225 length 0.3 

5226 dtype: float64 

5227 

5228 >>> s.drop(labels='weight', level=1) 

5229 lama speed 45.0 

5230 length 1.2 

5231 cow speed 30.0 

5232 length 1.5 

5233 falcon speed 320.0 

5234 length 0.3 

5235 dtype: float64 

5236 """ 

5237 return super().drop( 

5238 labels=labels, 

5239 axis=axis, 

5240 index=index, 

5241 columns=columns, 

5242 level=level, 

5243 inplace=inplace, 

5244 errors=errors, 

5245 ) 

5246 

5247 @overload 

    def fillna(
        self,
        value: Hashable | Mapping | Series | DataFrame = ...,
        *,
        method: FillnaOptions | None = ...,
        axis: Axis | None = ...,
        inplace: Literal[False] = ...,
        limit: int | None = ...,
        downcast: dict | None = ...,
    ) -> Series:
        # typing-only overload: inplace=False returns a new Series
        ...

    @overload
    def fillna(
        self,
        value: Hashable | Mapping | Series | DataFrame = ...,
        *,
        method: FillnaOptions | None = ...,
        axis: Axis | None = ...,
        inplace: Literal[True],
        limit: int | None = ...,
        downcast: dict | None = ...,
    ) -> None:
        # typing-only overload: inplace=True returns None
        ...

    @overload
    def fillna(
        self,
        value: Hashable | Mapping | Series | DataFrame = ...,
        *,
        method: FillnaOptions | None = ...,
        axis: Axis | None = ...,
        inplace: bool = ...,
        limit: int | None = ...,
        downcast: dict | None = ...,
    ) -> Series | None:
        # typing-only overload: unknown inplace — either result possible
        ...

    # error: Signature of "fillna" incompatible with supertype "NDFrame"
    @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "value"])
    @doc(NDFrame.fillna, **_shared_doc_kwargs)
    def fillna(  # type: ignore[override]
        self,
        value: Hashable | Mapping | Series | DataFrame = None,
        method: FillnaOptions | None = None,
        axis: Axis | None = None,
        inplace: bool = False,
        limit: int | None = None,
        downcast: dict | None = None,
    ) -> Series | None:
        # Pure delegation to NDFrame.fillna; the shared docstring is attached
        # by the @doc decorator above.
        return super().fillna(
            value=value,
            method=method,
            axis=axis,
            inplace=inplace,
            limit=limit,
            downcast=downcast,
        )

5306 

5307 def pop(self, item: Hashable) -> Any: 

5308 """ 

5309 Return item and drops from series. Raise KeyError if not found. 

5310 

5311 Parameters 

5312 ---------- 

5313 item : label 

5314 Index of the element that needs to be removed. 

5315 

5316 Returns 

5317 ------- 

5318 Value that is popped from series. 

5319 

5320 Examples 

5321 -------- 

5322 >>> ser = pd.Series([1,2,3]) 

5323 

5324 >>> ser.pop(0) 

5325 1 

5326 

5327 >>> ser 

5328 1 2 

5329 2 3 

5330 dtype: int64 

5331 """ 

5332 return super().pop(item=item) 

5333 

    # error: Signature of "replace" incompatible with supertype "NDFrame"
    # Overloads: return type depends on ``inplace``.
    @overload  # type: ignore[override]
    def replace(
        self,
        to_replace=...,
        value=...,
        *,
        inplace: Literal[False] = ...,
        limit: int | None = ...,
        regex: bool = ...,
        method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
    ) -> Series:
        # inplace=False: a new Series is always returned.
        ...

    @overload
    def replace(
        self,
        to_replace=...,
        value=...,
        *,
        inplace: Literal[True],
        limit: int | None = ...,
        regex: bool = ...,
        method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = ...,
    ) -> None:
        # inplace=True: mutation happens in place and None is returned.
        ...

5360 

    # error: Signature of "replace" incompatible with supertype "NDFrame"
    @deprecate_nonkeyword_arguments(
        version=None, allowed_args=["self", "to_replace", "value"]
    )
    @doc(
        NDFrame.replace,
        klass=_shared_doc_kwargs["klass"],
        inplace=_shared_doc_kwargs["inplace"],
        replace_iloc=_shared_doc_kwargs["replace_iloc"],
    )
    def replace(  # type: ignore[override]
        self,
        to_replace=None,
        value=lib.no_default,
        inplace: bool = False,
        limit: int | None = None,
        regex: bool = False,
        method: Literal["pad", "ffill", "bfill"] | lib.NoDefault = lib.no_default,
    ) -> Series | None:
        # Thin wrapper: replacement logic lives in NDFrame.replace; the
        # docstring is assembled by the @doc decorator above.
        return super().replace(
            to_replace=to_replace,
            value=value,
            inplace=inplace,
            limit=limit,
            regex=regex,
            method=method,
        )

5388 

    @doc(INFO_DOCSTRING, **series_sub_kwargs)
    def info(
        self,
        verbose: bool | None = None,
        buf: IO[str] | None = None,
        max_cols: int | None = None,
        memory_usage: bool | str | None = None,
        show_counts: bool = True,
    ) -> None:
        # All formatting is delegated to SeriesInfo.render; the docstring
        # comes from the shared INFO_DOCSTRING template.
        return SeriesInfo(self, memory_usage).render(
            buf=buf,
            max_cols=max_cols,
            verbose=verbose,
            show_counts=show_counts,
        )

5404 

5405 def _replace_single(self, to_replace, method: str, inplace: bool, limit): 

5406 """ 

5407 Replaces values in a Series using the fill method specified when no 

5408 replacement value is given in the replace method 

5409 """ 

5410 

5411 result = self if inplace else self.copy() 

5412 

5413 values = result._values 

5414 mask = missing.mask_missing(values, to_replace) 

5415 

5416 if isinstance(values, ExtensionArray): 

5417 # dispatch to the EA's _pad_mask_inplace method 

5418 values._fill_mask_inplace(method, limit, mask) 

5419 else: 

5420 fill_f = missing.get_fill_func(method) 

5421 fill_f(values, limit=limit, mask=mask) 

5422 

5423 if inplace: 

5424 return 

5425 return result 

5426 

    # error: Cannot determine type of 'shift'
    @doc(NDFrame.shift, klass=_shared_doc_kwargs["klass"])  # type: ignore[has-type]
    def shift(
        self, periods: int = 1, freq=None, axis: Axis = 0, fill_value: Hashable = None
    ) -> Series:
        # Thin wrapper around NDFrame.shift; docstring injected by @doc.
        return super().shift(
            periods=periods, freq=freq, axis=axis, fill_value=fill_value
        )

5435 

5436 def memory_usage(self, index: bool = True, deep: bool = False) -> int: 

5437 """ 

5438 Return the memory usage of the Series. 

5439 

5440 The memory usage can optionally include the contribution of 

5441 the index and of elements of `object` dtype. 

5442 

5443 Parameters 

5444 ---------- 

5445 index : bool, default True 

5446 Specifies whether to include the memory usage of the Series index. 

5447 deep : bool, default False 

5448 If True, introspect the data deeply by interrogating 

5449 `object` dtypes for system-level memory consumption, and include 

5450 it in the returned value. 

5451 

5452 Returns 

5453 ------- 

5454 int 

5455 Bytes of memory consumed. 

5456 

5457 See Also 

5458 -------- 

5459 numpy.ndarray.nbytes : Total bytes consumed by the elements of the 

5460 array. 

5461 DataFrame.memory_usage : Bytes consumed by a DataFrame. 

5462 

5463 Examples 

5464 -------- 

5465 >>> s = pd.Series(range(3)) 

5466 >>> s.memory_usage() 

5467 152 

5468 

5469 Not including the index gives the size of the rest of the data, which 

5470 is necessarily smaller: 

5471 

5472 >>> s.memory_usage(index=False) 

5473 24 

5474 

5475 The memory footprint of `object` values is ignored by default: 

5476 

5477 >>> s = pd.Series(["a", "b"]) 

5478 >>> s.values 

5479 array(['a', 'b'], dtype=object) 

5480 >>> s.memory_usage() 

5481 144 

5482 >>> s.memory_usage(deep=True) 

5483 244 

5484 """ 

5485 v = self._memory_usage(deep=deep) 

5486 if index: 

5487 v += self.index.memory_usage(deep=deep) 

5488 return v 

5489 

5490 def isin(self, values) -> Series: 

5491 """ 

5492 Whether elements in Series are contained in `values`. 

5493 

5494 Return a boolean Series showing whether each element in the Series 

5495 matches an element in the passed sequence of `values` exactly. 

5496 

5497 Parameters 

5498 ---------- 

5499 values : set or list-like 

5500 The sequence of values to test. Passing in a single string will 

5501 raise a ``TypeError``. Instead, turn a single string into a 

5502 list of one element. 

5503 

5504 Returns 

5505 ------- 

5506 Series 

5507 Series of booleans indicating if each element is in values. 

5508 

5509 Raises 

5510 ------ 

5511 TypeError 

5512 * If `values` is a string 

5513 

5514 See Also 

5515 -------- 

5516 DataFrame.isin : Equivalent method on DataFrame. 

5517 

5518 Examples 

5519 -------- 

5520 >>> s = pd.Series(['lama', 'cow', 'lama', 'beetle', 'lama', 

5521 ... 'hippo'], name='animal') 

5522 >>> s.isin(['cow', 'lama']) 

5523 0 True 

5524 1 True 

5525 2 True 

5526 3 False 

5527 4 True 

5528 5 False 

5529 Name: animal, dtype: bool 

5530 

5531 To invert the boolean values, use the ``~`` operator: 

5532 

5533 >>> ~s.isin(['cow', 'lama']) 

5534 0 False 

5535 1 False 

5536 2 False 

5537 3 True 

5538 4 False 

5539 5 True 

5540 Name: animal, dtype: bool 

5541 

5542 Passing a single string as ``s.isin('lama')`` will raise an error. Use 

5543 a list of one element instead: 

5544 

5545 >>> s.isin(['lama']) 

5546 0 True 

5547 1 False 

5548 2 True 

5549 3 False 

5550 4 True 

5551 5 False 

5552 Name: animal, dtype: bool 

5553 

5554 Strings and integers are distinct and are therefore not comparable: 

5555 

5556 >>> pd.Series([1]).isin(['1']) 

5557 0 False 

5558 dtype: bool 

5559 >>> pd.Series([1.1]).isin(['1.1']) 

5560 0 False 

5561 dtype: bool 

5562 """ 

5563 result = algorithms.isin(self._values, values) 

5564 return self._constructor(result, index=self.index).__finalize__( 

5565 self, method="isin" 

5566 ) 

5567 

5568 def between( 

5569 self, 

5570 left, 

5571 right, 

5572 inclusive: Literal["both", "neither", "left", "right"] = "both", 

5573 ) -> Series: 

5574 """ 

5575 Return boolean Series equivalent to left <= series <= right. 

5576 

5577 This function returns a boolean vector containing `True` wherever the 

5578 corresponding Series element is between the boundary values `left` and 

5579 `right`. NA values are treated as `False`. 

5580 

5581 Parameters 

5582 ---------- 

5583 left : scalar or list-like 

5584 Left boundary. 

5585 right : scalar or list-like 

5586 Right boundary. 

5587 inclusive : {"both", "neither", "left", "right"} 

5588 Include boundaries. Whether to set each bound as closed or open. 

5589 

5590 .. versionchanged:: 1.3.0 

5591 

5592 Returns 

5593 ------- 

5594 Series 

5595 Series representing whether each element is between left and 

5596 right (inclusive). 

5597 

5598 See Also 

5599 -------- 

5600 Series.gt : Greater than of series and other. 

5601 Series.lt : Less than of series and other. 

5602 

5603 Notes 

5604 ----- 

5605 This function is equivalent to ``(left <= ser) & (ser <= right)`` 

5606 

5607 Examples 

5608 -------- 

5609 >>> s = pd.Series([2, 0, 4, 8, np.nan]) 

5610 

5611 Boundary values are included by default: 

5612 

5613 >>> s.between(1, 4) 

5614 0 True 

5615 1 False 

5616 2 True 

5617 3 False 

5618 4 False 

5619 dtype: bool 

5620 

5621 With `inclusive` set to ``"neither"`` boundary values are excluded: 

5622 

5623 >>> s.between(1, 4, inclusive="neither") 

5624 0 True 

5625 1 False 

5626 2 False 

5627 3 False 

5628 4 False 

5629 dtype: bool 

5630 

5631 `left` and `right` can be any scalar value: 

5632 

5633 >>> s = pd.Series(['Alice', 'Bob', 'Carol', 'Eve']) 

5634 >>> s.between('Anna', 'Daniel') 

5635 0 False 

5636 1 True 

5637 2 True 

5638 3 False 

5639 dtype: bool 

5640 """ 

5641 # error: Non-overlapping identity check (left operand type: "Literal['both', 

5642 # 'neither', 'left', 'right']", right operand type: "Literal[False]") 

5643 if inclusive is True or inclusive is False: # type: ignore[comparison-overlap] 

5644 warnings.warn( 

5645 "Boolean inputs to the `inclusive` argument are deprecated in " 

5646 "favour of `both` or `neither`.", 

5647 FutureWarning, 

5648 stacklevel=find_stack_level(), 

5649 ) 

5650 if inclusive: 

5651 inclusive = "both" 

5652 else: 

5653 inclusive = "neither" 

5654 if inclusive == "both": 

5655 lmask = self >= left 

5656 rmask = self <= right 

5657 elif inclusive == "left": 

5658 lmask = self >= left 

5659 rmask = self < right 

5660 elif inclusive == "right": 

5661 lmask = self > left 

5662 rmask = self <= right 

5663 elif inclusive == "neither": 

5664 lmask = self > left 

5665 rmask = self < right 

5666 else: 

5667 raise ValueError( 

5668 "Inclusive has to be either string of 'both'," 

5669 "'left', 'right', or 'neither'." 

5670 ) 

5671 

5672 return lmask & rmask 

5673 

5674 # ---------------------------------------------------------------------- 

5675 # Convert to types that support pd.NA 

5676 

5677 def _convert_dtypes( 

5678 self, 

5679 infer_objects: bool = True, 

5680 convert_string: bool = True, 

5681 convert_integer: bool = True, 

5682 convert_boolean: bool = True, 

5683 convert_floating: bool = True, 

5684 ) -> Series: 

5685 input_series = self 

5686 if infer_objects: 

5687 input_series = input_series.infer_objects() 

5688 if is_object_dtype(input_series): 

5689 input_series = input_series.copy() 

5690 

5691 if convert_string or convert_integer or convert_boolean or convert_floating: 

5692 inferred_dtype = convert_dtypes( 

5693 input_series._values, 

5694 convert_string, 

5695 convert_integer, 

5696 convert_boolean, 

5697 convert_floating, 

5698 ) 

5699 result = input_series.astype(inferred_dtype) 

5700 else: 

5701 result = input_series.copy() 

5702 return result 

5703 

    # error: Cannot determine type of 'isna'
    # error: Return type "Series" of "isna" incompatible with return type "ndarray
    # [Any, dtype[bool_]]" in supertype "IndexOpsMixin"
    @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])  # type: ignore[has-type]
    def isna(self) -> Series:  # type: ignore[override]
        # Calls NDFrame.isna explicitly rather than super() — NOTE(review):
        # this bypasses the normal MRO, presumably to avoid IndexOpsMixin's
        # ndarray-returning isna; confirm before changing.
        return NDFrame.isna(self)

5710 

    # error: Cannot determine type of 'isna'
    @doc(NDFrame.isna, klass=_shared_doc_kwargs["klass"])  # type: ignore[has-type]
    def isnull(self) -> Series:
        """
        Series.isnull is an alias for Series.isna.
        """
        return super().isnull()

5718 

    # error: Cannot determine type of 'notna'
    @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"])  # type: ignore[has-type]
    def notna(self) -> Series:
        # Thin wrapper; docstring injected by @doc.
        return super().notna()

5723 

    # error: Cannot determine type of 'notna'
    @doc(NDFrame.notna, klass=_shared_doc_kwargs["klass"])  # type: ignore[has-type]
    def notnull(self) -> Series:
        """
        Series.notnull is an alias for Series.notna.
        """
        return super().notnull()

5731 

    # Overloads: the return type depends on ``inplace``.
    @overload
    def dropna(
        self, *, axis: Axis = ..., inplace: Literal[False] = ..., how: str | None = ...
    ) -> Series:
        ...

    @overload
    def dropna(
        self, *, axis: Axis = ..., inplace: Literal[True], how: str | None = ...
    ) -> None:
        ...

5743 

5744 @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"]) 

5745 def dropna( 

5746 self, axis: Axis = 0, inplace: bool = False, how: str | None = None 

5747 ) -> Series | None: 

5748 """ 

5749 Return a new Series with missing values removed. 

5750 

5751 See the :ref:`User Guide <missing_data>` for more on which values are 

5752 considered missing, and how to work with missing data. 

5753 

5754 Parameters 

5755 ---------- 

5756 axis : {0 or 'index'} 

5757 Unused. Parameter needed for compatibility with DataFrame. 

5758 inplace : bool, default False 

5759 If True, do operation inplace and return None. 

5760 how : str, optional 

5761 Not in use. Kept for compatibility. 

5762 

5763 Returns 

5764 ------- 

5765 Series or None 

5766 Series with NA entries dropped from it or None if ``inplace=True``. 

5767 

5768 See Also 

5769 -------- 

5770 Series.isna: Indicate missing values. 

5771 Series.notna : Indicate existing (non-missing) values. 

5772 Series.fillna : Replace missing values. 

5773 DataFrame.dropna : Drop rows or columns which contain NA values. 

5774 Index.dropna : Drop missing indices. 

5775 

5776 Examples 

5777 -------- 

5778 >>> ser = pd.Series([1., 2., np.nan]) 

5779 >>> ser 

5780 0 1.0 

5781 1 2.0 

5782 2 NaN 

5783 dtype: float64 

5784 

5785 Drop NA values from a Series. 

5786 

5787 >>> ser.dropna() 

5788 0 1.0 

5789 1 2.0 

5790 dtype: float64 

5791 

5792 Keep the Series with valid entries in the same variable. 

5793 

5794 >>> ser.dropna(inplace=True) 

5795 >>> ser 

5796 0 1.0 

5797 1 2.0 

5798 dtype: float64 

5799 

5800 Empty strings are not considered NA values. ``None`` is considered an 

5801 NA value. 

5802 

5803 >>> ser = pd.Series([np.NaN, 2, pd.NaT, '', None, 'I stay']) 

5804 >>> ser 

5805 0 NaN 

5806 1 2 

5807 2 NaT 

5808 3 

5809 4 None 

5810 5 I stay 

5811 dtype: object 

5812 >>> ser.dropna() 

5813 1 2 

5814 3 

5815 5 I stay 

5816 dtype: object 

5817 """ 

5818 inplace = validate_bool_kwarg(inplace, "inplace") 

5819 # Validate the axis parameter 

5820 self._get_axis_number(axis or 0) 

5821 

5822 if self._can_hold_na: 

5823 result = remove_na_arraylike(self) 

5824 if inplace: 

5825 self._update_inplace(result) 

5826 else: 

5827 return result 

5828 else: 

5829 if not inplace: 

5830 return self.copy() 

5831 return None 

5832 

5833 # ---------------------------------------------------------------------- 

5834 # Time series-oriented methods 

5835 

    # error: Cannot determine type of 'asfreq'
    @doc(NDFrame.asfreq, **_shared_doc_kwargs)  # type: ignore[has-type]
    def asfreq(
        self,
        freq: Frequency,
        method: FillnaOptions | None = None,
        how: str | None = None,
        normalize: bool = False,
        fill_value: Hashable = None,
    ) -> Series:
        # Thin wrapper around NDFrame.asfreq; docstring injected by @doc.
        return super().asfreq(
            freq=freq,
            method=method,
            how=how,
            normalize=normalize,
            fill_value=fill_value,
        )

5853 

    # error: Cannot determine type of 'resample'
    @doc(NDFrame.resample, **_shared_doc_kwargs)  # type: ignore[has-type]
    def resample(
        self,
        rule,
        axis: Axis = 0,
        closed: str | None = None,
        label: str | None = None,
        convention: str = "start",
        kind: str | None = None,
        loffset=None,
        base: int | None = None,
        on: Level = None,
        level: Level = None,
        origin: str | TimestampConvertibleTypes = "start_day",
        offset: TimedeltaConvertibleTypes | None = None,
        group_keys: bool | lib.NoDefault = no_default,
    ) -> Resampler:
        # Thin wrapper forwarding every argument unchanged to
        # NDFrame.resample; docstring injected by @doc.
        return super().resample(
            rule=rule,
            axis=axis,
            closed=closed,
            label=label,
            convention=convention,
            kind=kind,
            loffset=loffset,
            base=base,
            on=on,
            level=level,
            origin=origin,
            offset=offset,
            group_keys=group_keys,
        )

5887 

5888 def to_timestamp( 

5889 self, 

5890 freq=None, 

5891 how: Literal["s", "e", "start", "end"] = "start", 

5892 copy: bool = True, 

5893 ) -> Series: 

5894 """ 

5895 Cast to DatetimeIndex of Timestamps, at *beginning* of period. 

5896 

5897 Parameters 

5898 ---------- 

5899 freq : str, default frequency of PeriodIndex 

5900 Desired frequency. 

5901 how : {'s', 'e', 'start', 'end'} 

5902 Convention for converting period to timestamp; start of period 

5903 vs. end. 

5904 copy : bool, default True 

5905 Whether or not to return a copy. 

5906 

5907 Returns 

5908 ------- 

5909 Series with DatetimeIndex 

5910 """ 

5911 new_values = self._values 

5912 if copy: 

5913 new_values = new_values.copy() 

5914 

5915 if not isinstance(self.index, PeriodIndex): 

5916 raise TypeError(f"unsupported Type {type(self.index).__name__}") 

5917 new_index = self.index.to_timestamp(freq=freq, how=how) 

5918 return self._constructor(new_values, index=new_index).__finalize__( 

5919 self, method="to_timestamp" 

5920 ) 

5921 

5922 def to_period(self, freq: str | None = None, copy: bool = True) -> Series: 

5923 """ 

5924 Convert Series from DatetimeIndex to PeriodIndex. 

5925 

5926 Parameters 

5927 ---------- 

5928 freq : str, default None 

5929 Frequency associated with the PeriodIndex. 

5930 copy : bool, default True 

5931 Whether or not to return a copy. 

5932 

5933 Returns 

5934 ------- 

5935 Series 

5936 Series with index converted to PeriodIndex. 

5937 """ 

5938 new_values = self._values 

5939 if copy: 

5940 new_values = new_values.copy() 

5941 

5942 if not isinstance(self.index, DatetimeIndex): 

5943 raise TypeError(f"unsupported Type {type(self.index).__name__}") 

5944 new_index = self.index.to_period(freq=freq) 

5945 return self._constructor(new_values, index=new_index).__finalize__( 

5946 self, method="to_period" 

5947 ) 

5948 

    # Overloads: the return type depends on ``inplace``.
    @overload
    def ffill(
        self,
        *,
        axis: None | Axis = ...,
        inplace: Literal[False] = ...,
        limit: None | int = ...,
        downcast: dict | None = ...,
    ) -> Series:
        ...

    @overload
    def ffill(
        self,
        *,
        axis: None | Axis = ...,
        inplace: Literal[True],
        limit: None | int = ...,
        downcast: dict | None = ...,
    ) -> None:
        ...

    @overload
    def ffill(
        self,
        *,
        axis: None | Axis = ...,
        inplace: bool = ...,
        limit: None | int = ...,
        downcast: dict | None = ...,
    ) -> Series | None:
        ...

5981 

    # error: Signature of "ffill" incompatible with supertype "NDFrame"
    @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
    def ffill(  # type: ignore[override]
        self,
        axis: None | Axis = None,
        inplace: bool = False,
        limit: None | int = None,
        downcast: dict | None = None,
    ) -> Series | None:
        # Thin wrapper; forward-fill logic lives in NDFrame.ffill.
        return super().ffill(axis=axis, inplace=inplace, limit=limit, downcast=downcast)

5992 

    # Overloads: the return type depends on ``inplace``.
    @overload
    def bfill(
        self,
        *,
        axis: None | Axis = ...,
        inplace: Literal[False] = ...,
        limit: None | int = ...,
        downcast: dict | None = ...,
    ) -> Series:
        ...

    @overload
    def bfill(
        self,
        *,
        axis: None | Axis = ...,
        inplace: Literal[True],
        limit: None | int = ...,
        downcast: dict | None = ...,
    ) -> None:
        ...

    @overload
    def bfill(
        self,
        *,
        axis: None | Axis = ...,
        inplace: bool = ...,
        limit: None | int = ...,
        downcast: dict | None = ...,
    ) -> Series | None:
        ...

6025 

    # error: Signature of "bfill" incompatible with supertype "NDFrame"
    @deprecate_nonkeyword_arguments(version=None, allowed_args=["self"])
    def bfill(  # type: ignore[override]
        self,
        axis: None | Axis = None,
        inplace: bool = False,
        limit: None | int = None,
        downcast: dict | None = None,
    ) -> Series | None:
        # Thin wrapper; backward-fill logic lives in NDFrame.bfill.
        return super().bfill(axis=axis, inplace=inplace, limit=limit, downcast=downcast)

6036 

    @deprecate_nonkeyword_arguments(
        version=None, allowed_args=["self", "lower", "upper"]
    )
    def clip(
        self: Series,
        lower=None,
        upper=None,
        axis: Axis | None = None,
        inplace: bool = False,
        *args,
        **kwargs,
    ) -> Series | None:
        # Thin wrapper; clipping logic and docs live on NDFrame.clip.
        return super().clip(lower, upper, axis, inplace, *args, **kwargs)

6050 

    @deprecate_nonkeyword_arguments(version=None, allowed_args=["self", "method"])
    def interpolate(
        self: Series,
        method: str = "linear",
        axis: Axis = 0,
        limit: int | None = None,
        inplace: bool = False,
        limit_direction: str | None = None,
        limit_area: str | None = None,
        downcast: str | None = None,
        **kwargs,
    ) -> Series | None:
        # Thin wrapper; interpolation logic and docs live on
        # NDFrame.interpolate.
        return super().interpolate(
            method,
            axis,
            limit,
            inplace,
            limit_direction,
            limit_area,
            downcast,
            **kwargs,
        )

6073 

    # Overloads: the return type depends on ``inplace``.
    @overload
    def where(
        self,
        cond,
        other=...,
        *,
        inplace: Literal[False] = ...,
        axis: Axis | None = ...,
        level: Level = ...,
        errors: IgnoreRaise | lib.NoDefault = ...,
        try_cast: bool | lib.NoDefault = ...,
    ) -> Series:
        ...

    @overload
    def where(
        self,
        cond,
        other=...,
        *,
        inplace: Literal[True],
        axis: Axis | None = ...,
        level: Level = ...,
        errors: IgnoreRaise | lib.NoDefault = ...,
        try_cast: bool | lib.NoDefault = ...,
    ) -> None:
        ...

    @overload
    def where(
        self,
        cond,
        other=...,
        *,
        inplace: bool = ...,
        axis: Axis | None = ...,
        level: Level = ...,
        errors: IgnoreRaise | lib.NoDefault = ...,
        try_cast: bool | lib.NoDefault = ...,
    ) -> Series | None:
        ...

6115 

    # error: Signature of "where" incompatible with supertype "NDFrame"
    @deprecate_kwarg(old_arg_name="errors", new_arg_name=None)
    @deprecate_nonkeyword_arguments(
        version=None, allowed_args=["self", "cond", "other"]
    )
    def where(  # type: ignore[override]
        self,
        cond,
        other=lib.no_default,
        inplace: bool = False,
        axis: Axis | None = None,
        level: Level = None,
        errors: IgnoreRaise | lib.NoDefault = lib.no_default,
        try_cast: bool | lib.NoDefault = lib.no_default,
    ) -> Series | None:
        # ``errors`` is deprecated (see @deprecate_kwarg above) and is
        # intentionally NOT forwarded to the NDFrame implementation.
        return super().where(
            cond,
            other,
            inplace=inplace,
            axis=axis,
            level=level,
            try_cast=try_cast,
        )

6139 

    # Overloads: the return type depends on ``inplace``.
    @overload
    def mask(
        self,
        cond,
        other=...,
        *,
        inplace: Literal[False] = ...,
        axis: Axis | None = ...,
        level: Level = ...,
        errors: IgnoreRaise | lib.NoDefault = ...,
        try_cast: bool | lib.NoDefault = ...,
    ) -> Series:
        ...

    @overload
    def mask(
        self,
        cond,
        other=...,
        *,
        inplace: Literal[True],
        axis: Axis | None = ...,
        level: Level = ...,
        errors: IgnoreRaise | lib.NoDefault = ...,
        try_cast: bool | lib.NoDefault = ...,
    ) -> None:
        ...

    @overload
    def mask(
        self,
        cond,
        other=...,
        *,
        inplace: bool = ...,
        axis: Axis | None = ...,
        level: Level = ...,
        errors: IgnoreRaise | lib.NoDefault = ...,
        try_cast: bool | lib.NoDefault = ...,
    ) -> Series | None:
        ...

6181 

    # error: Signature of "mask" incompatible with supertype "NDFrame"
    @deprecate_kwarg(old_arg_name="errors", new_arg_name=None)
    @deprecate_nonkeyword_arguments(
        version=None, allowed_args=["self", "cond", "other"]
    )
    def mask(  # type: ignore[override]
        self,
        cond,
        other=np.nan,  # unlike ``where``, the default replacement is NaN
        inplace: bool = False,
        axis: Axis | None = None,
        level: Level = None,
        errors: IgnoreRaise | lib.NoDefault = lib.no_default,
        try_cast: bool | lib.NoDefault = lib.no_default,
    ) -> Series | None:
        # ``errors`` is deprecated (see @deprecate_kwarg above) and is
        # intentionally NOT forwarded to the NDFrame implementation.
        return super().mask(
            cond,
            other,
            inplace=inplace,
            axis=axis,
            level=level,
            try_cast=try_cast,
        )

6205 

    # ----------------------------------------------------------------------
    # Add index
    _AXIS_ORDERS = ["index"]  # a Series has exactly one axis: the index
    _AXIS_LEN = len(_AXIS_ORDERS)
    _info_axis_number = 0
    _info_axis_name = "index"

    # Expose axis 0 labels as the ``index`` attribute.
    index = properties.AxisProperty(
        axis=0, doc="The index (axis labels) of the Series."
    )

6216 

    # ----------------------------------------------------------------------
    # Accessor Methods
    # ----------------------------------------------------------------------
    # Namespaced accessors (s.str, s.dt, ...) exposed via CachedAccessor.
    str = CachedAccessor("str", StringMethods)
    dt = CachedAccessor("dt", CombinedDatetimelikeProperties)
    cat = CachedAccessor("cat", CategoricalAccessor)
    plot = CachedAccessor("plot", pandas.plotting.PlotAccessor)
    sparse = CachedAccessor("sparse", SparseAccessor)

    # ----------------------------------------------------------------------
    # Add plotting methods to Series
    hist = pandas.plotting.hist_series

6229 

6230 # ---------------------------------------------------------------------- 

6231 # Template-Based Arithmetic/Comparison Methods 

6232 

    def _cmp_method(self, other, op):
        # Hook implementing the comparison operators (==, !=, <, ...).
        res_name = ops.get_op_result_name(self, other)

        if isinstance(other, Series) and not self._indexed_same(other):
            # Comparisons do not align operands; mismatched labels are an
            # error rather than a reindex.
            raise ValueError("Can only compare identically-labeled Series objects")

        lvalues = self._values
        rvalues = extract_array(other, extract_numpy=True, extract_range=True)

        # Silence numpy floating-point error states during the comparison.
        with np.errstate(all="ignore"):
            res_values = ops.comparison_op(lvalues, rvalues, op)

        return self._construct_result(res_values, name=res_name)

6246 

    def _logical_method(self, other, op):
        # Hook implementing the logical operators (&, |, ^).
        res_name = ops.get_op_result_name(self, other)
        # Unlike _cmp_method, logical ops align the two operands first.
        self, other = ops.align_method_SERIES(self, other, align_asobject=True)

        lvalues = self._values
        rvalues = extract_array(other, extract_numpy=True, extract_range=True)

        res_values = ops.logical_op(lvalues, rvalues, op)
        return self._construct_result(res_values, name=res_name)

6256 

    def _arith_method(self, other, op):
        # Align operands, then defer to the shared IndexOpsMixin arithmetic.
        self, other = ops.align_method_SERIES(self, other)
        return base.IndexOpsMixin._arith_method(self, other, op)

6260 

6261 

# Module-level registration, run once at import time: attach the generated
# numeric reductions and statistics methods to Series.
Series._add_numeric_operations()

# Add arithmetic!
ops.add_flex_arithmetic_methods(Series)