Coverage for /var/srv/projects/api.amasfac.comuna18.com/tmp/venv/lib/python3.9/site-packages/pandas/_typing.py: 77%

134 statements  

coverage.py v6.4.4, created at 2023-07-17 14:22 -0600

from __future__ import annotations

from datetime import (
    datetime,
    timedelta,
    tzinfo,
)
from os import PathLike
from typing import (
    TYPE_CHECKING,
    Any,
    Callable,
    Dict,
    Hashable,
    Iterator,
    List,
    Literal,
    Mapping,
    Optional,
    Protocol,
    Sequence,
    Tuple,
    Type as type_t,
    TypeVar,
    Union,
)

import numpy as np

# To prevent import cycles place any internal imports in the branch below
# and use a string literal forward reference to it in subsequent types
# https://mypy.readthedocs.io/en/latest/common_issues.html#import-cycles
if TYPE_CHECKING:
    import numpy.typing as npt

    from pandas._libs import (
        NaTType,
        Period,
        Timedelta,
        Timestamp,
    )
    from pandas._libs.tslibs import BaseOffset

    from pandas.core.dtypes.dtypes import ExtensionDtype

    from pandas import Interval
    from pandas.core.arrays.base import ExtensionArray
    from pandas.core.frame import DataFrame
    from pandas.core.generic import NDFrame
    from pandas.core.groupby.generic import (
        DataFrameGroupBy,
        GroupBy,
        SeriesGroupBy,
    )
    from pandas.core.indexes.base import Index
    from pandas.core.internals import (
        ArrayManager,
        BlockManager,
        SingleArrayManager,
        SingleBlockManager,
    )
    from pandas.core.resample import Resampler
    from pandas.core.series import Series
    from pandas.core.window.rolling import BaseWindow

    from pandas.io.formats.format import EngFormatter

    ScalarLike_co = Union[
        int,
        float,
        complex,
        str,
        bytes,
        np.generic,
    ]

    # numpy compatible types
    NumpyValueArrayLike = Union[ScalarLike_co, npt.ArrayLike]
    # Name "npt._ArrayLikeInt_co" is not defined [name-defined]
    NumpySorter = Optional[npt._ArrayLikeInt_co]  # type: ignore[name-defined]

else:
    npt: Any = None

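# Illustrative sketch (not part of pandas): because the imports above run only
# under TYPE_CHECKING, aliases and signatures below name those classes as
# string forward references, which only a static type checker resolves; at
# runtime no pandas submodule is imported here, so the cycle never materializes.
_ForwardRefExample = Union["DataFrame", "Series", None]  # hypothetical alias
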

HashableT = TypeVar("HashableT", bound=Hashable)

# array-like

ArrayLike = Union["ExtensionArray", np.ndarray]
AnyArrayLike = Union[ArrayLike, "Index", "Series"]

# scalars

PythonScalar = Union[str, float, bool]
DatetimeLikeScalar = Union["Period", "Timestamp", "Timedelta"]
PandasScalar = Union["Period", "Timestamp", "Timedelta", "Interval"]
Scalar = Union[PythonScalar, PandasScalar, np.datetime64, np.timedelta64, datetime]
IntStrT = TypeVar("IntStrT", int, str)


# timestamp and timedelta convertible types

TimestampConvertibleTypes = Union[
    "Timestamp", datetime, np.datetime64, np.int64, float, str
]
TimedeltaConvertibleTypes = Union[
    "Timedelta", timedelta, np.timedelta64, np.int64, float, str
]
Timezone = Union[str, tzinfo]


# NDFrameT is stricter and ensures that the same subclass of NDFrame is always
# used. E.g. `def func(a: NDFrameT) -> NDFrameT: ...` means that if a
# Series is passed into a function, a Series is always returned and if a DataFrame
# is passed in, a DataFrame is always returned.
NDFrameT = TypeVar("NDFrameT", bound="NDFrame")
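# Illustrative sketch (not part of pandas): annotating both the parameter and
# the return value with NDFrameT tells the type checker that the same subclass
# comes back -- a Series in gives a Series out, a DataFrame in gives a
# DataFrame out.
def _ensure_copy_example(obj: NDFrameT) -> NDFrameT:  # hypothetical helper
    return obj.copy()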

NumpyIndexT = TypeVar("NumpyIndexT", np.ndarray, "Index")

Axis = Union[str, int]
IndexLabel = Union[Hashable, Sequence[Hashable]]
Level = Hashable
Shape = Tuple[int, ...]
Suffixes = Tuple[Optional[str], Optional[str]]
Ordered = Optional[bool]
JSONSerializable = Optional[Union[PythonScalar, List, Dict]]
Frequency = Union[str, "BaseOffset"]
Axes = Union[AnyArrayLike, List, range]

RandomState = Union[
    int,
    ArrayLike,
    np.random.Generator,
    np.random.BitGenerator,
    np.random.RandomState,
]

# dtypes
NpDtype = Union[str, np.dtype, type_t[Union[str, complex, bool, object]]]
Dtype = Union["ExtensionDtype", NpDtype]
AstypeArg = Union["ExtensionDtype", "npt.DTypeLike"]

# DtypeArg specifies all allowable dtypes in a function's dtype argument
DtypeArg = Union[Dtype, Dict[Hashable, Dtype]]
DtypeObj = Union[np.dtype, "ExtensionDtype"]

# converters
ConvertersArg = Dict[Hashable, Callable[[Dtype], Dtype]]

# parse_dates
ParseDatesArg = Union[
    bool, List[Hashable], List[List[Hashable]], Dict[Hashable, List[Hashable]]
]

# For functions like rename that convert one label to another
Renamer = Union[Mapping[Any, Hashable], Callable[[Any], Hashable]]

# to maintain type information across generic functions and parametrization
T = TypeVar("T")

# used in decorators to preserve the signature of the function it decorates
# see https://mypy.readthedocs.io/en/stable/generics.html#declaring-decorators
FuncType = Callable[..., Any]
F = TypeVar("F", bound=FuncType)
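# Illustrative sketch (not part of pandas): a decorator annotated with ``F``
# is seen by the type checker as returning a callable with exactly the same
# signature it received.
def _identity_decorator_example(func: F) -> F:  # hypothetical decorator
    return func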

# types of vectorized key functions for DataFrame::sort_values and
# DataFrame::sort_index, among others
ValueKeyFunc = Optional[Callable[["Series"], Union["Series", AnyArrayLike]]]
IndexKeyFunc = Optional[Callable[["Index"], Union["Index", AnyArrayLike]]]
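# Illustrative sketch (not part of pandas): a ValueKeyFunc receives the whole
# Series and returns an equally long Series/array to sort by, e.g. a
# case-insensitive key passed as ``df.sort_values("name", key=...)``.
def _lowercase_key_example(values: "Series") -> "Series":  # hypothetical key
    return values.str.lower()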

# types of `func` kwarg for DataFrame.aggregate and Series.aggregate
AggFuncTypeBase = Union[Callable, str]
AggFuncTypeDict = Dict[Hashable, Union[AggFuncTypeBase, List[AggFuncTypeBase]]]
AggFuncType = Union[
    AggFuncTypeBase,
    List[AggFuncTypeBase],
    AggFuncTypeDict,
]
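# Illustrative sketch (not part of pandas): values matching AggFuncType, i.e.
# the forms accepted for ``func`` -- a single callable or function name, a
# list of them, or a per-column mapping.
_agg_func_examples: List[AggFuncType] = [  # hypothetical examples
    "mean",
    np.sum,
    ["min", "max"],
    {"price": "mean", "quantity": ["sum", "max"]},
]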

AggObjType = Union[
    "Series",
    "DataFrame",
    "GroupBy",
    "SeriesGroupBy",
    "DataFrameGroupBy",
    "BaseWindow",
    "Resampler",
]

PythonFuncType = Callable[[Any], Any]


# filenames and file-like-objects
AnyStr_cov = TypeVar("AnyStr_cov", str, bytes, covariant=True)
AnyStr_con = TypeVar("AnyStr_con", str, bytes, contravariant=True)


class BaseBuffer(Protocol):
    @property
    def mode(self) -> str:
        # for _get_filepath_or_buffer
        ...

    def fileno(self) -> int:
        # for _MMapWrapper
        ...

    def seek(self, __offset: int, __whence: int = ...) -> int:
        # with one argument: gzip.GzipFile, bz2.BZ2File
        # with two arguments: zip.ZipFile, read_sas
        ...

    def seekable(self) -> bool:
        # for bz2.BZ2File
        ...

    def tell(self) -> int:
        # for zip.ZipFile, read_stata, to_stata
        ...


class ReadBuffer(BaseBuffer, Protocol[AnyStr_cov]):
    def read(self, __n: int | None = ...) -> AnyStr_cov:
        # for BytesIOWrapper, gzip.GzipFile, bz2.BZ2File
        ...


class WriteBuffer(BaseBuffer, Protocol[AnyStr_con]):
    def write(self, __b: AnyStr_con) -> Any:
        # for gzip.GzipFile, bz2.BZ2File
        ...

    def flush(self) -> Any:
        # for gzip.GzipFile, bz2.BZ2File
        ...


class ReadPickleBuffer(ReadBuffer[bytes], Protocol):
    def readline(self) -> AnyStr_cov:
        ...


class WriteExcelBuffer(WriteBuffer[bytes], Protocol):
    def truncate(self, size: int | None = ...) -> int:
        ...


class ReadCsvBuffer(ReadBuffer[AnyStr_cov], Protocol):
    def __iter__(self) -> Iterator[AnyStr_cov]:
        # for engine=python
        ...

    def readline(self) -> AnyStr_cov:
        # for engine=python
        ...


    @property
    def closed(self) -> bool:
        # for engine=pyarrow
        ...


FilePath = Union[str, "PathLike[str]"]
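# Illustrative sketch (not part of pandas): IO helpers typically accept either
# a FilePath or any object that structurally satisfies one of the protocols
# above; an open binary file, io.BytesIO or gzip.GzipFile all match
# WriteBuffer[bytes] without inheriting from it.
def _write_bytes_example(dest: FilePath | WriteBuffer[bytes], payload: bytes) -> None:
    # hypothetical helper, for illustration only
    if isinstance(dest, (str, PathLike)):
        with open(dest, "wb") as handle:
            handle.write(payload)
    else:
        dest.write(payload)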

# for arbitrary kwargs passed during reading/writing files
StorageOptions = Optional[Dict[str, Any]]


# compression keywords and compression
CompressionDict = Dict[str, Any]
CompressionOptions = Optional[
    Union[Literal["infer", "gzip", "bz2", "zip", "xz", "zstd", "tar"], CompressionDict]
]

# types in DataFrameFormatter
FormattersType = Union[
    List[Callable], Tuple[Callable, ...], Mapping[Union[str, int], Callable]
]
ColspaceType = Mapping[Hashable, Union[str, int]]
FloatFormatType = Union[str, Callable, "EngFormatter"]
ColspaceArgType = Union[
    str, int, Sequence[Union[str, int]], Mapping[Hashable, Union[str, int]]
]

# Arguments for fillna()
FillnaOptions = Literal["backfill", "bfill", "ffill", "pad"]

# internals
Manager = Union[
    "ArrayManager", "SingleArrayManager", "BlockManager", "SingleBlockManager"
]
SingleManager = Union["SingleArrayManager", "SingleBlockManager"]
Manager2D = Union["ArrayManager", "BlockManager"]


# indexing
# PositionalIndexer -> valid 1D positional indexer, e.g. can pass
# to ndarray.__getitem__
# ScalarIndexer is for a single value as the index
# SequenceIndexer is for list-like or slices (but not tuples)
# PositionalIndexerTuple extends the PositionalIndexer for 2D arrays
# These are used in various __getitem__ overloads
# TODO(typing#684): add Ellipsis, see
# https://github.com/python/typing/issues/684#issuecomment-548203158
# https://bugs.python.org/issue41810
# Using List[int] here rather than Sequence[int] to disallow tuples.
ScalarIndexer = Union[int, np.integer]
SequenceIndexer = Union[slice, List[int], np.ndarray]
PositionalIndexer = Union[ScalarIndexer, SequenceIndexer]
PositionalIndexerTuple = Tuple[PositionalIndexer, PositionalIndexer]
PositionalIndexer2D = Union[PositionalIndexer, PositionalIndexerTuple]
if TYPE_CHECKING:
    TakeIndexer = Union[Sequence[int], Sequence[np.integer], npt.NDArray[np.integer]]
else:
    TakeIndexer = Any
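# Illustrative sketch (not part of pandas): values that satisfy
# PositionalIndexer -- a single integer (ScalarIndexer) or a slice, list of
# ints, or ndarray (SequenceIndexer); a 2-tuple of these is a
# PositionalIndexer2D.
_positional_indexer_examples: List[PositionalIndexer] = [  # hypothetical examples
    3,
    slice(1, 10, 2),
    [0, 2, 5],
    np.arange(4),
]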

# Shared by functions such as drop and astype
IgnoreRaise = Literal["ignore", "raise"]

# Windowing rank methods
WindowingRankType = Literal["average", "min", "max"]

# read_csv engines
CSVEngine = Literal["c", "python", "pyarrow", "python-fwf"]

# read_xml parsers
XMLParsers = Literal["lxml", "etree"]

# Interval closed type
IntervalLeftRight = Literal["left", "right"]
IntervalClosedType = Union[IntervalLeftRight, Literal["both", "neither"]]

# datetime and NaTType
DatetimeNaTType = Union[datetime, "NaTType"]
DateTimeErrorChoices = Union[IgnoreRaise, Literal["coerce"]]

# sort_index
SortKind = Literal["quicksort", "mergesort", "heapsort", "stable"]
NaPosition = Literal["first", "last"]

# quantile interpolation
QuantileInterpolation = Literal["linear", "lower", "higher", "midpoint", "nearest"]

# plotting
PlottingOrientation = Literal["horizontal", "vertical"]