Coverage for /var/srv/projects/api.amasfac.comuna18.com/tmp/venv/lib/python3.9/site-packages/numpy/lib/npyio.py: 6%

786 statements  

coverage.py v6.4.4, created at 2023-07-17 14:22 -0600

import os
import re
import functools
import itertools
import warnings
import weakref
import contextlib
import operator
from operator import itemgetter, index as opindex, methodcaller
from collections.abc import Mapping

import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core import overrides
from numpy.core.multiarray import packbits, unpackbits
from numpy.core._multiarray_umath import _load_from_filelike
from numpy.core.overrides import set_array_function_like_doc, set_module
from ._iotools import (
    LineSplitter, NameValidator, StringConverter, ConverterError,
    ConverterLockError, ConversionWarning, _is_string_like,
    has_nested_fields, flatten_dtype, easy_dtype, _decode_line
    )

from numpy.compat import (
    asbytes, asstr, asunicode, os_fspath, os_PathLike,
    pickle
    )


__all__ = [
    'savetxt', 'loadtxt', 'genfromtxt',
    'recfromtxt', 'recfromcsv', 'load', 'save', 'savez',
    'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
    ]


array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')


class BagObj:
    """
    BagObj(obj)

    Convert attribute look-ups to getitems on the object passed in.

    Parameters
    ----------
    obj : class instance
        Object on which attribute look-up is performed.

    Examples
    --------
    >>> from numpy.lib.npyio import BagObj as BO
    >>> class BagDemo:
    ...     def __getitem__(self, key):  # An instance of BagObj(BagDemo)
    ...         # will call this method when any
    ...         # attribute look-up is required
    ...         result = "Doesn't matter what you want, "
    ...         return result + "you're gonna get this"
    ...
    >>> demo_obj = BagDemo()
    >>> bagobj = BO(demo_obj)
    >>> bagobj.hello_there
    "Doesn't matter what you want, you're gonna get this"
    >>> bagobj.I_can_be_anything
    "Doesn't matter what you want, you're gonna get this"

    """

    def __init__(self, obj):
        # Use weakref to make NpzFile objects collectable by refcount
        self._obj = weakref.proxy(obj)

    def __getattribute__(self, key):
        try:
            return object.__getattribute__(self, '_obj')[key]
        except KeyError:
            raise AttributeError(key) from None

    def __dir__(self):
        """
        Enables dir(bagobj) to list the files in an NpzFile.

        This also enables tab-completion in an interpreter or IPython.
        """
        return list(object.__getattribute__(self, '_obj').keys())


def zipfile_factory(file, *args, **kwargs):
    """
    Create a ZipFile.

    Allows for Zip64, and the `file` argument can accept file, str, or
    pathlib.Path objects. `args` and `kwargs` are passed to the
    zipfile.ZipFile constructor.
    """
    if not hasattr(file, 'read'):
        file = os_fspath(file)
    import zipfile
    kwargs['allowZip64'] = True
    return zipfile.ZipFile(file, *args, **kwargs)
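
# A hedged usage sketch for ``zipfile_factory`` (the archive name and its
# contents below are hypothetical): the helper simply forwards to
# ``zipfile.ZipFile`` with Zip64 enabled, so member listing works as with
# any ZipFile:
#
#     >>> zf = zipfile_factory('archive.npz', mode='r')
#     >>> zf.namelist()
#     ['x.npy', 'y.npy']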

class NpzFile(Mapping):
    """
    NpzFile(fid)

    A dictionary-like object with lazy-loading of files in the zipped
    archive provided on construction.

    `NpzFile` is used to load files in the NumPy ``.npz`` data archive
    format. It assumes that files in the archive have a ``.npy`` extension;
    other files are ignored.

    The arrays and file strings are lazily loaded on either
    getitem access using ``obj['key']`` or attribute lookup using
    ``obj.f.key``. A list of all files (without ``.npy`` extensions) can
    be obtained with ``obj.files`` and the ZipFile object itself using
    ``obj.zip``.

    Attributes
    ----------
    files : list of str
        List of all files in the archive with a ``.npy`` extension.
    zip : ZipFile instance
        The ZipFile object initialized with the zipped archive.
    f : BagObj instance
        An object on which attribute lookup can be performed as an
        alternative to getitem access on the `NpzFile` instance itself.
    allow_pickle : bool, optional
        Allow loading pickled data. Default: False

        .. versionchanged:: 1.16.3
            Made default False in response to CVE-2019-6446.

    pickle_kwargs : dict, optional
        Additional keyword arguments to pass on to pickle.load.
        These are only useful when loading object arrays saved on
        Python 2 when using Python 3.
    max_header_size : int, optional
        Maximum allowed size of the header.  Large headers may not be safe
        to load securely and thus require explicitly passing a larger value.
        See :py:meth:`ast.literal_eval()` for details.
        This option is ignored when `allow_pickle` is passed.  In that case
        the file is by definition trusted and the limit is unnecessary.

    Parameters
    ----------
    fid : file or str
        The zipped archive to open. This is either a file-like object
        or a string containing the path to the archive.
    own_fid : bool, optional
        Whether NpzFile should close the file handle.
        Requires that `fid` is a file-like object.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)
    >>> np.savez(outfile, x=x, y=y)
    >>> _ = outfile.seek(0)

    >>> npz = np.load(outfile)
    >>> isinstance(npz, np.lib.npyio.NpzFile)
    True
    >>> sorted(npz.files)
    ['x', 'y']
    >>> npz['x']  # getitem access
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> npz.f.x  # attribute lookup
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    # Make __exit__ safe if zipfile_factory raises an exception
    zip = None
    fid = None

    def __init__(self, fid, own_fid=False, allow_pickle=False,
                 pickle_kwargs=None, *,
                 max_header_size=format._MAX_HEADER_SIZE):
        # Import is postponed to here since zipfile depends on gzip, an
        # optional component of the so-called standard library.
        _zip = zipfile_factory(fid)
        self._files = _zip.namelist()
        self.files = []
        self.allow_pickle = allow_pickle
        self.max_header_size = max_header_size
        self.pickle_kwargs = pickle_kwargs
        for x in self._files:
            if x.endswith('.npy'):
                self.files.append(x[:-4])
            else:
                self.files.append(x)
        self.zip = _zip
        self.f = BagObj(self)
        if own_fid:
            self.fid = fid

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        """
        Close the file.

        """
        if self.zip is not None:
            self.zip.close()
            self.zip = None
        if self.fid is not None:
            self.fid.close()
            self.fid = None
        self.f = None  # break reference cycle

    def __del__(self):
        self.close()

    # Implement the Mapping ABC
    def __iter__(self):
        return iter(self.files)

    def __len__(self):
        return len(self.files)

    def __getitem__(self, key):
        # FIXME: This seems like it will copy strings around
        #   more than is strictly necessary.  The zipfile
        #   will read the string and then
        #   the format.read_array will copy the string
        #   to another place in memory.
        #   It would be better if the zipfile could read
        #   (or at least uncompress) the data
        #   directly into the array memory.
        member = False
        if key in self._files:
            member = True
        elif key in self.files:
            member = True
            key += '.npy'
        if member:
            bytes = self.zip.open(key)
            magic = bytes.read(len(format.MAGIC_PREFIX))
            bytes.close()
            if magic == format.MAGIC_PREFIX:
                bytes = self.zip.open(key)
                return format.read_array(bytes,
                                         allow_pickle=self.allow_pickle,
                                         pickle_kwargs=self.pickle_kwargs,
                                         max_header_size=self.max_header_size)
            else:
                return self.zip.read(key)
        else:
            raise KeyError("%s is not a file in the archive" % key)
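
# Hedged sketch of the key resolution in ``NpzFile.__getitem__`` above:
# members are stored as ``<name>.npy`` inside the zip, so a key matches
# either with or without the extension ('b.npz' and its contents are
# assumed for illustration):
#
#     >>> npz = np.load('b.npz')  # archive assumed to contain array 'x'
#     >>> np.array_equal(npz['x'], npz['x.npy'])
#     True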

@set_module('numpy')
def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
         encoding='ASCII', *, max_header_size=format._MAX_HEADER_SIZE):
    """
    Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.

    .. warning:: Loading files that contain object arrays uses the ``pickle``
                 module, which is not secure against erroneous or maliciously
                 constructed data. Consider passing ``allow_pickle=False`` to
                 load data that is known not to contain object arrays for the
                 safer handling of untrusted sources.

    Parameters
    ----------
    file : file-like object, string, or pathlib.Path
        The file to read. File-like objects must support the
        ``seek()`` and ``read()`` methods and must always
        be opened in binary mode.  Pickled files require that the
        file-like object support the ``readline()`` method as well.
    mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
        If not None, then memory-map the file, using the given mode (see
        `numpy.memmap` for a detailed description of the modes).  A
        memory-mapped array is kept on disk. However, it can be accessed
        and sliced like any ndarray.  Memory mapping is especially useful
        for accessing small fragments of large files without reading the
        entire file into memory.
    allow_pickle : bool, optional
        Allow loading pickled object arrays stored in npy files. Reasons for
        disallowing pickles include security, as loading pickled data can
        execute arbitrary code. If pickles are disallowed, loading object
        arrays will fail. Default: False

        .. versionchanged:: 1.16.3
            Made default False in response to CVE-2019-6446.

    fix_imports : bool, optional
        Only useful when loading Python 2 generated pickled files on Python 3,
        which includes npy/npz files containing object arrays. If
        `fix_imports` is True, pickle will try to map the old Python 2 names
        to the new names used in Python 3.
    encoding : str, optional
        What encoding to use when reading Python 2 strings. Only useful when
        loading Python 2 generated pickled files in Python 3, which includes
        npy/npz files containing object arrays. Values other than 'latin1',
        'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
        data. Default: 'ASCII'
    max_header_size : int, optional
        Maximum allowed size of the header.  Large headers may not be safe
        to load securely and thus require explicitly passing a larger value.
        See :py:meth:`ast.literal_eval()` for details.
        This option is ignored when `allow_pickle` is passed.  In that case
        the file is by definition trusted and the limit is unnecessary.

    Returns
    -------
    result : array, tuple, dict, etc.
        Data stored in the file. For ``.npz`` files, the returned instance
        of NpzFile class must be closed to avoid leaking file descriptors.

    Raises
    ------
    OSError
        If the input file does not exist or cannot be read.
    UnpicklingError
        If ``allow_pickle=True``, but the file cannot be loaded as a pickle.
    ValueError
        The file contains an object array, but ``allow_pickle=False`` given.

    See Also
    --------
    save, savez, savez_compressed, loadtxt
    memmap : Create a memory-map to an array stored in a file on disk.
    lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.

    Notes
    -----
    - If the file contains pickle data, then whatever object is stored
      in the pickle is returned.
    - If the file is a ``.npy`` file, then a single array is returned.
    - If the file is a ``.npz`` file, then a dictionary-like object is
      returned, containing ``{filename: array}`` key-value pairs, one for
      each file in the archive.
    - If the file is a ``.npz`` file, the returned value supports the
      context manager protocol in a similar fashion to the open function::

        with load('foo.npz') as data:
            a = data['a']

      The underlying file descriptor is closed when exiting the 'with'
      block.

    Examples
    --------
    Store data to disk, and load it again:

    >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
    >>> np.load('/tmp/123.npy')
    array([[1, 2, 3],
           [4, 5, 6]])

    Store compressed data to disk, and load it again:

    >>> a = np.array([[1, 2, 3], [4, 5, 6]])
    >>> b = np.array([1, 2])
    >>> np.savez('/tmp/123.npz', a=a, b=b)
    >>> data = np.load('/tmp/123.npz')
    >>> data['a']
    array([[1, 2, 3],
           [4, 5, 6]])
    >>> data['b']
    array([1, 2])
    >>> data.close()

    Mem-map the stored array, and then access the second row
    directly from disk:

    >>> X = np.load('/tmp/123.npy', mmap_mode='r')
    >>> X[1, :]
    memmap([4, 5, 6])

    """
    if encoding not in ('ASCII', 'latin1', 'bytes'):
        # The 'encoding' value for pickle also affects what encoding
        # the serialized binary data of NumPy arrays is loaded
        # in. Pickle does not pass on the encoding information to
        # NumPy. The unpickling code in numpy.core.multiarray is
        # written to assume that unicode data appearing where binary
        # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
        #
        # Other encoding values can corrupt binary data, and we
        # purposefully disallow them. For the same reason, the errors=
        # argument is not exposed, as values other than 'strict' can
        # similarly silently corrupt numerical data.
        raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")

    pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)

    with contextlib.ExitStack() as stack:
        if hasattr(file, 'read'):
            fid = file
            own_fid = False
        else:
            fid = stack.enter_context(open(os_fspath(file), "rb"))
            own_fid = True

        # Code to distinguish between NumPy binary files and pickles.
        _ZIP_PREFIX = b'PK\x03\x04'
        _ZIP_SUFFIX = b'PK\x05\x06'  # empty zip files start with this
        N = len(format.MAGIC_PREFIX)
        magic = fid.read(N)
        # If the file size is less than N, we need to make sure not
        # to seek past the beginning of the file
        fid.seek(-min(N, len(magic)), 1)  # back-up
        if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
            # zip-file (assume .npz)
            # Potentially transfer file ownership to NpzFile
            stack.pop_all()
            ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle,
                          pickle_kwargs=pickle_kwargs,
                          max_header_size=max_header_size)
            return ret
        elif magic == format.MAGIC_PREFIX:
            # .npy file
            if mmap_mode:
                if allow_pickle:
                    max_header_size = 2**64
                return format.open_memmap(file, mode=mmap_mode,
                                          max_header_size=max_header_size)
            else:
                return format.read_array(fid, allow_pickle=allow_pickle,
                                         pickle_kwargs=pickle_kwargs,
                                         max_header_size=max_header_size)
        else:
            # Try a pickle
            if not allow_pickle:
                raise ValueError("Cannot load file containing pickled data "
                                 "when allow_pickle=False")
            try:
                return pickle.load(fid, **pickle_kwargs)
            except Exception as e:
                raise pickle.UnpicklingError(
                    f"Failed to interpret file {file!r} as a pickle") from e
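
# Hedged sketch of the magic-byte detection above (file names assumed):
# ``.npy`` files start with ``format.MAGIC_PREFIX`` and ``.npz`` archives
# with the standard zip signature, which is how `load` picks a branch:
#
#     >>> np.save('a.npy', np.arange(3))
#     >>> open('a.npy', 'rb').read(6)
#     b'\x93NUMPY'
#     >>> np.savez('b.npz', x=np.arange(3))
#     >>> open('b.npz', 'rb').read(4)
#     b'PK\x03\x04'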

def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):
    return (arr,)


@array_function_dispatch(_save_dispatcher)
def save(file, arr, allow_pickle=True, fix_imports=True):
    """
    Save an array to a binary file in NumPy ``.npy`` format.

    Parameters
    ----------
    file : file, str, or pathlib.Path
        File or filename to which the data is saved.  If file is a
        file-object, then the filename is unchanged.  If file is a string or
        Path, a ``.npy`` extension will be appended to the filename if it
        does not already have one.
    arr : array_like
        Array data to be saved.
    allow_pickle : bool, optional
        Allow saving object arrays using Python pickles. Reasons for
        disallowing pickles include security (loading pickled data can
        execute arbitrary code) and portability (pickled objects may not be
        loadable on different Python installations, for example if the
        stored objects require libraries that are not available, and not all
        pickled data is compatible between Python 2 and Python 3).
        Default: True
    fix_imports : bool, optional
        Only useful in forcing objects in object arrays on Python 3 to be
        pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
        will try to map the new Python 3 names to the old module names used in
        Python 2, so that the pickle data stream is readable with Python 2.

    See Also
    --------
    savez : Save several arrays into a ``.npz`` archive
    savetxt, load

    Notes
    -----
    For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.

    Any data saved to the file is appended to the end of the file.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()

    >>> x = np.arange(10)
    >>> np.save(outfile, x)

    >>> _ = outfile.seek(0)  # Only needed to simulate closing & reopening file
    >>> np.load(outfile)
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])


    >>> with open('test.npy', 'wb') as f:
    ...     np.save(f, np.array([1, 2]))
    ...     np.save(f, np.array([1, 3]))
    >>> with open('test.npy', 'rb') as f:
    ...     a = np.load(f)
    ...     b = np.load(f)
    >>> print(a, b)
    # [1 2] [1 3]
    """
    if hasattr(file, 'write'):
        file_ctx = contextlib.nullcontext(file)
    else:
        file = os_fspath(file)
        if not file.endswith('.npy'):
            file = file + '.npy'
        file_ctx = open(file, "wb")

    with file_ctx as fid:
        arr = np.asanyarray(arr)
        format.write_array(fid, arr, allow_pickle=allow_pickle,
                           pickle_kwargs=dict(fix_imports=fix_imports))


def _savez_dispatcher(file, *args, **kwds):
    yield from args
    yield from kwds.values()


@array_function_dispatch(_savez_dispatcher)
def savez(file, *args, **kwds):
    """Save several arrays into a single file in uncompressed ``.npz`` format.

    Provide arrays as keyword arguments to store them under the
    corresponding name in the output file: ``savez(fn, x=x, y=y)``.

    If arrays are specified as positional arguments, i.e., ``savez(fn,
    x, y)``, their names will be `arr_0`, `arr_1`, etc.

    Parameters
    ----------
    file : str or file
        Either the filename (string) or an open file (file-like object)
        where the data will be saved. If file is a string or a Path, the
        ``.npz`` extension will be appended to the filename if it is not
        already there.
    args : Arguments, optional
        Arrays to save to the file. Please use keyword arguments (see
        `kwds` below) to assign names to arrays.  Arrays specified as
        args will be named "arr_0", "arr_1", and so on.
    kwds : Keyword arguments, optional
        Arrays to save to the file. Each array will be saved to the
        output file with its corresponding keyword name.

    Returns
    -------
    None

    See Also
    --------
    save : Save a single array to a binary file in NumPy format.
    savetxt : Save an array to a file as plain text.
    savez_compressed : Save several arrays into a compressed ``.npz`` archive

    Notes
    -----
    The ``.npz`` file format is a zipped archive of files named after the
    variables they contain.  The archive is not compressed and each file
    in the archive contains one variable in ``.npy`` format. For a
    description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.

    When opening the saved ``.npz`` file with `load` a `NpzFile` object is
    returned. This is a dictionary-like object which can be queried for
    its list of arrays (with the ``.files`` attribute), and for the arrays
    themselves.

    Keys passed in `kwds` are used as filenames inside the ZIP archive.
    Therefore, keys should be valid filenames; e.g., avoid keys that begin
    with ``/`` or contain ``.``.

    When naming variables with keyword arguments, it is not possible to name
    a variable ``file``, as this would cause the ``file`` argument to be
    defined twice in the call to ``savez``.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)

    Using `savez` with \\*args, the arrays are saved with default names.

    >>> np.savez(outfile, x, y)
    >>> _ = outfile.seek(0)  # Only needed to simulate closing & reopening file
    >>> npzfile = np.load(outfile)
    >>> npzfile.files
    ['arr_0', 'arr_1']
    >>> npzfile['arr_0']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    Using `savez` with \\**kwds, the arrays are saved with the keyword names.

    >>> outfile = TemporaryFile()
    >>> np.savez(outfile, x=x, y=y)
    >>> _ = outfile.seek(0)
    >>> npzfile = np.load(outfile)
    >>> sorted(npzfile.files)
    ['x', 'y']
    >>> npzfile['x']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    _savez(file, args, kwds, False)


def _savez_compressed_dispatcher(file, *args, **kwds):
    yield from args
    yield from kwds.values()


@array_function_dispatch(_savez_compressed_dispatcher)
def savez_compressed(file, *args, **kwds):
    """
    Save several arrays into a single file in compressed ``.npz`` format.

    Provide arrays as keyword arguments to store them under the
    corresponding name in the output file:
    ``savez_compressed(fn, x=x, y=y)``.

    If arrays are specified as positional arguments, i.e.,
    ``savez_compressed(fn, x, y)``, their names will be `arr_0`, `arr_1`,
    etc.

    Parameters
    ----------
    file : str or file
        Either the filename (string) or an open file (file-like object)
        where the data will be saved. If file is a string or a Path, the
        ``.npz`` extension will be appended to the filename if it is not
        already there.
    args : Arguments, optional
        Arrays to save to the file. Please use keyword arguments (see
        `kwds` below) to assign names to arrays.  Arrays specified as
        args will be named "arr_0", "arr_1", and so on.
    kwds : Keyword arguments, optional
        Arrays to save to the file. Each array will be saved to the
        output file with its corresponding keyword name.

    Returns
    -------
    None

    See Also
    --------
    numpy.save : Save a single array to a binary file in NumPy format.
    numpy.savetxt : Save an array to a file as plain text.
    numpy.savez : Save several arrays into an uncompressed ``.npz`` file
    numpy.load : Load the files created by savez_compressed.

    Notes
    -----
    The ``.npz`` file format is a zipped archive of files named after the
    variables they contain.  The archive is compressed with
    ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one
    variable in ``.npy`` format. For a description of the ``.npy`` format,
    see :py:mod:`numpy.lib.format`.

    When opening the saved ``.npz`` file with `load` a `NpzFile` object is
    returned. This is a dictionary-like object which can be queried for
    its list of arrays (with the ``.files`` attribute), and for the arrays
    themselves.

    Examples
    --------
    >>> test_array = np.random.rand(3, 2)
    >>> test_vector = np.random.rand(4)
    >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
    >>> loaded = np.load('/tmp/123.npz')
    >>> print(np.array_equal(test_array, loaded['a']))
    True
    >>> print(np.array_equal(test_vector, loaded['b']))
    True

    """
    _savez(file, args, kwds, True)


def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
    # Import is postponed to here since zipfile depends on gzip, an optional
    # component of the so-called standard library.
    import zipfile

    if not hasattr(file, 'write'):
        file = os_fspath(file)
        if not file.endswith('.npz'):
            file = file + '.npz'

    namedict = kwds
    for i, val in enumerate(args):
        key = 'arr_%d' % i
        if key in namedict.keys():
            raise ValueError(
                "Cannot use un-named variables and keyword %s" % key)
        namedict[key] = val

    if compress:
        compression = zipfile.ZIP_DEFLATED
    else:
        compression = zipfile.ZIP_STORED

    zipf = zipfile_factory(file, mode="w", compression=compression)

    for key, val in namedict.items():
        fname = key + '.npy'
        val = np.asanyarray(val)
        # always force zip64, gh-10776
        with zipf.open(fname, 'w', force_zip64=True) as fid:
            format.write_array(fid, val,
                               allow_pickle=allow_pickle,
                               pickle_kwargs=pickle_kwargs)

    zipf.close()
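
# Hedged sketch of the name handling in ``_savez`` above: positional arrays
# become ``arr_0``, ``arr_1``, ... and a clash with an explicit keyword of
# the same name is rejected (output file name is illustrative):
#
#     >>> np.savez('out.npz', np.arange(3), arr_0=np.arange(3))
#     Traceback (most recent call last):
#         ...
#     ValueError: Cannot use un-named variables and keyword arr_0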

def _ensure_ndmin_ndarray_check_param(ndmin):
    """Just checks if the param ndmin is supported on
    _ensure_ndmin_ndarray. It is intended to be used as
    verification before running anything expensive.
    e.g. loadtxt, genfromtxt
    """
    # Check correctness of the values of `ndmin`
    if ndmin not in [0, 1, 2]:
        raise ValueError(f"Illegal value of ndmin keyword: {ndmin}")


def _ensure_ndmin_ndarray(a, *, ndmin: int):
    """This is a helper function of loadtxt and genfromtxt to ensure
    proper minimum dimension as requested

    ndmin : int. Supported values 0, 1, 2
    ^^ whenever this changes, keep in sync with
       _ensure_ndmin_ndarray_check_param
    """
    # Verify that the array has at least dimensions `ndmin`.
    # Tweak the size and shape of the arrays - remove extraneous dimensions
    if a.ndim > ndmin:
        a = np.squeeze(a)
    # and ensure we have the minimum number of dimensions asked for
    # - has to be in this order for the odd case ndmin=1, a.squeeze().ndim=0
    if a.ndim < ndmin:
        if ndmin == 1:
            a = np.atleast_1d(a)
        elif ndmin == 2:
            a = np.atleast_2d(a).T

    return a
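
# Behaviour sketch for ``_ensure_ndmin_ndarray`` (shapes follow directly
# from the squeeze/atleast_* logic above; note the transpose for ndmin=2):
#
#     >>> _ensure_ndmin_ndarray(np.array(5.0), ndmin=1).shape
#     (1,)
#     >>> _ensure_ndmin_ndarray(np.array([1.0, 2.0]), ndmin=2).shape
#     (2, 1)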

# amount of lines loadtxt reads in one chunk, can be overridden for testing
_loadtxt_chunksize = 50000


def _loadtxt_dispatcher(
        fname, dtype=None, comments=None, delimiter=None,
        converters=None, skiprows=None, usecols=None, unpack=None,
        ndmin=None, encoding=None, max_rows=None, *, like=None):
    return (like,)


def _check_nonneg_int(value, name="argument"):
    try:
        operator.index(value)
    except TypeError:
        raise TypeError(f"{name} must be an integer") from None
    if value < 0:
        raise ValueError(f"{name} must be nonnegative")
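
# Hedged sketch of ``_check_nonneg_int``: it accepts anything usable as an
# index and rejects negatives with the caller-supplied name in the message:
#
#     >>> _check_nonneg_int(3)                     # silent on success
#     >>> _check_nonneg_int(-1, name="skiplines")
#     Traceback (most recent call last):
#         ...
#     ValueError: skiplines must be nonnegative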

def _preprocess_comments(iterable, comments, encoding):
    """
    Generator that consumes an iterable of lines and strips out the
    multiple (or multi-character) comments from lines.
    This is a pre-processing step to achieve feature parity with loadtxt
    (we assume that this is a niche feature).
    """
    for line in iterable:
        if isinstance(line, bytes):
            # Need to handle conversion here, or the splitting would fail
            line = line.decode(encoding)

        for c in comments:
            line = line.split(c, 1)[0]

        yield line
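
# Hedged sketch of ``_preprocess_comments``: each comment marker truncates
# the line at its first occurrence (trailing whitespace is kept):
#
#     >>> list(_preprocess_comments(['1 2 # x', '3 // y'], ['#', '//'], None))
#     ['1 2 ', '3 ']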

# The number of rows we read in one go if confronted with a parametric dtype
_loadtxt_chunksize = 50000


def _read(fname, *, delimiter=',', comment='#', quote='"',
          imaginary_unit='j', usecols=None, skiplines=0,
          max_rows=None, converters=None, ndmin=None, unpack=False,
          dtype=np.float64, encoding="bytes"):
    r"""
    Read a NumPy array from a text file.

    Parameters
    ----------
    fname : str or file object
        The filename or the file to be read.
    delimiter : str, optional
        Field delimiter of the fields in a line of the file.
        Default is a comma, ','.  If None any sequence of whitespace is
        considered a delimiter.
    comment : str or sequence of str or None, optional
        Character that begins a comment.  All text from the comment
        character to the end of the line is ignored.
        Multiple comments or multiple-character comment strings are
        supported, but may be slower and `quote` must be empty if used.
        Use None to disable all use of comments.
    quote : str or None, optional
        Character that is used to quote string fields. Default is '"'
        (a double quote). Use None to disable quote support.
    imaginary_unit : str, optional
        Character that represents the imaginary unit `sqrt(-1)`.
        Default is 'j'.
    usecols : array_like, optional
        A one-dimensional array of integer column numbers.  These are the
        columns from the file to be included in the array.  If this value
        is not given, all the columns are used.
    skiplines : int, optional
        Number of lines to skip before interpreting the data in the file.
    max_rows : int, optional
        Maximum number of rows of data to read.  Default is to read the
        entire file.
    converters : dict or callable, optional
        A function to parse all column strings into the desired value, or
        a dictionary mapping column number to a parser function.
        E.g. if column 0 is a date string: ``converters = {0: datestr2num}``.
        Converters can also be used to provide a default value for missing
        data, e.g. ``converters = lambda s: float(s.strip() or 0)`` will
        convert empty fields to 0.
        Default: None
    ndmin : int, optional
        Minimum dimension of the array returned.
        Allowed values are 0, 1 or 2.  Default is 0.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = read(...)``.  When used with a structured
        data-type, arrays are returned for each field.  Default is False.
    dtype : numpy data type
        A NumPy dtype instance, can be a structured dtype to map to the
        columns of the file.
    encoding : str, optional
        Encoding used to decode the input file.  The special value 'bytes'
        (the default) enables backwards-compatible behavior for `converters`,
        ensuring that inputs to the converter functions are encoded
        bytes objects.  The special value 'bytes' has no additional effect if
        ``converters=None``.  If encoding is ``'bytes'`` or ``None``, the
        default system encoding is used.

    Returns
    -------
    ndarray
        NumPy array.

    Examples
    --------
    First we create a file for the example.

    >>> s1 = '1.0,2.0,3.0\n4.0,5.0,6.0\n'
    >>> with open('example1.csv', 'w') as f:
    ...     f.write(s1)
    >>> a1 = read_from_filename('example1.csv')
    >>> a1
    array([[1., 2., 3.],
           [4., 5., 6.]])

    The second example has columns with different data types, so a
    one-dimensional array with a structured data type is returned.
    The tab character is used as the field delimiter.

    >>> s2 = '1.0\t10\talpha\n2.3\t25\tbeta\n4.5\t16\tgamma\n'
    >>> with open('example2.tsv', 'w') as f:
    ...     f.write(s2)
    >>> a2 = read_from_filename('example2.tsv', delimiter='\t')
    >>> a2
    array([(1. , 10, b'alpha'), (2.3, 25, b'beta'), (4.5, 16, b'gamma')],
          dtype=[('f0', '<f8'), ('f1', 'u1'), ('f2', 'S5')])
    """
    # Handle special 'bytes' keyword for encoding
    byte_converters = False
    if encoding == 'bytes':
        encoding = None
        byte_converters = True

    if dtype is None:
        raise TypeError("a dtype must be provided.")
    dtype = np.dtype(dtype)

    read_dtype_via_object_chunks = None
    if dtype.kind in 'SUM' and (
            dtype == "S0" or dtype == "U0" or dtype == "M8" or dtype == 'm8'):
        # This is a legacy "flexible" dtype.  We do not truly support
        # parametric dtypes currently (no dtype discovery step in the core),
        # but have to support these for backward compatibility.
        read_dtype_via_object_chunks = dtype
        dtype = np.dtype(object)

    if usecols is not None:
        # Allow usecols to be a single int or a sequence of ints, the C-code
        # handles the rest
        try:
            usecols = list(usecols)
        except TypeError:
            usecols = [usecols]

    _ensure_ndmin_ndarray_check_param(ndmin)

    if comment is None:
        comments = None
    else:
        # assume comments are a sequence of strings
        if "" in comment:
            raise ValueError(
                "comments cannot be an empty string. Use comments=None to "
                "disable comments."
            )
        comments = tuple(comment)
        comment = None
        if len(comments) == 0:
            comments = None  # No comments at all
        elif len(comments) == 1:
            # If there is only one comment, and that comment has one
            # character, the normal parsing can deal with it just fine.
            if isinstance(comments[0], str) and len(comments[0]) == 1:
                comment = comments[0]
                comments = None
        else:
            # Input validation if there are multiple comment characters
            if delimiter in comments:
                raise TypeError(
                    f"Comment characters '{comments}' cannot include the "
                    f"delimiter '{delimiter}'"
                )

    # comment is now either a 1 or 0 character string or a tuple:
    if comments is not None:
        # Note: An earlier version supported two-character comments (and
        # could have been extended to multiple characters); we assume this
        # is rare enough to not optimize for.
        if quote is not None:
            raise ValueError(
                "when multiple comments or a multi-character comment is "
                "given, quotes are not supported.  In this case quotechar "
                "must be set to None.")

    if len(imaginary_unit) != 1:
        raise ValueError('len(imaginary_unit) must be 1.')

    _check_nonneg_int(skiplines)
    if max_rows is not None:
        _check_nonneg_int(max_rows)
    else:
        # Passing -1 to the C code means "read the entire file".
        max_rows = -1

    fh_closing_ctx = contextlib.nullcontext()
    filelike = False
    try:
        if isinstance(fname, os.PathLike):
            fname = os.fspath(fname)
        if isinstance(fname, str):
            fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
            if encoding is None:
                encoding = getattr(fh, 'encoding', 'latin1')

            fh_closing_ctx = contextlib.closing(fh)
            data = fh
            filelike = True
        else:
            if encoding is None:
                encoding = getattr(fname, 'encoding', 'latin1')
            data = iter(fname)
    except TypeError as e:
        raise ValueError(
            f"fname must be a string, filehandle, list of strings,\n"
            f"or generator. Got {type(fname)} instead.") from e

    with fh_closing_ctx:
        if comments is not None:
            if filelike:
                data = iter(data)
                filelike = False
            data = _preprocess_comments(data, comments, encoding)

        if read_dtype_via_object_chunks is None:
            arr = _load_from_filelike(
                data, delimiter=delimiter, comment=comment, quote=quote,
                imaginary_unit=imaginary_unit,
                usecols=usecols, skiplines=skiplines, max_rows=max_rows,
                converters=converters, dtype=dtype,
                encoding=encoding, filelike=filelike,
                byte_converters=byte_converters)

        else:
            # This branch reads the file into chunks of object arrays and
            # then casts them to the desired actual dtype.  This ensures
            # correct string-length and datetime-unit discovery (like
            # `arr.astype()`).  Due to chunking, certain error reports are
            # less clear, currently.
            if filelike:
                data = iter(data)  # cannot chunk when reading from file

            c_byte_converters = False
            if read_dtype_via_object_chunks == "S":
                c_byte_converters = True  # Use latin1 rather than ascii

            chunks = []
            while max_rows != 0:
                if max_rows < 0:
                    chunk_size = _loadtxt_chunksize
                else:
                    chunk_size = min(_loadtxt_chunksize, max_rows)

                next_arr = _load_from_filelike(
                    data, delimiter=delimiter, comment=comment, quote=quote,
                    imaginary_unit=imaginary_unit,
                    usecols=usecols, skiplines=skiplines, max_rows=max_rows,
                    converters=converters, dtype=dtype,
                    encoding=encoding, filelike=filelike,
                    byte_converters=byte_converters,
                    c_byte_converters=c_byte_converters)
                # Cast here already.  We hope that this is better even for
                # large files because the storage is more compact.  It could
                # be adapted (in principle the concatenate could cast).
                chunks.append(next_arr.astype(read_dtype_via_object_chunks))

                skiplines = 0  # Only have to skip for first chunk
                if max_rows >= 0:
                    max_rows -= chunk_size
                if len(next_arr) < chunk_size:
                    # There was less data than requested, so we are done.
                    break

            # Need at least one chunk, but if empty, the last one may have
            # the wrong shape.
            if len(chunks) > 1 and len(chunks[-1]) == 0:
                del chunks[-1]
            if len(chunks) == 1:
                arr = chunks[0]
            else:
                arr = np.concatenate(chunks, axis=0)

        # NOTE: ndmin works as advertised for structured dtypes, but normally
        #       these would return a 1D result plus the structured dimension,
        #       so ndmin=2 adds a third dimension even when no squeezing
        #       occurs.  A `squeeze=False` could be a better solution
        #       (pandas uses squeeze).
        arr = _ensure_ndmin_ndarray(arr, ndmin=ndmin)

        if arr.shape:
            if arr.shape[0] == 0:
                warnings.warn(
                    f'loadtxt: input contained no data: "{fname}"',
                    category=UserWarning,
                    stacklevel=3
                )

        if unpack:
            # Unpack structured dtypes if requested:
            dt = arr.dtype
            if dt.names is not None:
                # For structured arrays, return an array for each field.
                return [arr[field] for field in dt.names]
            else:
                return arr.T
        else:
            return arr
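
# Hedged sketch of the parametric-dtype path above: an unsized string dtype
# such as "U0" is parsed via object chunks and cast afterwards, so the final
# itemsize is discovered from the data (here '<U3' from the longest field):
#
#     >>> from io import StringIO
#     >>> np.loadtxt(StringIO("ab cd\nefg h"), dtype="U0")
#     array([['ab', 'cd'],
#            ['efg', 'h']], dtype='<U3')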

@set_array_function_like_doc
@set_module('numpy')
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
            converters=None, skiprows=0, usecols=None, unpack=False,
            ndmin=0, encoding='bytes', max_rows=None, *, quotechar=None,
            like=None):
    r"""
    Load data from a text file.

    Each row in the text file must have the same number of values.

    Parameters
    ----------
    fname : file, str, pathlib.Path, list of str, generator
        File, filename, list, or generator to read.  If the filename
        extension is ``.gz`` or ``.bz2``, the file is first decompressed.
        Note that generators must return bytes or strings. The strings
        in a list or produced by a generator are treated as lines.
    dtype : data-type, optional
        Data-type of the resulting array; default: float.  If this is a
        structured data-type, the resulting array will be 1-dimensional, and
        each row will be interpreted as an element of the array.  In this
        case, the number of columns used must match the number of fields in
        the data-type.
    comments : str or sequence of str or None, optional
        The characters or list of characters used to indicate the start of a
        comment. None implies no comments. For backwards compatibility, byte
        strings will be decoded as 'latin1'. The default is '#'.
    delimiter : str, optional
        The character used to separate the values. For backwards
        compatibility, byte strings will be decoded as 'latin1'. The default
        is whitespace.

        .. versionchanged:: 1.23.0
           Only single character delimiters are supported. Newline characters
           cannot be used as the delimiter.

    converters : dict or callable, optional
        A function to parse all column strings into the desired value, or
        a dictionary mapping column number to a parser function.
        E.g. if column 0 is a date string: ``converters = {0: datestr2num}``.
        Converters can also be used to provide a default value for missing
        data, e.g. ``converters = lambda s: float(s.strip() or 0)`` will
        convert empty fields to 0.
        Default: None.

        .. versionchanged:: 1.23.0
           The ability to pass a single callable to be applied to all columns
           was added.

    skiprows : int, optional
        Skip the first `skiprows` lines, including comments; default: 0.
    usecols : int or sequence, optional
        Which columns to read, with 0 being the first. For example,
        ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
        The default, None, results in all columns being read.

        .. versionchanged:: 1.11.0
            When a single column has to be read it is possible to use
            an integer instead of a tuple.  E.g. ``usecols = 3`` reads the
            fourth column the same way as ``usecols = (3,)`` would.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``.  When used with a
        structured data-type, arrays are returned for each field.
        Default is False.
    ndmin : int, optional
        The returned array will have at least `ndmin` dimensions.
        Otherwise mono-dimensional axes will be squeezed.
        Legal values: 0 (default), 1 or 2.

        .. versionadded:: 1.6.0
    encoding : str, optional
        Encoding used to decode the input file. Does not apply to input
        streams. The special value 'bytes' enables backward compatibility
        workarounds that ensure you receive byte arrays as results if
        possible and passes 'latin1' encoded strings to converters. Override
        this value to receive unicode arrays and pass strings as input to
        converters.  If set to None the system default is used. The default
        value is 'bytes'.

        .. versionadded:: 1.14.0
    max_rows : int, optional
        Read `max_rows` rows of content after `skiprows` lines. The default
        is to read all the rows. Note that empty rows containing no data
        such as empty lines and comment lines are not counted towards
        `max_rows`, while such lines are counted in `skiprows`.

        .. versionadded:: 1.16.0

        .. versionchanged:: 1.23.0
            Lines containing no data, including comment lines (e.g., lines
            starting with '#' or as specified via `comments`) are not counted
            towards `max_rows`.
    quotechar : unicode character or None, optional
        The character used to denote the start and end of a quoted item.
        Occurrences of the delimiter or comment characters are ignored within
        a quoted item. The default value is ``quotechar=None``, which means
        quoting support is disabled.

        If two consecutive instances of `quotechar` are found within a quoted
        field, the first is treated as an escape character. See examples.

        .. versionadded:: 1.23.0
    ${ARRAY_FUNCTION_LIKE}

        .. versionadded:: 1.20.0

    Returns
    -------
    out : ndarray
        Data read from the text file.

    See Also
    --------
    load, fromstring, fromregex
    genfromtxt : Load data with missing values handled as specified.
    scipy.io.loadmat : reads MATLAB data files

    Notes
    -----
    This function aims to be a fast reader for simply formatted files.  The
    `genfromtxt` function provides more sophisticated handling of, e.g.,
    lines with missing values.

    .. versionadded:: 1.10.0

    The strings produced by the Python float.hex method can be used as
    input for floats.

    Examples
    --------
    >>> from io import StringIO  # StringIO behaves like a file object
    >>> c = StringIO("0 1\n2 3")
    >>> np.loadtxt(c)
    array([[0., 1.],
           [2., 3.]])

    >>> d = StringIO("M 21 72\nF 35 58")
    >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
    ...                      'formats': ('S1', 'i4', 'f4')})
    array([(b'M', 21, 72.), (b'F', 35, 58.)],
          dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')])

    >>> c = StringIO("1,0,2\n3,0,4")
    >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
    >>> x
    array([1., 3.])
    >>> y
    array([2., 4.])

    The `converters` argument is used to specify functions to preprocess the
    text prior to parsing. `converters` can be a dictionary that maps
    preprocessing functions to each column:

    >>> s = StringIO("1.618, 2.296\n3.141, 4.669\n")
    >>> conv = {
    ...     0: lambda x: np.floor(float(x)),  # conversion fn for column 0
    ...     1: lambda x: np.ceil(float(x)),   # conversion fn for column 1
    ... }
    >>> np.loadtxt(s, delimiter=",", converters=conv)
    array([[1., 3.],
           [3., 5.]])

    `converters` can be a callable instead of a dictionary, in which case it
    is applied to all columns:

    >>> s = StringIO("0xDE 0xAD\n0xC0 0xDE")
    >>> import functools
    >>> conv = functools.partial(int, base=16)
    >>> np.loadtxt(s, converters=conv)
    array([[222., 173.],
           [192., 222.]])

    This example shows how `converters` can be used to convert a field
    with a trailing minus sign into a negative number.

    >>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94')
    >>> def conv(fld):
    ...     return -float(fld[:-1]) if fld.endswith(b'-') else float(fld)
    ...
    >>> np.loadtxt(s, converters=conv)
    array([[ 10.01, -31.25],
           [ 19.22,  64.31],
           [-17.57,  63.94]])

    Using a callable as the converter can be particularly useful for handling
    values with different formatting, e.g. floats with underscores:

    >>> s = StringIO("1 2.7 100_000")
    >>> np.loadtxt(s, converters=float)
    array([1.e+00, 2.7e+00, 1.e+05])

    This idea can be extended to automatically handle values specified in
    many different formats:

    >>> def conv(val):
    ...     try:
    ...         return float(val)
    ...     except ValueError:
    ...         return float.fromhex(val)
    >>> s = StringIO("1, 2.5, 3_000, 0b4, 0x1.4000000000000p+2")
    >>> np.loadtxt(s, delimiter=",", converters=conv, encoding=None)
    array([1.0e+00, 2.5e+00, 3.0e+03, 1.8e+02, 5.0e+00])

    Note that with the default ``encoding="bytes"``, the inputs to the
    converter function are latin-1 encoded byte strings. To deactivate the
    implicit encoding prior to conversion, use ``encoding=None``

    >>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94')
    >>> conv = lambda x: -float(x[:-1]) if x.endswith('-') else float(x)
    >>> np.loadtxt(s, converters=conv, encoding=None)
    array([[ 10.01, -31.25],
           [ 19.22,  64.31],
           [-17.57,  63.94]])

    Support for quoted fields is enabled with the `quotechar` parameter.
    Comment and delimiter characters are ignored when they appear within a
    quoted item delineated by `quotechar`:

    >>> s = StringIO('"alpha, #42", 10.0\n"beta, #64", 2.0\n')
    >>> dtype = np.dtype([("label", "U12"), ("value", float)])
    >>> np.loadtxt(s, dtype=dtype, delimiter=",", quotechar='"')
    array([('alpha, #42', 10.), ('beta, #64', 2.)],
          dtype=[('label', '<U12'), ('value', '<f8')])

    Two consecutive quote characters within a quoted field are treated as a
    single escaped character:

    >>> s = StringIO('"Hello, my name is ""Monty""!"')
    >>> np.loadtxt(s, dtype="U", delimiter=",", quotechar='"')
    array('Hello, my name is "Monty"!', dtype='<U26')

    """

    if like is not None:
        return _loadtxt_with_like(
            fname, dtype=dtype, comments=comments, delimiter=delimiter,
            converters=converters, skiprows=skiprows, usecols=usecols,
            unpack=unpack, ndmin=ndmin, encoding=encoding,
            max_rows=max_rows, like=like
        )

    if isinstance(delimiter, bytes):
        delimiter = delimiter.decode("latin1")

    if dtype is None:
        dtype = np.float64

    comment = comments
    # Control character type conversions for Py3 convenience
    if comment is not None:
        if isinstance(comment, (str, bytes)):
            comment = [comment]
        comment = [
            x.decode('latin1') if isinstance(x, bytes) else x for x in comment]
    if isinstance(delimiter, bytes):
        delimiter = delimiter.decode('latin1')

    arr = _read(fname, dtype=dtype, comment=comment, delimiter=delimiter,
                converters=converters, skiplines=skiprows, usecols=usecols,
                unpack=unpack, ndmin=ndmin, encoding=encoding,
                max_rows=max_rows, quote=quotechar)

    return arr


_loadtxt_with_like = array_function_dispatch(
    _loadtxt_dispatcher
)(loadtxt)
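
# Hedged sketch of the ``like=`` wiring above: ``_loadtxt_with_like`` wraps
# loadtxt in the array-function dispatcher, so a ``like=`` argument whose
# type defines ``__array_function__`` intercepts the call (the subclass
# below is purely illustrative):
#
#     >>> class MyArray(np.ndarray):
#     ...     def __array_function__(self, func, types, args, kwargs):
#     ...         print("dispatched:", func.__name__)
#     >>> np.loadtxt(["1 2"], like=MyArray(0))
#     dispatched: loadtxt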

1351def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None, 

1352 header=None, footer=None, comments=None, 

1353 encoding=None): 

1354 return (X,) 

1355 

1356 

1357@array_function_dispatch(_savetxt_dispatcher) 

1358def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='', 

1359 footer='', comments='# ', encoding=None): 

1360 """ 

1361 Save an array to a text file. 

1362 

1363 Parameters 

1364 ---------- 

1365 fname : filename or file handle 

1366 If the filename ends in ``.gz``, the file is automatically saved in 

1367 compressed gzip format. `loadtxt` understands gzipped files 

1368 transparently. 

1369 X : 1D or 2D array_like 

1370 Data to be saved to a text file. 

1371 fmt : str or sequence of strs, optional 

1372 A single format (%10.5f), a sequence of formats, or a 

1373 multi-format string, e.g. 'Iteration %d -- %10.5f', in which 

1374 case `delimiter` is ignored. For complex `X`, the legal options 

1375 for `fmt` are: 

1376 

1377 * a single specifier, `fmt='%.4e'`, resulting in numbers formatted 

1378 like `' (%s+%sj)' % (fmt, fmt)` 

1379 * a full string specifying every real and imaginary part, e.g. 

1380 `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns 

1381 * a list of specifiers, one per column - in this case, the real 

1382 and imaginary part must have separate specifiers, 

1383 e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns 

1384 delimiter : str, optional 

1385 String or character separating columns. 

1386 newline : str, optional 

1387 String or character separating lines. 

1388 

1389 .. versionadded:: 1.5.0 

1390 header : str, optional 

1391 String that will be written at the beginning of the file. 

1392 

1393 .. versionadded:: 1.7.0 

1394 footer : str, optional 

1395 String that will be written at the end of the file. 

1396 

1397 .. versionadded:: 1.7.0 

1398 comments : str, optional 

1399 String that will be prepended to the ``header`` and ``footer`` strings, 

1400 to mark them as comments. Default: '# ', as expected by e.g. 

1401 ``numpy.loadtxt``. 

1402 

1403 .. versionadded:: 1.7.0 

1404 encoding : {None, str}, optional 

1405 Encoding used to encode the outputfile. Does not apply to output 

1406 streams. If the encoding is something other than 'bytes' or 'latin1' 

1407 you will not be able to load the file in NumPy versions < 1.14. Default 

1408 is 'latin1'. 

1409 

1410 .. versionadded:: 1.14.0 

1411 

1412 

1413 See Also 

1414 -------- 

1415 save : Save an array to a binary file in NumPy ``.npy`` format 

1416 savez : Save several arrays into an uncompressed ``.npz`` archive 

1417 savez_compressed : Save several arrays into a compressed ``.npz`` archive 

1418 

1419 Notes 

1420 ----- 

1421 Further explanation of the `fmt` parameter 

1422 (``%[flag]width[.precision]specifier``): 

1423 

1424 flags: 

1425 ``-`` : left justify 

1426 

1427 ``+`` : Forces to precede result with + or -. 

1428 

1429 ``0`` : Left pad the number with zeros instead of space (see width). 

1430 

1431 width: 

1432 Minimum number of characters to be printed. The value is not truncated 

1433 if it has more characters. 

1434 

1435 precision: 

1436 - For integer specifiers (eg. ``d,i,o,x``), the minimum number of 

1437 digits. 

1438 - For ``e, E`` and ``f`` specifiers, the number of digits to print 

1439 after the decimal point. 

1440 - For ``g`` and ``G``, the maximum number of significant digits. 

1441 - For ``s``, the maximum number of characters. 

1442 

1443 specifiers: 

1444 ``c`` : character 

1445 

1446 ``d`` or ``i`` : signed decimal integer 

1447 

1448 ``e`` or ``E`` : scientific notation with ``e`` or ``E``. 

1449 

1450 ``f`` : decimal floating point 

1451 

1452 ``g,G`` : use the shorter of ``e,E`` or ``f`` 

1453 

1454 ``o`` : signed octal 

1455 

1456 ``s`` : string of characters 

1457 

1458 ``u`` : unsigned decimal integer 

1459 

1460 ``x,X`` : unsigned hexadecimal integer 

1461 

1462 This explanation of ``fmt`` is not complete, for an exhaustive 

1463 specification see [1]_. 

1464 

1465 References 

1466 ---------- 

1467 .. [1] `Format Specification Mini-Language 

1468 <https://docs.python.org/library/string.html#format-specification-mini-language>`_, 

1469 Python Documentation. 

1470 

1471 Examples 

1472 -------- 

1473 >>> x = y = z = np.arange(0.0,5.0,1.0) 

1474 >>> np.savetxt('test.out', x, delimiter=',') # X is an array 

1475 >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays 

1476 >>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation 

1477 

1478 """ 

1479 

1480 # Py3 conversions first 

1481 if isinstance(fmt, bytes): 

1482 fmt = asstr(fmt) 

1483 delimiter = asstr(delimiter) 

1484 

1485 class WriteWrap: 

1486 """Convert to bytes on bytestream inputs. 

1487 

1488 """ 

1489 def __init__(self, fh, encoding): 

1490 self.fh = fh 

1491 self.encoding = encoding 

1492 self.do_write = self.first_write 

1493 

1494 def close(self): 

1495 self.fh.close() 

1496 

1497 def write(self, v): 

1498 self.do_write(v) 

1499 

1500 def write_bytes(self, v): 

1501 if isinstance(v, bytes): 

1502 self.fh.write(v) 

1503 else: 

1504 self.fh.write(v.encode(self.encoding)) 

1505 

1506 def write_normal(self, v): 

1507 self.fh.write(asunicode(v)) 

1508 

1509 def first_write(self, v): 

1510 try: 

1511 self.write_normal(v) 

1512 self.write = self.write_normal 

1513 except TypeError: 

1514 # input is probably a bytestream 

1515 self.write_bytes(v) 

1516 self.write = self.write_bytes 

1517 

1518 own_fh = False 

1519 if isinstance(fname, os_PathLike): 

1520 fname = os_fspath(fname) 

1521 if _is_string_like(fname): 

1522 # datasource doesn't support creating a new file ... 

1523 open(fname, 'wt').close() 

1524 fh = np.lib._datasource.open(fname, 'wt', encoding=encoding) 

1525 own_fh = True 

1526 elif hasattr(fname, 'write'): 

1527 # wrap to handle byte output streams 

1528 fh = WriteWrap(fname, encoding or 'latin1') 

1529 else: 

1530 raise ValueError('fname must be a string or file handle') 

1531 

1532 try: 

1533 X = np.asarray(X) 

1534 

1535 # Handle 1-dimensional arrays 

1536 if X.ndim == 0 or X.ndim > 2: 

1537 raise ValueError( 

1538 "Expected 1D or 2D array, got %dD array instead" % X.ndim) 

1539 elif X.ndim == 1: 

1540 # Common case -- 1d array of numbers 

1541 if X.dtype.names is None: 

1542 X = np.atleast_2d(X).T 

1543 ncol = 1 

1544 

1545 # Complex dtype -- each field indicates a separate column 

1546 else: 

1547 ncol = len(X.dtype.names) 

1548 else: 

1549 ncol = X.shape[1] 

1550 

1551 iscomplex_X = np.iscomplexobj(X) 

1552 # `fmt` can be a string with multiple insertion points or a 

1553 # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d') 

1554 if type(fmt) in (list, tuple): 

1555 if len(fmt) != ncol: 

1556 raise AttributeError('fmt has wrong shape. %s' % str(fmt)) 

1557 format = asstr(delimiter).join(map(asstr, fmt)) 

1558 elif isinstance(fmt, str): 
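# A single '%' spec is broadcast to every column; complex input 

# needs one spec each for the real and imaginary parts. 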

1559 n_fmt_chars = fmt.count('%') 

1560 error = ValueError('fmt has wrong number of %% formats: %s' % fmt) 

1561 if n_fmt_chars == 1: 

1562 if iscomplex_X: 

1563 fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol 

1564 else: 

1565 fmt = [fmt, ] * ncol 

1566 format = delimiter.join(fmt) 

1567 elif iscomplex_X and n_fmt_chars != (2 * ncol): 

1568 raise error 

1569 elif ((not iscomplex_X) and n_fmt_chars != ncol): 

1570 raise error 

1571 else: 

1572 format = fmt 

1573 else: 

1574 raise ValueError('invalid fmt: %r' % (fmt,)) 

1575 

1576 if len(header) > 0: 

1577 header = header.replace('\n', '\n' + comments) 

1578 fh.write(comments + header + newline) 

1579 if iscomplex_X: 

1580 for row in X: 

1581 row2 = [] 

1582 for number in row: 

1583 row2.append(number.real) 

1584 row2.append(number.imag) 

1585 s = format % tuple(row2) + newline 
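# A negative imaginary part renders as '+-'; collapse it to '-'. 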

1586 fh.write(s.replace('+-', '-')) 

1587 else: 

1588 for row in X: 

1589 try: 

1590 v = format % tuple(row) + newline 

1591 except TypeError as e: 

1592 raise TypeError("Mismatch between array dtype ('%s') and " 

1593 "format specifier ('%s')" 

1594 % (str(X.dtype), format)) from e 

1595 fh.write(v) 

1596 

1597 if len(footer) > 0: 

1598 footer = footer.replace('\n', '\n' + comments) 

1599 fh.write(comments + footer + newline) 

1600 finally: 

1601 if own_fh: 

1602 fh.close() 

1603 

1604 

1605@set_module('numpy') 

1606def fromregex(file, regexp, dtype, encoding=None): 

1607 r""" 

1608 Construct an array from a text file, using regular expression parsing. 

1609 

1610 The returned array is always a structured array, and is constructed from 

1611 all matches of the regular expression in the file. Groups in the regular 

1612 expression are converted to fields of the structured array. 

1613 

1614 Parameters 

1615 ---------- 

1616 file : path or file 

1617 Filename or file object to read. 

1618 

1619 .. versionchanged:: 1.22.0 

1620 Now accepts `os.PathLike` implementations. 

1621 regexp : str or regexp 

1622 Regular expression used to parse the file. 

1623 Groups in the regular expression correspond to fields in the dtype. 

1624 dtype : dtype or list of dtypes 

1625 Dtype for the structured array; must be a structured datatype. 

1626 encoding : str, optional 

1627 Encoding used to decode the input file. Does not apply to input streams. 

1628 

1629 .. versionadded:: 1.14.0 

1630 

1631 Returns 

1632 ------- 

1633 output : ndarray 

1634 The output array, containing the part of the content of `file` that 

1635 was matched by `regexp`. `output` is always a structured array. 

1636 

1637 Raises 

1638 ------ 

1639 TypeError 

1640 When `dtype` is not a valid dtype for a structured array. 

1641 

1642 See Also 

1643 -------- 

1644 fromstring, loadtxt 

1645 

1646 Notes 

1647 ----- 

1648 Dtypes for structured arrays can be specified in several forms, but all 

1649 forms specify at least the data type and field name. For details see 

1650 `basics.rec`. 

1651 

1652 Examples 

1653 -------- 

1654 >>> from io import StringIO 

1655 >>> text = StringIO("1312 foo\n1534 bar\n444 qux") 

1656 

1657 >>> regexp = r"(\d+)\s+(...)" # match [digits, whitespace, anything] 

1658 >>> output = np.fromregex(text, regexp, 

1659 ... [('num', np.int64), ('key', 'S3')]) 

1660 >>> output 

1661 array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')], 

1662 dtype=[('num', '<i8'), ('key', 'S3')]) 

1663 >>> output['num'] 

1664 array([1312, 1534,  444]) 
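A single capture group yields a one-field structured array; a minimal 

sketch (exact repr spacing may vary across NumPy versions): 

>>> _ = text.seek(0) 

>>> np.fromregex(text, r"(\d+)\s+", [('num', np.int64)]) 

array([(1312,), (1534,), ( 444,)], dtype=[('num', '<i8')]) 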

1665 

1666 """ 

1667 own_fh = False 

1668 if not hasattr(file, "read"): 

1669 file = os.fspath(file) 

1670 file = np.lib._datasource.open(file, 'rt', encoding=encoding) 

1671 own_fh = True 

1672 

1673 try: 

1674 if not isinstance(dtype, np.dtype): 

1675 dtype = np.dtype(dtype) 

1676 if dtype.names is None: 

1677 raise TypeError('dtype must be a structured datatype.') 

1678 

1679 content = file.read() 

1680 if isinstance(content, bytes) and isinstance(regexp, str): 

1681 regexp = asbytes(regexp) 

1682 elif isinstance(content, str) and isinstance(regexp, bytes): 

1683 regexp = asstr(regexp) 

1684 

1685 if not hasattr(regexp, 'match'): 

1686 regexp = re.compile(regexp) 

1687 seq = regexp.findall(content) 

1688 if seq and not isinstance(seq[0], tuple): 

1689 # Only one group is in the regexp. 

1690 # Create the new array as a single data-type and then 

1691 # re-interpret as a single-field structured array. 

1692 newdtype = np.dtype(dtype[dtype.names[0]]) 

1693 output = np.array(seq, dtype=newdtype) 

1694 output.dtype = dtype 

1695 else: 

1696 output = np.array(seq, dtype=dtype) 

1697 

1698 return output 

1699 finally: 

1700 if own_fh: 

1701 file.close() 

1702 

1703 

1704#####-------------------------------------------------------------------------- 

1705#---- --- ASCII functions --- 

1706#####-------------------------------------------------------------------------- 

1707 

1708 

1709def _genfromtxt_dispatcher(fname, dtype=None, comments=None, delimiter=None, 

1710 skip_header=None, skip_footer=None, converters=None, 

1711 missing_values=None, filling_values=None, usecols=None, 

1712 names=None, excludelist=None, deletechars=None, 

1713 replace_space=None, autostrip=None, case_sensitive=None, 

1714 defaultfmt=None, unpack=None, usemask=None, loose=None, 

1715 invalid_raise=None, max_rows=None, encoding=None, 

1716 *, ndmin=None, like=None): 

1717 return (like,) 

1718 

1719 

1720@set_array_function_like_doc 

1721@set_module('numpy') 

1722def genfromtxt(fname, dtype=float, comments='#', delimiter=None, 

1723 skip_header=0, skip_footer=0, converters=None, 

1724 missing_values=None, filling_values=None, usecols=None, 

1725 names=None, excludelist=None, 

1726 deletechars=''.join(sorted(NameValidator.defaultdeletechars)), 

1727 replace_space='_', autostrip=False, case_sensitive=True, 

1728 defaultfmt="f%i", unpack=None, usemask=False, loose=True, 

1729 invalid_raise=True, max_rows=None, encoding='bytes', 

1730 *, ndmin=0, like=None): 

1731 """ 

1732 Load data from a text file, with missing values handled as specified. 

1733 

1734 Each line past the first `skip_header` lines is split at the `delimiter` 

1735 character, and characters following the `comments` character are discarded. 

1736 

1737 Parameters 

1738 ---------- 

1739 fname : file, str, pathlib.Path, list of str, generator 

1740 File, filename, list, or generator to read. If the filename 

1741 extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note 

1742 that generators must return bytes or strings. The strings 

1743 in a list or produced by a generator are treated as lines. 

1744 dtype : dtype, optional 

1745 Data type of the resulting array. 

1746 If None, the dtypes will be determined by the contents of each 

1747 column, individually. 

1748 comments : str, optional 

1749 The character used to indicate the start of a comment. 

1750 All the characters occurring on a line after a comment are discarded. 

1751 delimiter : str, int, or sequence, optional 

1752 The string used to separate values. By default, any consecutive 

1753 whitespace acts as the delimiter. An integer or sequence of integers 

1754 can also be provided as width(s) of each field. 

1755 skiprows : int, optional 

1756 `skiprows` was removed in numpy 1.10. Please use `skip_header` instead. 

1757 skip_header : int, optional 

1758 The number of lines to skip at the beginning of the file. 

1759 skip_footer : int, optional 

1760 The number of lines to skip at the end of the file. 

1761 converters : variable, optional 

1762 The set of functions that convert the data of a column to a value. 

1763 The converters can also be used to provide a default value 

1764 for missing data: ``converters = {3: lambda s: float(s or 0)}``. 

1765 missing : variable, optional 

1766 `missing` was removed in numpy 1.10. Please use `missing_values` 

1767 instead. 

1768 missing_values : variable, optional 

1769 The set of strings corresponding to missing data. 

1770 filling_values : variable, optional 

1771 The set of values to be used as default when the data are missing. 

1772 usecols : sequence, optional 

1773 Which columns to read, with 0 being the first. For example, 

1774 ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns. 

1775 names : {None, True, str, sequence}, optional 

1776 If `names` is True, the field names are read from the first line after 

1777 the first `skip_header` lines. This line can optionally be preceded 

1778 by a comment delimiter. If `names` is a sequence or a single string of 

1779 comma-separated names, the names will be used to define the field names 

1780 in a structured dtype. If `names` is None, the names of the dtype 

1781 fields will be used, if any. 

1782 excludelist : sequence, optional 

1783 A list of names to exclude. This list is appended to the default list 

1784 ['return','file','print']. Excluded names are appended with an 

1785 underscore: for example, `file` would become `file_`. 

1786 deletechars : str, optional 

1787 A string combining invalid characters that must be deleted from the 

1788 names. 

1789 defaultfmt : str, optional 

1790 A format used to define default field names, such as "f%i" or "f_%02i". 

1791 autostrip : bool, optional 

1792 Whether to automatically strip white space from the variables. 

1793 replace_space : char, optional 

1794 Character(s) used to replace white space in the variable 

1795 names. By default, '_' is used. 

1796 case_sensitive : {True, False, 'upper', 'lower'}, optional 

1797 If True, field names are case sensitive. 

1798 If False or 'upper', field names are converted to upper case. 

1799 If 'lower', field names are converted to lower case. 

1800 unpack : bool, optional 

1801 If True, the returned array is transposed, so that arguments may be 

1802 unpacked using ``x, y, z = genfromtxt(...)``. When used with a 

1803 structured data-type, arrays are returned for each field. 

1804 Default is False. 

1805 usemask : bool, optional 

1806 If True, return a masked array. 

1807 If False, return a regular array. 

1808 loose : bool, optional 

1809 If True, do not raise errors for invalid values. 

1810 invalid_raise : bool, optional 

1811 If True, an exception is raised if an inconsistency is detected in the 

1812 number of columns. 

1813 If False, a warning is emitted and the offending lines are skipped. 

1814 max_rows : int, optional 

1815 The maximum number of rows to read. Must not be used with skip_footer 

1816 at the same time. If given, the value must be at least 1. Default is 

1817 to read the entire file. 

1818 

1819 .. versionadded:: 1.10.0 

1820 encoding : str, optional 

1821 Encoding used to decode the input file. Does not apply when `fname` is 

1822 a file object. The special value 'bytes' enables backward-compatibility 

1823 workarounds that ensure you receive byte arrays when possible 

1824 and that latin1-encoded strings are passed to converters. Override this 

1825 value to receive unicode arrays and pass strings as input to converters. 

1826 If set to None, the system default is used. The default value is 'bytes'. 

1827 

1828 .. versionadded:: 1.14.0 

1829 ndmin : int, optional 

1830 Same parameter as `loadtxt` 

1831 

1832 .. versionadded:: 1.23.0 

1833 ${ARRAY_FUNCTION_LIKE} 

1834 

1835 .. versionadded:: 1.20.0 

1836 

1837 Returns 

1838 ------- 

1839 out : ndarray 

1840 Data read from the text file. If `usemask` is True, this is a 

1841 masked array. 

1842 

1843 See Also 

1844 -------- 

1845 numpy.loadtxt : equivalent function when no data is missing. 

1846 

1847 Notes 

1848 ----- 

1849 * When spaces are used as delimiters, or when no delimiter has been given 

1850 as input, there should not be any missing data between two fields. 

1851 * When the variables are named (either by a flexible dtype or with `names`), 

1852 there must not be any header in the file (else a ValueError 

1853 exception is raised). 

1854 * Individual values are not stripped of spaces by default. 

1855 When using a custom converter, make sure the function removes spaces. 

1856 

1857 References 

1858 ---------- 

1859 .. [1] NumPy User Guide, section `I/O with NumPy 

1860 <https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_. 

1861 

1862 Examples 

1863 -------- 

1864 >>> from io import StringIO 

1865 >>> import numpy as np 

1866 

1867 Comma-delimited file with mixed dtype 

1868 

1869 >>> s = StringIO(u"1,1.3,abcde") 

1870 >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'), 

1871 ... ('mystring','S5')], delimiter=",") 

1872 >>> data 

1873 array((1, 1.3, b'abcde'), 

1874 dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')]) 

1875 

1876 Using dtype = None 

1877 

1878 >>> _ = s.seek(0) # needed for StringIO example only 

1879 >>> data = np.genfromtxt(s, dtype=None, 

1880 ... names = ['myint','myfloat','mystring'], delimiter=",") 

1881 >>> data 

1882 array((1, 1.3, b'abcde'), 

1883 dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')]) 

1884 

1885 Specifying dtype and names 

1886 

1887 >>> _ = s.seek(0) 

1888 >>> data = np.genfromtxt(s, dtype="i8,f8,S5", 

1889 ... names=['myint','myfloat','mystring'], delimiter=",") 

1890 >>> data 

1891 array((1, 1.3, b'abcde'), 

1892 dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')]) 

1893 

1894 An example with fixed-width columns 

1895 

1896 >>> s = StringIO(u"11.3abcde") 

1897 >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'], 

1898 ... delimiter=[1,3,5]) 

1899 >>> data 

1900 array((1, 1.3, b'abcde'), 

1901 dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', 'S5')]) 

1902 

1903 An example to show comments 

1904 

1905 >>> f = StringIO(''' 

1906 ... text,# of chars 

1907 ... hello world,11 

1908 ... numpy,5''') 

1909 >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',') 

1910 array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')], 

1911 dtype=[('f0', 'S12'), ('f1', 'S12')]) 
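An example with missing values; a minimal sketch assuming the default 

float dtype (the empty field is replaced by the filling value) 

>>> s = StringIO(u''' 

... 1,,3 

... 4,5,6''') 

>>> np.genfromtxt(s, delimiter=",", filling_values=-1) 

array([[ 1., -1.,  3.], 

       [ 4.,  5.,  6.]]) 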

1912 

1913 """ 

1914 

1915 if like is not None: 

1916 return _genfromtxt_with_like( 

1917 fname, dtype=dtype, comments=comments, delimiter=delimiter, 

1918 skip_header=skip_header, skip_footer=skip_footer, 

1919 converters=converters, missing_values=missing_values, 

1920 filling_values=filling_values, usecols=usecols, names=names, 

1921 excludelist=excludelist, deletechars=deletechars, 

1922 replace_space=replace_space, autostrip=autostrip, 

1923 case_sensitive=case_sensitive, defaultfmt=defaultfmt, 

1924 unpack=unpack, usemask=usemask, loose=loose, 

1925 invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding, 

1926 ndmin=ndmin, 

1927 like=like 

1928 ) 

1929 

1930 _ensure_ndmin_ndarray_check_param(ndmin) 

1931 

1932 if max_rows is not None: 

1933 if skip_footer: 

1934 raise ValueError( 

1935 "The keywords 'skip_footer' and 'max_rows' can not be " 

1936 "specified at the same time.") 

1937 if max_rows < 1: 

1938 raise ValueError("'max_rows' must be at least 1.") 

1939 

1940 if usemask: 

1941 from numpy.ma import MaskedArray, make_mask_descr 

1942 # Check the input dictionary of converters 

1943 user_converters = converters or {} 

1944 if not isinstance(user_converters, dict): 

1945 raise TypeError( 

1946 "The input argument 'converter' should be a valid dictionary " 

1947 "(got '%s' instead)" % type(user_converters)) 

1948 
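# encoding='bytes' keeps the legacy behaviour: converters receive 

# latin1-encoded byte strings and string columns come back as bytes 

# where possible. 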

1949 if encoding == 'bytes': 

1950 encoding = None 

1951 byte_converters = True 

1952 else: 

1953 byte_converters = False 

1954 

1955 # Initialize the filehandle, the LineSplitter and the NameValidator 

1956 if isinstance(fname, os_PathLike): 

1957 fname = os_fspath(fname) 

1958 if isinstance(fname, str): 

1959 fid = np.lib._datasource.open(fname, 'rt', encoding=encoding) 

1960 fid_ctx = contextlib.closing(fid) 

1961 else: 

1962 fid = fname 

1963 fid_ctx = contextlib.nullcontext(fid) 

1964 try: 

1965 fhd = iter(fid) 

1966 except TypeError as e: 

1967 raise TypeError( 

1968 "fname must be a string, a filehandle, a sequence of strings,\n" 

1969 f"or an iterator of strings. Got {type(fname)} instead." 

1970 ) from e 

1971 with fid_ctx: 

1972 split_line = LineSplitter(delimiter=delimiter, comments=comments, 

1973 autostrip=autostrip, encoding=encoding) 

1974 validate_names = NameValidator(excludelist=excludelist, 

1975 deletechars=deletechars, 

1976 case_sensitive=case_sensitive, 

1977 replace_space=replace_space) 

1978 

1979 # Skip the first `skip_header` rows 

1980 try: 

1981 for i in range(skip_header): 

1982 next(fhd) 

1983 

1984 # Keep on until we find the first valid values 

1985 first_values = None 

1986 

1987 while not first_values: 

1988 first_line = _decode_line(next(fhd), encoding) 

1989 if (names is True) and (comments is not None): 

1990 if comments in first_line: 

1991 first_line = ( 

1992 ''.join(first_line.split(comments)[1:])) 

1993 first_values = split_line(first_line) 

1994 except StopIteration: 

1995 # return an empty array if the datafile is empty 

1996 first_line = '' 

1997 first_values = [] 

1998 warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2) 

1999 

2000 # Should we take the first values as names? 

2001 if names is True: 

2002 fval = first_values[0].strip() 

2003 if comments is not None: 

2004 if fval in comments: 

2005 del first_values[0] 

2006 

2007 # Check the columns to use: make sure `usecols` is a list 

2008 if usecols is not None: 

2009 try: 

2010 usecols = [_.strip() for _ in usecols.split(",")] 

2011 except AttributeError: 

2012 try: 

2013 usecols = list(usecols) 

2014 except TypeError: 

2015 usecols = [usecols, ] 

2016 nbcols = len(usecols or first_values) 

2017 

2018 # Check the names and overwrite the dtype.names if needed 

2019 if names is True: 

2020 names = validate_names([str(_.strip()) for _ in first_values]) 

2021 first_line = '' 

2022 elif _is_string_like(names): 

2023 names = validate_names([_.strip() for _ in names.split(',')]) 

2024 elif names: 

2025 names = validate_names(names) 

2026 # Get the dtype 

2027 if dtype is not None: 

2028 dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names, 

2029 excludelist=excludelist, 

2030 deletechars=deletechars, 

2031 case_sensitive=case_sensitive, 

2032 replace_space=replace_space) 

2033 # Make sure names is a list (for Python 2.5 compatibility) 

2034 if names is not None: 

2035 names = list(names) 

2036 

2037 if usecols: 

2038 for (i, current) in enumerate(usecols): 

2039 # if usecols is a list of names, convert to a list of indices 

2040 if _is_string_like(current): 

2041 usecols[i] = names.index(current) 

2042 elif current < 0: 

2043 usecols[i] = current + len(first_values) 

2044 # If the dtype is not None, make sure we update it 

2045 if (dtype is not None) and (len(dtype) > nbcols): 

2046 descr = dtype.descr 

2047 dtype = np.dtype([descr[_] for _ in usecols]) 

2048 names = list(dtype.names) 

2049 # If `names` is not None, update the names 

2050 elif (names is not None) and (len(names) > nbcols): 

2051 names = [names[_] for _ in usecols] 

2052 elif (names is not None) and (dtype is not None): 

2053 names = list(dtype.names) 

2054 

2055 # Process the missing values ............................... 

2056 # Rename missing_values for convenience 

2057 user_missing_values = missing_values or () 

2058 if isinstance(user_missing_values, bytes): 

2059 user_missing_values = user_missing_values.decode('latin1') 

2060 

2061 # Define the list of missing_values (one column: one list) 

2062 missing_values = [list(['']) for _ in range(nbcols)] 

2063 

2064 # We have a dictionary: process it field by field 

2065 if isinstance(user_missing_values, dict): 

2066 # Loop on the items 

2067 for (key, val) in user_missing_values.items(): 

2068 # Is the key a string? 

2069 if _is_string_like(key): 

2070 try: 

2071 # Transform it into an integer 

2072 key = names.index(key) 

2073 except ValueError: 

2074 # We couldn't find it: the name must have been dropped 

2075 continue 

2076 # Redefine the key as needed if it's a column number 

2077 if usecols: 

2078 try: 

2079 key = usecols.index(key) 

2080 except ValueError: 

2081 pass 

2082 # Transform the value into a list of strings 

2083 if isinstance(val, (list, tuple)): 

2084 val = [str(_) for _ in val] 

2085 else: 

2086 val = [str(val), ] 

2087 # Add the value(s) to the current list of missing 

2088 if key is None: 

2089 # None acts as default 

2090 for miss in missing_values: 

2091 miss.extend(val) 

2092 else: 

2093 missing_values[key].extend(val) 

2094 # We have a sequence: each item matches a column 

2095 elif isinstance(user_missing_values, (list, tuple)): 

2096 for (value, entry) in zip(user_missing_values, missing_values): 

2097 value = str(value) 

2098 if value not in entry: 

2099 entry.append(value) 

2100 # We have a string: apply it to all entries 

2101 elif isinstance(user_missing_values, str): 

2102 user_value = user_missing_values.split(",") 

2103 for entry in missing_values: 

2104 entry.extend(user_value) 

2105 # We have something else: apply it to all entries 

2106 else: 

2107 for entry in missing_values: 

2108 entry.extend([str(user_missing_values)]) 

2109 

2110 # Process the filling_values ............................... 

2111 # Rename the input for convenience 

2112 user_filling_values = filling_values 

2113 if user_filling_values is None: 

2114 user_filling_values = [] 

2115 # Define the default 

2116 filling_values = [None] * nbcols 

2117 # We have a dictionary: update each entry individually 

2118 if isinstance(user_filling_values, dict): 

2119 for (key, val) in user_filling_values.items(): 

2120 if _is_string_like(key): 

2121 try: 

2122 # Transform it into an integer 

2123 key = names.index(key) 

2124 except ValueError: 

2125 # We couldn't find it: the name must have been dropped. 

2126 continue 

2127 # Redefine the key if it's a column number and usecols is defined 

2128 if usecols: 

2129 try: 

2130 key = usecols.index(key) 

2131 except ValueError: 

2132 pass 

2133 # Add the value to the list 

2134 filling_values[key] = val 

2135 # We have a sequence: update on a one-to-one basis 

2136 elif isinstance(user_filling_values, (list, tuple)): 

2137 n = len(user_filling_values) 

2138 if (n <= nbcols): 

2139 filling_values[:n] = user_filling_values 

2140 else: 

2141 filling_values = user_filling_values[:nbcols] 

2142 # We have something else: use it for all entries 

2143 else: 

2144 filling_values = [user_filling_values] * nbcols 

2145 

2146 # Initialize the converters ................................ 

2147 if dtype is None: 

2148 # Note: we can't use [...]*nbcols, as that would repeat the same 

2149 # converter nbcols times, instead of nbcols distinct converters. 

2150 converters = [StringConverter(None, missing_values=miss, default=fill) 

2151 for (miss, fill) in zip(missing_values, filling_values)] 

2152 else: 

2153 dtype_flat = flatten_dtype(dtype, flatten_base=True) 

2154 # Initialize the converters 

2155 if len(dtype_flat) > 1: 

2156 # Flexible type : get a converter from each dtype 

2157 zipit = zip(dtype_flat, missing_values, filling_values) 

2158 converters = [StringConverter(dt, locked=True, 

2159 missing_values=miss, default=fill) 

2160 for (dt, miss, fill) in zipit] 

2161 else: 

2162 # Set to a default converter (but w/ different missing values) 

2163 zipit = zip(missing_values, filling_values) 

2164 converters = [StringConverter(dtype, locked=True, 

2165 missing_values=miss, default=fill) 

2166 for (miss, fill) in zipit] 

2167 # Update the converters to use the user-defined ones 

2168 uc_update = [] 

2169 for (j, conv) in user_converters.items(): 

2170 # If the converter is specified by column names, use the index instead 

2171 if _is_string_like(j): 

2172 try: 

2173 j = names.index(j) 

2174 i = j 

2175 except ValueError: 

2176 continue 

2177 elif usecols: 

2178 try: 

2179 i = usecols.index(j) 

2180 except ValueError: 

2181 # Unused converter specified 

2182 continue 

2183 else: 

2184 i = j 

2185 # Find the value to test - first_line is not filtered by usecols: 

2186 if len(first_line): 

2187 testing_value = first_values[j] 

2188 else: 

2189 testing_value = None 

2190 if conv is bytes: 

2191 user_conv = asbytes 

2192 elif byte_converters: 

2193 # converters may use decode to work around numpy's old behaviour, 

2194 # so encode the string again before passing to the user converter 

2195 def tobytes_first(x, conv): 

2196 if type(x) is bytes: 

2197 return conv(x) 

2198 return conv(x.encode("latin1")) 

2199 user_conv = functools.partial(tobytes_first, conv=conv) 

2200 else: 

2201 user_conv = conv 

2202 converters[i].update(user_conv, locked=True, 

2203 testing_value=testing_value, 

2204 default=filling_values[i], 

2205 missing_values=missing_values[i],) 

2206 uc_update.append((i, user_conv)) 

2207 # Make sure we have the corrected keys in user_converters... 

2208 user_converters.update(uc_update) 

2209 

2210 # Fixme: possible error, as the following variable is never used. 

2211 # miss_chars = [_.missing_values for _ in converters] 

2212 

2213 # Initialize the output lists ... 

2214 # ... rows 

2215 rows = [] 

2216 append_to_rows = rows.append 

2217 # ... masks 

2218 if usemask: 

2219 masks = [] 

2220 append_to_masks = masks.append 

2221 # ... invalid 

2222 invalid = [] 

2223 append_to_invalid = invalid.append 

2224 

2225 # Parse each line 

2226 for (i, line) in enumerate(itertools.chain([first_line, ], fhd)): 

2227 values = split_line(line) 

2228 nbvalues = len(values) 

2229 # Skip an empty line 

2230 if nbvalues == 0: 

2231 continue 

2232 if usecols: 

2233 # Select only the columns we need 

2234 try: 

2235 values = [values[_] for _ in usecols] 

2236 except IndexError: 

2237 append_to_invalid((i + skip_header + 1, nbvalues)) 

2238 continue 

2239 elif nbvalues != nbcols: 

2240 append_to_invalid((i + skip_header + 1, nbvalues)) 

2241 continue 

2242 # Store the values 

2243 append_to_rows(tuple(values)) 

2244 if usemask: 

2245 append_to_masks(tuple([v.strip() in m 

2246 for (v, m) in zip(values, 

2247 missing_values)])) 

2248 if len(rows) == max_rows: 

2249 break 

2250 

2251 # Upgrade the converters (if needed) 

2252 if dtype is None: 
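# Let each converter widen its type until it can parse every value 

# in its column. 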

2253 for (i, converter) in enumerate(converters): 

2254 current_column = [itemgetter(i)(_m) for _m in rows] 

2255 try: 

2256 converter.iterupgrade(current_column) 

2257 except ConverterLockError: 

2258 errmsg = "Converter #%i is locked and cannot be upgraded: " % i 

2259 current_column = map(itemgetter(i), rows) 

2260 for (j, value) in enumerate(current_column): 

2261 try: 

2262 converter.upgrade(value) 

2263 except (ConverterError, ValueError): 

2264 errmsg += "(occurred line #%i for value '%s')" 

2265 errmsg %= (j + 1 + skip_header, value) 

2266 raise ConverterError(errmsg) 

2267 

2268 # Check that we don't have invalid values 

2269 nbinvalid = len(invalid) 

2270 if nbinvalid > 0: 

2271 nbrows = len(rows) + nbinvalid - skip_footer 

2272 # Construct the error message 

2273 template = " Line #%%i (got %%i columns instead of %i)" % nbcols 

2274 if skip_footer > 0: 
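# Invalid lines inside the footer were never stored in 'rows', so 

# drop them from the report and shrink skip_footer accordingly. 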

2275 nbinvalid_skipped = len([_ for _ in invalid 

2276 if _[0] > nbrows + skip_header]) 

2277 invalid = invalid[:nbinvalid - nbinvalid_skipped] 

2278 skip_footer -= nbinvalid_skipped 

2279# 

2280# nbrows -= skip_footer 

2281# errmsg = [template % (i, nb) 

2282# for (i, nb) in invalid if i < nbrows] 

2283# else: 

2284 errmsg = [template % (i, nb) 

2285 for (i, nb) in invalid] 

2286 if len(errmsg): 

2287 errmsg.insert(0, "Some errors were detected !") 

2288 errmsg = "\n".join(errmsg) 

2289 # Raise an exception? 

2290 if invalid_raise: 

2291 raise ValueError(errmsg) 

2292 # Issue a warning? 

2293 else: 

2294 warnings.warn(errmsg, ConversionWarning, stacklevel=2) 

2295 

2296 # Strip the last skip_footer data 

2297 if skip_footer > 0: 

2298 rows = rows[:-skip_footer] 

2299 if usemask: 

2300 masks = masks[:-skip_footer] 

2301 

2302 # Convert each value according to the converter: 

2303 # We want to modify the list in place to avoid creating a new one... 
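# _loose_call returns the converter's default on failure, while 

# _strict_call raises. 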

2304 if loose: 

2305 rows = list( 

2306 zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)] 

2307 for (i, conv) in enumerate(converters)])) 

2308 else: 

2309 rows = list( 

2310 zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)] 

2311 for (i, conv) in enumerate(converters)])) 

2312 

2313 # Reset the dtype 

2314 data = rows 

2315 if dtype is None: 

2316 # Get the dtypes from the types of the converters 

2317 column_types = [conv.type for conv in converters] 

2318 # Find the columns with strings... 

2319 strcolidx = [i for (i, v) in enumerate(column_types) 

2320 if v == np.unicode_] 

2321 

2322 if byte_converters and strcolidx: 

2323 # convert strings back to bytes for backward compatibility 

2324 warnings.warn( 

2325 "Reading unicode strings without specifying the encoding " 

2326 "argument is deprecated. Set the encoding, use None for the " 

2327 "system default.", 

2328 np.VisibleDeprecationWarning, stacklevel=2) 

2329 def encode_unicode_cols(row_tup): 

2330 row = list(row_tup) 

2331 for i in strcolidx: 

2332 row[i] = row[i].encode('latin1') 

2333 return tuple(row) 

2334 

2335 try: 

2336 data = [encode_unicode_cols(r) for r in data] 

2337 except UnicodeEncodeError: 

2338 pass 

2339 else: 

2340 for i in strcolidx: 

2341 column_types[i] = np.bytes_ 

2342 

2343 # Update string types to be the right length 

2344 sized_column_types = column_types[:] 

2345 for i, col_type in enumerate(column_types): 

2346 if np.issubdtype(col_type, np.character): 

2347 n_chars = max(len(row[i]) for row in data) 

2348 sized_column_types[i] = (col_type, n_chars) 

2349 

2350 if names is None: 

2351 # If the dtype is uniform (before sizing strings) 

2352 base = { 

2353 c_type 

2354 for c, c_type in zip(converters, column_types) 

2355 if c._checked} 

2356 if len(base) == 1: 

2357 uniform_type, = base 

2358 (ddtype, mdtype) = (uniform_type, bool) 

2359 else: 

2360 ddtype = [(defaultfmt % i, dt) 

2361 for (i, dt) in enumerate(sized_column_types)] 

2362 if usemask: 

2363 mdtype = [(defaultfmt % i, bool) 

2364 for (i, dt) in enumerate(sized_column_types)] 

2365 else: 

2366 ddtype = list(zip(names, sized_column_types)) 

2367 mdtype = list(zip(names, [bool] * len(sized_column_types))) 

2368 output = np.array(data, dtype=ddtype) 

2369 if usemask: 

2370 outputmask = np.array(masks, dtype=mdtype) 

2371 else: 

2372 # Overwrite the initial dtype names if needed 

2373 if names and dtype.names is not None: 

2374 dtype.names = names 

2375 # Case 1. We have a structured type 

2376 if len(dtype_flat) > 1: 

2377 # Nested dtype, eg [('a', int), ('b', [('b0', int), ('b1', 'f4')])] 

2378 # First, create the array using a flattened dtype: 

2379 # [('a', int), ('b1', int), ('b2', float)] 

2380 # Then, view the array using the specified dtype. 

2381 if 'O' in (_.char for _ in dtype_flat): 

2382 if has_nested_fields(dtype): 

2383 raise NotImplementedError( 

2384 "Nested fields involving objects are not supported...") 

2385 else: 

2386 output = np.array(data, dtype=dtype) 

2387 else: 

2388 rows = np.array(data, dtype=[('', _) for _ in dtype_flat]) 

2389 output = rows.view(dtype) 

2390 # Now, process the rowmasks the same way 

2391 if usemask: 

2392 rowmasks = np.array( 

2393 masks, dtype=np.dtype([('', bool) for t in dtype_flat])) 

2394 # Construct the new dtype 

2395 mdtype = make_mask_descr(dtype) 

2396 outputmask = rowmasks.view(mdtype) 

2397 # Case #2. We have a basic dtype 

2398 else: 

2399 # We used some user-defined converters 

2400 if user_converters: 

2401 ishomogeneous = True 

2402 descr = [] 

2403 for i, ttype in enumerate([conv.type for conv in converters]): 

2404 # Keep the dtype of the current converter 

2405 if i in user_converters: 

2406 ishomogeneous &= (ttype == dtype.type) 

2407 if np.issubdtype(ttype, np.character): 

2408 ttype = (ttype, max(len(row[i]) for row in data)) 

2409 descr.append(('', ttype)) 

2410 else: 

2411 descr.append(('', dtype)) 

2412 # So we changed the dtype? 

2413 if not ishomogeneous: 

2414 # We have more than one field 

2415 if len(descr) > 1: 

2416 dtype = np.dtype(descr) 

2417 # We have only one field: drop the name if not needed. 

2418 else: 

2419 dtype = np.dtype(ttype) 

2420 # 

2421 output = np.array(data, dtype) 

2422 if usemask: 

2423 if dtype.names is not None: 

2424 mdtype = [(_, bool) for _ in dtype.names] 

2425 else: 

2426 mdtype = bool 

2427 outputmask = np.array(masks, dtype=mdtype) 

2428 # Flag any missing values that the converters did not catch 

2429 names = output.dtype.names 

2430 if usemask and names: 

2431 for (name, conv) in zip(names, converters): 

2432 missing_values = [conv(_) for _ in conv.missing_values 

2433 if _ != ''] 

2434 for mval in missing_values: 

2435 outputmask[name] |= (output[name] == mval) 

2436 # Construct the final array 

2437 if usemask: 

2438 output = output.view(MaskedArray) 

2439 output._mask = outputmask 

2440 

2441 output = _ensure_ndmin_ndarray(output, ndmin=ndmin) 

2442 

2443 if unpack: 

2444 if names is None: 

2445 return output.T 

2446 elif len(names) == 1: 

2447 # squeeze single-name dtypes too 

2448 return output[names[0]] 

2449 else: 

2450 # For structured arrays with multiple fields, 

2451 # return an array for each field. 

2452 return [output[field] for field in names] 

2453 return output 

2454 

2455 

2456_genfromtxt_with_like = array_function_dispatch( 

2457 _genfromtxt_dispatcher 

2458)(genfromtxt) 

2459 

2460 

2461def recfromtxt(fname, **kwargs): 

2462 """ 

2463 Load ASCII data from a file and return it in a record array. 

2464 

2465 If ``usemask=False`` a standard `recarray` is returned; 

2466 if ``usemask=True`` a MaskedRecords array is returned. 

2467 

2468 Parameters 

2469 ---------- 

2470 fname, kwargs : For a description of input parameters, see `genfromtxt`. 

2471 

2472 See Also 

2473 -------- 

2474 numpy.genfromtxt : generic function 

2475 

2476 Notes 

2477 ----- 

2478 By default, `dtype` is None, which means that the data-type of the output 

2479 array will be determined from the data. 

2480 
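Examples 

-------- 

A minimal sketch (the dtype is inferred; default field names are 

``f0``, ``f1``, ...): 

>>> from io import StringIO 

>>> rec = np.recfromtxt(StringIO(u''' 

... 1 2.5 

... 3 4.0''')) 

>>> rec.f0 

array([1, 3]) 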

2481 """ 

2482 kwargs.setdefault("dtype", None) 

2483 usemask = kwargs.get('usemask', False) 

2484 output = genfromtxt(fname, **kwargs) 

2485 if usemask: 

2486 from numpy.ma.mrecords import MaskedRecords 

2487 output = output.view(MaskedRecords) 

2488 else: 

2489 output = output.view(np.recarray) 

2490 return output 

2491 

2492 

2493def recfromcsv(fname, **kwargs): 

2494 """ 

2495 Load ASCII data stored in a comma-separated file. 

2496 

2497 The returned array is a record array (if ``usemask=False``, see 

2498 `recarray`) or a masked record array (if ``usemask=True``, 

2499 see `ma.mrecords.MaskedRecords`). 

2500 

2501 Parameters 

2502 ---------- 

2503 fname, kwargs : For a description of input parameters, see `genfromtxt`. 

2504 

2505 See Also 

2506 -------- 

2507 numpy.genfromtxt : generic function to load ASCII data. 

2508 

2509 Notes 

2510 ----- 

2511 By default, `dtype` is None, which means that the data-type of the output 

2512 array will be determined from the data. 

2513 
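Examples 

-------- 

A minimal sketch (field names come from the header row and are 

lowercased by default): 

>>> from io import StringIO 

>>> rec = np.recfromcsv(StringIO(u''' 

... A,B 

... 1,2.5 

... 3,4.0''')) 

>>> rec.a 

array([1, 3]) 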

2514 """ 

2515 # Set default kwargs for genfromtxt as relevant to csv import. 

2516 kwargs.setdefault("case_sensitive", "lower") 

2517 kwargs.setdefault("names", True) 

2518 kwargs.setdefault("delimiter", ",") 

2519 kwargs.setdefault("dtype", None) 

2520 output = genfromtxt(fname, **kwargs) 

2521 

2522 usemask = kwargs.get("usemask", False) 

2523 if usemask: 

2524 from numpy.ma.mrecords import MaskedRecords 

2525 output = output.view(MaskedRecords) 

2526 else: 

2527 output = output.view(np.recarray) 

2528 return output