# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
#
# This module is part of GitDB and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
"""Contains PackIndexFile and PackFile implementations"""
import zlib

from gitdb.exc import (
    BadObject,
    AmbiguousObjectName,
    UnsupportedOperation,
    ParseError
)

from gitdb.util import (
    mman,
    LazyMixin,
    unpack_from,
    bin_to_hex,
    byte_ord,
)

from gitdb.fun import (
    create_pack_object_header,
    pack_object_header_info,
    is_equal_canonical_sha,
    type_id_to_type_map,
    write_object,
    stream_copy,
    chunk_size,
    delta_types,
    OFS_DELTA,
    REF_DELTA,
    msb_size
)

try:
    from gitdb_speedups._perf import PackIndexFile_sha_to_index
except ImportError:
    pass
# END try c module

from gitdb.base import (  # Amazing !
    OInfo,
    OStream,
    OPackInfo,
    OPackStream,
    ODeltaStream,
    ODeltaPackInfo,
    ODeltaPackStream,
)

from gitdb.stream import (
    DecompressMemMapReader,
    DeltaApplyReader,
    Sha1Writer,
    NullStream,
    FlexibleSha1Writer
)

from struct import pack
from binascii import crc32

from gitdb.const import NULL_BYTE

import tempfile
import array
import os
import sys

__all__ = ('PackIndexFile', 'PackFile', 'PackEntity')


#{ Utilities

def pack_object_at(cursor, offset, as_stream):
    """
    :return: Tuple(abs_data_offset, PackInfo|PackStream)
        an object of the correct type according to the type_id of the object.
        If as_stream is True, the object will contain a stream, allowing the
        data to be read decompressed.
    :param cursor: cursor providing random access to the data containing all required information
    :param offset: offset into the data at which the object information is located
    :param as_stream: if True, a stream object will be returned that can read
        the data, otherwise you receive an info object only"""
    data = cursor.use_region(offset).buffer()
    type_id, uncomp_size, data_rela_offset = pack_object_header_info(data)
    total_rela_offset = None                # set later, actual offset until data stream begins
    delta_info = None

    # OFFSET DELTA
    if type_id == OFS_DELTA:
        i = data_rela_offset
        c = byte_ord(data[i])
        i += 1
        delta_offset = c & 0x7f
        while c & 0x80:
            c = byte_ord(data[i])
            i += 1
            delta_offset += 1
            delta_offset = (delta_offset << 7) + (c & 0x7f)
        # END character loop
        delta_info = delta_offset
        total_rela_offset = i
    # REF DELTA
    elif type_id == REF_DELTA:
        total_rela_offset = data_rela_offset + 20
        delta_info = data[data_rela_offset:total_rela_offset]
    # BASE OBJECT
    else:
        # assume it's a base object
        total_rela_offset = data_rela_offset
    # END handle type id

    abs_data_offset = offset + total_rela_offset
    if as_stream:
        stream = DecompressMemMapReader(data[total_rela_offset:], False, uncomp_size)
        if delta_info is None:
            return abs_data_offset, OPackStream(offset, type_id, uncomp_size, stream)
        else:
            return abs_data_offset, ODeltaPackStream(offset, type_id, uncomp_size, delta_info, stream)
    else:
        if delta_info is None:
            return abs_data_offset, OPackInfo(offset, type_id, uncomp_size)
        else:
            return abs_data_offset, ODeltaPackInfo(offset, type_id, uncomp_size, delta_info)
        # END handle info
    # END handle stream
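
# Illustrative usage sketch (not part of the module; the pack path is
# hypothetical): reading the header of the first object in a pack.
#
#   cursor = mman.make_cursor("/path/to/pack-deadbeef.pack").use_region()
#   abs_ofs, info = pack_object_at(cursor, PackFile.first_object_offset, as_stream=False)
#   print(info.type_id, info.size)      # type id and uncompressed size of the object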


def write_stream_to_pack(read, write, zstream, base_crc=None):
    """Copy a stream as read from the read function, compress it, and write the result.
    Count the number of written bytes and return it
    :param base_crc: if not None, the crc will be the base for all compressed data
        we consecutively write and generate a crc32 from. If None, no crc will be generated
    :return: tuple(number of bytes read, number of bytes written, crc32). The crc
        might be 0 if base_crc was None"""
    br = 0      # bytes read
    bw = 0      # bytes written
    want_crc = base_crc is not None
    crc = 0
    if want_crc:
        crc = base_crc
    # END initialize crc

    while True:
        chunk = read(chunk_size)
        br += len(chunk)
        compressed = zstream.compress(chunk)
        bw += len(compressed)
        write(compressed)           # cannot assume return value

        if want_crc:
            crc = crc32(compressed, crc)
        # END handle crc

        if len(chunk) != chunk_size:
            break
    # END copy loop

    compressed = zstream.flush()
    bw += len(compressed)
    write(compressed)
    if want_crc:
        crc = crc32(compressed, crc)
    # END handle crc

    return (br, bw, crc)
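
# Illustrative usage sketch (hypothetical buffers): compressing an in-memory
# payload into another buffer while tracking the crc32 of the compressed bytes.
#
#   from io import BytesIO
#   src, dst = BytesIO(b"x" * 8192), BytesIO()
#   zstream = zlib.compressobj(zlib.Z_BEST_SPEED)
#   br, bw, crc = write_stream_to_pack(src.read, dst.write, zstream, base_crc=0)
#   assert br == 8192 and bw == len(dst.getvalue())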

#} END utilities


class IndexWriter:

    """Utility to cache index information, allowing all information to be written
    to the given stream later in one go
    **Note:** currently only writes v2 indices"""
    __slots__ = '_objs'

    def __init__(self):
        self._objs = list()

    def append(self, binsha, crc, offset):
        """Append one piece of object information"""
        self._objs.append((binsha, crc, offset))

    def write(self, pack_sha, write):
        """Write the index file using the given write method
        :param pack_sha: binary sha over the whole pack that we index
        :return: sha1 binary sha over all index file contents"""
        # sort for sha1 hash
        self._objs.sort(key=lambda o: o[0])

        sha_writer = FlexibleSha1Writer(write)
        sha_write = sha_writer.write
        sha_write(PackIndexFile.index_v2_signature)
        sha_write(pack(">L", PackIndexFile.index_version_default))

        # fanout
        tmplist = list((0,) * 256)      # fanout or list with 64 bit offsets
        for t in self._objs:
            tmplist[byte_ord(t[0][0])] += 1
        # END prepare fanout
        for i in range(255):
            v = tmplist[i]
            sha_write(pack('>L', v))
            tmplist[i + 1] += v
        # END write each fanout entry
        sha_write(pack('>L', tmplist[255]))

        # sha1 ordered
        # save calls, that is push them into c
        sha_write(b''.join(t[0] for t in self._objs))

        # crc32
        for t in self._objs:
            sha_write(pack('>L', t[1] & 0xffffffff))
        # END for each crc

        tmplist = list()
        # offset 32
        for t in self._objs:
            ofs = t[2]
            if ofs > 0x7fffffff:
                tmplist.append(ofs)
                ofs = 0x80000000 + len(tmplist) - 1
            # END handle 64 bit offsets
            sha_write(pack('>L', ofs & 0xffffffff))
        # END for each offset

        # offset 64
        for ofs in tmplist:
            sha_write(pack(">Q", ofs))
        # END for each offset

        # trailer
        assert(len(pack_sha) == 20)

        sha_write(pack_sha)
        sha = sha_writer.sha(as_hex=False)
        write(sha)
        return sha
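
# Illustrative usage sketch (hypothetical values): accumulating one entry and
# writing a v2 index to an in-memory buffer.
#
#   from io import BytesIO
#   writer = IndexWriter()
#   writer.append(b"\x01" * 20, 0, 12)      # binsha, crc32, pack offset
#   out = BytesIO()
#   index_sha = writer.write(pack_sha=b"\x00" * 20, write=out.write)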


class PackIndexFile(LazyMixin):

    """A pack index provides offsets into the corresponding pack, allowing the
    locations of objects to be found faster."""

    # Don't use slots as we dynamically bind functions for each version, need a dict for this
    # The slots you see here are just to keep track of our instance variables
    # __slots__ = ('_indexpath', '_fanout_table', '_cursor', '_version',
    #              '_sha_list_offset', '_crc_list_offset', '_pack_offset', '_pack_64_offset')

    # used in v2 indices
    _sha_list_offset = 8 + 1024
    index_v2_signature = b'\xfftOc'
    index_version_default = 2

    def __init__(self, indexpath):
        super().__init__()
        self._indexpath = indexpath

    def close(self):
        mman.force_map_handle_removal_win(self._indexpath)
        self._cursor = None

    def _set_cache_(self, attr):
        if attr == "_packfile_checksum":
            self._packfile_checksum = self._cursor.map()[-40:-20]
        elif attr == "_indexfile_checksum":
            self._indexfile_checksum = self._cursor.map()[-20:]
        elif attr == "_cursor":
            # Note: We don't lock the file when reading as we cannot be sure
            # that we can actually write to the location - it could be a read-only
            # alternate for instance
            self._cursor = mman.make_cursor(self._indexpath).use_region()
            # We will assume that the index will always fully fit into memory !
            if mman.window_size() > 0 and self._cursor.file_size() > mman.window_size():
                raise AssertionError("The index file at %s is too large to fit into a mapped window (%i > %i). This is a limitation of the implementation" % (
                    self._indexpath, self._cursor.file_size(), mman.window_size()))
            # END assert window size
        else:
            # now it's time to initialize everything - if we are here, someone wants
            # to access the fanout table or related properties

            # CHECK VERSION
            mmap = self._cursor.map()
            self._version = (mmap[:4] == self.index_v2_signature and 2) or 1
            if self._version == 2:
                version_id = unpack_from(">L", mmap, 4)[0]
                assert version_id == self._version, "Unsupported index version: %i" % version_id
            # END assert version

            # SETUP FUNCTIONS
            # setup our functions according to the actual version
            for fname in ('entry', 'offset', 'sha', 'crc'):
                setattr(self, fname, getattr(self, "_%s_v%i" % (fname, self._version)))
            # END for each function to initialize

            # INITIALIZE DATA
            # byte offset is 8 if version is 2, 0 otherwise
            self._initialize()
        # END handle attributes

    #{ Access V1

    def _entry_v1(self, i):
        """:return: tuple(offset, binsha, 0)"""
        return unpack_from(">L20s", self._cursor.map(), 1024 + i * 24) + (0, )

    def _offset_v1(self, i):
        """see ``_offset_v2``"""
        return unpack_from(">L", self._cursor.map(), 1024 + i * 24)[0]

    def _sha_v1(self, i):
        """see ``_sha_v2``"""
        base = 1024 + (i * 24) + 4
        return self._cursor.map()[base:base + 20]

    def _crc_v1(self, i):
        """unsupported"""
        return 0

    #} END access V1

    #{ Access V2
    def _entry_v2(self, i):
        """:return: tuple(offset, binsha, crc)"""
        return (self._offset_v2(i), self._sha_v2(i), self._crc_v2(i))

    def _offset_v2(self, i):
        """:return: 32 or 64 bit offset into pack files. 64 bit offsets will only
            be returned if the pack is larger than 4 GiB, or 2^32 bytes"""
        offset = unpack_from(">L", self._cursor.map(), self._pack_offset + i * 4)[0]

        # if the high-bit is set, this indicates that we have to lookup the offset
        # in the 64 bit region of the file. The current offset ( lower 31 bits )
        # are the index into it
        if offset & 0x80000000:
            offset = unpack_from(">Q", self._cursor.map(), self._pack_64_offset + (offset & ~0x80000000) * 8)[0]
        # END handle 64 bit offset

        return offset

    def _sha_v2(self, i):
        """:return: sha at the given index of this file index instance"""
        base = self._sha_list_offset + i * 20
        return self._cursor.map()[base:base + 20]

    def _crc_v2(self, i):
        """:return: 4 byte crc for the object at index i"""
        return unpack_from(">L", self._cursor.map(), self._crc_list_offset + i * 4)[0]

    #} END access V2

    #{ Initialization

    def _initialize(self):
        """initialize base data"""
        self._fanout_table = self._read_fanout((self._version == 2) * 8)

        if self._version == 2:
            self._crc_list_offset = self._sha_list_offset + self.size() * 20
            self._pack_offset = self._crc_list_offset + self.size() * 4
            self._pack_64_offset = self._pack_offset + self.size() * 4
        # END setup base

    def _read_fanout(self, byte_offset):
        """Generate a fanout table from our data"""
        d = self._cursor.map()
        out = list()
        append = out.append
        for i in range(256):
            append(unpack_from('>L', d, byte_offset + i * 4)[0])
        # END for each entry
        return out

    #} END initialization

    #{ Properties
    def version(self):
        return self._version

    def size(self):
        """:return: amount of objects referred to by this index"""
        return self._fanout_table[255]

    def path(self):
        """:return: path to the packindexfile"""
        return self._indexpath

    def packfile_checksum(self):
        """:return: 20 byte sha representing the sha1 hash of the pack file"""
        return self._cursor.map()[-40:-20]

    def indexfile_checksum(self):
        """:return: 20 byte sha representing the sha1 hash of this index file"""
        return self._cursor.map()[-20:]

    def offsets(self):
        """:return: sequence of all offsets in the order in which they were written

        **Note:** return value can be random accessed, but may be immutable"""
        if self._version == 2:
            # read stream to array, convert to tuple
            a = array.array('I')    # 4 byte unsigned int; 'L' (long) is 8 bytes on 64 bit platforms
            a.frombytes(self._cursor.map()[self._pack_offset:self._pack_64_offset])

            # network byte order to something the array module likes more
            if sys.byteorder == 'little':
                a.byteswap()
            return a
        else:
            return tuple(self.offset(index) for index in range(self.size()))
        # END handle version

    def sha_to_index(self, sha):
        """
        :return: index usable with the ``offset`` or ``entry`` method, or None
            if the sha was not found in this pack index
        :param sha: 20 byte sha to lookup"""
        first_byte = byte_ord(sha[0])
        get_sha = self.sha
        lo = 0                  # lower index, the left bound of the bisection
        if first_byte != 0:
            lo = self._fanout_table[first_byte - 1]
        hi = self._fanout_table[first_byte]     # the upper, right bound of the bisection

        # bisect until we have the sha
        while lo < hi:
            mid = (lo + hi) // 2
            mid_sha = get_sha(mid)
            if sha < mid_sha:
                hi = mid
            elif sha == mid_sha:
                return mid
            else:
                lo = mid + 1
            # END handle midpoint
        # END bisect
        return None
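
    # Worked example of the fanout bound used above (hypothetical numbers): for a
    # sha whose first byte is 0xAB, fanout_table[0xAA] counts all objects whose
    # sha starts with a byte <= 0xAA, so the bisection can be confined to
    #
    #   lo = fanout_table[0xAA], hi = fanout_table[0xAB]
    #
    # and needs only about log2(hi - lo) sha comparisons instead of log2(size()).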

    def partial_sha_to_index(self, partial_bin_sha, canonical_length):
        """
        :return: index as in `sha_to_index` or None if the sha was not found in this
            index file
        :param partial_bin_sha: at least two bytes of a partial binary sha, as bytes
        :param canonical_length: length of the original hexadecimal representation of the
            given partial binary sha
        :raise AmbiguousObjectName:"""
        if len(partial_bin_sha) < 2:
            raise ValueError("Require at least 2 bytes of partial sha")

        assert isinstance(partial_bin_sha, bytes), "partial_bin_sha must be bytes"
        first_byte = byte_ord(partial_bin_sha[0])

        get_sha = self.sha
        lo = 0                  # lower index, the left bound of the bisection
        if first_byte != 0:
            lo = self._fanout_table[first_byte - 1]
        hi = self._fanout_table[first_byte]     # the upper, right bound of the bisection

        # fill the partial to full 20 bytes
        filled_sha = partial_bin_sha + NULL_BYTE * (20 - len(partial_bin_sha))

        # find lowest
        while lo < hi:
            mid = (lo + hi) // 2
            mid_sha = get_sha(mid)
            if filled_sha < mid_sha:
                hi = mid
            elif filled_sha == mid_sha:
                # perfect match
                lo = mid
                break
            else:
                lo = mid + 1
            # END handle midpoint
        # END bisect

        if lo < self.size():
            cur_sha = get_sha(lo)
            if is_equal_canonical_sha(canonical_length, partial_bin_sha, cur_sha):
                next_sha = None
                if lo + 1 < self.size():
                    next_sha = get_sha(lo + 1)
                # if the following sha matches the same prefix, the name is ambiguous
                if next_sha and is_equal_canonical_sha(canonical_length, partial_bin_sha, next_sha):
                    raise AmbiguousObjectName(partial_bin_sha)
                return lo
            # END if we have a match
        # END if we found something
        return None

    if 'PackIndexFile_sha_to_index' in globals():
        # NOTE: It's just about 25% faster, the major bottleneck might be the attr
        # accesses
        def sha_to_index(self, sha):
            return PackIndexFile_sha_to_index(self, sha)
    # END redefine heavy-hitter with c version

    #} END properties
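
# Illustrative usage sketch (hypothetical path and sha): looking up an object's
# pack offset through its index file.
#
#   index = PackIndexFile("/path/to/pack-deadbeef.idx")
#   idx = index.sha_to_index(some_20_byte_sha)      # some_20_byte_sha assumed given
#   if idx is not None:
#       print(index.offset(idx), index.crc(idx))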


class PackFile(LazyMixin):

    """A pack is a file written according to Version 2 of the git pack format

    As we currently use memory maps, the maximum size of packs we can handle is
    therefore limited to 32 bits on 32 bit systems. On 64 bit systems, this should be
    fine though.

    **Note:** at some point, this might be implemented using streams as well, or
    streams are an alternate path in the case memory maps cannot be created
    for some reason - one clearly doesn't want to read 10GB at once in that
    case"""

    __slots__ = ('_packpath', '_cursor', '_size', '_version')
    pack_signature = 0x5041434b     # 'PACK'
    pack_version_default = 2

    # offset into our data at which the first object starts
    first_object_offset = 3 * 4     # header bytes
    footer_size = 20                # final sha

    def __init__(self, packpath):
        self._packpath = packpath

    def close(self):
        mman.force_map_handle_removal_win(self._packpath)
        self._cursor = None

    def _set_cache_(self, attr):
        # we fill the whole cache, whichever attribute gets queried first
        self._cursor = mman.make_cursor(self._packpath).use_region()

        # read the header information
        type_id, self._version, self._size = unpack_from(">LLL", self._cursor.map(), 0)

        # TODO: figure out whether we should better keep the lock, or maybe
        # add a .keep file instead ?
        if type_id != self.pack_signature:
            raise ParseError("Invalid pack signature: %i" % type_id)

    def _iter_objects(self, start_offset, as_stream=True):
        """Handle the actual iteration of objects within this pack"""
        c = self._cursor
        content_size = c.file_size() - self.footer_size
        cur_offset = start_offset or self.first_object_offset

        null = NullStream()
        while cur_offset < content_size:
            data_offset, ostream = pack_object_at(c, cur_offset, True)
            # scrub the stream to the end - this decompresses the object, but yields
            # the amount of compressed bytes we need to get to the next offset

            stream_copy(ostream.read, null.write, ostream.size, chunk_size)
            assert ostream.stream._br == ostream.size
            cur_offset += (data_offset - ostream.pack_offset) + ostream.stream.compressed_bytes_read()

            # if a stream is requested, reset it beforehand
            # Otherwise return the Stream object directly, it's derived from the
            # info object
            if as_stream:
                ostream.stream.seek(0)
            yield ostream
        # END until we have read everything

    #{ Pack Information

    def size(self):
        """:return: The amount of objects stored in this pack"""
        return self._size

    def version(self):
        """:return: the version of this pack"""
        return self._version

    def data(self):
        """
        :return: read-only data of this pack. It provides random access and usually
            is a memory map.
        :note: This method is unsafe as it returns a window into a file which might be larger than the actual window size"""
        # can use map as we are starting at offset 0. Otherwise we would have to use buffer()
        return self._cursor.use_region().map()

    def checksum(self):
        """:return: 20 byte sha1 hash on all object sha's contained in this file"""
        return self._cursor.use_region(self._cursor.file_size() - 20).buffer()[:]

    def path(self):
        """:return: path to the packfile"""
        return self._packpath
    #} END pack information

    #{ Pack Specific

    def collect_streams(self, offset):
        """
        :return: list of pack streams which are required to build the object
            at the given offset. The first entry of the list is the object at offset,
            the last one is either a full object, or a REF_DELTA stream. The latter
            type needs its reference object to be looked up in an ODB to form a valid
            delta chain.
            If the object at offset is not a delta, the size of the list is 1.
        :param offset: specifies the first byte of the object within this pack"""
        out = list()
        c = self._cursor
        while True:
            ostream = pack_object_at(c, offset, True)[1]
            out.append(ostream)
            if ostream.type_id == OFS_DELTA:
                offset = ostream.pack_offset - ostream.delta_info
            else:
                # the only thing we can lookup are OFFSET deltas. Everything
                # else is either an object, or a ref delta, in the latter
                # case someone else has to find it
                break
            # END handle type
        # END while chaining streams
        return out

    #} END pack specific

    #{ Read-Database like Interface

    def info(self, offset):
        """Retrieve information about the object at the given file-absolute offset

        :param offset: byte offset
        :return: OPackInfo instance, the actual type differs depending on the type_id attribute"""
        return pack_object_at(self._cursor, offset or self.first_object_offset, False)[1]

    def stream(self, offset):
        """Retrieve an object at the given file-absolute offset as stream along with its information

        :param offset: byte offset
        :return: OPackStream instance, the actual type differs depending on the type_id attribute"""
        return pack_object_at(self._cursor, offset or self.first_object_offset, True)[1]

    def stream_iter(self, start_offset=0):
        """
        :return: iterator yielding OPackStream compatible instances, allowing
            to access the data in the pack directly.
        :param start_offset: offset to the first object to iterate. If 0, iteration
            starts at the very first object in the pack.

        **Note:** Iterating a pack directly is costly as the datastream has to be decompressed
        to determine the bounds between the objects"""
        return self._iter_objects(start_offset, as_stream=True)

    #} END Read-Database like Interface
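
# Illustrative usage sketch (hypothetical path): walking all objects of a pack
# directly, without an index. Costly, as every object must be decompressed.
#
#   pf = PackFile("/path/to/pack-deadbeef.pack")
#   for ostream in pf.stream_iter():
#       print(ostream.type_id, ostream.size, ostream.pack_offset)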


class PackEntity(LazyMixin):

    """Combines the PackIndexFile and the PackFile into one, allowing the
    actual objects to be resolved and iterated"""

    __slots__ = ('_index',          # our index file
                 '_pack',           # our pack file
                 '_offset_map'      # on demand dict mapping one offset to the next consecutive one
                 )

    IndexFileCls = PackIndexFile
    PackFileCls = PackFile

    def __init__(self, pack_or_index_path):
        """Initialize ourselves with the path to the respective pack or index file"""
        basename, ext = os.path.splitext(pack_or_index_path)
        self._index = self.IndexFileCls("%s.idx" % basename)        # PackIndexFile instance
        self._pack = self.PackFileCls("%s.pack" % basename)         # corresponding PackFile instance

    def close(self):
        self._index.close()
        self._pack.close()

    def _set_cache_(self, attr):
        # currently this can only be _offset_map
        # TODO: make this a simple sorted offset array which can be bisected
        # to find the respective entry, from which we can take a +1 easily
        # This might be slower, but should also be much lighter in memory !
        offsets_sorted = sorted(self._index.offsets())
        last_offset = len(self._pack.data()) - self._pack.footer_size
        assert offsets_sorted, "Cannot handle empty indices"

        offset_map = None
        if len(offsets_sorted) == 1:
            offset_map = {offsets_sorted[0]: last_offset}
        else:
            iter_offsets = iter(offsets_sorted)
            iter_offsets_plus_one = iter(offsets_sorted)
            next(iter_offsets_plus_one)
            consecutive = zip(iter_offsets, iter_offsets_plus_one)

            offset_map = dict(consecutive)

            # the last offset is not yet set
            offset_map[offsets_sorted[-1]] = last_offset
        # END handle offset amount
        self._offset_map = offset_map
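
    # Worked example of the offset map built above (hypothetical numbers): for
    # sorted object offsets [12, 40, 97] in a pack whose data ends at byte 200
    # (excluding the 20 byte footer), pairing each offset with its successor yields
    #
    #   {12: 40, 40: 97, 97: 200}
    #
    # so the compressed extent of any object is offset_map[ofs] - ofs.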

    def _sha_to_index(self, sha):
        """:return: index for the given sha, or raise"""
        index = self._index.sha_to_index(sha)
        if index is None:
            raise BadObject(sha)
        return index

    def _iter_objects(self, as_stream):
        """Iterate over all objects in our index and yield their OInfo or OStream instances"""
        _sha = self._index.sha
        _object = self._object
        for index in range(self._index.size()):
            yield _object(_sha(index), as_stream, index)
        # END for each index

    def _object(self, sha, as_stream, index=-1):
        """:return: OInfo or OStream object providing information about the given sha
        :param index: if not -1, it's assumed to be the sha's index in the IndexFile"""
        # it's a little bit redundant here, but it needs to be efficient
        if index < 0:
            index = self._sha_to_index(sha)
        if sha is None:
            sha = self._index.sha(index)
        # END assure sha is present ( in output )
        offset = self._index.offset(index)
        type_id, uncomp_size, data_rela_offset = pack_object_header_info(self._pack._cursor.use_region(offset).buffer())
        if as_stream:
            if type_id not in delta_types:
                packstream = self._pack.stream(offset)
                return OStream(sha, packstream.type, packstream.size, packstream.stream)
            # END handle non-deltas

            # produce a delta stream containing all info
            # To prevent it from applying the deltas when querying the size,
            # we extract it from the delta stream ourselves
            streams = self.collect_streams_at_offset(offset)
            dstream = DeltaApplyReader.new(streams)

            return ODeltaStream(sha, dstream.type, None, dstream)
        else:
            if type_id not in delta_types:
                return OInfo(sha, type_id_to_type_map[type_id], uncomp_size)
            # END handle non-deltas

            # deltas are a little tougher - unpack the first bytes to obtain
            # the actual target size, as opposed to the size of the delta data
            streams = self.collect_streams_at_offset(offset)
            buf = streams[0].read(512)
            offset, src_size = msb_size(buf)
            offset, target_size = msb_size(buf, offset)

            # collect the streams to obtain the actual object type
            if streams[-1].type_id in delta_types:
                raise BadObject(sha, "Could not resolve delta object")
            return OInfo(sha, streams[-1].type, target_size)
        # END handle stream

    #{ Read-Database like Interface

    def info(self, sha):
        """Retrieve information about the object identified by the given sha

        :param sha: 20 byte sha1
        :raise BadObject:
        :return: OInfo instance, with 20 byte sha"""
        return self._object(sha, False)

    def stream(self, sha):
        """Retrieve an object stream along with its information as identified by the given sha

        :param sha: 20 byte sha1
        :raise BadObject:
        :return: OStream instance, with 20 byte sha"""
        return self._object(sha, True)

    def info_at_index(self, index):
        """As ``info``, but uses a PackIndexFile compatible index to refer to the object"""
        return self._object(None, False, index)

    def stream_at_index(self, index):
        """As ``stream``, but uses a PackIndexFile compatible index to refer to the
        object"""
        return self._object(None, True, index)

    #} END Read-Database like Interface

    #{ Interface

    def pack(self):
        """:return: the underlying pack file instance"""
        return self._pack

    def index(self):
        """:return: the underlying pack index file instance"""
        return self._index

    def is_valid_stream(self, sha, use_crc=False):
        """
        Verify that the stream at the given sha is valid.

        :param use_crc: if True, the index's crc is run over the compressed stream of
            the object, which is much faster than checking the sha1. It is also
            more prone to unnoticed corruption or manipulation. If the object is
            a delta, this only verifies that the delta's data is valid, not the
            data of the actual undeltified object, as it depends on more than
            just this stream.
            If False, the object will be decompressed and the sha generated. It must
            match the given sha.
        :param sha: 20 byte sha1 of the object whose stream to verify

        :return: True if the stream is valid
        :raise UnsupportedOperation: If the index is version 1 only
        :raise BadObject: sha was not found"""
        if use_crc:
            if self._index.version() < 2:
                raise UnsupportedOperation("Version 1 indices do not contain crc's, verify by sha instead")
            # END handle index version

            index = self._sha_to_index(sha)
            offset = self._index.offset(index)
            next_offset = self._offset_map[offset]
            crc_value = self._index.crc(index)

            # create the current crc value, on the compressed object data
            # Read it in chunks, without copying the data
            crc_update = zlib.crc32
            pack_data = self._pack.data()
            cur_pos = offset
            this_crc_value = 0
            while cur_pos < next_offset:
                rbound = min(cur_pos + chunk_size, next_offset)
                size = rbound - cur_pos
                this_crc_value = crc_update(pack_data[cur_pos:cur_pos + size], this_crc_value)
                cur_pos += size
            # END window size loop

            # crc returns signed 32 bit numbers, the AND op forces it into unsigned
            # mode ... wow, sneaky, from dulwich.
            return (this_crc_value & 0xffffffff) == crc_value
        else:
            shawriter = Sha1Writer()
            stream = self._object(sha, as_stream=True)
            # write a loose object, which is the basis for the sha
            write_object(stream.type, stream.size, stream.read, shawriter.write)

            return shawriter.sha(as_hex=False) == sha
        # END handle crc/sha verification
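
    # Illustrative usage sketch (hypothetical path and sha): cheap crc verification
    # first, falling back to the slower but stronger sha1 check.
    #
    #   entity = PackEntity("/path/to/pack-deadbeef.pack")
    #   ok = entity.is_valid_stream(some_20_byte_sha, use_crc=True)
    #   if not ok:
    #       ok = entity.is_valid_stream(some_20_byte_sha, use_crc=False)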

    def info_iter(self):
        """
        :return: Iterator over all objects in this pack. The iterator yields
            OInfo instances"""
        return self._iter_objects(as_stream=False)

    def stream_iter(self):
        """
        :return: iterator over all objects in this pack. The iterator yields
            OStream instances"""
        return self._iter_objects(as_stream=True)

    def collect_streams_at_offset(self, offset):
        """
        As the version in the PackFile, but can resolve REF deltas within this pack
        For more info, see ``collect_streams``

        :param offset: offset into the pack file at which the object can be found"""
        streams = self._pack.collect_streams(offset)

        # try to resolve the last one if needed. It is assumed to be either
        # a REF delta, or a base object, as OFFSET deltas are resolved by the pack
        if streams[-1].type_id == REF_DELTA:
            stream = streams[-1]
            while stream.type_id in delta_types:
                if stream.type_id == REF_DELTA:
                    # smmap can return memory view objects, which can't be compared as buffers/bytes can ...
                    if isinstance(stream.delta_info, memoryview):
                        sindex = self._index.sha_to_index(stream.delta_info.tobytes())
                    else:
                        sindex = self._index.sha_to_index(stream.delta_info)
                    if sindex is None:
                        break
                    stream = self._pack.stream(self._index.offset(sindex))
                    streams.append(stream)
                else:
                    # must be another OFS DELTA - this could happen if a REF
                    # delta we resolved previously points to an OFS delta. Who
                    # would do that ;) ? We can handle it though
                    stream = self._pack.stream(stream.delta_info)
                    streams.append(stream)
                # END handle ref delta
            # END resolve ref streams
        # END resolve streams

        return streams

    def collect_streams(self, sha):
        """
        As ``PackFile.collect_streams``, but takes a sha instead of an offset.
        Additionally, ref_delta streams will be resolved within this pack.
        If this is not possible, the stream will be left alone, hence it is advised
        to check for unresolved ref-deltas and resolve them before attempting to
        construct a delta stream.

        :param sha: 20 byte sha1 specifying the object whose related streams you want to collect
        :return: list of streams, first being the actual object delta, the last being
            a possibly unresolved base object.
        :raise BadObject:"""
        return self.collect_streams_at_offset(self._index.offset(self._sha_to_index(sha)))

    @classmethod
    def write_pack(cls, object_iter, pack_write, index_write=None,
                   object_count=None, zlib_compression=zlib.Z_BEST_SPEED):
        """
        Create a new pack by putting all objects obtained by the object_iterator
        into a pack which is written using the pack_write method.
        The respective index is produced as well if index_write is not None.

        :param object_iter: iterator yielding odb output objects
        :param pack_write: function to receive strings to write into the pack stream
        :param index_write: if not None, the function writes the index file corresponding
            to the pack.
        :param object_count: if you can provide the amount of objects in your iteration,
            this would be the place to put it. Otherwise we have to pre-iterate and store
            all items into a list to get the number, which uses more memory than necessary.
        :param zlib_compression: the zlib compression level to use
        :return: tuple(pack_sha, index_binsha) binary sha over all the contents of the pack
            and over all contents of the index. If index_write was None, index_binsha will be None

        **Note:** The destination of the write functions is up to the user. It could
        be a socket, or a file for instance

        **Note:** writes only undeltified objects"""
        objs = object_iter
        if not object_count:
            if not isinstance(object_iter, (tuple, list)):
                objs = list(object_iter)
            # END handle list type
            object_count = len(objs)
        # END handle object

        pack_writer = FlexibleSha1Writer(pack_write)
        pwrite = pack_writer.write
        ofs = 0                                 # current offset into the pack file
        index = None
        wants_index = index_write is not None

        # write header
        pwrite(pack('>LLL', PackFile.pack_signature, PackFile.pack_version_default, object_count))
        ofs += 12

        if wants_index:
            index = IndexWriter()
        # END handle index header

        actual_count = 0
        for obj in objs:
            actual_count += 1
            crc = 0

            # object header
            hdr = create_pack_object_header(obj.type_id, obj.size)
            if wants_index:
                crc = crc32(hdr)
            else:
                crc = None
            # END handle crc
            pwrite(hdr)

            # data stream
            zstream = zlib.compressobj(zlib_compression)
            ostream = obj.stream
            br, bw, crc = write_stream_to_pack(ostream.read, pwrite, zstream, base_crc=crc)
            assert(br == obj.size)
            if wants_index:
                index.append(obj.binsha, crc, ofs)
            # END handle index

            ofs += len(hdr) + bw
            if actual_count == object_count:
                break
            # END abort once we are done
        # END for each object

        if actual_count != object_count:
            raise ValueError(
                "Expected to write %i objects into pack, but received only %i from iterators" % (object_count, actual_count))
        # END count assertion

        # write footer
        pack_sha = pack_writer.sha(as_hex=False)
        assert len(pack_sha) == 20
        pack_write(pack_sha)
        ofs += len(pack_sha)                    # just for completeness ;)

        index_sha = None
        if wants_index:
            index_sha = index.write(pack_sha, index_write)
        # END handle index

        return pack_sha, index_sha
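
    # Illustrative usage sketch (hypothetical object list ``objects``): writing a
    # pack and its index side by side into two in-memory buffers.
    #
    #   from io import BytesIO
    #   pack_buf, index_buf = BytesIO(), BytesIO()
    #   pack_sha, index_sha = PackEntity.write_pack(
    #       objects, pack_buf.write, index_buf.write, object_count=len(objects))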

    @classmethod
    def create(cls, object_iter, base_dir, object_count=None, zlib_compression=zlib.Z_BEST_SPEED):
        """Create a new on-disk entity comprised of a properly named pack file and a properly named
        and corresponding index file. The pack contains all OStream objects contained in object_iter.
        :param base_dir: directory which is to contain the files
        :return: PackEntity instance initialized with the new pack

        **Note:** for more information on the other parameters see the write_pack method"""
        pack_fd, pack_path = tempfile.mkstemp('', 'pack', base_dir)
        index_fd, index_path = tempfile.mkstemp('', 'index', base_dir)
        pack_write = lambda d: os.write(pack_fd, d)
        index_write = lambda d: os.write(index_fd, d)

        pack_binsha, index_binsha = cls.write_pack(object_iter, pack_write, index_write, object_count, zlib_compression)
        os.close(pack_fd)
        os.close(index_fd)

        fmt = "pack-%s.%s"
        new_pack_path = os.path.join(base_dir, fmt % (bin_to_hex(pack_binsha).decode('ascii'), 'pack'))
        new_index_path = os.path.join(base_dir, fmt % (bin_to_hex(pack_binsha).decode('ascii'), 'idx'))
        os.rename(pack_path, new_pack_path)
        os.rename(index_path, new_index_path)

        return cls(new_pack_path)

    #} END interface
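
# Illustrative end-to-end sketch (hypothetical inputs): creating an on-disk pack
# from a list of OStream-like objects, then reading one object back.
#
#   entity = PackEntity.create(objects, "/tmp/packs", object_count=len(objects))
#   info = entity.info(objects[0].binsha)
#   print(info.type, info.size)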