Coverage for /var/srv/projects/api.amasfac.comuna18.com/tmp/venv/lib/python3.9/site-packages/gitdb/db/mem.py: 36%
50 statements
# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
#
# This module is part of GitDB and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
"""Contains the MemoryDatabase implementation"""
from gitdb.db.loose import LooseObjectDB
from gitdb.db.base import (
    ObjectDBR,
    ObjectDBW
)

from gitdb.base import (
    OStream,
    IStream,
)

from gitdb.exc import (
    BadObject,
    UnsupportedOperation
)

from gitdb.stream import (
    ZippedStoreShaWriter,
    DecompressMemMapReader,
)

from io import BytesIO

__all__ = ("MemoryDB", )


class MemoryDB(ObjectDBR, ObjectDBW):

    """A memory database stores everything in memory, providing fast IO and object
    retrieval. It should be used to buffer results and obtain SHAs before writing
    them to the actual physical storage, as it allows you to query whether an object
    already exists in the target storage before performing actual IO."""

    def __init__(self):
        super().__init__()
        self._db = LooseObjectDB("path/doesnt/matter")

        # maps 20 byte shas to their OStream objects
        self._cache = dict()

    def set_ostream(self, stream):
        raise UnsupportedOperation("MemoryDBs always stream into memory")

    def store(self, istream):
        zstream = ZippedStoreShaWriter()
        self._db.set_ostream(zstream)

        istream = self._db.store(istream)
        zstream.close()  # close to flush
        zstream.seek(0)

        # don't provide a size, the stream is written in object format, hence the
        # header needs decompression
        decomp_stream = DecompressMemMapReader(zstream.getvalue(), close_on_deletion=False)
        self._cache[istream.binsha] = OStream(istream.binsha, istream.type, istream.size, decomp_stream)

        return istream

    def has_object(self, sha):
        return sha in self._cache

    def info(self, sha):
        # we always return streams, which are infos as well
        return self.stream(sha)

    def stream(self, sha):
        try:
            ostream = self._cache[sha]
            # rewind stream for the next one to read
            ostream.stream.seek(0)
            return ostream
        except KeyError as e:
            raise BadObject(sha) from e
        # END exception handling

    def size(self):
        return len(self._cache)

    def sha_iter(self):
        return self._cache.keys()

    #{ Interface
    def stream_copy(self, sha_iter, odb):
        """Copy the streams identified by the SHAs yielded by sha_iter into the given odb.
        The streams will be copied directly.
        **Note:** an object will only be written if it did not already exist in the target db.

        :return: number of streams actually copied into odb. If smaller than the number
            of input SHAs, one or more objects already existed in odb"""
        count = 0
        for sha in sha_iter:
            if odb.has_object(sha):
                continue
            # END check object existence

            ostream = self.stream(sha)
            # compressed data including header
            sio = BytesIO(ostream.stream.data())
            istream = IStream(ostream.type, ostream.size, sio, sha)

            odb.store(istream)
            count += 1
        # END for each sha
        return count
    #} END interface
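

# Minimal, illustrative round-trip demo (not part of the original module). It
# assumes gitdb's byte object types (b"blob") and exercises store(),
# has_object(), stream() and the BadObject error path described above.
if __name__ == "__main__":
    data = b"hello, gitdb"

    mdb = MemoryDB()
    # store() compresses via the internal LooseObjectDB, but keeps the result in memory
    istream = mdb.store(IStream(b"blob", len(data), BytesIO(data)))

    assert mdb.has_object(istream.binsha)
    # stream() returns the cached OStream, rewound and ready to read
    assert mdb.stream(istream.binsha).read() == data

    # unknown SHAs raise BadObject
    try:
        mdb.stream(b"\0" * 20)
    except BadObject:
        pass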