# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors
#
# This module is part of GitDB and is released under
# the New BSD License: http://www.opensource.org/licenses/bsd-license.php
from io import BytesIO

import mmap
import os
import sys
import zlib

from gitdb.fun import (
    msb_size,
    stream_copy,
    apply_delta_data,
    connect_deltas,
    delta_types
)

from gitdb.util import (
    allocate_memory,
    LazyMixin,
    make_sha,
    write,
    close,
)

from gitdb.const import NULL_BYTE, BYTE_SPACE
from gitdb.utils.compat import buffer
from gitdb.utils.encoding import force_bytes

has_perf_mod = False
PY26 = sys.version_info[:2] < (2, 7)
try:
    from gitdb_speedups._perf import apply_delta as c_apply_delta
    has_perf_mod = True
except ImportError:
    pass

__all__ = ('DecompressMemMapReader', 'FDCompressedSha1Writer', 'DeltaApplyReader',
           'Sha1Writer', 'FlexibleSha1Writer', 'ZippedStoreShaWriter', 'FDCompressedSha1Writer',
           'FDStream', 'NullStream')

#{ RO Streams

class DecompressMemMapReader(LazyMixin):

    """Reads data in chunks from a memory map and decompresses it. The client sees
    only the uncompressed data; the respective file-like read calls handle on-demand
    buffered decompression accordingly.

    A constraint on the total size of bytes is enforced, simulating
    a logical file within a possibly larger physical memory area.

    To read efficiently, you clearly don't want to read individual bytes; instead,
    read a few kilobytes at least.

    **Note:** The chunk-size should be carefully selected as it will involve quite a bit
        of string copying due to the way zlib is implemented. It's very wasteful,
        hence we try to find a good tradeoff between allocation time and the number of
        times we actually allocate. A custom zlib implementation would be good here
        to better support streamed reading - it would only need to keep the mmap
        and decompress it into chunks, that's all ..."""
    __slots__ = ('_m', '_zip', '_buf', '_buflen', '_br', '_cws', '_cwe', '_s', '_close',
                 '_cbr', '_phi')

    max_read_size = 512 * 1024  # currently unused
    def __init__(self, m, close_on_deletion, size=None):
        """Initialize with mmap for stream reading
        :param m: must be content data - use new if you have object data and no size"""
        self._m = m
        self._zip = zlib.decompressobj()
        self._buf = None                        # buffer of decompressed bytes
        self._buflen = 0                        # length of bytes in buffer
        if size is not None:
            self._s = size                      # size of uncompressed data to read in total
        self._br = 0                            # num uncompressed bytes read
        self._cws = 0                           # start byte of compression window
        self._cwe = 0                           # end byte of compression window
        self._cbr = 0                           # number of compressed bytes read
        self._phi = False                       # is True if we parsed the header info
        self._close = close_on_deletion         # close the memmap on deletion?

    def _set_cache_(self, attr):
        assert attr == '_s'
        # only happens for size, which is a marker to indicate we still
        # have to parse the header from the stream
        self._parse_header_info()

    def __del__(self):
        self.close()

    def _parse_header_info(self):
        """If this stream contains object data, parse the header info and skip the
        stream to a point where each read will yield object content

        :return: parsed type_string, size"""
        # read header
        # should really be enough, cgit uses 8192 I believe
        # And for good reason !! This needs to be that high for the header to be read correctly in all cases
        maxb = 8192
        self._s = maxb
        hdr = self.read(maxb)
        hdrend = hdr.find(NULL_BYTE)
        typ, size = hdr[:hdrend].split(BYTE_SPACE)
        size = int(size)
        self._s = size

        # adjust internal state to match the actual header length that we ignore
        # The buffer will be depleted first on future reads
        self._br = 0
        hdrend += 1
        self._buf = BytesIO(hdr[hdrend:])
        self._buflen = len(hdr) - hdrend

        self._phi = True

        return typ, size
    #{ Interface

    @classmethod
    def new(cls, m, close_on_deletion=False):
        """Create a new DecompressMemMapReader instance for acting as a read-only stream
        This method parses the object header from m and returns the parsed
        type and size, as well as the created stream instance.

        :param m: memory map on which to operate. It must be object data ( header + contents )
        :param close_on_deletion: if True, the memory map will be closed once we are
            being deleted"""
        inst = DecompressMemMapReader(m, close_on_deletion, 0)
        typ, size = inst._parse_header_info()
        return typ, size, inst

    def data(self):
        """:return: random access compatible data we are working on"""
        return self._m

    def close(self):
        """Close our underlying stream of compressed bytes if this was allowed during initialization
        :return: True if we closed the underlying stream
        :note: can be called safely
        """
        if self._close:
            if hasattr(self._m, 'close'):
                self._m.close()
            self._close = False
        # END handle resource freeing
    def compressed_bytes_read(self):
        """
        :return: number of compressed bytes read. This includes the bytes it
            took to decompress the header ( if there was one )"""
        # ABSTRACT: When decompressing a byte stream, it can be that the first
        # x bytes which were requested match the first x bytes in the loosely
        # compressed datastream. This is the worst-case assumption that the reader
        # makes: it assumes that it will get at least X bytes from X compressed bytes
        # in all cases.
        # The caveat is that the object, according to our known uncompressed size,
        # is already complete, but there are still some bytes left in the compressed
        # stream that contribute to the amount of compressed bytes.
        # How can we know that we are truly done, and have read all bytes we need
        # to read?
        # Without help, we cannot know, as we need to obtain the status of the
        # decompression. If it is not finished, we need to decompress more data
        # until it is finished, to yield the actual number of compressed bytes
        # belonging to the decompressed object.
        # We are using a custom zlib module for this; if it's not present,
        # we try to put in additional bytes up for decompression if feasible
        # and check for the unused_data.

        # Only scrub the stream forward if we are officially done with the
        # bytes we were to have.
        if self._br == self._s and not self._zip.unused_data:
            # manipulate the bytes-read to allow our own read method to continue
            # but keep the window at its current position
            self._br = 0
            if hasattr(self._zip, 'status'):
                while self._zip.status == zlib.Z_OK:
                    self.read(mmap.PAGESIZE)
                # END scrub-loop custom zlib
            else:
                # pass in additional pages, until we have unused data
                while not self._zip.unused_data and self._cbr != len(self._m):
                    self.read(mmap.PAGESIZE)
                # END scrub-loop default zlib
            # END handle stream scrubbing

            # reset bytes read, just to be sure
            self._br = self._s
        # END handle stream scrubbing

        # unused data ends up in the unconsumed tail, which was removed
        # from the count already
        return self._cbr

    #} END interface
    def seek(self, offset, whence=getattr(os, 'SEEK_SET', 0)):
        """Allows resetting the stream to restart reading
        :raise ValueError: If offset and whence are not 0"""
        if offset != 0 or whence != getattr(os, 'SEEK_SET', 0):
            raise ValueError("Can only seek to position 0")
        # END handle offset

        self._zip = zlib.decompressobj()
        self._br = self._cws = self._cwe = self._cbr = 0
        if self._phi:
            self._phi = False
            del(self._s)        # trigger header parsing on first access
        # END skip header
    def read(self, size=-1):
        if size < 1:
            size = self._s - self._br
        else:
            size = min(size, self._s - self._br)
        # END clamp size

        if size == 0:
            return bytes()
        # END handle depletion

        # deplete the buffer, then just continue using the decompress object
        # which has an own buffer. We just need this to transparently parse the
        # header from the zlib stream
        dat = bytes()
        if self._buf:
            if self._buflen >= size:
                # have enough data
                dat = self._buf.read(size)
                self._buflen -= size
                self._br += size
                return dat
            else:
                dat = self._buf.read()      # ouch, duplicates data
                size -= self._buflen
                self._br += self._buflen

                self._buflen = 0
                self._buf = None
            # END handle buffer len
        # END handle buffer

        # decompress some data
        # Abstract: zlib needs to operate on chunks of our memory map ( which may
        # be large ), as it will otherwise and always fill in the 'unconsumed_tail'
        # attribute, which possibly reads our whole map to the end, forcing
        # everything to be read from disk even though just a portion was requested.
        # As this would be a no-go, we work around it by passing only chunks of data,
        # moving the window into the memory map along as we decompress, which keeps
        # the tail smaller than our chunk-size. This causes 'only' the chunk to be
        # copied once, and another copy of a part of it when it creates the unconsumed
        # tail. We have to use it to hand in the appropriate amount of bytes during
        # the next read.
        tail = self._zip.unconsumed_tail
        if tail:
            # move the window, make it as large as size demands. For code-clarity,
            # we just take the chunk from our map again instead of reusing the unconsumed
            # tail. The latter would save some memory copying, but we could end up
            # with not getting enough data uncompressed, so we would have to sort that out as well.
            # Now we just assume the worst case, hence the data is uncompressed and the window
            # needs to be as large as the uncompressed bytes we want to read.
            self._cws = self._cwe - len(tail)
            self._cwe = self._cws + size
        else:
            cws = self._cws
            self._cws = self._cwe
            self._cwe = cws + size
        # END handle tail

        # if window is too small, make it larger so zip can decompress something
        if self._cwe - self._cws < 8:
            self._cwe = self._cws + 8
        # END adjust winsize

        # takes a slice, but doesn't copy the data, it says ...
        indata = buffer(self._m, self._cws, self._cwe - self._cws)

        # get the actual window end to be sure we don't use it for computations
        self._cwe = self._cws + len(indata)
        dcompdat = self._zip.decompress(indata, size)
        # update the amount of compressed bytes read
        # We feed possibly overlapping chunks, which is why the unconsumed tail
        # has to be taken into consideration, as well as the unused data
        # if we hit the end of the stream
        # NOTE: Behavior changed in PY2.7 onward, which requires special handling to make the tests work properly.
        # They are thorough, and I assume it is truly working.
        # Why is this logic as convoluted as it is? Please look at the table in
        # https://github.com/gitpython-developers/gitdb/issues/19 to learn about the test-results.
        # Basically, on py2.6, you want to use branch 1, whereas on all other python versions, the second branch
        # will be the one that works.
        # However, the zlib VERSIONs as well as the platform check are used to further match the entries in the
        # table in the github issue. This is it ... it was the only way I could make this work everywhere.
        # IT'S CERTAINLY GOING TO BITE US IN THE FUTURE ... .
        if PY26 or ((zlib.ZLIB_VERSION == '1.2.7' or zlib.ZLIB_VERSION == '1.2.5') and not sys.platform == 'darwin'):
            unused_datalen = len(self._zip.unconsumed_tail)
        else:
            unused_datalen = len(self._zip.unconsumed_tail) + len(self._zip.unused_data)
        # END handle very special case ...

        self._cbr += len(indata) - unused_datalen
        self._br += len(dcompdat)

        if dat:
            dcompdat = dat + dcompdat
        # END prepend our cached data

        # it can happen, depending on the compression, that we get fewer bytes
        # than ordered as it needs the final portion of the data as well.
        # Recursively resolve that.
        # Note: dcompdat can be empty even though we still appear to have bytes
        # to read, if we are called by compressed_bytes_read - it manipulates
        # us to empty the stream
        if dcompdat and (len(dcompdat) - len(dat)) < size and self._br < self._s:
            dcompdat += self.read(size - len(dcompdat))
        # END handle special case
        return dcompdat
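

# The following is an illustrative sketch, not part of the gitdb API: it shows how the
# read-only stream above is typically obtained from a zlib-compressed loose object.
# It assumes that any buffer supporting len() and slicing (such as a bytes object) can
# stand in for the memory map, since the reader only slices it and optionally closes it.
def _example_read_loose_object():
    # build a git-style loose object: "<type> <size>\0<content>", zlib-compressed
    content = b"hello"
    raw = b"blob " + str(len(content)).encode('ascii') + NULL_BYTE + content
    compressed = zlib.compress(raw)

    # parse the header and obtain a read-only stream over the decompressed content
    typ, size, stream = DecompressMemMapReader.new(compressed)
    assert typ == b"blob" and size == len(content)
    assert stream.read() == content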


class DeltaApplyReader(LazyMixin):

    """A reader which dynamically applies pack deltas to a base object, keeping the
    memory demands to a minimum.

    The size of the final object is only obtainable once all deltas have been
    applied, unless it is retrieved from a pack index.

    The uncompressed Delta has the following layout (MSB being a most significant
    bit encoded dynamic size):

    * MSB Source Size - the size of the base against which the delta was created
    * MSB Target Size - the size of the resulting data after the delta was applied
    * A list of one byte commands (cmd) which are followed by a specific protocol:

     * cmd & 0x80 - copy delta_data[offset:offset+size]

      * Followed by an encoded offset into the delta data
      * Followed by an encoded size of the chunk to copy

     * cmd & 0x7f - insert

      * insert cmd bytes from the delta buffer into the output stream

     * cmd == 0 - invalid operation ( or error in delta stream )
    """
    __slots__ = (
        "_bstream",             # base stream to which to apply the deltas
        "_dstreams",            # tuple of delta stream readers
        "_mm_target",           # memory map of the delta-applied data
        "_size",                # actual number of bytes in _mm_target
        "_br"                   # number of bytes read
    )

    #{ Configuration
    k_max_memory_move = 250 * 1000 * 1000
    #} END configuration

    def __init__(self, stream_list):
        """Initialize this instance with a list of streams, the first stream being
        the delta to apply on top of all following deltas, the last stream being the
        base object onto which to apply the deltas"""
        assert len(stream_list) > 1, "Need at least one delta and one base stream"

        self._bstream = stream_list[-1]
        self._dstreams = tuple(stream_list[:-1])
        self._br = 0

    def _set_cache_too_slow_without_c(self, attr):
        # the direct algorithm is fastest and most direct if there is only one
        # delta. Also, the extra overhead might not be worth it for items smaller
        # than X - definitely the case in python, every function call costs
        # huge amounts of time
        # if len(self._dstreams) * self._bstream.size < self.k_max_memory_move:
        if len(self._dstreams) == 1:
            return self._set_cache_brute_(attr)

        # Aggregate all deltas into one delta in reverse order. Hence we take
        # the last delta, and reverse-merge its ancestor delta, until we receive
        # the final delta data stream.
        dcl = connect_deltas(self._dstreams)

        # call len directly, as the (optional) c version doesn't implement the sequence
        # protocol
        if dcl.rbound() == 0:
            self._size = 0
            self._mm_target = allocate_memory(0)
            return
        # END handle empty list

        self._size = dcl.rbound()
        self._mm_target = allocate_memory(self._size)

        bbuf = allocate_memory(self._bstream.size)
        stream_copy(self._bstream.read, bbuf.write, self._bstream.size, 256 * mmap.PAGESIZE)

        # APPLY CHUNKS
        write = self._mm_target.write
        dcl.apply(bbuf, write)

        self._mm_target.seek(0)
    def _set_cache_brute_(self, attr):
        """If we are here, we apply the actual deltas"""
        # TODO: There should be a special case if there is only one stream
        # Then the default-git algorithm should perform a tad faster, as the
        # delta is not peeked into, causing less overhead.
        buffer_info_list = list()
        max_target_size = 0
        for dstream in self._dstreams:
            buf = dstream.read(512)     # read the header information + X
            offset, src_size = msb_size(buf)
            offset, target_size = msb_size(buf, offset)
            buffer_info_list.append((buffer(buf, offset), offset, src_size, target_size))
            max_target_size = max(max_target_size, target_size)
        # END for each delta stream

        # sanity check - the first delta to apply should have the same source
        # size as our actual base stream
        base_size = self._bstream.size
        target_size = max_target_size

        # if we have more than 1 delta to apply, we will swap buffers, hence we must
        # assure that all buffers we use are large enough to hold all the results
        if len(self._dstreams) > 1:
            base_size = target_size = max(base_size, max_target_size)
        # END adjust buffer sizes

        # Allocate a private memory map big enough to hold the first base buffer
        # We need random access to it
        bbuf = allocate_memory(base_size)
        stream_copy(self._bstream.read, bbuf.write, base_size, 256 * mmap.PAGESIZE)

        # allocate a memory map large enough for the largest (intermediate) target
        # We will use it as scratch space for all delta ops. If the final
        # target buffer is smaller than our allocated space, we just use parts
        # of it upon return.
        tbuf = allocate_memory(target_size)

        # for each delta to apply, memory map the decompressed delta and
        # work on the op-codes to reconstruct everything.
        # For the actual copying, we use a seek and write pattern of buffer
        # slices.
        final_target_size = None
        for (dbuf, offset, src_size, target_size), dstream in zip(reversed(buffer_info_list), reversed(self._dstreams)):
            # allocate a buffer to hold all delta data - fill in the data for
            # fast access. We do this as we know that reading individual bytes
            # from our stream would be slower than necessary ( although possible )
            # The dbuf buffer contains commands after the first two MSB sizes, the
            # offset specifies the amount of bytes read to get the sizes.
            ddata = allocate_memory(dstream.size - offset)
            ddata.write(dbuf)
            # read the rest from the stream. The size we give is larger than necessary
            stream_copy(dstream.read, ddata.write, dstream.size, 256 * mmap.PAGESIZE)

            #######################################################################
            if 'c_apply_delta' in globals():
                c_apply_delta(bbuf, ddata, tbuf)
            else:
                apply_delta_data(bbuf, src_size, ddata, len(ddata), tbuf.write)
            #######################################################################

            # finally, swap out source and target buffers. The target is now the
            # base for the next delta to apply
            bbuf, tbuf = tbuf, bbuf
            bbuf.seek(0)
            tbuf.seek(0)
            final_target_size = target_size
        # END for each delta to apply

        # it has already been seeked to 0, constrain it to the actual size
        # NOTE: at the end of the loop the buffers are swapped, hence our target buffer
        # is not tbuf, but bbuf!
        self._mm_target = bbuf
        self._size = final_target_size
    #{ Configuration
    if not has_perf_mod:
        _set_cache_ = _set_cache_brute_
    else:
        _set_cache_ = _set_cache_too_slow_without_c
    #} END configuration

    def read(self, count=0):
        bl = self._size - self._br      # bytes left
        if count < 1 or count > bl:
            count = bl
        # NOTE: we could check for certain size limits, and possibly
        # return buffers instead of strings to prevent byte copying
        data = self._mm_target.read(count)
        self._br += len(data)
        return data

    def seek(self, offset, whence=getattr(os, 'SEEK_SET', 0)):
        """Allows resetting the stream to restart reading
        :raise ValueError: If offset and whence are not 0"""
        if offset != 0 or whence != getattr(os, 'SEEK_SET', 0):
            raise ValueError("Can only seek to position 0")
        # END handle offset
        self._br = 0
        self._mm_target.seek(0)
    #{ Interface

    @classmethod
    def new(cls, stream_list):
        """
        Convert the given list of streams into a stream which resolves deltas
        when reading from it.

        :param stream_list: two or more stream objects, first stream is a Delta
            to the object that you want to resolve, followed by N additional delta
            streams. The list's last stream must be a non-delta stream.

        :return: Non-Delta OPackStream object whose stream can be used to obtain
            the decompressed resolved data
        :raise ValueError: if the stream list cannot be handled"""
        if len(stream_list) < 2:
            raise ValueError("Need at least two streams")
        # END single object special handling

        if stream_list[-1].type_id in delta_types:
            raise ValueError(
                "Cannot resolve deltas if there is no base object stream, last one was type: %s" % stream_list[-1].type)
        # END check stream
        return cls(stream_list)

    #} END interface

    #{ OInfo like Interface

    @property
    def type(self):
        return self._bstream.type

    @property
    def type_id(self):
        return self._bstream.type_id

    @property
    def size(self):
        """:return: number of uncompressed bytes in the stream"""
        return self._size

    #} END oinfo like interface
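

# Illustrative sketch, not part of the gitdb API: given a list of streams collected from a
# pack (deltas first, the non-delta base stream last, as described in DeltaApplyReader.new),
# this shows how the resolved object content would typically be obtained. The 'stream_list'
# argument is assumed to come from pack-reading code elsewhere in gitdb.
def _example_resolve_deltas(stream_list):
    reader = DeltaApplyReader.new(stream_list)
    # type and type_id mirror the base stream; size and the data reflect the resolved object
    out = BytesIO()
    while True:
        chunk = reader.read(4096)
        if not chunk:
            break
        out.write(chunk)
    return reader.type, reader.size, out.getvalue()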

#} END RO streams


#{ W Streams

class Sha1Writer(object):

    """Simple stream writer which produces a sha whenever you like as it digests
    everything it is supposed to write"""
    __slots__ = "sha1"

    def __init__(self):
        self.sha1 = make_sha()

    #{ Stream Interface

    def write(self, data):
        """:raise IOError: If not all bytes could be written
        :param data: byte object
        :return: length of incoming data"""
        self.sha1.update(data)
        return len(data)

    # END stream interface

    #{ Interface

    def sha(self, as_hex=False):
        """:return: sha so far
        :param as_hex: if True, sha will be hex-encoded, binary otherwise"""
        if as_hex:
            return self.sha1.hexdigest()
        return self.sha1.digest()

    #} END interface
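

# Illustrative sketch, not part of the gitdb API: Sha1Writer (and FlexibleSha1Writer below)
# can be used as a drop-in write target to compute the SHA1 of everything streamed through it.
def _example_sha1_of_chunks(chunks):
    writer = Sha1Writer()
    for chunk in chunks:
        writer.write(chunk)
    # binary digest by default, hex string with as_hex=True
    return writer.sha(as_hex=True)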


class FlexibleSha1Writer(Sha1Writer):

    """Writer producing a sha1 while passing on the written bytes to the given
    write function"""
    __slots__ = 'writer'

    def __init__(self, writer):
        Sha1Writer.__init__(self)
        self.writer = writer

    def write(self, data):
        Sha1Writer.write(self, data)
        self.writer(data)


class ZippedStoreShaWriter(Sha1Writer):

    """Remembers everything someone writes to it and generates a sha"""
    __slots__ = ('buf', 'zip')

    def __init__(self):
        Sha1Writer.__init__(self)
        self.buf = BytesIO()
        self.zip = zlib.compressobj(zlib.Z_BEST_SPEED)

    def __getattr__(self, attr):
        return getattr(self.buf, attr)

    def write(self, data):
        alen = Sha1Writer.write(self, data)
        self.buf.write(self.zip.compress(data))

        return alen

    def close(self):
        self.buf.write(self.zip.flush())

    def seek(self, offset, whence=getattr(os, 'SEEK_SET', 0)):
        """Seeking currently only supports rewinding to the start of the written data;
        multiple writes are not supported"""
        if offset != 0 or whence != getattr(os, 'SEEK_SET', 0):
            raise ValueError("Can only seek to position 0")
        # END handle offset
        self.buf.seek(0)

    def getvalue(self):
        """:return: string value from the current stream position to the end"""
        return self.buf.getvalue()
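

# Illustrative sketch, not part of the gitdb API: ZippedStoreShaWriter keeps a zlib-compressed
# copy of everything written to it while hashing the uncompressed bytes, which matches the
# shape in which loose objects are stored. The round-trip below checks that by inflating
# the buffer again.
def _example_zipped_store(data):
    writer = ZippedStoreShaWriter()
    writer.write(data)
    writer.close()                      # flushes the compressor
    assert zlib.decompress(writer.getvalue()) == data
    return writer.sha(as_hex=True), writer.getvalue()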


class FDCompressedSha1Writer(Sha1Writer):

    """Digests data written to it, making the sha available, then compresses the
    data and writes it to the file descriptor

    **Note:** operates on raw file descriptors
    **Note:** for this to work, you have to use the close-method of this instance"""
    __slots__ = ("fd", "sha1", "zip")

    # default exception
    exc = IOError("Failed to write all bytes to filedescriptor")

    def __init__(self, fd):
        super(FDCompressedSha1Writer, self).__init__()
        self.fd = fd
        self.zip = zlib.compressobj(zlib.Z_BEST_SPEED)

    #{ Stream Interface

    def write(self, data):
        """:raise IOError: If not all bytes could be written
        :return: length of incoming data"""
        self.sha1.update(data)
        cdata = self.zip.compress(data)
        bytes_written = write(self.fd, cdata)

        if bytes_written != len(cdata):
            raise self.exc

        return len(data)

    def close(self):
        remainder = self.zip.flush()
        if write(self.fd, remainder) != len(remainder):
            raise self.exc
        return close(self.fd)

    #} END stream interface
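

# Illustrative sketch, not part of the gitdb API: FDCompressedSha1Writer streams compressed
# data straight to a raw file descriptor while hashing the uncompressed input. The target
# path is supplied by the caller and purely hypothetical; note that close() must be called
# so the compressor is flushed and the descriptor is released.
def _example_write_compressed(path, data):
    fd = os.open(path, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o644)
    writer = FDCompressedSha1Writer(fd)
    try:
        writer.write(data)
    finally:
        writer.close()                  # flushes the zlib remainder and closes fd
    return writer.sha(as_hex=True)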


class FDStream(object):

    """A simple wrapper providing the most basic functions on a file descriptor
    with the fileobject interface. Cannot use os.fdopen as the resulting stream
    takes ownership"""
    __slots__ = ("_fd", '_pos')

    def __init__(self, fd):
        self._fd = fd
        self._pos = 0

    def write(self, data):
        self._pos += len(data)
        os.write(self._fd, data)

    def read(self, count=0):
        if count == 0:
            # read everything - determine the size from the file descriptor itself
            count = os.fstat(self._fd).st_size
        # END handle read everything

        bytes = os.read(self._fd, count)
        self._pos += len(bytes)
        return bytes

    def fileno(self):
        return self._fd

    def tell(self):
        return self._pos

    def close(self):
        close(self._fd)


class NullStream(object):

    """A stream that does nothing but provide a stream interface.
    Use it like /dev/null"""
    __slots__ = tuple()

    def read(self, size=0):
        return ''

    def close(self):
        pass

    def write(self, data):
        return len(data)


#} END W streams