mirror of
https://github.com/python/cpython.git
synced 2025-08-03 16:39:00 +00:00
Issue #5863: Rewrite BZ2File in pure Python, and allow it to accept
file-like objects using a new `fileobj` constructor argument. Patch by Nadeem Vawda.
This commit is contained in:
parent
0f535013c5
commit
37dc5f85b8
11 changed files with 1201 additions and 2339 deletions
392
Lib/bz2.py
Normal file
392
Lib/bz2.py
Normal file
|
@ -0,0 +1,392 @@
|
|||
"""Interface to the libbzip2 compression library.

This module provides a file interface, classes for incremental
(de)compression, and functions for one-shot (de)compression.
"""

__all__ = ["BZ2File", "BZ2Compressor", "BZ2Decompressor", "compress",
           "decompress"]

__author__ = "Nadeem Vawda <nadeem.vawda@gmail.com>"

import io
import threading
import warnings

# The low-level (de)compression primitives are implemented in C.
from _bz2 import BZ2Compressor, BZ2Decompressor
# Internal states of a BZ2File object.
_MODE_CLOSED = 0
_MODE_READ = 1
_MODE_READ_EOF = 2  # Read mode, and the end-of-stream marker was reached.
_MODE_WRITE = 3

# Number of compressed bytes read from the underlying file per buffer refill.
_BUFFER_SIZE = 8192


class BZ2File(io.BufferedIOBase):

    """A file object providing transparent bzip2 (de)compression.

    A BZ2File can act as a wrapper for an existing file object, or refer
    directly to a named file on disk.

    Note that BZ2File provides a *binary* file interface - data read is
    returned as bytes, and data to be written should be given as bytes.
    """

    def __init__(self, filename=None, mode="r", buffering=None,
                 compresslevel=9, fileobj=None):
        """Open a bzip2-compressed file.

        If filename is given, open the named file. Otherwise, operate on
        the file object given by fileobj. Exactly one of these two
        parameters should be provided.

        mode can be 'r' for reading (default), or 'w' for writing.

        buffering is ignored. Its use is deprecated.

        If mode is 'w', compresslevel can be a number between 1 and 9
        specifying the level of compression: 1 produces the least
        compression, and 9 (default) produces the most compression.
        """
        # This lock must be recursive, so that BufferedIOBase's
        # readline(), readlines() and writelines() don't deadlock.
        self._lock = threading.RLock()
        self._fp = None
        self._closefp = False  # True when we opened _fp ourselves and must close it.
        self._mode = _MODE_CLOSED
        self._pos = 0    # Current position in the *uncompressed* stream.
        self._size = -1  # Uncompressed size; unknown (-1) until EOF is reached.

        if buffering is not None:
            warnings.warn("Use of 'buffering' argument is deprecated",
                          DeprecationWarning)

        if not (1 <= compresslevel <= 9):
            raise ValueError("compresslevel must be between 1 and 9")

        if mode in ("", "r", "rb"):
            mode = "rb"
            mode_code = _MODE_READ
            self._decompressor = BZ2Decompressor()
            self._buffer = None  # Readahead buffer of decompressed bytes.
        elif mode in ("w", "wb"):
            mode = "wb"
            mode_code = _MODE_WRITE
            self._compressor = BZ2Compressor()
        else:
            raise ValueError("Invalid mode: {!r}".format(mode))

        if filename is not None and fileobj is None:
            self._fp = open(filename, mode)
            self._closefp = True
            self._mode = mode_code
        elif fileobj is not None and filename is None:
            self._fp = fileobj
            self._mode = mode_code
        else:
            raise ValueError("Must give exactly one of filename and fileobj")

    def close(self):
        """Flush and close the file.

        May be called more than once without error. Once the file is
        closed, any other operation on it will raise a ValueError.
        """
        with self._lock:
            if self._mode == _MODE_CLOSED:
                return
            try:
                if self._mode in (_MODE_READ, _MODE_READ_EOF):
                    self._decompressor = None
                elif self._mode == _MODE_WRITE:
                    # Flush any data still buffered inside the compressor.
                    self._fp.write(self._compressor.flush())
                    self._compressor = None
            finally:
                # Close the underlying file only if we opened it ourselves,
                # and always drop our references regardless of errors.
                try:
                    if self._closefp:
                        self._fp.close()
                finally:
                    self._fp = None
                    self._closefp = False
                    self._mode = _MODE_CLOSED
                    self._buffer = None

    @property
    def closed(self):
        """True if this file is closed."""
        return self._mode == _MODE_CLOSED

    def fileno(self):
        """Return the file descriptor for the underlying file."""
        return self._fp.fileno()

    def seekable(self):
        """Return whether the file supports seeking."""
        # Seeking is emulated by rewinding and re-reading, so it is only
        # possible on files opened for reading.
        return self.readable()

    def readable(self):
        """Return whether the file was opened for reading."""
        return self._mode in (_MODE_READ, _MODE_READ_EOF)

    def writable(self):
        """Return whether the file was opened for writing."""
        return self._mode == _MODE_WRITE

    # Mode-checking helper functions.

    def _check_not_closed(self):
        if self.closed:
            raise ValueError("I/O operation on closed file")

    def _check_can_read(self):
        if not self.readable():
            self._check_not_closed()
            raise io.UnsupportedOperation("File not open for reading")

    def _check_can_write(self):
        if not self.writable():
            self._check_not_closed()
            raise io.UnsupportedOperation("File not open for writing")

    def _check_can_seek(self):
        if not self.seekable():
            self._check_not_closed()
            # Fixed wording: was "on files opening for reading".
            raise io.UnsupportedOperation("Seeking is only supported "
                                          "on files open for reading")

    # Fill the readahead buffer if it is empty. Returns False on EOF.
    def _fill_buffer(self):
        if self._buffer:
            return True
        if self._decompressor.eof:
            self._mode = _MODE_READ_EOF
            self._size = self._pos
            return False
        rawblock = self._fp.read(_BUFFER_SIZE)
        if not rawblock:
            # The underlying file ran out of data before the bzip2
            # end-of-stream marker appeared: the file is truncated.
            raise EOFError("Compressed file ended before the "
                           "end-of-stream marker was reached")
        self._buffer = self._decompressor.decompress(rawblock)
        return True

    # Read data until EOF.
    # If return_data is false, consume the data without returning it.
    def _read_all(self, return_data=True):
        blocks = []
        while self._fill_buffer():
            if return_data:
                blocks.append(self._buffer)
            self._pos += len(self._buffer)
            self._buffer = None
        if return_data:
            return b"".join(blocks)

    # Read a block of up to n bytes.
    # If return_data is false, consume the data without returning it.
    def _read_block(self, n, return_data=True):
        blocks = []
        while n > 0 and self._fill_buffer():
            if n < len(self._buffer):
                data = self._buffer[:n]
                self._buffer = self._buffer[n:]
            else:
                data = self._buffer
                self._buffer = None
            if return_data:
                blocks.append(data)
            self._pos += len(data)
            n -= len(data)
        if return_data:
            return b"".join(blocks)

    def peek(self, n=0):
        """Return buffered data without advancing the file position.

        Always returns at least one byte of data, unless at EOF.
        The exact number of bytes returned is unspecified.
        """
        with self._lock:
            self._check_can_read()
            if self._mode == _MODE_READ_EOF or not self._fill_buffer():
                return b""
            return self._buffer

    def read(self, size=-1):
        """Read up to size uncompressed bytes from the file.

        If size is negative or omitted, read until EOF is reached.
        Returns b'' if the file is already at EOF.
        """
        with self._lock:
            self._check_can_read()
            if self._mode == _MODE_READ_EOF or size == 0:
                return b""
            elif size < 0:
                return self._read_all()
            else:
                return self._read_block(size)

    def read1(self, size=-1):
        """Read up to size uncompressed bytes with at most one read
        from the underlying stream.

        Returns b'' if the file is at EOF.
        """
        with self._lock:
            self._check_can_read()
            if (size == 0 or self._mode == _MODE_READ_EOF or
                not self._fill_buffer()):
                return b""
            if 0 < size < len(self._buffer):
                data = self._buffer[:size]
                self._buffer = self._buffer[size:]
            else:
                data = self._buffer
                self._buffer = None
            self._pos += len(data)
            return data

    def readinto(self, b):
        """Read up to len(b) bytes into b.

        Returns the number of bytes read (0 for EOF).
        """
        with self._lock:
            return io.BufferedIOBase.readinto(self, b)

    def readline(self, size=-1):
        """Read a line of uncompressed bytes from the file.

        The terminating newline (if present) is retained. If size is
        non-negative, no more than size bytes will be read (in which
        case the line may be incomplete). Returns b'' if already at EOF.
        """
        if not hasattr(size, "__index__"):
            raise TypeError("Integer argument expected")
        size = size.__index__()
        with self._lock:
            return io.BufferedIOBase.readline(self, size)

    def readlines(self, size=-1):
        """Read a list of lines of uncompressed bytes from the file.

        size can be specified to control the number of lines read: no
        further lines will be read once the total size of the lines read
        so far equals or exceeds size.
        """
        if not hasattr(size, "__index__"):
            raise TypeError("Integer argument expected")
        size = size.__index__()
        with self._lock:
            return io.BufferedIOBase.readlines(self, size)

    def write(self, data):
        """Write a byte string to the file.

        Returns the number of uncompressed bytes written, which is
        always len(data). Note that due to buffering, the file on disk
        may not reflect the data written until close() is called.
        """
        with self._lock:
            self._check_can_write()
            compressed = self._compressor.compress(data)
            self._fp.write(compressed)
            self._pos += len(data)
            return len(data)

    def writelines(self, seq):
        """Write a sequence of byte strings to the file.

        Returns the number of uncompressed bytes written.
        seq can be any iterable yielding byte strings.

        Line separators are not added between the written byte strings.
        """
        with self._lock:
            return io.BufferedIOBase.writelines(self, seq)

    # Rewind the file to the beginning of the data stream.
    def _rewind(self):
        self._fp.seek(0, 0)
        self._mode = _MODE_READ
        self._pos = 0
        self._decompressor = BZ2Decompressor()
        self._buffer = None

    def seek(self, offset, whence=0):
        """Change the file position.

        The new position is specified by offset, relative to the
        position indicated by whence. Values for whence are:

            0: start of stream (default); offset must not be negative
            1: current stream position
            2: end of stream; offset must not be positive

        Returns the new file position.

        Note that seeking is emulated, so depending on the parameters,
        this operation may be extremely slow.
        """
        with self._lock:
            self._check_can_seek()

            # Recalculate offset as an absolute file position.
            if whence == 0:
                pass
            elif whence == 1:
                offset = self._pos + offset
            elif whence == 2:
                # Seeking relative to EOF - we need to know the file's size.
                if self._size < 0:
                    self._read_all(return_data=False)
                offset = self._size + offset
            else:
                raise ValueError("Invalid value for whence: {}".format(whence))

            # Make it so that offset is the number of bytes to skip forward.
            if offset < self._pos:
                self._rewind()
            else:
                offset -= self._pos

            # Read and discard data until we reach the desired position.
            if self._mode != _MODE_READ_EOF:
                self._read_block(offset, return_data=False)

            return self._pos

    def tell(self):
        """Return the current file position."""
        with self._lock:
            self._check_not_closed()
            return self._pos
def compress(data, compresslevel=9):
    """Compress a block of data.

    compresslevel, if given, must be a number between 1 and 9.

    For incremental compression, use a BZ2Compressor object instead.
    """
    compressor = BZ2Compressor(compresslevel)
    chunks = [compressor.compress(data), compressor.flush()]
    return b"".join(chunks)
def decompress(data):
    """Decompress a block of data.

    If data is the concatenation of multiple bzip2 streams, decompress
    all of them and return the concatenated result.

    For incremental decompression, use a BZ2Decompressor object instead.
    """
    results = []
    # Loop to support concatenated streams: after one stream's
    # end-of-stream marker, any remaining bytes are exposed by the
    # decompressor as unused_data and are decompressed in turn.
    # An empty input skips the loop and returns b"", as before.
    while data:
        decomp = BZ2Decompressor()
        results.append(decomp.decompress(data))
        if not decomp.eof:
            raise ValueError("Compressed data ended before the "
                             "end-of-stream marker was reached")
        data = decomp.unused_data
    return b"".join(results)
@ -21,7 +21,30 @@ has_cmdline_bunzip2 = sys.platform not in ("win32", "os2emx")
|
|||
|
||||
class BaseTest(unittest.TestCase):
|
||||
"Base for other testcases."
|
||||
TEXT = b'root:x:0:0:root:/root:/bin/bash\nbin:x:1:1:bin:/bin:\ndaemon:x:2:2:daemon:/sbin:\nadm:x:3:4:adm:/var/adm:\nlp:x:4:7:lp:/var/spool/lpd:\nsync:x:5:0:sync:/sbin:/bin/sync\nshutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\nhalt:x:7:0:halt:/sbin:/sbin/halt\nmail:x:8:12:mail:/var/spool/mail:\nnews:x:9:13:news:/var/spool/news:\nuucp:x:10:14:uucp:/var/spool/uucp:\noperator:x:11:0:operator:/root:\ngames:x:12:100:games:/usr/games:\ngopher:x:13:30:gopher:/usr/lib/gopher-data:\nftp:x:14:50:FTP User:/var/ftp:/bin/bash\nnobody:x:65534:65534:Nobody:/home:\npostfix:x:100:101:postfix:/var/spool/postfix:\nniemeyer:x:500:500::/home/niemeyer:/bin/bash\npostgres:x:101:102:PostgreSQL Server:/var/lib/pgsql:/bin/bash\nmysql:x:102:103:MySQL server:/var/lib/mysql:/bin/bash\nwww:x:103:104::/var/www:/bin/false\n'
|
||||
TEXT_LINES = [
|
||||
b'root:x:0:0:root:/root:/bin/bash\n',
|
||||
b'bin:x:1:1:bin:/bin:\n',
|
||||
b'daemon:x:2:2:daemon:/sbin:\n',
|
||||
b'adm:x:3:4:adm:/var/adm:\n',
|
||||
b'lp:x:4:7:lp:/var/spool/lpd:\n',
|
||||
b'sync:x:5:0:sync:/sbin:/bin/sync\n',
|
||||
b'shutdown:x:6:0:shutdown:/sbin:/sbin/shutdown\n',
|
||||
b'halt:x:7:0:halt:/sbin:/sbin/halt\n',
|
||||
b'mail:x:8:12:mail:/var/spool/mail:\n',
|
||||
b'news:x:9:13:news:/var/spool/news:\n',
|
||||
b'uucp:x:10:14:uucp:/var/spool/uucp:\n',
|
||||
b'operator:x:11:0:operator:/root:\n',
|
||||
b'games:x:12:100:games:/usr/games:\n',
|
||||
b'gopher:x:13:30:gopher:/usr/lib/gopher-data:\n',
|
||||
b'ftp:x:14:50:FTP User:/var/ftp:/bin/bash\n',
|
||||
b'nobody:x:65534:65534:Nobody:/home:\n',
|
||||
b'postfix:x:100:101:postfix:/var/spool/postfix:\n',
|
||||
b'niemeyer:x:500:500::/home/niemeyer:/bin/bash\n',
|
||||
b'postgres:x:101:102:PostgreSQL Server:/var/lib/pgsql:/bin/bash\n',
|
||||
b'mysql:x:102:103:MySQL server:/var/lib/mysql:/bin/bash\n',
|
||||
b'www:x:103:104::/var/www:/bin/false\n',
|
||||
]
|
||||
TEXT = b''.join(TEXT_LINES)
|
||||
DATA = b'BZh91AY&SY.\xc8N\x18\x00\x01>_\x80\x00\x10@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe00\x01\x99\xaa\x00\xc0\x03F\x86\x8c#&\x83F\x9a\x03\x06\xa6\xd0\xa6\x93M\x0fQ\xa7\xa8\x06\x804hh\x12$\x11\xa4i4\xf14S\xd2<Q\xb5\x0fH\xd3\xd4\xdd\xd5\x87\xbb\xf8\x94\r\x8f\xafI\x12\xe1\xc9\xf8/E\x00pu\x89\x12]\xc9\xbbDL\nQ\x0e\t1\x12\xdf\xa0\xc0\x97\xac2O9\x89\x13\x94\x0e\x1c7\x0ed\x95I\x0c\xaaJ\xa4\x18L\x10\x05#\x9c\xaf\xba\xbc/\x97\x8a#C\xc8\xe1\x8cW\xf9\xe2\xd0\xd6M\xa7\x8bXa<e\x84t\xcbL\xb3\xa7\xd9\xcd\xd1\xcb\x84.\xaf\xb3\xab\xab\xad`n}\xa0lh\tE,\x8eZ\x15\x17VH>\x88\xe5\xcd9gd6\x0b\n\xe9\x9b\xd5\x8a\x99\xf7\x08.K\x8ev\xfb\xf7xw\xbb\xdf\xa1\x92\xf1\xdd|/";\xa2\xba\x9f\xd5\xb1#A\xb6\xf6\xb3o\xc9\xc5y\\\xebO\xe7\x85\x9a\xbc\xb6f8\x952\xd5\xd7"%\x89>V,\xf7\xa6z\xe2\x9f\xa3\xdf\x11\x11"\xd6E)I\xa9\x13^\xca\xf3r\xd0\x03U\x922\xf26\xec\xb6\xed\x8b\xc3U\x13\x9d\xc5\x170\xa4\xfa^\x92\xacDF\x8a\x97\xd6\x19\xfe\xdd\xb8\xbd\x1a\x9a\x19\xa3\x80ankR\x8b\xe5\xd83]\xa9\xc6\x08\x82f\xf6\xb9"6l$\xb8j@\xc0\x8a\xb0l1..\xbak\x83ls\x15\xbc\xf4\xc1\x13\xbe\xf8E\xb8\x9d\r\xa8\x9dk\x84\xd3n\xfa\xacQ\x07\xb1%y\xaav\xb4\x08\xe0z\x1b\x16\xf5\x04\xe9\xcc\xb9\x08z\x1en7.G\xfc]\xc9\x14\xe1B@\xbb!8`'
|
||||
DATA_CRLF = b'BZh91AY&SY\xaez\xbbN\x00\x01H\xdf\x80\x00\x12@\x02\xff\xf0\x01\x07n\x00?\xe7\xff\xe0@\x01\xbc\xc6`\x86*\x8d=M\xa9\x9a\x86\xd0L@\x0fI\xa6!\xa1\x13\xc8\x88jdi\x8d@\x03@\x1a\x1a\x0c\x0c\x83 \x00\xc4h2\x19\x01\x82D\x84e\t\xe8\x99\x89\x19\x1ah\x00\r\x1a\x11\xaf\x9b\x0fG\xf5(\x1b\x1f?\t\x12\xcf\xb5\xfc\x95E\x00ps\x89\x12^\xa4\xdd\xa2&\x05(\x87\x04\x98\x89u\xe40%\xb6\x19\'\x8c\xc4\x89\xca\x07\x0e\x1b!\x91UIFU%C\x994!DI\xd2\xfa\xf0\xf1N8W\xde\x13A\xf5\x9cr%?\x9f3;I45A\xd1\x8bT\xb1<l\xba\xcb_\xc00xY\x17r\x17\x88\x08\x08@\xa0\ry@\x10\x04$)`\xf2\xce\x89z\xb0s\xec\x9b.iW\x9d\x81\xb5-+t\x9f\x1a\'\x97dB\xf5x\xb5\xbe.[.\xd7\x0e\x81\xe7\x08\x1cN`\x88\x10\xca\x87\xc3!"\x80\x92R\xa1/\xd1\xc0\xe6mf\xac\xbd\x99\xcca\xb3\x8780>\xa4\xc7\x8d\x1a\\"\xad\xa1\xabyBg\x15\xb9l\x88\x88\x91k"\x94\xa4\xd4\x89\xae*\xa6\x0b\x10\x0c\xd6\xd4m\xe86\xec\xb5j\x8a\x86j\';\xca.\x01I\xf2\xaaJ\xe8\x88\x8cU+t3\xfb\x0c\n\xa33\x13r2\r\x16\xe0\xb3(\xbf\x1d\x83r\xe7M\xf0D\x1365\xd8\x88\xd3\xa4\x92\xcb2\x06\x04\\\xc1\xb0\xea//\xbek&\xd8\xe6+t\xe5\xa1\x13\xada\x16\xder5"w]\xa2i\xb7[\x97R \xe2IT\xcd;Z\x04dk4\xad\x8a\t\xd3\x81z\x10\xf1:^`\xab\x1f\xc5\xdc\x91N\x14$+\x9e\xae\xd3\x80'
|
||||
|
||||
|
@ -54,13 +77,15 @@ class BZ2FileTest(BaseTest):
|
|||
if os.path.isfile(self.filename):
|
||||
os.unlink(self.filename)
|
||||
|
||||
def createTempFile(self, crlf=0):
|
||||
def getData(self, crlf=False):
|
||||
if crlf:
|
||||
return self.DATA_CRLF
|
||||
else:
|
||||
return self.DATA
|
||||
|
||||
def createTempFile(self, crlf=False):
|
||||
with open(self.filename, "wb") as f:
|
||||
if crlf:
|
||||
data = self.DATA_CRLF
|
||||
else:
|
||||
data = self.DATA
|
||||
f.write(data)
|
||||
f.write(self.getData(crlf))
|
||||
|
||||
def testRead(self):
|
||||
# "Test BZ2File.read()"
|
||||
|
@ -70,7 +95,7 @@ class BZ2FileTest(BaseTest):
|
|||
self.assertEqual(bz2f.read(), self.TEXT)
|
||||
|
||||
def testRead0(self):
|
||||
# Test BBZ2File.read(0)"
|
||||
# "Test BZ2File.read(0)"
|
||||
self.createTempFile()
|
||||
with BZ2File(self.filename) as bz2f:
|
||||
self.assertRaises(TypeError, bz2f.read, None)
|
||||
|
@ -94,6 +119,28 @@ class BZ2FileTest(BaseTest):
|
|||
with BZ2File(self.filename) as bz2f:
|
||||
self.assertEqual(bz2f.read(100), self.TEXT[:100])
|
||||
|
||||
def testPeek(self):
|
||||
# "Test BZ2File.peek()"
|
||||
self.createTempFile()
|
||||
with BZ2File(self.filename) as bz2f:
|
||||
pdata = bz2f.peek()
|
||||
self.assertNotEqual(len(pdata), 0)
|
||||
self.assertTrue(self.TEXT.startswith(pdata))
|
||||
self.assertEqual(bz2f.read(), self.TEXT)
|
||||
|
||||
def testReadInto(self):
|
||||
# "Test BZ2File.readinto()"
|
||||
self.createTempFile()
|
||||
with BZ2File(self.filename) as bz2f:
|
||||
n = 128
|
||||
b = bytearray(n)
|
||||
self.assertEqual(bz2f.readinto(b), n)
|
||||
self.assertEqual(b, self.TEXT[:n])
|
||||
n = len(self.TEXT) - n
|
||||
b = bytearray(len(self.TEXT))
|
||||
self.assertEqual(bz2f.readinto(b), n)
|
||||
self.assertEqual(b[:n], self.TEXT[-n:])
|
||||
|
||||
def testReadLine(self):
|
||||
# "Test BZ2File.readline()"
|
||||
self.createTempFile()
|
||||
|
@ -125,7 +172,7 @@ class BZ2FileTest(BaseTest):
|
|||
bz2f = BZ2File(self.filename)
|
||||
bz2f.close()
|
||||
self.assertRaises(ValueError, bz2f.__next__)
|
||||
# This call will deadlock of the above .__next__ call failed to
|
||||
# This call will deadlock if the above .__next__ call failed to
|
||||
# release the lock.
|
||||
self.assertRaises(ValueError, bz2f.readlines)
|
||||
|
||||
|
@ -217,6 +264,13 @@ class BZ2FileTest(BaseTest):
|
|||
self.assertEqual(bz2f.tell(), 0)
|
||||
self.assertEqual(bz2f.read(), self.TEXT)
|
||||
|
||||
def testFileno(self):
|
||||
# "Test BZ2File.fileno()"
|
||||
self.createTempFile()
|
||||
with open(self.filename) as rawf:
|
||||
with BZ2File(fileobj=rawf) as bz2f:
|
||||
self.assertEqual(bz2f.fileno(), rawf.fileno())
|
||||
|
||||
def testOpenDel(self):
|
||||
# "Test opening and deleting a file many times"
|
||||
self.createTempFile()
|
||||
|
@ -278,17 +332,65 @@ class BZ2FileTest(BaseTest):
|
|||
t.join()
|
||||
|
||||
def testMixedIterationReads(self):
|
||||
# Issue #8397: mixed iteration and reads should be forbidden.
|
||||
with bz2.BZ2File(self.filename, 'wb') as f:
|
||||
# The internal buffer size is hard-wired to 8192 bytes, we must
|
||||
# write out more than that for the test to stop half through
|
||||
# the buffer.
|
||||
f.write(self.TEXT * 100)
|
||||
with bz2.BZ2File(self.filename, 'rb') as f:
|
||||
next(f)
|
||||
self.assertRaises(ValueError, f.read)
|
||||
self.assertRaises(ValueError, f.readline)
|
||||
self.assertRaises(ValueError, f.readlines)
|
||||
# "Test mixed iteration and reads."
|
||||
self.createTempFile()
|
||||
linelen = len(self.TEXT_LINES[0])
|
||||
halflen = linelen // 2
|
||||
with bz2.BZ2File(self.filename) as bz2f:
|
||||
bz2f.read(halflen)
|
||||
self.assertEqual(next(bz2f), self.TEXT_LINES[0][halflen:])
|
||||
self.assertEqual(bz2f.read(), self.TEXT[linelen:])
|
||||
with bz2.BZ2File(self.filename) as bz2f:
|
||||
bz2f.readline()
|
||||
self.assertEqual(next(bz2f), self.TEXT_LINES[1])
|
||||
self.assertEqual(bz2f.readline(), self.TEXT_LINES[2])
|
||||
with bz2.BZ2File(self.filename) as bz2f:
|
||||
bz2f.readlines()
|
||||
with self.assertRaises(StopIteration):
|
||||
next(bz2f)
|
||||
self.assertEqual(bz2f.readlines(), [])
|
||||
|
||||
def testReadBytesIO(self):
|
||||
# "Test BZ2File.read() with BytesIO source"
|
||||
with BytesIO(self.getData()) as bio:
|
||||
with BZ2File(fileobj=bio) as bz2f:
|
||||
self.assertRaises(TypeError, bz2f.read, None)
|
||||
self.assertEqual(bz2f.read(), self.TEXT)
|
||||
self.assertFalse(bio.closed)
|
||||
|
||||
def testPeekBytesIO(self):
|
||||
# "Test BZ2File.peek() with BytesIO source"
|
||||
with BytesIO(self.getData()) as bio:
|
||||
with BZ2File(fileobj=bio) as bz2f:
|
||||
pdata = bz2f.peek()
|
||||
self.assertNotEqual(len(pdata), 0)
|
||||
self.assertTrue(self.TEXT.startswith(pdata))
|
||||
self.assertEqual(bz2f.read(), self.TEXT)
|
||||
|
||||
def testWriteBytesIO(self):
|
||||
# "Test BZ2File.write() with BytesIO destination"
|
||||
with BytesIO() as bio:
|
||||
with BZ2File(fileobj=bio, mode="w") as bz2f:
|
||||
self.assertRaises(TypeError, bz2f.write)
|
||||
bz2f.write(self.TEXT)
|
||||
self.assertEqual(self.decompress(bio.getvalue()), self.TEXT)
|
||||
self.assertFalse(bio.closed)
|
||||
|
||||
def testSeekForwardBytesIO(self):
|
||||
# "Test BZ2File.seek(150, 0) with BytesIO source"
|
||||
with BytesIO(self.getData()) as bio:
|
||||
with BZ2File(fileobj=bio) as bz2f:
|
||||
self.assertRaises(TypeError, bz2f.seek)
|
||||
bz2f.seek(150)
|
||||
self.assertEqual(bz2f.read(), self.TEXT[150:])
|
||||
|
||||
def testSeekBackwardsBytesIO(self):
|
||||
# "Test BZ2File.seek(-150, 1) with BytesIO source"
|
||||
with BytesIO(self.getData()) as bio:
|
||||
with BZ2File(fileobj=bio) as bz2f:
|
||||
bz2f.read(500)
|
||||
bz2f.seek(-150, 1)
|
||||
self.assertEqual(bz2f.read(), self.TEXT[500-150:])
|
||||
|
||||
class BZ2CompressorTest(BaseTest):
|
||||
def testCompress(self):
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue