Issue #25626: Change zlib to accept Py_ssize_t and cap to UINT_MAX

The underlying zlib library stores sizes in “unsigned int”. The corresponding
Python parameters are all sizes of buffers filled in by zlib, so it is safe
to cap larger values at UINT_MAX rather than reject them. OverflowError is
still raised for sizes that do not fit in Py_ssize_t.
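
For illustration, a minimal sketch of the new behaviour at the Python level,
mirroring test_large_bufsize and test_overflow in the diff below (the sample
data and the memory-cost note are assumptions for the sketch, not part of the
change itself):

import sys
import zlib

data = b"data" * 1000
compressed = zlib.compress(data, 1)

# A bufsize above UINT_MAX (2**32 - 1) is capped rather than rejected.
# The initial output buffer is still allocated at the capped size, so this
# line needs roughly 4 GiB of memory (hence @bigmemtest on the real test).
assert zlib.decompress(compressed, 15, 2**32) == data

# A size that does not fit in Py_ssize_t is still rejected up front.
try:
    zlib.decompress(b'', 15, sys.maxsize + 1)
except OverflowError:
    pass  # raised during argument parsing, before any decompression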

Sizes are now limited to Py_ssize_t rather than unsigned long, because Python
byte strings cannot be larger than Py_ssize_t. Previously, a size that fitted
in unsigned long but not in Py_ssize_t could result in a SystemError on
32-bit platforms.
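
Concretely, a sketch of the 32-bit case, where sys.maxsize + 1 == 2**31: such
a length fits in unsigned long but not in Py_ssize_t, so it used to get past
argument parsing and fail later; it now fails consistently with OverflowError:

import sys
import zlib

dco = zlib.decompressobj()
# On a 32-bit build, 2**31 fits in unsigned long (max 2**32 - 1) but not
# in Py_ssize_t (max 2**31 - 1); the conversion itself now rejects it.
try:
    dco.flush(sys.maxsize + 1)
except OverflowError:
    print("rejected at argument parsing")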

This resolves a regression, introduced by revision 62723172412c, in the gzip
module when reading more than UINT_MAX or LONG_MAX bytes in one call.
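
A sketch of the gzip case (the stream here is tiny, so the oversized size
argument is harmless; on a 64-bit build 2**32 exceeds UINT_MAX and is now
simply capped on its way down to zlib):

import gzip
import io

blob = gzip.compress(b"hello world")
f = gzip.GzipFile(fileobj=io.BytesIO(blob))
# Before the fix, a single read() whose size argument exceeded the
# internal limit could fail with OverflowError even though far less
# data was actually available.
assert f.read(2**32) == b"hello world"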
Martin Panter 2015-11-20 08:13:35 +00:00
parent d13cade381
commit e99e97762c
7 changed files with 120 additions and 47 deletions

Lib/test/test_zlib.py

@@ -122,11 +122,17 @@ class ExceptionTestCase(unittest.TestCase):
        self.assertRaises(ValueError, zlib.decompressobj().flush, 0)
        self.assertRaises(ValueError, zlib.decompressobj().flush, -1)

    @support.cpython_only
    def test_overflow(self):
        with self.assertRaisesRegex(OverflowError, 'int too large'):
            zlib.decompress(b'', 15, sys.maxsize + 1)
        with self.assertRaisesRegex(OverflowError, 'int too large'):
            zlib.decompressobj().flush(sys.maxsize + 1)


class BaseCompressTestCase(object):
    def check_big_compress_buffer(self, size, compress_func):
        _1M = 1024 * 1024
        fmt = "%%0%dx" % (2 * _1M)
        # Generate 10MB worth of random, and expand it by repeating it.
        # The assumption is that zlib's memory is not big enough to exploit
        # such spread out redundancy.
@@ -196,6 +202,18 @@ class CompressTestCase(BaseCompressTestCase, unittest.TestCase):
        finally:
            data = None

    @bigmemtest(size=_4G, memuse=1)
    def test_large_bufsize(self, size):
        # Test decompress(bufsize) parameter greater than the internal limit
        data = HAMLET_SCENE * 10
        compressed = zlib.compress(data, 1)
        self.assertEqual(zlib.decompress(compressed, 15, size), data)

    def test_custom_bufsize(self):
        data = HAMLET_SCENE * 10
        compressed = zlib.compress(data, 1)
        self.assertEqual(zlib.decompress(compressed, 15, CustomInt()), data)


class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase):
    # Test compression object
@@ -364,6 +382,21 @@ class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase):
        self.assertRaises(ValueError, dco.decompress, b"", -1)
        self.assertEqual(b'', dco.unconsumed_tail)

    def test_maxlen_large(self):
        # Sizes up to sys.maxsize should be accepted, although zlib is
        # internally limited to expressing sizes with unsigned int
        data = HAMLET_SCENE * 10
        self.assertGreater(len(data), zlib.DEF_BUF_SIZE)
        compressed = zlib.compress(data, 1)
        dco = zlib.decompressobj()
        self.assertEqual(dco.decompress(compressed, sys.maxsize), data)

    def test_maxlen_custom(self):
        data = HAMLET_SCENE * 10
        compressed = zlib.compress(data, 1)
        dco = zlib.decompressobj()
        self.assertEqual(dco.decompress(compressed, CustomInt()), data[:100])

    def test_clear_unconsumed_tail(self):
        # Issue #12050: calling decompress() without providing max_length
        # should clear the unconsumed_tail attribute.
@@ -537,6 +570,22 @@ class CompressObjectTestCase(BaseCompressTestCase, unittest.TestCase):
        data = zlib.compress(input2)
        self.assertEqual(dco.flush(), input1[1:])

    @bigmemtest(size=_4G, memuse=1)
    def test_flush_large_length(self, size):
        # Test flush(length) parameter greater than internal limit UINT_MAX
        input = HAMLET_SCENE * 10
        data = zlib.compress(input, 1)
        dco = zlib.decompressobj()
        dco.decompress(data, 1)
        self.assertEqual(dco.flush(size), input[1:])

    def test_flush_custom_length(self):
        input = HAMLET_SCENE * 10
        data = zlib.compress(input, 1)
        dco = zlib.decompressobj()
        dco.decompress(data, 1)
        self.assertEqual(dco.flush(CustomInt()), input[1:])

    @requires_Compress_copy
    def test_compresscopy(self):
        # Test copying a compression object
@@ -725,5 +774,10 @@ LAERTES
"""


class CustomInt:
    def __int__(self):
        return 100


if __name__ == "__main__":
    unittest.main()