Mirror of https://github.com/python/cpython.git, synced 2025-08-31 05:58:33 +00:00.
bpo-38256: Fix binascii.crc32() when inputs are 4+GiB (GH-32000)
When compiled with `USE_ZLIB_CRC32` defined (`configure` sets this on POSIX systems), `binascii.crc32(...)` failed to compute the correct value when the input data was >= 4 GiB, because the zlib crc32 API is limited to a 32-bit length. This lines it up with the `zlib.crc32(...)` implementation, which doesn't have that flaw. **Performance:** This also adopts the same GIL-releasing logic for larger inputs that `zlib.crc32` has, and causes the Windows build to always use zlib's crc32 instead of our slow C code, as zlib is a required build dependency on Windows.
This commit is contained in:
parent
3ae975f1ac
commit
9d1c4d69db
6 changed files with 87 additions and 31 deletions
|
@@ -4,7 +4,7 @@ import unittest
|
|||
import binascii
|
||||
import array
|
||||
import re
|
||||
from test.support import warnings_helper
|
||||
from test.support import bigmemtest, _1G, _4G, warnings_helper
|
||||
|
||||
|
||||
# Note: "*_hex" functions are aliases for "(un)hexlify"
|
||||
|
@@ -441,6 +441,14 @@ class BytearrayBinASCIITest(BinASCIITest):
|
|||
class MemoryviewBinASCIITest(BinASCIITest):
    # Re-run the entire shared BinASCIITest suite with memoryview inputs;
    # the base class builds its test data via type2test.
    type2test = memoryview
|
||||
|
||||
class ChecksumBigBufferTestCase(unittest.TestCase):
    """bpo-38256 - check that inputs >=4 GiB are handled correctly."""

    # bigmemtest gates this on the interpreter having enough memory for a
    # (_4G + 4)-byte buffer (memuse=1); with dry_run=False the test is
    # skipped outright rather than re-run with a scaled-down size.
    @bigmemtest(size=_4G + 4, memuse=1, dry_run=False)
    def test_big_buffer(self, size):
        # 4 * (_1G + 1) bytes = 4 GiB + 4 bytes, just past the 32-bit
        # length limit that the zlib-backed crc32 previously truncated at
        # (bpo-38256). 1044521549 is the known-good CRC-32 of this buffer.
        data = b"nyan" * (_1G + 1)
        self.assertEqual(binascii.crc32(data), 1044521549)
|
||||
|
||||
# Allow running this test file directly: discover and run all tests above.
if __name__ == "__main__":
    unittest.main()
|
|
Loading…
Add table
Add a link
Reference in a new issue