gh-95778: Correctly pre-check for int-to-str conversion (GH-96537)
Converting a large enough `int` to a decimal string raises `ValueError` as expected. However, the exception is raised only _after_ the quadratic-time base-conversion algorithm has run to completion. For effective DoS prevention, we need some kind of check before entering the quadratic-time loop. Oops! =)
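For context (not part of this change), the classic conversion peels off a few decimal digits at a time with a `divmod`, and each `divmod` scans all remaining internal digits of the number, which is what makes the total cost quadratic. A simplified Python sketch of that behaviour is below; the helper name `to_decimal_quadratic` is purely illustrative and is not CPython's actual C implementation.

```python
def to_decimal_quadratic(n: int) -> str:
    """Illustrative only: peel off 9 decimal digits per step.

    Each divmod scans all remaining internal digits of n (linear work),
    and the loop runs once per 9 output digits, so the total work grows
    quadratically with the number of digits -- the behaviour the limit
    (and this pre-check) guards against.
    """
    assert n >= 0
    if n == 0:
        return '0'
    chunks = []
    while n:
        n, r = divmod(n, 10**9)          # linear in the current size of n
        chunks.append(f'{r:09d}')
    chunks[-1] = chunks[-1].lstrip('0')  # no leading zeros on the top chunk
    return ''.join(reversed(chunks))
```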
The quick fix: essentially we catch _most_ values that exceed the threshold up front. Those that slip through will still be on the small side (read: sufficiently fast), and will get caught by the existing check so that the limit remains exact.
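For reference, here is a minimal sketch of the user-visible behaviour that the limit, and therefore this pre-check, protects (assuming Python 3.11+, where `sys.set_int_max_str_digits()` exists); 4300 is the documented default limit and the other numbers are arbitrary examples.

```python
import sys

sys.set_int_max_str_digits(4300)  # the documented default limit
n = 10 ** 5000                    # 5001 decimal digits, over the limit

try:
    str(n)                        # decimal conversion is limited ...
except ValueError as exc:
    print('rejected:', exc)

print(len(hex(n)))                # ... but power-of-two bases are not
```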
The justification for the new pre-check is as follows. The C code check is:
```c
max_str_digits / (3 * PyLong_SHIFT) <= (size_a - 11) / 10
```
In GitHub markdown math-speak, writing $M$ for `max_str_digits`, $L$ for `PyLong_SHIFT` and $s$ for `size_a`, that check is:
$$\left\lfloor\frac{M}{3L}\right\rfloor \le \left\lfloor\frac{s - 11}{10}\right\rfloor$$
From this it follows, using $x - 1 < \lfloor x \rfloor \le x$, that
$$\frac{M}{3L} < \frac{s-1}{10}$$
hence that
$$\frac{L(s-1)}{M} > \frac{10}{3} > \log_2(10),$$
where the last inequality holds because $\log_2(10) \approx 3.322 < 10/3$.
So
$$2^{L(s-1)} > 10^M.$$
But our input integer $a$ satisfies $|a| \ge 2^{L(s-1)}$, so $|a|$ is larger than $10^M$. This shows that we don't accidentally capture anything _below_ the intended limit in the check.
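As a quick numeric sanity check of that conclusion (not part of the patch), the C condition can be mirrored in Python and evaluated for the default limit. `PyLong_SHIFT` is assumed to be 30 here, its usual value on 64-bit builds (some builds use 15), 4300 is `sys.int_info.default_max_str_digits`, and the helper `precheck_fires` is purely illustrative.

```python
import math

# Assumed build parameters.
PyLong_SHIFT = 30          # 30 on typical 64-bit builds, 15 on some others
max_str_digits = 4300      # sys.int_info.default_max_str_digits

def precheck_fires(size_a: int) -> bool:
    # Python's // floors while C's / truncates toward zero, but both sides
    # are non-negative wherever the check can fire, so the threshold is the
    # same as in the C code.
    return max_str_digits // (3 * PyLong_SHIFT) <= (size_a - 11) // 10

# Smallest number of internal digits (base 2**PyLong_SHIFT) that the
# pre-check rejects, and the minimum bit length such an int must have.
smallest = next(s for s in range(1, 100_000) if precheck_fires(s))
min_bits = PyLong_SHIFT * (smallest - 1)

# 2**min_bits exceeds 10**max_str_digits, so every early-rejected int is
# already over the decimal-digit limit -- the pre-check never under-shoots.
print(smallest, min_bits, min_bits > max_str_digits * math.log2(10))
# -> 481 14400 True
```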
<!-- gh-issue-number: gh-95778 -->
* Issue: gh-95778
<!-- /gh-issue-number -->
Co-authored-by: Gregory P. Smith [Google LLC] <greg@krypto.org>
(cherry picked from commit b126196838)
Co-authored-by: Mark Dickinson <dickinsm@gmail.com>
Parent: 2ced2c95b7
Commit: 8a776d1d51
4 changed files with 107 additions and 7 deletions
Lib/test/test_int.py (partial diff):

```diff
@@ -1,4 +1,5 @@
 import sys
+import time
 
 import unittest
 from test import support
@@ -632,6 +633,87 @@ class IntStrDigitLimitsTests(unittest.TestCase):
             with self.assertRaises(ValueError):
                 str(i)
 
+    def test_denial_of_service_prevented_int_to_str(self):
+        """Regression test: ensure we fail before performing O(N**2) work."""
+        maxdigits = sys.get_int_max_str_digits()
+        assert maxdigits < 50_000, maxdigits  # A test prerequisite.
+        get_time = time.process_time
+        if get_time() <= 0:  # some platforms like WASM lack process_time()
+            get_time = time.monotonic
+
+        huge_int = int(f'0x{"c"*65_000}', base=16)  # 78268 decimal digits.
+        digits = 78_268
+        with support.adjust_int_max_str_digits(digits):
+            start = get_time()
+            huge_decimal = str(huge_int)
+            seconds_to_convert = get_time() - start
+        self.assertEqual(len(huge_decimal), digits)
+        # Ensuring that we chose a slow enough conversion to measure.
+        # It takes 0.1 seconds on a Zen based cloud VM in an opt build.
+        if seconds_to_convert < 0.005:
+            raise unittest.SkipTest('"slow" conversion took only '
+                                    f'{seconds_to_convert} seconds.')
+
+        # We test with the limit almost at the size needed to check performance.
+        # The performant limit check is slightly fuzzy, give it a some room.
+        with support.adjust_int_max_str_digits(int(.995 * digits)):
+            with self.assertRaises(ValueError) as err:
+                start = get_time()
+                str(huge_int)
+            seconds_to_fail_huge = get_time() - start
+        self.assertIn('conversion', str(err.exception))
+        self.assertLess(seconds_to_fail_huge, seconds_to_convert/8)
+
+        # Now we test that a conversion that would take 30x as long also fails
+        # in a similarly fast fashion.
+        extra_huge_int = int(f'0x{"c"*500_000}', base=16)  # 602060 digits.
+        with self.assertRaises(ValueError) as err:
+            start = get_time()
+            # If not limited, 8 seconds said Zen based cloud VM.
+            str(extra_huge_int)
+        seconds_to_fail_extra_huge = get_time() - start
+        self.assertIn('conversion', str(err.exception))
+        self.assertLess(seconds_to_fail_extra_huge, seconds_to_convert/8)
+
+    def test_denial_of_service_prevented_str_to_int(self):
+        """Regression test: ensure we fail before performing O(N**2) work."""
+        maxdigits = sys.get_int_max_str_digits()
+        assert maxdigits < 100_000, maxdigits  # A test prerequisite.
+        get_time = time.process_time
+        if get_time() <= 0:  # some platforms like WASM lack process_time()
+            get_time = time.monotonic
+
+        digits = 133700
+        huge = '8'*digits
+        with support.adjust_int_max_str_digits(digits):
+            start = get_time()
+            int(huge)
+            seconds_to_convert = get_time() - start
+        # Ensuring that we chose a slow enough conversion to measure.
+        # It takes 0.1 seconds on a Zen based cloud VM in an opt build.
+        if seconds_to_convert < 0.005:
+            raise unittest.SkipTest('"slow" conversion took only '
+                                    f'{seconds_to_convert} seconds.')
+
+        with support.adjust_int_max_str_digits(digits - 1):
+            with self.assertRaises(ValueError) as err:
+                start = get_time()
+                int(huge)
+            seconds_to_fail_huge = get_time() - start
+        self.assertIn('conversion', str(err.exception))
+        self.assertLess(seconds_to_fail_huge, seconds_to_convert/8)
+
+        # Now we test that a conversion that would take 30x as long also fails
+        # in a similarly fast fashion.
+        extra_huge = '7'*1_200_000
+        with self.assertRaises(ValueError) as err:
+            start = get_time()
+            # If not limited, 8 seconds in the Zen based cloud VM.
+            int(extra_huge)
+        seconds_to_fail_extra_huge = get_time() - start
+        self.assertIn('conversion', str(err.exception))
+        self.assertLess(seconds_to_fail_extra_huge, seconds_to_convert/8)
+
     def test_power_of_two_bases_unlimited(self):
         """The limit does not apply to power of 2 bases."""
         maxdigits = sys.get_int_max_str_digits()
```