gh-99581: Fix a buffer overflow in the tokenizer when copying lines that fill the available buffer (GH-99605)

(cherry picked from commit e13d1d9dda)

Co-authored-by: Pablo Galindo Salgado <Pablogsal@gmail.com>
Miss Islington (bot) 2022-11-20 12:53:02 -08:00 committed by GitHub
parent 152a437b8d
commit f381644819
3 changed files with 25 additions and 1 deletion
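
As the new test's comment puts it, the overflow shows up when a decoded source line is copied into the tokenizer's line buffer and the line is exactly as long as the space that is left, so the newline (and terminator) that may still need to be appended no longer fits. The C sketch below illustrates that reservation pattern in isolation; it is not the actual CPython tokenizer change, and the line_buffer, reserve, and append_line names are illustrative assumptions.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
    char *buf;   /* start of the allocated buffer */
    char *inp;   /* one past the last byte written */
    char *end;   /* one past the allocated region */
} line_buffer;

/* Grow the buffer, if needed, so that at least `needed` more bytes fit after `inp`. */
static int
reserve(line_buffer *lb, size_t needed)
{
    size_t used = lb->buf ? (size_t)(lb->inp - lb->buf) : 0;
    size_t size = lb->buf ? (size_t)(lb->end - lb->buf) : 0;
    if (size - used >= needed) {
        return 1;
    }
    size_t new_size = size ? size : 64;
    while (new_size - used < needed) {
        new_size *= 2;
    }
    char *new_buf = realloc(lb->buf, new_size);
    if (new_buf == NULL) {
        return 0;
    }
    lb->inp = new_buf + used;
    lb->buf = new_buf;
    lb->end = new_buf + new_size;
    return 1;
}

/* Copy one decoded line, appending a '\n' if the line does not end in one,
 * plus a NUL terminator.  Reserving len + 2 rather than len is the point of
 * the sketch: with only len reserved, the write goes out of bounds precisely
 * when the incoming line is as long as the space that is left. */
static int
append_line(line_buffer *lb, const char *line, size_t len)
{
    if (!reserve(lb, len + 2)) {
        return 0;
    }
    memcpy(lb->inp, line, len);
    lb->inp += len;
    if (len == 0 || lb->inp[-1] != '\n') {
        *lb->inp++ = '\n';
    }
    *lb->inp = '\0';
    return 1;
}

int
main(void)
{
    line_buffer lb = {NULL, NULL, NULL};

    /* A long comment line with no trailing newline, in the spirit of the
     * "#" + "a" * 10000 lines the new regression test writes. */
    char line[10002];
    memset(line, 'a', sizeof(line) - 1);
    line[0] = '#';
    line[sizeof(line) - 1] = '\0';

    if (!append_line(&lb, line, strlen(line))) {
        fprintf(stderr, "out of memory\n");
        return 1;
    }
    printf("buffered %zu bytes\n", (size_t)(lb.inp - lb.buf));
    free(lb.buf);
    return 0;
}

Reserving the extra two bytes up front keeps the later newline and NUL writes in bounds even when the line alone would consume every remaining byte of the buffer.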

Lib/test/test_tokenize.py

@@ -10,6 +10,8 @@ from textwrap import dedent
 from unittest import TestCase, mock
 from test.test_grammar import (VALID_UNDERSCORE_LITERALS,
                                INVALID_UNDERSCORE_LITERALS)
+from test.support import os_helper
+from test.support.script_helper import run_test_script, make_script
 import os
 import token
 
@@ -2631,5 +2633,19 @@ async def f():
         self.assertEqual(get_tokens(code), get_tokens(code_no_cont))
 
 
+class CTokenizerBufferTests(unittest.TestCase):
+    def test_newline_at_the_end_of_buffer(self):
+        # See issue 99581: Make sure that if we need to add a new line at the
+        # end of the buffer, we have enough space in the buffer, specially when
+        # the current line is as long as the buffer space available.
+        test_script = f"""\
+        #coding: latin-1
+        #{"a"*10000}
+        #{"a"*10002}"""
+        with os_helper.temp_dir() as temp_dir:
+            file_name = make_script(temp_dir, 'foo', test_script)
+            run_test_script(file_name)
+
+
 if __name__ == "__main__":
     unittest.main()
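
The regression test above is self-contained, so it can typically be exercised on its own with regrtest, for example with ./python -m test test_tokenize -m test_newline_at_the_end_of_buffer; on a build without the tokenizer fix itself it is expected to crash or corrupt memory, and with the fix applied it should pass.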