mirror of
https://github.com/python/cpython.git
synced 2025-09-26 18:29:57 +00:00
Untokenize: A logically incorrect assert tested user-input validity.
Replace it with correct logic that raises ValueError for bad input. Issues #8478 and #12691 reported the incorrect logic. Add an Untokenize test case and an initial test method.
This commit is contained in:
parent
cf62603276
commit
5e6db31368
2 changed files with 18 additions and 2 deletions
|
@ -638,7 +638,7 @@ Legacy unicode literals:
|
||||||
from test import support
|
from test import support
|
||||||
from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP,
|
from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP,
|
||||||
STRING, ENDMARKER, ENCODING, tok_name, detect_encoding,
|
STRING, ENDMARKER, ENCODING, tok_name, detect_encoding,
|
||||||
open as tokenize_open)
|
open as tokenize_open, Untokenizer)
|
||||||
from io import BytesIO
|
from io import BytesIO
|
||||||
from unittest import TestCase
|
from unittest import TestCase
|
||||||
import os, sys, glob
|
import os, sys, glob
|
||||||
|
@ -1153,6 +1153,19 @@ class TestTokenize(TestCase):
|
||||||
# See http://bugs.python.org/issue16152
|
# See http://bugs.python.org/issue16152
|
||||||
self.assertExactTypeEqual('@ ', token.AT)
|
self.assertExactTypeEqual('@ ', token.AT)
|
||||||
|
|
||||||
|
class UntokenizeTest(TestCase):
    """Tests for tokenize.Untokenizer."""

    def test_bad_input_order(self):
        # add_whitespace() must raise ValueError (not assert) when asked to
        # move to a start position that lies before the previous token's end.
        untok = Untokenizer()
        untok.prev_row = 2
        untok.prev_col = 2
        with self.assertRaises(ValueError) as ctx:
            untok.add_whitespace((1,3))
        self.assertEqual(ctx.exception.args[0],
                'start (1,3) precedes previous end (2,2)')
        # Same row but an earlier column is just as invalid.
        self.assertRaises(ValueError, untok.add_whitespace, (2,1))
|
||||||
|
|
||||||
|
|
||||||
__test__ = {"doctests" : doctests, 'decistmt': decistmt}
|
__test__ = {"doctests" : doctests, 'decistmt': decistmt}
|
||||||
|
|
||||||
def test_main():
|
def test_main():
|
||||||
|
@ -1162,6 +1175,7 @@ def test_main():
|
||||||
support.run_unittest(Test_Tokenize)
|
support.run_unittest(Test_Tokenize)
|
||||||
support.run_unittest(TestDetectEncoding)
|
support.run_unittest(TestDetectEncoding)
|
||||||
support.run_unittest(TestTokenize)
|
support.run_unittest(TestTokenize)
|
||||||
|
support.run_unittest(UntokenizeTest)
|
||||||
|
|
||||||
if __name__ == "__main__":
|
if __name__ == "__main__":
|
||||||
test_main()
|
test_main()
|
||||||
|
|
|
@ -229,7 +229,9 @@ class Untokenizer:
|
||||||
|
|
||||||
def add_whitespace(self, start):
    """Pad the output with spaces up to column *start*.

    Raises ValueError if *start* precedes the previously emitted end
    position (self.prev_row, self.prev_col) -- out-of-order input is a
    caller error, not an internal invariant, so an assert is not used.
    """
    row, col = start
    # Tuple comparison: earlier row, or same row with an earlier column.
    if (row, col) < (self.prev_row, self.prev_col):
        raise ValueError("start ({},{}) precedes previous end ({},{})"
                         .format(row, col, self.prev_row, self.prev_col))
    gap = col - self.prev_col
    if gap:
        self.tokens.append(" " * gap)
|
||||||
|
|
Loading…
Add table
Add a link
Reference in a new issue