gh-102856: Python tokenizer implementation for PEP 701 (#104323)

This commit replaces the Python implementation of the tokenize module with one that reuses
the real C tokenizer via a private extension module. The tokenize module now implements a
compatibility layer that transforms tokens from the C tokenizer into the tokens the old
pure-Python tokenizer emitted, preserving backward compatibility.

As the C tokenizer does not emit some tokens that the Python tokenizer provides (such as comments and non-semantic newlines), a new special mode has been added to the C tokenizer; it is currently used only via
the extension module that exposes it to the Python layer. This new mode forces the C tokenizer to emit these extra tokens and to attach the metadata needed to match the old Python implementation.

Co-authored-by: Pablo Galindo <pablogsal@gmail.com>
Author: Marta Gómez Macías, 2023-05-21 02:03:02 +02:00 (committed by GitHub)
Parent: 3ed57e4995
Commit: 6715f91edc
22 changed files with 424 additions and 374 deletions

Lib/inspect.py

@@ -2187,7 +2187,7 @@ def _signature_strip_non_python_syntax(signature):
if string == ',':
current_parameter += 1
if (type == ERRORTOKEN) and (string == '$'):
if (type == OP) and (string == '$'):
assert self_parameter is None
self_parameter = current_parameter
continue
@@ -2195,7 +2195,7 @@ def _signature_strip_non_python_syntax(signature):
add(string)
if (string == ','):
add(' ')
clean_signature = ''.join(text)
clean_signature = ''.join(text).strip()
return clean_signature, self_parameter
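This inspect change reflects an observable difference of the new layer: a character such as '$' (used in Argument Clinic signatures) now reaches callers as a generic OP token rather than an ERRORTOKEN. A hedged sketch, assuming Python 3.12+:

    import io
    import tokenize

    for tok in tokenize.generate_tokens(io.StringIO("($a, b)\n").readline):
        print(tokenize.tok_name[tok.type], repr(tok.string))
    # '$' is expected to show up as OP here, not ERRORTOKEN.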

Lib/tabnanny.py

@@ -107,6 +107,10 @@ def check(file):
errprint("%r: Token Error: %s" % (file, msg))
return
except SyntaxError as msg:
errprint("%r: Token Error: %s" % (file, msg))
return
except IndentationError as msg:
errprint("%r: Indentation Error: %s" % (file, msg))
return
@@ -272,6 +276,12 @@ def format_witnesses(w):
return prefix + " " + ', '.join(firsts)
def process_tokens(tokens):
try:
_process_tokens(tokens)
except TabError as e:
raise NannyNag(e.lineno, e.msg, e.text)
def _process_tokens(tokens):
INDENT = tokenize.INDENT
DEDENT = tokenize.DEDENT
NEWLINE = tokenize.NEWLINE
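The pattern added in this hunk is easy to reproduce in isolation: the C-backed tokenizer reports ambiguous indentation as TabError, which process_tokens() now converts into a NannyNag. A sketch of the underlying behaviour, assuming Python 3.12+:

    import io
    import tokenize

    # A tab-indented line followed by a space-indented one is ambiguous.
    source = "if True:\n\tpass\n        pass\n"
    try:
        list(tokenize.generate_tokens(io.StringIO(source).readline))
    except TabError as exc:
        print(exc.lineno, exc.msg)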

Lib/test/test_tabnanny.py

@@ -223,7 +223,7 @@ class TestCheck(TestCase):
with TemporaryPyFile(SOURCE_CODES["nannynag_errored"]) as file_path:
out = f"{file_path!r}: *** Line 3: trouble in tab city! ***\n"
out += "offending line: '\\tprint(\"world\")\\n'\n"
out += "indent not equal e.g. at tab size 1\n"
out += "inconsistent use of tabs and spaces in indentation\n"
tabnanny.verbose = 1
self.verify_tabnanny_check(file_path, out=out)
@@ -315,7 +315,7 @@ class TestCommandLine(TestCase):
def test_with_errored_file(self):
"""Should displays error when errored python file is given."""
with TemporaryPyFile(SOURCE_CODES["wrong_indented"]) as file_path:
stderr = f"{file_path!r}: Indentation Error: "
stderr = f"{file_path!r}: Token Error: "
stderr += ('unindent does not match any outer indentation level'
' (<tokenize>, line 3)')
self.validate_cmd(file_path, stderr=stderr, expect_failure=True)

Lib/test/test_tokenize.py

@@ -3,7 +3,7 @@ from test.support import os_helper
from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP,
STRING, ENDMARKER, ENCODING, tok_name, detect_encoding,
open as tokenize_open, Untokenizer, generate_tokens,
NEWLINE, _generate_tokens_from_c_tokenizer, DEDENT)
NEWLINE, _generate_tokens_from_c_tokenizer, DEDENT, TokenInfo)
from io import BytesIO, StringIO
import unittest
from textwrap import dedent
@@ -82,7 +82,7 @@ class TokenizeTest(TestCase):
NAME 'False' (4, 11) (4, 16)
COMMENT '# NEWLINE' (4, 17) (4, 26)
NEWLINE '\\n' (4, 26) (4, 27)
DEDENT '' (5, 0) (5, 0)
DEDENT '' (4, 27) (4, 27)
""")
indent_error_file = b"""\
def k(x):
@@ -230,6 +230,10 @@ def k(x):
continue
self.assertEqual(number_token(lit), lit)
for lit in INVALID_UNDERSCORE_LITERALS:
try:
number_token(lit)
except SyntaxError:
continue
self.assertNotEqual(number_token(lit), lit)
def test_string(self):
@@ -381,21 +385,119 @@ c"""', """\
STRING 'rb"\""a\\\\\\nb\\\\\\nc"\""' (1, 0) (3, 4)
""")
self.check_tokenize('f"abc"', """\
STRING 'f"abc"' (1, 0) (1, 6)
FSTRING_START 'f"' (1, 0) (1, 2)
FSTRING_MIDDLE 'abc' (1, 2) (1, 5)
FSTRING_END '"' (1, 5) (1, 6)
""")
self.check_tokenize('fR"a{b}c"', """\
STRING 'fR"a{b}c"' (1, 0) (1, 9)
FSTRING_START 'fR"' (1, 0) (1, 3)
FSTRING_MIDDLE 'a' (1, 3) (1, 4)
OP '{' (1, 4) (1, 5)
NAME 'b' (1, 5) (1, 6)
OP '}' (1, 6) (1, 7)
FSTRING_MIDDLE 'c' (1, 7) (1, 8)
FSTRING_END '"' (1, 8) (1, 9)
""")
self.check_tokenize('fR"a{{{b!r}}}c"', """\
FSTRING_START 'fR"' (1, 0) (1, 3)
FSTRING_MIDDLE 'a{' (1, 3) (1, 5)
OP '{' (1, 6) (1, 7)
NAME 'b' (1, 7) (1, 8)
OP '!' (1, 8) (1, 9)
NAME 'r' (1, 9) (1, 10)
OP '}' (1, 10) (1, 11)
FSTRING_MIDDLE '}' (1, 11) (1, 12)
FSTRING_MIDDLE 'c' (1, 13) (1, 14)
FSTRING_END '"' (1, 14) (1, 15)
""")
self.check_tokenize('f"{{{1+1}}}"', """\
FSTRING_START 'f"' (1, 0) (1, 2)
FSTRING_MIDDLE '{' (1, 2) (1, 3)
OP '{' (1, 4) (1, 5)
NUMBER '1' (1, 5) (1, 6)
OP '+' (1, 6) (1, 7)
NUMBER '1' (1, 7) (1, 8)
OP '}' (1, 8) (1, 9)
FSTRING_MIDDLE '}' (1, 9) (1, 10)
FSTRING_END '"' (1, 11) (1, 12)
""")
self.check_tokenize('f"""{f\'\'\'{f\'{f"{1+1}"}\'}\'\'\'}"""', """\
FSTRING_START 'f\"""' (1, 0) (1, 4)
OP '{' (1, 4) (1, 5)
FSTRING_START "f'''" (1, 5) (1, 9)
OP '{' (1, 9) (1, 10)
FSTRING_START "f'" (1, 10) (1, 12)
OP '{' (1, 12) (1, 13)
FSTRING_START 'f"' (1, 13) (1, 15)
OP '{' (1, 15) (1, 16)
NUMBER '1' (1, 16) (1, 17)
OP '+' (1, 17) (1, 18)
NUMBER '1' (1, 18) (1, 19)
OP '}' (1, 19) (1, 20)
FSTRING_END '"' (1, 20) (1, 21)
OP '}' (1, 21) (1, 22)
FSTRING_END "'" (1, 22) (1, 23)
OP '}' (1, 23) (1, 24)
FSTRING_END "'''" (1, 24) (1, 27)
OP '}' (1, 27) (1, 28)
FSTRING_END '\"""' (1, 28) (1, 31)
""")
self.check_tokenize('f""" x\nstr(data, encoding={invalid!r})\n"""', """\
FSTRING_START 'f\"""' (1, 0) (1, 4)
FSTRING_MIDDLE ' x\\nstr(data, encoding=' (1, 4) (2, 19)
OP '{' (2, 19) (2, 20)
NAME 'invalid' (2, 20) (2, 27)
OP '!' (2, 27) (2, 28)
NAME 'r' (2, 28) (2, 29)
OP '}' (2, 29) (2, 30)
FSTRING_MIDDLE ')\\n' (2, 30) (3, 0)
FSTRING_END '\"""' (3, 0) (3, 3)
""")
self.check_tokenize('f"""123456789\nsomething{None}bad"""', """\
FSTRING_START 'f\"""' (1, 0) (1, 4)
FSTRING_MIDDLE '123456789\\nsomething' (1, 4) (2, 9)
OP '{' (2, 9) (2, 10)
NAME 'None' (2, 10) (2, 14)
OP '}' (2, 14) (2, 15)
FSTRING_MIDDLE 'bad' (2, 15) (2, 18)
FSTRING_END '\"""' (2, 18) (2, 21)
""")
self.check_tokenize('f"""abc"""', """\
STRING 'f\"\"\"abc\"\"\"' (1, 0) (1, 10)
FSTRING_START 'f\"""' (1, 0) (1, 4)
FSTRING_MIDDLE 'abc' (1, 4) (1, 7)
FSTRING_END '\"""' (1, 7) (1, 10)
""")
self.check_tokenize(r'f"abc\
def"', """\
STRING 'f"abc\\\\\\ndef"' (1, 0) (2, 4)
FSTRING_START 'f"' (1, 0) (1, 2)
FSTRING_MIDDLE 'abc\\\\\\ndef' (1, 2) (2, 3)
FSTRING_END '"' (2, 3) (2, 4)
""")
self.check_tokenize(r'Rf"abc\
def"', """\
STRING 'Rf"abc\\\\\\ndef"' (1, 0) (2, 4)
FSTRING_START 'Rf"' (1, 0) (1, 3)
FSTRING_MIDDLE 'abc\\\\\\ndef' (1, 3) (2, 3)
FSTRING_END '"' (2, 3) (2, 4)
""")
self.check_tokenize("f'some words {a+b:.3f} more words {c+d=} final words'", """\
FSTRING_START "f'" (1, 0) (1, 2)
FSTRING_MIDDLE 'some words ' (1, 2) (1, 13)
OP '{' (1, 13) (1, 14)
NAME 'a' (1, 14) (1, 15)
OP '+' (1, 15) (1, 16)
NAME 'b' (1, 16) (1, 17)
OP ':' (1, 17) (1, 18)
FSTRING_MIDDLE '.3f' (1, 18) (1, 21)
OP '}' (1, 21) (1, 22)
FSTRING_MIDDLE ' more words ' (1, 22) (1, 34)
OP '{' (1, 34) (1, 35)
NAME 'c' (1, 35) (1, 36)
OP '+' (1, 36) (1, 37)
NAME 'd' (1, 37) (1, 38)
OP '=' (1, 38) (1, 39)
OP '}' (1, 39) (1, 40)
FSTRING_MIDDLE ' final words' (1, 40) (1, 52)
FSTRING_END "'" (1, 52) (1, 53)
""")
def test_function(self):
@@ -644,8 +746,8 @@ def"', """\
NEWLINE '\\n' (2, 5) (2, 6)
INDENT ' \\t' (3, 0) (3, 9)
NAME 'pass' (3, 9) (3, 13)
DEDENT '' (4, 0) (4, 0)
DEDENT '' (4, 0) (4, 0)
DEDENT '' (3, 14) (3, 14)
DEDENT '' (3, 14) (3, 14)
""")
def test_non_ascii_identifiers(self):
@@ -857,7 +959,7 @@ async def foo():
NUMBER '1' (2, 17) (2, 18)
OP ':' (2, 18) (2, 19)
NAME 'pass' (2, 20) (2, 24)
DEDENT '' (3, 0) (3, 0)
DEDENT '' (2, 25) (2, 25)
""")
self.check_tokenize('''async def foo(async): await''', """\
@@ -905,7 +1007,7 @@ def f():
NAME 'await' (6, 2) (6, 7)
OP '=' (6, 8) (6, 9)
NUMBER '2' (6, 10) (6, 11)
DEDENT '' (7, 0) (7, 0)
DEDENT '' (6, 12) (6, 12)
""")
self.check_tokenize('''\
@@ -943,7 +1045,7 @@ async def f():
NAME 'await' (6, 2) (6, 7)
OP '=' (6, 8) (6, 9)
NUMBER '2' (6, 10) (6, 11)
DEDENT '' (7, 0) (7, 0)
DEDENT '' (6, 12) (6, 12)
""")
class GenerateTokensTest(TokenizeTest):
@@ -968,7 +1070,7 @@ def decistmt(s):
])
else:
result.append((toknum, tokval))
return untokenize(result).decode('utf-8')
return untokenize(result).decode('utf-8').strip()
class TestMisc(TestCase):
@@ -1040,33 +1142,16 @@ class Test_Tokenize(TestCase):
nonlocal first
if not first:
first = True
return line
yield line
else:
return b''
yield b''
# skip the initial encoding token and the end tokens
tokens = list(_tokenize(readline, encoding='utf-8'))[1:-2]
expected_tokens = [(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"')]
tokens = list(_tokenize(readline(), encoding='utf-8'))[:-2]
expected_tokens = [TokenInfo(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"\n')]
self.assertEqual(tokens, expected_tokens,
"bytes not decoded with encoding")
def test__tokenize_does_not_decode_with_encoding_none(self):
literal = '"ЉЊЈЁЂ"'
first = False
def readline():
nonlocal first
if not first:
first = True
return literal
else:
return b''
# skip the end tokens
tokens = list(_tokenize(readline, encoding=None))[:-2]
expected_tokens = [(3, '"ЉЊЈЁЂ"', (1, 0), (1, 7), '"ЉЊЈЁЂ"')]
self.assertEqual(tokens, expected_tokens,
"string not tokenized when encoding is None")
class TestDetectEncoding(TestCase):
@@ -1326,7 +1411,7 @@ class TestTokenize(TestCase):
def test_tokenize(self):
import tokenize as tokenize_module
encoding = object()
encoding = "utf-8"
encoding_used = None
def mock_detect_encoding(readline):
return encoding, [b'first', b'second']
@@ -1336,7 +1421,10 @@ class TestTokenize(TestCase):
encoding_used = encoding
out = []
while True:
next_line = readline()
try:
next_line = next(readline)
except StopIteration:
return out
if next_line:
out.append(next_line)
continue
@@ -1356,7 +1444,7 @@ class TestTokenize(TestCase):
tokenize_module._tokenize = mock__tokenize
try:
results = tokenize(mock_readline)
self.assertEqual(list(results),
self.assertEqual(list(results)[1:],
[b'first', b'second', b'1', b'2', b'3', b'4'])
finally:
tokenize_module.detect_encoding = orig_detect_encoding
@@ -1652,8 +1740,8 @@ class TestRoundtrip(TestCase):
if support.verbose >= 2:
print('tokenize', testfile)
with open(testfile, 'rb') as f:
with self.subTest(file=testfile):
self.check_roundtrip(f)
# with self.subTest(file=testfile):
self.check_roundtrip(f)
def roundtrip(self, code):
@@ -2496,13 +2584,13 @@ async def f():
def test_unicode(self):
self.check_tokenize("Örter = u'places'\ngrün = U'green'", """\
NAME 'Örter' (1, 0) (1, 6)
EQUAL '=' (1, 7) (1, 8)
STRING "u'places'" (1, 9) (1, 18)
NEWLINE '' (1, 18) (1, 18)
NAME 'grün' (2, 0) (2, 5)
EQUAL '=' (2, 6) (2, 7)
STRING "U'green'" (2, 8) (2, 16)
NAME 'Örter' (1, 0) (1, 5)
EQUAL '=' (1, 6) (1, 7)
STRING "u'places'" (1, 8) (1, 17)
NEWLINE '' (1, 17) (1, 17)
NAME 'grün' (2, 0) (2, 4)
EQUAL '=' (2, 5) (2, 6)
STRING "U'green'" (2, 7) (2, 15)
""")
def test_invalid_syntax(self):
@@ -2559,8 +2647,7 @@ async def f():
compile(valid, "<string>", "exec")
invalid = generate_source(MAXINDENT)
tokens = list(_generate_tokens_from_c_tokenizer(invalid))
self.assertEqual(tokens[-1].type, NEWLINE)
self.assertRaises(SyntaxError, lambda: list(_generate_tokens_from_c_tokenizer(invalid)))
self.assertRaises(
IndentationError, compile, invalid, "<string>", "exec"
)

Lib/token.py (generated)

@@ -67,10 +67,10 @@ SOFT_KEYWORD = 60
FSTRING_START = 61
FSTRING_MIDDLE = 62
FSTRING_END = 63
COMMENT = 64
NL = 65
# These aren't used by the C tokenizer but are needed for tokenize.py
ERRORTOKEN = 64
COMMENT = 65
NL = 66
ERRORTOKEN = 66
ENCODING = 67
N_TOKENS = 68
# Special definitions for cooperation with parser
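Since this renumbering moves COMMENT, NL and ERRORTOKEN between releases, code should address token types by name rather than by value; the names stay importable from token either way:

    import token

    # The numeric values are an implementation detail; the names are stable.
    for name in ("COMMENT", "NL", "ERRORTOKEN", "ENCODING"):
        value = getattr(token, name)
        print(name, value, token.tok_name[value])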

Lib/tokenize.py

@@ -56,112 +56,11 @@ class TokenInfo(collections.namedtuple('TokenInfo', 'type string start end line'
else:
return self.type
def group(*choices): return '(' + '|'.join(choices) + ')'
def any(*choices): return group(*choices) + '*'
def maybe(*choices): return group(*choices) + '?'
# Note: we use unicode matching for names ("\w") but ascii matching for
# number literals.
Whitespace = r'[ \f\t]*'
Comment = r'#[^\r\n]*'
Ignore = Whitespace + any(r'\\\r?\n' + Whitespace) + maybe(Comment)
Name = r'\w+'
Hexnumber = r'0[xX](?:_?[0-9a-fA-F])+'
Binnumber = r'0[bB](?:_?[01])+'
Octnumber = r'0[oO](?:_?[0-7])+'
Decnumber = r'(?:0(?:_?0)*|[1-9](?:_?[0-9])*)'
Intnumber = group(Hexnumber, Binnumber, Octnumber, Decnumber)
Exponent = r'[eE][-+]?[0-9](?:_?[0-9])*'
Pointfloat = group(r'[0-9](?:_?[0-9])*\.(?:[0-9](?:_?[0-9])*)?',
r'\.[0-9](?:_?[0-9])*') + maybe(Exponent)
Expfloat = r'[0-9](?:_?[0-9])*' + Exponent
Floatnumber = group(Pointfloat, Expfloat)
Imagnumber = group(r'[0-9](?:_?[0-9])*[jJ]', Floatnumber + r'[jJ]')
Number = group(Imagnumber, Floatnumber, Intnumber)
# Return the empty string, plus all of the valid string prefixes.
def _all_string_prefixes():
# The valid string prefixes. Only contain the lower case versions,
# and don't contain any permutations (include 'fr', but not
# 'rf'). The various permutations will be generated.
_valid_string_prefixes = ['b', 'r', 'u', 'f', 'br', 'fr']
# if we add binary f-strings, add: ['fb', 'fbr']
result = {''}
for prefix in _valid_string_prefixes:
for t in _itertools.permutations(prefix):
# create a list with upper and lower versions of each
# character
for u in _itertools.product(*[(c, c.upper()) for c in t]):
result.add(''.join(u))
return result
@functools.lru_cache
def _compile(expr):
return re.compile(expr, re.UNICODE)
# Note that since _all_string_prefixes includes the empty string,
# StringPrefix can be the empty string (making it optional).
StringPrefix = group(*_all_string_prefixes())
# Tail end of ' string.
Single = r"[^'\\]*(?:\\.[^'\\]*)*'"
# Tail end of " string.
Double = r'[^"\\]*(?:\\.[^"\\]*)*"'
# Tail end of ''' string.
Single3 = r"[^'\\]*(?:(?:\\.|'(?!''))[^'\\]*)*'''"
# Tail end of """ string.
Double3 = r'[^"\\]*(?:(?:\\.|"(?!""))[^"\\]*)*"""'
Triple = group(StringPrefix + "'''", StringPrefix + '"""')
# Single-line ' or " string.
String = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*'",
StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*"')
# Sorting in reverse order puts the long operators before their prefixes.
# Otherwise if = came before ==, == would get recognized as two instances
# of =.
Special = group(*map(re.escape, sorted(EXACT_TOKEN_TYPES, reverse=True)))
Funny = group(r'\r?\n', Special)
PlainToken = group(Number, Funny, String, Name)
Token = Ignore + PlainToken
# First (or only) line of ' or " string.
ContStr = group(StringPrefix + r"'[^\n'\\]*(?:\\.[^\n'\\]*)*" +
group("'", r'\\\r?\n'),
StringPrefix + r'"[^\n"\\]*(?:\\.[^\n"\\]*)*' +
group('"', r'\\\r?\n'))
PseudoExtras = group(r'\\\r?\n|\Z', Comment, Triple)
PseudoToken = Whitespace + group(PseudoExtras, Number, Funny, ContStr, Name)
# For a given string prefix plus quotes, endpats maps it to a regex
# to match the remainder of that string. _prefix can be empty, for
# a normal single or triple quoted string (with no prefix).
endpats = {}
for _prefix in _all_string_prefixes():
endpats[_prefix + "'"] = Single
endpats[_prefix + '"'] = Double
endpats[_prefix + "'''"] = Single3
endpats[_prefix + '"""'] = Double3
del _prefix
# A set of all of the single and triple quoted string prefixes,
# including the opening quotes.
single_quoted = set()
triple_quoted = set()
for t in _all_string_prefixes():
for u in (t + '"', t + "'"):
single_quoted.add(u)
for u in (t + '"""', t + "'''"):
triple_quoted.add(u)
del t, u
tabsize = 8
class TokenError(Exception): pass
class StopTokenizing(Exception): pass
class StopTokenizing(Exception): pass
class Untokenizer:
@@ -213,6 +112,14 @@ class Untokenizer:
self.tokens.append(indent)
self.prev_col = len(indent)
startline = False
elif tok_type == FSTRING_MIDDLE:
if '{' in token or '}' in token:
end_line, end_col = end
end = (end_line, end_col + token.count('{') + token.count('}'))
token = re.sub('{', '{{', token)
token = re.sub('}', '}}', token)
self.add_whitespace(start)
self.tokens.append(token)
self.prev_row, self.prev_col = end
@@ -255,6 +162,11 @@ class Untokenizer:
elif startline and indents:
toks_append(indents[-1])
startline = False
elif toknum == FSTRING_MIDDLE:
if '{' in tokval or '}' in tokval:
tokval = re.sub('{', '{{', tokval)
tokval = re.sub('}', '}}', tokval)
toks_append(tokval)
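Both untokenize() paths need this because the tokenizer hands back FSTRING_MIDDLE with literal braces already unescaped (for example 'a{' for the source text 'a{{'), so the braces must be doubled again on the way out for f-strings to round-trip. A hedged round-trip sketch, assuming Python 3.12+:

    import io
    import tokenize

    src = 'f"a{{b}}c"\n'
    toks = list(tokenize.generate_tokens(io.StringIO(src).readline))
    print(tokenize.untokenize(toks))  # expected to reproduce the source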
@@ -404,7 +316,6 @@ def open(filename):
buffer.close()
raise
def tokenize(readline):
"""
The tokenize() generator requires one argument, readline, which
@@ -425,192 +336,32 @@ def tokenize(readline):
which tells you which encoding was used to decode the bytes stream.
"""
encoding, consumed = detect_encoding(readline)
empty = _itertools.repeat(b"")
rl_gen = _itertools.chain(consumed, iter(readline, b""), empty)
return _tokenize(rl_gen.__next__, encoding)
def _tokenize(readline, encoding):
lnum = parenlev = continued = 0
numchars = '0123456789'
contstr, needcont = '', 0
contline = None
indents = [0]
rl_gen = _itertools.chain(consumed, iter(readline, b""))
if encoding is not None:
if encoding == "utf-8-sig":
# BOM will already have been stripped.
encoding = "utf-8"
yield TokenInfo(ENCODING, encoding, (0, 0), (0, 0), '')
last_line = b''
line = b''
while True: # loop over lines in stream
try:
# We capture the value of the line variable here because
# readline uses the empty string '' to signal end of input,
# hence `line` itself will always be overwritten at the end
# of this loop.
last_line = line
line = readline()
except StopIteration:
line = b''
yield from _tokenize(rl_gen, encoding)
if encoding is not None:
line = line.decode(encoding)
lnum += 1
pos, max = 0, len(line)
def _tokenize(rl_gen, encoding):
source = b"".join(rl_gen).decode(encoding)
token = None
for token in _generate_tokens_from_c_tokenizer(source, extra_tokens=True):
# TODO: Marta -> clean this up
if 6 < token.type <= 54:
token = token._replace(type=OP)
if token.type in {ASYNC, AWAIT}:
token = token._replace(type=NAME)
if token.type == NEWLINE:
l_start, c_start = token.start
l_end, c_end = token.end
token = token._replace(string='\n', start=(l_start, c_start), end=(l_end, c_end+1))
if contstr: # continued string
if not line:
raise TokenError("EOF in multi-line string", strstart)
endmatch = endprog.match(line)
if endmatch:
pos = end = endmatch.end(0)
yield TokenInfo(STRING, contstr + line[:end],
strstart, (lnum, end), contline + line)
contstr, needcont = '', 0
contline = None
elif needcont and line[-2:] != '\\\n' and line[-3:] != '\\\r\n':
yield TokenInfo(ERRORTOKEN, contstr + line,
strstart, (lnum, len(line)), contline)
contstr = ''
contline = None
continue
else:
contstr = contstr + line
contline = contline + line
continue
elif parenlev == 0 and not continued: # new statement
if not line: break
column = 0
while pos < max: # measure leading whitespace
if line[pos] == ' ':
column += 1
elif line[pos] == '\t':
column = (column//tabsize + 1)*tabsize
elif line[pos] == '\f':
column = 0
else:
break
pos += 1
if pos == max:
break
if line[pos] in '#\r\n': # skip comments or blank lines
if line[pos] == '#':
comment_token = line[pos:].rstrip('\r\n')
yield TokenInfo(COMMENT, comment_token,
(lnum, pos), (lnum, pos + len(comment_token)), line)
pos += len(comment_token)
yield TokenInfo(NL, line[pos:],
(lnum, pos), (lnum, len(line)), line)
continue
if column > indents[-1]: # count indents or dedents
indents.append(column)
yield TokenInfo(INDENT, line[:pos], (lnum, 0), (lnum, pos), line)
while column < indents[-1]:
if column not in indents:
raise IndentationError(
"unindent does not match any outer indentation level",
("<tokenize>", lnum, pos, line))
indents = indents[:-1]
yield TokenInfo(DEDENT, '', (lnum, pos), (lnum, pos), line)
else: # continued statement
if not line:
raise TokenError("EOF in multi-line statement", (lnum, 0))
continued = 0
while pos < max:
pseudomatch = _compile(PseudoToken).match(line, pos)
if pseudomatch: # scan for tokens
start, end = pseudomatch.span(1)
spos, epos, pos = (lnum, start), (lnum, end), end
if start == end:
continue
token, initial = line[start:end], line[start]
if (initial in numchars or # ordinary number
(initial == '.' and token != '.' and token != '...')):
yield TokenInfo(NUMBER, token, spos, epos, line)
elif initial in '\r\n':
if parenlev > 0:
yield TokenInfo(NL, token, spos, epos, line)
else:
yield TokenInfo(NEWLINE, token, spos, epos, line)
elif initial == '#':
assert not token.endswith("\n")
yield TokenInfo(COMMENT, token, spos, epos, line)
elif token in triple_quoted:
endprog = _compile(endpats[token])
endmatch = endprog.match(line, pos)
if endmatch: # all on one line
pos = endmatch.end(0)
token = line[start:pos]
yield TokenInfo(STRING, token, spos, (lnum, pos), line)
else:
strstart = (lnum, start) # multiple lines
contstr = line[start:]
contline = line
break
# Check up to the first 3 chars of the token to see if
# they're in the single_quoted set. If so, they start
# a string.
# We're using the first 3, because we're looking for
# "rb'" (for example) at the start of the token. If
# we switch to longer prefixes, this needs to be
# adjusted.
# Note that initial == token[:1].
# Also note that single quote checking must come after
# triple quote checking (above).
elif (initial in single_quoted or
token[:2] in single_quoted or
token[:3] in single_quoted):
if token[-1] == '\n': # continued string
strstart = (lnum, start)
# Again, using the first 3 chars of the
# token. This is looking for the matching end
# regex for the correct type of quote
# character. So it's really looking for
# endpats["'"] or endpats['"'], by trying to
# skip string prefix characters, if any.
endprog = _compile(endpats.get(initial) or
endpats.get(token[1]) or
endpats.get(token[2]))
contstr, needcont = line[start:], 1
contline = line
break
else: # ordinary string
yield TokenInfo(STRING, token, spos, epos, line)
elif initial.isidentifier(): # ordinary name
yield TokenInfo(NAME, token, spos, epos, line)
elif initial == '\\': # continued stmt
continued = 1
else:
if initial in '([{':
parenlev += 1
elif initial in ')]}':
parenlev -= 1
yield TokenInfo(OP, token, spos, epos, line)
else:
yield TokenInfo(ERRORTOKEN, line[pos],
(lnum, pos), (lnum, pos+1), line)
pos += 1
# Add an implicit NEWLINE if the input doesn't end in one
if last_line and last_line[-1] not in '\r\n' and not last_line.strip().startswith("#"):
yield TokenInfo(NEWLINE, '', (lnum - 1, len(last_line)), (lnum - 1, len(last_line) + 1), '')
for indent in indents[1:]: # pop remaining indent levels
yield TokenInfo(DEDENT, '', (lnum, 0), (lnum, 0), '')
yield TokenInfo(ENDMARKER, '', (lnum, 0), (lnum, 0), '')
yield token
if token is not None:
last_line, _ = token.start
yield TokenInfo(ENDMARKER, '', (last_line + 1, 0), (last_line + 1, 0), '')
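The remapping loop above is why callers keep seeing the classic generic OP tokens: the C tokenizer's specific operator types (the 6 < type <= 54 range) are folded back into OP, and TokenInfo.exact_type recovers the precise operator on demand:

    import io
    import tokenize

    toks = tokenize.generate_tokens(io.StringIO("1 + 2\n").readline)
    op = next(t for t in toks if t.type == tokenize.OP)
    print(op.string, tokenize.tok_name[op.exact_type])  # + PLUS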
def generate_tokens(readline):
@@ -619,7 +370,16 @@ def generate_tokens(readline):
This has the same API as tokenize(), except that it expects the *readline*
callable to return str objects instead of bytes.
"""
return _tokenize(readline, None)
def _gen():
while True:
try:
line = readline()
except StopIteration:
return
if not line:
return
yield line.encode()
return _tokenize(_gen(), 'utf-8')
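With this change generate_tokens() becomes a thin str-to-bytes bridge over the same code path as tokenize(); the user-visible difference stays what it always was, namely the leading ENCODING token on the bytes API:

    import io
    import tokenize

    src = "a = 1\n"
    str_toks = list(tokenize.generate_tokens(io.StringIO(src).readline))
    byte_toks = list(tokenize.tokenize(io.BytesIO(src.encode()).readline))
    print(byte_toks[0].type == tokenize.ENCODING)  # True; the str API emits no ENCODING token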
def main():
import argparse
@@ -656,7 +416,10 @@ def main():
tokens = list(tokenize(f.readline))
else:
filename = "<stdin>"
tokens = _tokenize(sys.stdin.readline, None)
tokens = _tokenize(
(x.encode('utf-8') for x in iter(sys.stdin.readline, "")
), "utf-8")
# Output the tokenization
for token in tokens:
@@ -682,10 +445,10 @@ def main():
perror("unexpected error: %s" % err)
raise
def _generate_tokens_from_c_tokenizer(source):
def _generate_tokens_from_c_tokenizer(source, extra_tokens=False):
"""Tokenize a source reading Python code as unicode strings using the internal C tokenizer"""
import _tokenize as c_tokenizer
for info in c_tokenizer.TokenizerIter(source):
for info in c_tokenizer.TokenizerIter(source, extra_tokens=extra_tokens):
tok, type, lineno, end_lineno, col_off, end_col_off, line = info
yield TokenInfo(type, tok, (lineno, col_off), (end_lineno, end_col_off), line)
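A usage sketch for this private helper, with the signature as it stands in this commit (not a public API, and subject to change):

    from tokenize import _generate_tokens_from_c_tokenizer

    # extra_tokens=True requests the COMMENT and NL tokens and related
    # metadata that the compatibility layer in _tokenize() relies on.
    for info in _generate_tokens_from_c_tokenizer("x = 1  # hi\n", extra_tokens=True):
        print(info)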