gh-102856: Python tokenizer implementation for PEP 701 (#104323)
This commit replaces the Python implementation of the tokenize module with an implementation that reuses the real C tokenizer via a private extension module. The tokenize module now implements a compatibility layer that transforms tokens from the C tokenizer into Python tokenize tokens for backward compatibility.

As the C tokenizer does not emit some tokens that the Python tokenizer provides (such as comments and non-semantic newlines), a new special mode has been added to the C tokenizer that is currently used only via the extension module that exposes it to the Python layer. This new mode forces the C tokenizer to emit these extra tokens and to add the metadata needed to match the old Python implementation.

Co-authored-by: Pablo Galindo <pablogsal@gmail.com>
parent 3ed57e4995
commit 6715f91edc
22 changed files with 424 additions and 374 deletions
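The commit message above says the compatibility layer re-emits the Python-only token types (comments and non-semantic newlines) on top of the C tokenizer. A quick way to see that behavior preserved, assuming Python 3.12+ where tokenize delegates to the C tokenizer:

import io
import tokenize

# Comments and blank-line newlines are not semantic for the parser, so the
# raw C tokenizer drops them; the special "extra tokens" mode reinstates
# them so tokenize keeps its documented output.
src = "x = 1  # a comment\n\n"
for tok in tokenize.generate_tokens(io.StringIO(src).readline):
    print(tokenize.tok_name[tok.type], repr(tok.string))
# Expected to include COMMENT '# a comment' and an NL token alongside
# NAME, OP, NUMBER, NEWLINE and ENDMARKER.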
Lib/tabnanny.py
@@ -107,6 +107,10 @@ def check(file):
         errprint("%r: Token Error: %s" % (file, msg))
         return
 
+    except SyntaxError as msg:
+        errprint("%r: Token Error: %s" % (file, msg))
+        return
+
     except IndentationError as msg:
         errprint("%r: Indentation Error: %s" % (file, msg))
         return
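The new handler in the hunk above exists because, with the C tokenizer behind tokenize, some malformed input can surface as a plain SyntaxError rather than tokenize.TokenError, and tabnanny reports both the same way. A hedged illustration of the calling pattern (the exact error type raised for a given input is an assumption about the 3.12+ behavior):

import io
import tokenize

bad = "x = (\n"  # unterminated grouping: tokenization cannot finish
try:
    list(tokenize.generate_tokens(io.StringIO(bad).readline))
except (tokenize.TokenError, SyntaxError) as msg:
    # Mirrors tabnanny.check(): both paths print the same diagnostic.
    print("%r: Token Error: %s" % ("example.py", msg))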
@@ -272,6 +276,12 @@ def format_witnesses(w):
     return prefix + " " + ', '.join(firsts)
 
 def process_tokens(tokens):
+    try:
+        _process_tokens(tokens)
+    except TabError as e:
+        raise NannyNag(e.lineno, e.msg, e.text)
+
+def _process_tokens(tokens):
     INDENT = tokenize.INDENT
     DEDENT = tokenize.DEDENT
     NEWLINE = tokenize.NEWLINE
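The second hunk wraps the old function body (renamed to _process_tokens) so that a TabError raised by the C tokenizer is translated into tabnanny's own NannyNag, keeping the module's public error type stable. A small demonstration of the TabError the wrapper intercepts (behavior assumed for Python 3.12+; the pure-Python tokenizer did not raise it here):

import io
import tokenize

# Line 2 indents with 8 spaces, line 3 with a tab: equivalent at tab size 8
# but not at tab size 1, which is exactly the ambiguity TabError flags.
src = "if True:\n        pass\n\tpass\n"
try:
    list(tokenize.generate_tokens(io.StringIO(src).readline))
except TabError as e:
    # tabnanny's process_tokens() re-raises this as NannyNag(e.lineno, e.msg, e.text)
    print(e.lineno, e.msg)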