bpo-12486: Document tokenize.generate_tokens() as public API (#6957)
* Document tokenize.generate_tokens()
* Add news file
* Add test for generate_tokens
* Document behaviour around ENCODING token
* Add generate_tokens to __all__
parent c2745d2d05
commit c56b17bd8c
4 changed files with 35 additions and 6 deletions
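The commit promotes generate_tokens() from an undocumented helper to documented public API. A minimal sketch (not part of the commit) contrasting the two entry points: tokenize() takes a readline callable that returns bytes and emits an ENCODING token first, while generate_tokens() takes one that returns str and emits no ENCODING token.

    from io import BytesIO, StringIO
    from tokenize import generate_tokens, tok_name, tokenize

    source = "x = 1\n"

    # tokenize() wants a readline callable returning bytes ...
    byte_tokens = list(tokenize(BytesIO(source.encode("utf-8")).readline))
    # ... generate_tokens() wants one returning str.
    str_tokens = list(generate_tokens(StringIO(source).readline))

    print(tok_name[byte_tokens[0].type])  # ENCODING
    print(tok_name[str_tokens[0].type])   # NAME -- no ENCODING token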
Lib/test/test_tokenize.py

@@ -1,8 +1,8 @@
 from test import support
 from tokenize import (tokenize, _tokenize, untokenize, NUMBER, NAME, OP,
                      STRING, ENDMARKER, ENCODING, tok_name, detect_encoding,
-                     open as tokenize_open, Untokenizer)
-from io import BytesIO
+                     open as tokenize_open, Untokenizer, generate_tokens)
+from io import BytesIO, StringIO
 import unittest
 from unittest import TestCase, mock
 from test.test_grammar import (VALID_UNDERSCORE_LITERALS,
@@ -919,6 +919,19 @@ async def f():
     DEDENT     ''            (7, 0) (7, 0)
     """)
 
+
+class GenerateTokensTest(TokenizeTest):
+    def check_tokenize(self, s, expected):
+        # Format the tokens in s in a table format.
+        # The ENDMARKER is omitted.
+        result = []
+        f = StringIO(s)
+        for type, token, start, end, line in generate_tokens(f.readline):
+            if type == ENDMARKER:
+                break
+            type = tok_name[type]
+            result.append(f"    {type:10} {token!r:13} {start} {end}")
+        self.assertEqual(result, expected.rstrip().splitlines())
 
 def decistmt(s):
     result = []
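Subclassing TokenizeTest means every existing token-table test also runs against generate_tokens(). As an illustrative aside (assumed usage, not part of the diff): because a generate_tokens() stream carries no ENCODING token, untokenize() reconstructs the source as str rather than bytes.

    from io import StringIO
    from tokenize import generate_tokens, untokenize

    source = "total = price * quantity\n"
    tokens = generate_tokens(StringIO(source).readline)

    rebuilt = untokenize(tokens)
    assert isinstance(rebuilt, str)  # would be bytes if an ENCODING token were present
    assert rebuilt == source         # full 5-tuples preserve spacing for this simple input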