mirror of https://github.com/python/cpython.git, synced 2025-07-24 11:44:31 +00:00
- Issue #719888: Updated tokenize to use a bytes API. generate_tokens has been renamed tokenize and now works with bytes rather than strings. A new detect_encoding function has been added for determining source file encoding according to PEP-0263. Token sequences returned by tokenize always start with an ENCODING token which specifies the encoding used to decode the file. This token is used to encode the output of untokenize back to bytes. Credit goes to Michael "I'm-going-to-name-my-first-child-unittest" Foord from Resolver Systems for this work.
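For reference, a minimal sketch of the new bytes-oriented API described above, assuming nothing beyond the standard io and tokenize modules; the sample source bytes are illustrative, not taken from the commit:

import io
import tokenize

source = b"x = 1\n"

# detect_encoding() looks for a PEP 263 coding cookie or BOM in the first
# two lines and otherwise falls back to the default source encoding.
encoding, consumed_lines = tokenize.detect_encoding(io.BytesIO(source).readline)

# tokenize() takes a readline callable that returns bytes; the first token
# it yields is an ENCODING token naming the encoding used to decode the file.
tokens = list(tokenize.tokenize(io.BytesIO(source).readline))
assert tokens[0][0] == tokenize.ENCODING

# Because the token stream leads with ENCODING, untokenize() can encode its
# output back to bytes in that same encoding.
round_tripped = tokenize.untokenize(tokens)
assert isinstance(round_tripped, bytes)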
This commit is contained in:
parent 112367a980
commit 428de65ca9
16 changed files with 609 additions and 182 deletions
@@ -103,7 +103,9 @@ class AppendChecker:
     def run(self):
         try:
-            tokenize.tokenize(self.file.readline, self.tokeneater)
+            tokens = tokenize.generate_tokens(self.file.readline)
+            for _token in tokens:
+                self.tokeneater(*_token)
         except tokenize.TokenError as msg:
             errprint("%r: Token Error: %s" % (self.fname, msg))
             self.nerrors = self.nerrors + 1
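The hunk above replaces the old callback form, tokenize.tokenize(readline, tokeneater), with the string-based generate_tokens() generator. For comparison, a hedged sketch of what a similar loop could look like against the renamed bytes API, assuming the source file is opened in binary mode (the file name here is purely illustrative):

import tokenize

# Open the source in binary mode and let tokenize() decode it using the
# encoding reported by its leading ENCODING token.
with open("example.py", "rb") as f:
    for tok in tokenize.tokenize(f.readline):
        print(tok)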