reuse tokenize.detect_encoding in linecache instead of a custom solution
patch by Victor Stinner #4016
commit 9b8d24b17d
parent a8abe86331
2 changed files with 8 additions and 23 deletions
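
The commit message refers to tokenize.detect_encoding(), which takes a readline callable producing bytes and returns the encoding name together with the (up to two) lines it consumed while looking for a UTF-8 BOM or a PEP 263 coding cookie, falling back to utf-8. A minimal sketch of the kind of reuse the message describes, with a made-up helper name rather than the actual patched linecache code:

    import tokenize

    def read_source_lines(filename):
        # Hypothetical helper, not the patched linecache code itself.
        # detect_encoding() needs a readline that yields bytes, so the
        # file is opened in binary mode first.
        with open(filename, 'rb') as fp:
            encoding, consumed = tokenize.detect_encoding(fp.readline)
        # Reopen as text now that the right codec is known.
        with open(filename, 'r', encoding=encoding) as fp:
            return fp.readlines()

Calling detect_encoding this way is what would let linecache drop its own cookie-parsing code, which is presumably where most of this commit's 23 deleted lines come from.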
Lib/tokenize.py

@@ -27,7 +27,6 @@ __credits__ = ('GvR, ESR, Tim Peters, Thomas Wouters, Fred Drake, '
 import re, string, sys
 from token import *
 from codecs import lookup, BOM_UTF8
-from itertools import chain, repeat
 cookie_re = re.compile("coding[:=]\s*([-\w.]+)")
 
 import token
@@ -327,13 +326,15 @@ def tokenize(readline):
     which tells you which encoding was used to decode the bytes stream.
     """
     encoding, consumed = detect_encoding(readline)
-    def readline_generator():
+    def readline_generator(consumed):
+        for line in consumed:
+            yield line
         while True:
             try:
                 yield readline()
             except StopIteration:
                 return
-    chained = chain(consumed, readline_generator())
+    chained = readline_generator(consumed)
     return _tokenize(chained.__next__, encoding)
 
 
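The tokenize.py hunks above are a companion refactor: instead of stitching the consumed lines onto the live stream with itertools.chain, readline_generator() now receives the consumed lines and replays them itself before polling readline(), which is why the itertools import can be dropped. A standalone sketch of that pattern, with illustrative names that are not in the diff:

    def replay_then_read(consumed, readline):
        # First replay the buffered lines that detect_encoding() consumed
        # while sniffing the encoding ...
        for line in consumed:
            yield line
        # ... then fall through to the live readline callable until it
        # signals exhaustion by raising StopIteration.
        while True:
            try:
                yield readline()
            except StopIteration:
                return

    # One line was "sniffed" up front; the rest sit in an iterator whose
    # next() raises StopIteration when drained.
    rest = iter([b'x = 1\n', b'y = 2\n'])
    consumed = [b'# coding: utf-8\n']
    print(list(replay_then_read(consumed, lambda: next(rest))))
    # -> [b'# coding: utf-8\n', b'x = 1\n', b'y = 2\n']

Folding the replay into a single generator keeps the whole line stream behind one __next__ callable, which is exactly what the final return _tokenize(chained.__next__, encoding) consumes.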