bpo-30377: Simplify handling of COMMENT and NL in tokenize.py (#1607)

This commit is contained in:
Albert-Jan Nijburg 2017-05-24 12:31:57 +01:00 committed by Serhiy Storchaka
parent a17a2f52c4
commit c471ca448c
2 changed files with 12 additions and 12 deletions

View file

@@ -560,13 +560,11 @@ def _tokenize(readline, encoding):
             if line[pos] in '#\r\n':           # skip comments or blank lines
                 if line[pos] == '#':
                     comment_token = line[pos:].rstrip('\r\n')
-                    nl_pos = pos + len(comment_token)
                     yield TokenInfo(COMMENT, comment_token,
                                     (lnum, pos), (lnum, pos + len(comment_token)), line)
-                    yield TokenInfo(NL, line[nl_pos:],
-                                    (lnum, nl_pos), (lnum, len(line)), line)
-                else:
-                    yield TokenInfo((NL, COMMENT)[line[pos] == '#'], line[pos:],
-                                    (lnum, pos), (lnum, len(line)), line)
+                    pos += len(comment_token)
+
+                yield TokenInfo(NL, line[pos:],
+                                (lnum, pos), (lnum, len(line)), line)
                 continue