SF bug #1224621: tokenize module does not detect inconsistent dedents

This commit is contained in:
Raymond Hettinger 2005-06-21 07:43:58 +00:00
parent 8fa7eb563b
commit da99d1cbfe
3 changed files with 25 additions and 1 deletions

View file

@ -1,4 +1,4 @@
from test.test_support import verbose, findfile, is_resource_enabled
from test.test_support import verbose, findfile, is_resource_enabled, TestFailed
import os, glob, random
from tokenize import (tokenize, generate_tokens, untokenize,
NUMBER, NAME, OP, STRING)
@ -41,6 +41,24 @@ for f in testfiles:
test_roundtrip(f)
###### Test detection of IndentationError ######################

from cStringIO import StringIO

# Deliberately inconsistent dedent: "baz" dedents to a column (2) that
# matches no enclosing indentation level (0 or 4), so generate_tokens()
# must raise IndentationError (SF bug #1224621: tokenize previously
# accepted such input silently).
sampleBadText = """
def foo():
    bar
  baz
"""

try:
    # Drain the token stream; the error surfaces partway through iteration.
    for tok in generate_tokens(StringIO(sampleBadText).readline):
        pass
except IndentationError:
    pass  # expected: the inconsistent dedent was detected
else:
    # generate_tokens consumed the bad input without complaint -> bug.
    raise TestFailed("Did not detect IndentationError:")
###### Test example in the docs ###############################