mirror of https://github.com/python/cpython.git
Problem found while converting from PyBytes to PyString:
Re-enable (and correct) a test for the BOM at the beginning of a code unit. And properly "unget" characters when the BOM is incomplete.
parent 24eac034be
commit af59346f1a

2 changed files with 22 additions and 11 deletions
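
The behavior the re-enabled tests exercise can be tried standalone. The sketch below is illustrative only (not part of the commit); the '<bom-test>' file name is just a placeholder:

    # compile() and eval() accept a bytes source that starts with a UTF-8 BOM,
    # while an incomplete BOM is rejected as a syntax error.
    bom = b'\xef\xbb\xbf'

    # A complete BOM in front of a bytes source is stripped by the tokenizer.
    exec(compile(bom + b'print(1)\n', '<bom-test>', 'exec'))

    # eval() tolerates the same prefix.
    assert eval(bom + b'a', {'a': 1, 'b': 2}, {'b': 200, 'c': 300}) == 1

    # Only the first two BOM bytes: the stray '\xef' makes the source invalid.
    try:
        eval(bom[:2] + b'a')
    except SyntaxError:
        print('incomplete BOM rejected as expected')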

Lib/test/test_builtin.py

@@ -200,8 +200,8 @@ class BuiltinTest(unittest.TestCase):
     def test_compile(self):
         compile('print(1)\n', '', 'exec')
-##        bom = b'\xef\xbb\xbf'
-##        compile(bom + b'print(1)\n', '', 'exec')
+        bom = b'\xef\xbb\xbf'
+        compile(bom + b'print(1)\n', '', 'exec')
         compile(source='pass', filename='?', mode='exec')
         compile(dont_inherit=0, filename='tmp', source='0', mode='eval')
         compile('pass', '?', dont_inherit=1, mode='exec')
@@ -327,11 +327,12 @@ class BuiltinTest(unittest.TestCase):
         self.assertEqual(eval('c', globals, locals), 300)
         globals = {'a': 1, 'b': 2}
         locals = {'b': 200, 'c': 300}
-##        bom = b'\xef\xbb\xbf'
-##        self.assertEqual(eval(bom + b'a', globals, locals), 1)
+        bom = b'\xef\xbb\xbf'
+        self.assertEqual(eval(bom + b'a', globals, locals), 1)
         self.assertEqual(eval('"\xe5"', globals), "\xe5")
         self.assertRaises(TypeError, eval)
         self.assertRaises(TypeError, eval, ())
+        self.assertRaises(SyntaxError, eval, bom[:2] + b'a')

     def test_general_eval(self):
         # Tests that general mappings can be used for the locals argument

Parser/tokenizer.c

@@ -323,8 +323,21 @@ check_bom(int get_char(struct tok_state *),
         if (ch == EOF) {
                 return 1;
         } else if (ch == 0xEF) {
-                ch = get_char(tok); if (ch != 0xBB) goto NON_BOM;
-                ch = get_char(tok); if (ch != 0xBF) goto NON_BOM;
+                ch = get_char(tok);
+                if (ch != 0xBB) {
+                        unget_char(ch, tok);
+                        unget_char(0xEF, tok);
+                        /* any token beginning with '\xEF' is a bad token */
+                        return 1;
+                }
+                ch = get_char(tok);
+                if (ch != 0xBF) {
+                        unget_char(ch, tok);
+                        unget_char(0xBB, tok);
+                        unget_char(0xEF, tok);
+                        /* any token beginning with '\xEF' is a bad token */
+                        return 1;
+                }
 #if 0
         /* Disable support for UTF-16 BOMs until a decision
            is made whether this needs to be supported. */
@@ -344,10 +357,7 @@ check_bom(int get_char(struct tok_state *),
         if (tok->encoding != NULL)
                 PyMem_FREE(tok->encoding);
         tok->encoding = new_string("utf-8", 5);        /* resulting is in utf-8 */
-        return 1;
-  NON_BOM:
-        /* any token beginning with '\xEF', '\xFE', '\xFF' is a bad token */
-        unget_char(0xFF, tok);        /* XXX this will cause a syntax error */
+        /* No need to set_readline: input is already utf-8 */
         return 1;
 }

@@ -641,7 +651,7 @@ decode_str(const char *str, struct tok_state *tok)
                 utf8 = translate_into_utf8(str, tok->enc);
                 if (utf8 == NULL)
                         return error_ret(tok);
-                str = PyBytes_AsString(utf8);
+                str = PyString_AsString(utf8);
         }
         for (s = str;; s++) {
                 if (*s == '\0') break;
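
To summarize the corrected check_bom() flow above: when the second or third BOM byte does not match, every byte already consumed is pushed back in reverse order, so the tokenizer later sees the original input and reports the stray 0xEF as a bad token; the old NON_BOM path instead pushed back a 0xFF byte that was never in the input. The following Python model is an illustrative sketch only (get/unget stand in for the tokenizer's character stream and are not real CPython APIs):

    # Illustrative model of the corrected BOM handling; not CPython code.
    def check_bom(get_char, unget_char):
        ch = get_char()
        if ch != 0xEF:
            unget_char(ch)       # not a BOM candidate: push the byte back untouched
            return
        ch = get_char()
        if ch != 0xBB:
            unget_char(ch)       # push back the mismatching byte...
            unget_char(0xEF)     # ...and the 0xEF that was already consumed
            return               # a token beginning with 0xEF is then a bad token
        ch = get_char()
        if ch != 0xBF:
            unget_char(ch)       # all three consumed bytes go back,
            unget_char(0xBB)     # in reverse order, restoring the
            unget_char(0xEF)     # original byte sequence
            return
        # complete BOM: nothing to push back; the source is treated as UTF-8

    # Tiny driver: feed an incomplete BOM followed by 'a'.
    buf = list(b'\xef\xbba')
    pushed = []

    def get():
        return pushed.pop() if pushed else buf.pop(0)

    def unget(c):
        pushed.append(c)

    check_bom(get, unget)
    print(bytes(reversed(pushed)) + bytes(buf))   # b'\xef\xbba' -- input left intact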