bpo-40334: PEP 617 implementation: New PEG parser for CPython (GH-19503)

Co-authored-by: Guido van Rossum <guido@python.org>
Co-authored-by: Lysandros Nikolaou <lisandrosnik@gmail.com>
Pablo Galindo 2020-04-22 23:29:27 +01:00 committed by GitHub
parent a81849b031
commit c5fc156852
91 changed files with 27057 additions and 146 deletions
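Throughout the diff below, behavior that differs between the parsers is gated on the new sys.flags.use_peg flag; the old parser stays reachable via -X oldparser (exercised as '-Xoldparser' in test_parser below). A minimal sketch of that pattern, with a hypothetical test class:

import sys
import unittest

# sys.flags.use_peg is 1 when the new PEG parser is active (the default
# after this commit, per the use_peg entries in the test_embed hunks below)
# and 0 when -X oldparser selects the old parser.
print("PEG parser in use:", bool(sys.flags.use_peg))

@unittest.skipIf(sys.flags.use_peg, "not supported by pegen yet")
class OldParserOnlyTests(unittest.TestCase):  # hypothetical example
    def test_placeholder(self):
        pass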


@@ -599,7 +599,7 @@ class CmdLineTest(unittest.TestCase):
exitcode, stdout, stderr = assert_python_failure(script_name)
text = io.TextIOWrapper(io.BytesIO(stderr), 'ascii').read()
# Confirm that the caret is located under the first 1 character
self.assertIn("\n 1 + 1 = 2\n ^", text)
self.assertIn("\n 1 + 1 = 2\n ^", text)
def test_syntaxerror_indented_caret_position(self):
script = textwrap.dedent("""\
@@ -611,7 +611,7 @@ class CmdLineTest(unittest.TestCase):
exitcode, stdout, stderr = assert_python_failure(script_name)
text = io.TextIOWrapper(io.BytesIO(stderr), 'ascii').read()
# Confirm that the caret is located under the first 1 character
self.assertIn("\n 1 + 1 = 2\n ^", text)
self.assertIn("\n 1 + 1 = 2\n ^", text)
# Try the same with a form feed at the start of the indented line
script = (
@@ -622,7 +622,7 @@ class CmdLineTest(unittest.TestCase):
exitcode, stdout, stderr = assert_python_failure(script_name)
text = io.TextIOWrapper(io.BytesIO(stderr), "ascii").read()
self.assertNotIn("\f", text)
self.assertIn("\n 1 + 1 = 2\n ^", text)
self.assertIn("\n 1 + 1 = 2\n ^", text)
def test_syntaxerror_multi_line_fstring(self):
script = 'foo = f"""{}\nfoo"""\n'
@@ -632,14 +632,14 @@ class CmdLineTest(unittest.TestCase):
self.assertEqual(
stderr.splitlines()[-3:],
[
b' foo = f"""{}',
b' ^',
b' foo"""',
b' ^',
b'SyntaxError: f-string: empty expression not allowed',
],
)
def test_syntaxerror_invalid_escape_sequence_multi_line(self):
script = 'foo = """\\q\n"""\n'
script = 'foo = """\\q"""\n'
with support.temp_dir() as script_dir:
script_name = _make_test_script(script_dir, 'script', script)
exitcode, stdout, stderr = assert_python_failure(
@@ -647,10 +647,9 @@ class CmdLineTest(unittest.TestCase):
)
self.assertEqual(
stderr.splitlines()[-3:],
[
b' foo = """\\q',
b' ^',
b'SyntaxError: invalid escape sequence \\q',
[ b' foo = """\\q"""',
b' ^',
b'SyntaxError: invalid escape sequence \\q'
],
)
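For reference, the caret output these assertions pin down can be reproduced directly; a minimal sketch (the exact column and message wording differ between the two parsers, which is what these hunks adjust):

import subprocess
import sys

# "1 + 1 = 2" is a syntax error; the traceback underlines the bad target.
proc = subprocess.run([sys.executable, "-c", "1 + 1 = 2"],
                      capture_output=True, text=True)
print(proc.stderr)
# Typical shape:
#     1 + 1 = 2
#         ^
# SyntaxError: cannot assign to operator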


@@ -2,6 +2,7 @@
Test cases for codeop.py
Nick Mathewson
"""
import sys
import unittest
from test.support import is_jython
@@ -9,7 +10,6 @@ from codeop import compile_command, PyCF_DONT_IMPLY_DEDENT
import io
if is_jython:
import sys
def unify_callables(d):
for n,v in d.items():
@@ -122,6 +122,7 @@ class CodeopTests(unittest.TestCase):
av("def f():\n pass\n#foo\n")
av("@a.b.c\ndef f():\n pass\n")
@unittest.skipIf(sys.flags.use_peg, "Pegen does not support PyCF_DONT_IMPLY_DEDENT yet")
def test_incomplete(self):
ai = self.assertIncomplete


@@ -501,6 +501,7 @@ if 1:
self.compile_single("if x:\n f(x)\nelse:\n g(x)")
self.compile_single("class T:\n pass")
@unittest.skipIf(sys.flags.use_peg, 'Pegen does not disallow multiline single stmts')
def test_bad_single_statement(self):
self.assertInvalidSingle('1\n2')
self.assertInvalidSingle('def f(): pass')


@@ -347,6 +347,7 @@ class InitConfigTests(EmbeddingTestsMixin, unittest.TestCase):
'isolated': 0,
'use_environment': 1,
'dev_mode': 0,
'use_peg': 1,
'install_signal_handlers': 1,
'use_hash_seed': 0,
@@ -728,6 +729,7 @@ class InitConfigTests(EmbeddingTestsMixin, unittest.TestCase):
'import_time': 1,
'show_ref_count': 1,
'malloc_stats': 1,
'use_peg': 0,
'stdio_encoding': 'iso8859-1',
'stdio_errors': 'replace',


@@ -26,6 +26,7 @@ class EOFTestCase(unittest.TestCase):
else:
raise support.TestFailed
@unittest.skipIf(sys.flags.use_peg, "TODO for PEG -- fails with new parser")
def test_line_continuation_EOF(self):
"""A continuation at the end of input must be an error; bpo2180."""
expect = 'unexpected EOF while parsing (<string>, line 1)'
@@ -36,6 +37,7 @@ class EOFTestCase(unittest.TestCase):
exec('\\')
self.assertEqual(str(excinfo.exception), expect)
@unittest.skip("TODO for PEG -- fails even with old parser now")
@unittest.skipIf(not sys.executable, "sys.executable required")
def test_line_continuation_EOF_from_file_bpo2180(self):
"""Ensure tok_nextc() does not add too many ending newlines."""


@@ -178,6 +178,7 @@ class ExceptionTests(unittest.TestCase):
s = '''if True:\n print()\n\texec "mixed tabs and spaces"'''
ckmsg(s, "inconsistent use of tabs and spaces in indentation", TabError)
@unittest.skipIf(sys.flags.use_peg, "Pegen column offsets might be different")
def testSyntaxErrorOffset(self):
def check(src, lineno, offset, encoding='utf-8'):
with self.assertRaises(SyntaxError) as cm:


@@ -1,6 +1,9 @@
import __future__
import unittest
import sys
@unittest.skipIf(sys.flags.use_peg, "Not supported by pegen yet")
class FLUFLTests(unittest.TestCase):
def test_barry_as_bdfl(self):


@@ -10,6 +10,7 @@
import ast
import types
import decimal
import sys
import unittest
a_global = 'global variable'
@@ -205,7 +206,8 @@ f'{a * f"-{x()}-"}'"""
call = binop.right.values[1].value
self.assertEqual(type(call), ast.Call)
self.assertEqual(call.lineno, 3)
self.assertEqual(call.col_offset, 11)
if not sys.flags.use_peg:
self.assertEqual(call.col_offset, 11)
def test_ast_line_numbers_duplicate_expression(self):
"""Duplicate expression


@@ -1856,10 +1856,11 @@ Traceback (most recent call last):
...
SyntaxError: 'yield' outside function
>>> def f(): x = yield = y
Traceback (most recent call last):
...
SyntaxError: assignment to yield expression not possible
# Pegen does not produce this error message yet
# >>> def f(): x = yield = y
# Traceback (most recent call last):
# ...
# SyntaxError: assignment to yield expression not possible
>>> def f(): (yield bar) = y
Traceback (most recent call last):


@@ -8,6 +8,7 @@ import pickle
import unittest
import operator
import struct
import sys
from test import support
from test.support.script_helper import assert_python_failure
from test.support.script_helper import assert_python_ok
@@ -899,9 +900,10 @@ class ParserStackLimitTestCase(unittest.TestCase):
st = parser.expr(e)
st.compile()
@unittest.skipIf(sys.flags.use_peg, "Pegen does not trigger a memory error with this many parentheses")
def test_trigger_memory_error(self):
e = self._nested_expression(100)
rc, out, err = assert_python_failure('-c', e)
rc, out, err = assert_python_failure('-Xoldparser', '-c', e)
# parsing the expression will result in an error message
# followed by a MemoryError (see #11963)
self.assertIn(b's_push: parser stack overflow', err)


@@ -0,0 +1,7 @@
import os
from test.support import load_package_tests
# Load all tests in package
def load_tests(*args):
return load_package_tests(os.path.dirname(__file__), *args)


@@ -0,0 +1,4 @@
import unittest
from . import load_tests
unittest.main()


@@ -0,0 +1,62 @@
"""
Copy-paste of ast.dump, removing the `isinstance` checks. This is needed,
because testing pegen requires generating a C extension module, which contains
a copy of the symbols defined in Python-ast.c. Thus, the isinstance check would
always fail. We rely on string comparison of the base classes instead.
TODO: Remove the above-described hack.
"""
def ast_dump(node, annotate_fields=True, include_attributes=False, *, indent=None):
def _format(node, level=0):
if indent is not None:
level += 1
prefix = '\n' + indent * level
sep = ',\n' + indent * level
else:
prefix = ''
sep = ', '
if any(cls.__name__ == 'AST' for cls in node.__class__.__mro__):
cls = type(node)
args = []
allsimple = True
keywords = annotate_fields
for name in node._fields:
try:
value = getattr(node, name)
except AttributeError:
keywords = True
continue
if value is None and getattr(cls, name, ...) is None:
keywords = True
continue
value, simple = _format(value, level)
allsimple = allsimple and simple
if keywords:
args.append('%s=%s' % (name, value))
else:
args.append(value)
if include_attributes and node._attributes:
for name in node._attributes:
try:
value = getattr(node, name)
except AttributeError:
continue
if value is None and getattr(cls, name, ...) is None:
continue
value, simple = _format(value, level)
allsimple = allsimple and simple
args.append('%s=%s' % (name, value))
if allsimple and len(args) <= 3:
return '%s(%s)' % (node.__class__.__name__, ', '.join(args)), not args
return '%s(%s%s)' % (node.__class__.__name__, prefix, sep.join(args)), False
elif isinstance(node, list):
if not node:
return '[]', True
return '[%s%s]' % (prefix, sep.join(_format(x, level)[0] for x in node)), False
return repr(node), True
if all(cls.__name__ != 'AST' for cls in node.__class__.__mro__):
raise TypeError('expected AST, got %r' % node.__class__.__name__)
if indent is not None and not isinstance(indent, str):
indent = ' ' * indent
return _format(node)[0]
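A sketch of the intended usage, with 'extension' standing in for a hypothetical module produced by generate_parser_c_extension in the tests below:

import ast

expected = ast_dump(ast.parse("pass"))
print(expected)  # Module(body=[Pass()], type_ignores=[])
# actual = ast_dump(extension.parse_string("pass", mode=1))
# assert expected == actual  -- string comparison instead of isinstance checks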


@@ -0,0 +1,333 @@
import ast
import contextlib
import traceback
import tempfile
import shutil
import unittest
import sys
from test import test_tools
from test.test_peg_generator.ast_dump import ast_dump
from pathlib import PurePath, Path
from typing import Sequence
test_tools.skip_if_missing('peg_generator')
with test_tools.imports_under_tool('peg_generator'):
from pegen.grammar_parser import GeneratedParser as GrammarParser
from pegen.testutil import (
parse_string,
generate_parser_c_extension,
generate_c_parser_source,
)
class TestCParser(unittest.TestCase):
def setUp(self):
self.tmp_path = tempfile.mkdtemp()
def tearDown(self):
with contextlib.suppress(PermissionError):
shutil.rmtree(self.tmp_path)
def check_input_strings_for_grammar(
self,
source: str,
tmp_path: PurePath,
valid_cases: Sequence[str] = (),
invalid_cases: Sequence[str] = (),
) -> None:
grammar = parse_string(source, GrammarParser)
extension = generate_parser_c_extension(grammar, Path(tmp_path))
if valid_cases:
for case in valid_cases:
extension.parse_string(case, mode=0)
if invalid_cases:
for case in invalid_cases:
with self.assertRaises(SyntaxError):
extension.parse_string(case, mode=0)
def verify_ast_generation(self, source: str, stmt: str, tmp_path: PurePath) -> None:
grammar = parse_string(source, GrammarParser)
extension = generate_parser_c_extension(grammar, Path(tmp_path))
expected_ast = ast.parse(stmt)
actual_ast = extension.parse_string(stmt, mode=1)
self.assertEqual(ast_dump(expected_ast), ast_dump(actual_ast))
def test_c_parser(self) -> None:
grammar_source = """
start[mod_ty]: a=stmt* $ { Module(a, NULL, p->arena) }
stmt[stmt_ty]: a=expr_stmt { a }
expr_stmt[stmt_ty]: a=expression NEWLINE { _Py_Expr(a, EXTRA) }
expression[expr_ty]: ( l=expression '+' r=term { _Py_BinOp(l, Add, r, EXTRA) }
| l=expression '-' r=term { _Py_BinOp(l, Sub, r, EXTRA) }
| t=term { t }
)
term[expr_ty]: ( l=term '*' r=factor { _Py_BinOp(l, Mult, r, EXTRA) }
| l=term '/' r=factor { _Py_BinOp(l, Div, r, EXTRA) }
| f=factor { f }
)
factor[expr_ty]: ('(' e=expression ')' { e }
| a=atom { a }
)
atom[expr_ty]: ( n=NAME { n }
| n=NUMBER { n }
| s=STRING { s }
)
"""
grammar = parse_string(grammar_source, GrammarParser)
extension = generate_parser_c_extension(grammar, Path(self.tmp_path))
expressions = [
"4+5",
"4-5",
"4*5",
"1+4*5",
"1+4/5",
"(1+1) + (1+1)",
"(1+1) - (1+1)",
"(1+1) * (1+1)",
"(1+1) / (1+1)",
]
for expr in expressions:
the_ast = extension.parse_string(expr, mode=1)
expected_ast = ast.parse(expr)
self.assertEqual(ast_dump(the_ast), ast_dump(expected_ast))
def test_lookahead(self) -> None:
grammar = """
start: NAME &NAME expr NEWLINE? ENDMARKER
expr: NAME | NUMBER
"""
valid_cases = ["foo bar"]
invalid_cases = ["foo 34"]
self.check_input_strings_for_grammar(grammar, self.tmp_path, valid_cases, invalid_cases)
def test_negative_lookahead(self) -> None:
grammar = """
start: NAME !NAME expr NEWLINE? ENDMARKER
expr: NAME | NUMBER
"""
valid_cases = ["foo 34"]
invalid_cases = ["foo bar"]
self.check_input_strings_for_grammar(grammar, self.tmp_path, valid_cases, invalid_cases)
def test_cut(self) -> None:
grammar = """
start: X ~ Y Z | X Q S
X: 'x'
Y: 'y'
Z: 'z'
Q: 'q'
S: 's'
"""
valid_cases = ["x y z"]
invalid_cases = ["x q s"]
self.check_input_strings_for_grammar(grammar, self.tmp_path, valid_cases, invalid_cases)
def test_gather(self) -> None:
grammar = """
start: ';'.pass_stmt+ NEWLINE
pass_stmt: 'pass'
"""
valid_cases = ["pass", "pass; pass"]
invalid_cases = ["pass;", "pass; pass;"]
self.check_input_strings_for_grammar(grammar, self.tmp_path, valid_cases, invalid_cases)
def test_left_recursion(self) -> None:
grammar = """
start: expr NEWLINE
expr: ('-' term | expr '+' term | term)
term: NUMBER
"""
valid_cases = ["-34", "34", "34 + 12", "1 + 1 + 2 + 3"]
self.check_input_strings_for_grammar(grammar, self.tmp_path, valid_cases)
def test_advanced_left_recursive(self) -> None:
grammar = """
start: NUMBER | sign start
sign: ['-']
"""
valid_cases = ["23", "-34"]
self.check_input_strings_for_grammar(grammar, self.tmp_path, valid_cases)
def test_mutually_left_recursive(self) -> None:
grammar = """
start: foo 'E'
foo: bar 'A' | 'B'
bar: foo 'C' | 'D'
"""
valid_cases = ["B E", "D A C A E"]
self.check_input_strings_for_grammar(grammar, self.tmp_path, valid_cases)
def test_nasty_mutually_left_recursive(self) -> None:
grammar = """
start: target '='
target: maybe '+' | NAME
maybe: maybe '-' | target
"""
valid_cases = ["x ="]
invalid_cases = ["x - + ="]
self.check_input_strings_for_grammar(grammar, self.tmp_path, valid_cases, invalid_cases)
def test_return_stmt_noexpr_action(self) -> None:
grammar = """
start[mod_ty]: a=[statements] ENDMARKER { Module(a, NULL, p->arena) }
statements[asdl_seq*]: a=statement+ { a }
statement[stmt_ty]: simple_stmt
simple_stmt[stmt_ty]: small_stmt
small_stmt[stmt_ty]: return_stmt
return_stmt[stmt_ty]: a='return' NEWLINE { _Py_Return(NULL, EXTRA) }
"""
stmt = "return"
self.verify_ast_generation(grammar, stmt, self.tmp_path)
def test_gather_action_ast(self) -> None:
grammar = """
start[mod_ty]: a=';'.pass_stmt+ NEWLINE ENDMARKER { Module(a, NULL, p->arena) }
pass_stmt[stmt_ty]: a='pass' { _Py_Pass(EXTRA)}
"""
stmt = "pass; pass"
self.verify_ast_generation(grammar, stmt, self.tmp_path)
def test_pass_stmt_action(self) -> None:
grammar = """
start[mod_ty]: a=[statements] ENDMARKER { Module(a, NULL, p->arena) }
statements[asdl_seq*]: a=statement+ { a }
statement[stmt_ty]: simple_stmt
simple_stmt[stmt_ty]: small_stmt
small_stmt[stmt_ty]: pass_stmt
pass_stmt[stmt_ty]: a='pass' NEWLINE { _Py_Pass(EXTRA) }
"""
stmt = "pass"
self.verify_ast_generation(grammar, stmt, self.tmp_path)
def test_if_stmt_action(self) -> None:
grammar = """
start[mod_ty]: a=[statements] ENDMARKER { Module(a, NULL, p->arena) }
statements[asdl_seq*]: a=statement+ { _PyPegen_seq_flatten(p, a) }
statement[asdl_seq*]: a=compound_stmt { _PyPegen_singleton_seq(p, a) } | simple_stmt
simple_stmt[asdl_seq*]: a=small_stmt b=further_small_stmt* [';'] NEWLINE { _PyPegen_seq_insert_in_front(p, a, b) }
further_small_stmt[stmt_ty]: ';' a=small_stmt { a }
block: simple_stmt | NEWLINE INDENT a=statements DEDENT { a }
compound_stmt: if_stmt
if_stmt: 'if' a=full_expression ':' b=block { _Py_If(a, b, NULL, EXTRA) }
small_stmt[stmt_ty]: pass_stmt
pass_stmt[stmt_ty]: a='pass' { _Py_Pass(EXTRA) }
full_expression: NAME
"""
stmt = "pass"
self.verify_ast_generation(grammar, stmt, self.tmp_path)
def test_same_name_different_types(self) -> None:
source = """
start[mod_ty]: a=import_from+ NEWLINE ENDMARKER { Module(a, NULL, p->arena)}
import_from[stmt_ty]: ( a='from' !'import' c=simple_name 'import' d=import_as_names_from {
_Py_ImportFrom(c->v.Name.id, d, 0, EXTRA) }
| a='from' '.' 'import' c=import_as_names_from {
_Py_ImportFrom(NULL, c, 1, EXTRA) }
)
simple_name[expr_ty]: NAME
import_as_names_from[asdl_seq*]: a=','.import_as_name_from+ { a }
import_as_name_from[alias_ty]: a=NAME 'as' b=NAME { _Py_alias(((expr_ty) a)->v.Name.id, ((expr_ty) b)->v.Name.id, p->arena) }
"""
grammar = parse_string(source, GrammarParser)
extension = generate_parser_c_extension(grammar, Path(self.tmp_path))
for stmt in ("from a import b as c", "from . import a as b"):
expected_ast = ast.parse(stmt)
actual_ast = extension.parse_string(stmt, mode=1)
self.assertEqual(ast_dump(expected_ast), ast_dump(actual_ast))
def test_with_stmt_with_paren(self) -> None:
grammar_source = """
start[mod_ty]: a=[statements] ENDMARKER { Module(a, NULL, p->arena) }
statements[asdl_seq*]: a=statement+ { _PyPegen_seq_flatten(p, a) }
statement[asdl_seq*]: a=compound_stmt { _PyPegen_singleton_seq(p, a) }
compound_stmt[stmt_ty]: with_stmt
with_stmt[stmt_ty]: (
a='with' '(' b=','.with_item+ ')' ':' c=block {
_Py_With(b, _PyPegen_singleton_seq(p, c), NULL, EXTRA) }
)
with_item[withitem_ty]: (
e=NAME o=['as' t=NAME { t }] { _Py_withitem(e, _PyPegen_set_expr_context(p, o, Store), p->arena) }
)
block[stmt_ty]: a=pass_stmt NEWLINE { a } | NEWLINE INDENT a=pass_stmt DEDENT { a }
pass_stmt[stmt_ty]: a='pass' { _Py_Pass(EXTRA) }
"""
stmt = "with (\n a as b,\n c as d\n): pass"
grammar = parse_string(grammar_source, GrammarParser)
extension = generate_parser_c_extension(grammar, Path(self.tmp_path))
the_ast = extension.parse_string(stmt, mode=1)
self.assertTrue(ast_dump(the_ast).startswith(
"Module(body=[With(items=[withitem(context_expr=Name(id='a', ctx=Load()), optional_vars=Name(id='b', ctx=Store())), "
"withitem(context_expr=Name(id='c', ctx=Load()), optional_vars=Name(id='d', ctx=Store()))]"
))
def test_ternary_operator(self) -> None:
grammar_source = """
start[mod_ty]: a=expr ENDMARKER { Module(a, NULL, p->arena) }
expr[asdl_seq*]: a=listcomp NEWLINE { _PyPegen_singleton_seq(p, _Py_Expr(a, EXTRA)) }
listcomp[expr_ty]: (
a='[' b=NAME c=for_if_clauses d=']' { _Py_ListComp(b, c, EXTRA) }
)
for_if_clauses[asdl_seq*]: (
a=(y=[ASYNC] 'for' a=NAME 'in' b=NAME c=('if' z=NAME { z })*
{ _Py_comprehension(_Py_Name(((expr_ty) a)->v.Name.id, Store, EXTRA), b, c, (y == NULL) ? 0 : 1, p->arena) })+ { a }
)
"""
stmt = "[i for i in a if b]"
self.verify_ast_generation(grammar_source, stmt, self.tmp_path)
def test_syntax_error_for_string(self) -> None:
grammar_source = """
start: expr+ NEWLINE? ENDMARKER
expr: NAME
"""
grammar = parse_string(grammar_source, GrammarParser)
extension = generate_parser_c_extension(grammar, Path(self.tmp_path))
for text in ("a b 42 b a", "名 名 42 名 名"):
try:
extension.parse_string(text, mode=0)
except SyntaxError as e:
tb = traceback.format_exc()
self.assertTrue('File "<string>", line 1' in tb)
self.assertTrue(f"SyntaxError: invalid syntax" in tb)
def test_headers_and_trailer(self) -> None:
grammar_source = """
@header 'SOME HEADER'
@subheader 'SOME SUBHEADER'
@trailer 'SOME TRAILER'
start: expr+ NEWLINE? ENDMARKER
expr: x=NAME
"""
grammar = parse_string(grammar_source, GrammarParser)
parser_source = generate_c_parser_source(grammar)
self.assertTrue("SOME HEADER" in parser_source)
self.assertTrue("SOME SUBHEADER" in parser_source)
self.assertTrue("SOME TRAILER" in parser_source)
def test_error_in_rules(self) -> None:
grammar_source = """
start: expr+ NEWLINE? ENDMARKER
expr: NAME {PyTuple_New(-1)}
"""
grammar = parse_string(grammar_source, GrammarParser)
extension = generate_parser_c_extension(grammar, Path(self.tmp_path))
# PyTuple_New raises SystemError if an invalid argument was passed.
with self.assertRaises(SystemError):
extension.parse_string("a", mode=0)
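For readers new to the DSL, here are the constructs exercised above, collected into one hypothetical grammar (the rule names are invented; the notation is the tests'):

reference_grammar = """
start[mod_ty]: a=','.thing+ $ { Module(a, NULL, p->arena) }  # gather, binding, C result type, action, end marker
thing: '(' ~ expr ')'     # ~ (cut): after '(', failure is an error, not a backtrack
     | !'(' expr          # !x is negative lookahead; &x would be positive
expr: ['-'] NUMBER        # [...] marks an optional item
"""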


@@ -0,0 +1,225 @@
import unittest
from test import test_tools
from typing import Dict, Set
test_tools.skip_if_missing('peg_generator')
with test_tools.imports_under_tool('peg_generator'):
from pegen.grammar_parser import GeneratedParser as GrammarParser
from pegen.testutil import parse_string
from pegen.first_sets import FirstSetCalculator
from pegen.grammar import Grammar
class TestFirstSets(unittest.TestCase):
def calculate_first_sets(self, grammar_source: str) -> Dict[str, Set[str]]:
grammar: Grammar = parse_string(grammar_source, GrammarParser)
return FirstSetCalculator(grammar.rules).calculate()
def test_alternatives(self) -> None:
grammar = """
start: expr NEWLINE? ENDMARKER
expr: A | B
A: 'a' | '-'
B: 'b' | '+'
"""
self.assertEqual(self.calculate_first_sets(grammar), {
"A": {"'a'", "'-'"},
"B": {"'+'", "'b'"},
"expr": {"'+'", "'a'", "'b'", "'-'"},
"start": {"'+'", "'a'", "'b'", "'-'"},
})
def test_optionals(self) -> None:
grammar = """
start: expr NEWLINE
expr: ['a'] ['b'] 'c'
"""
self.assertEqual(self.calculate_first_sets(grammar), {
"expr": {"'c'", "'a'", "'b'"},
"start": {"'c'", "'a'", "'b'"},
})
def test_repeat_with_separator(self) -> None:
grammar = """
start: ','.thing+ NEWLINE
thing: NUMBER
"""
self.assertEqual(self.calculate_first_sets(grammar), {"thing": {"NUMBER"}, "start": {"NUMBER"}})
def test_optional_operator(self) -> None:
grammar = """
start: sum NEWLINE
sum: (term)? 'b'
term: NUMBER
"""
self.assertEqual(self.calculate_first_sets(grammar), {
"term": {"NUMBER"},
"sum": {"NUMBER", "'b'"},
"start": {"'b'", "NUMBER"},
})
def test_optional_literal(self) -> None:
grammar = """
start: sum NEWLINE
sum: '+' ? term
term: NUMBER
"""
self.assertEqual(self.calculate_first_sets(grammar), {
"term": {"NUMBER"},
"sum": {"'+'", "NUMBER"},
"start": {"'+'", "NUMBER"},
})
def test_optional_after(self) -> None:
grammar = """
start: term NEWLINE
term: NUMBER ['+']
"""
self.assertEqual(self.calculate_first_sets(grammar), {"term": {"NUMBER"}, "start": {"NUMBER"}})
def test_optional_before(self) -> None:
grammar = """
start: term NEWLINE
term: ['+'] NUMBER
"""
self.assertEqual(self.calculate_first_sets(grammar), {"term": {"NUMBER", "'+'"}, "start": {"NUMBER", "'+'"}})
def test_repeat_0(self) -> None:
grammar = """
start: thing* "+" NEWLINE
thing: NUMBER
"""
self.assertEqual(self.calculate_first_sets(grammar), {"thing": {"NUMBER"}, "start": {'"+"', "NUMBER"}})
def test_repeat_0_with_group(self) -> None:
grammar = """
start: ('+' '-')* term NEWLINE
term: NUMBER
"""
self.assertEqual(self.calculate_first_sets(grammar), {"term": {"NUMBER"}, "start": {"'+'", "NUMBER"}})
def test_repeat_1(self) -> None:
grammar = """
start: thing+ '-' NEWLINE
thing: NUMBER
"""
self.assertEqual(self.calculate_first_sets(grammar), {"thing": {"NUMBER"}, "start": {"NUMBER"}})
def test_repeat_1_with_group(self) -> None:
grammar = """
start: ('+' term)+ term NEWLINE
term: NUMBER
"""
self.assertEqual(self.calculate_first_sets(grammar), {"term": {"NUMBER"}, "start": {"'+'"}})
def test_gather(self) -> None:
grammar = """
start: ','.thing+ NEWLINE
thing: NUMBER
"""
self.assertEqual(self.calculate_first_sets(grammar), {"thing": {"NUMBER"}, "start": {"NUMBER"}})
def test_positive_lookahead(self) -> None:
grammar = """
start: expr NEWLINE
expr: &'a' opt
opt: 'a' | 'b' | 'c'
"""
self.assertEqual(self.calculate_first_sets(grammar), {
"expr": {"'a'"},
"start": {"'a'"},
"opt": {"'b'", "'c'", "'a'"},
})
def test_negative_lookahead(self) -> None:
grammar = """
start: expr NEWLINE
expr: !'a' opt
opt: 'a' | 'b' | 'c'
"""
self.assertEqual(self.calculate_first_sets(grammar), {
"opt": {"'b'", "'a'", "'c'"},
"expr": {"'b'", "'c'"},
"start": {"'b'", "'c'"},
})
def test_left_recursion(self) -> None:
grammar = """
start: expr NEWLINE
expr: ('-' term | expr '+' term | term)
term: NUMBER
foo: 'foo'
bar: 'bar'
baz: 'baz'
"""
self.assertEqual(self.calculate_first_sets(grammar), {
"expr": {"NUMBER", "'-'"},
"term": {"NUMBER"},
"start": {"NUMBER", "'-'"},
"foo": {"'foo'"},
"bar": {"'bar'"},
"baz": {"'baz'"},
})
def test_advance_left_recursion(self) -> None:
grammar = """
start: NUMBER | sign start
sign: ['-']
"""
self.assertEqual(self.calculate_first_sets(grammar), {"sign": {"'-'", ""}, "start": {"'-'", "NUMBER"}})
def test_mutual_left_recursion(self) -> None:
grammar = """
start: foo 'E'
foo: bar 'A' | 'B'
bar: foo 'C' | 'D'
"""
self.assertEqual(self.calculate_first_sets(grammar), {
"foo": {"'D'", "'B'"},
"bar": {"'D'"},
"start": {"'D'", "'B'"},
})
def test_nasty_left_recursion(self) -> None:
# TODO: Validate this
grammar = """
start: target '='
target: maybe '+' | NAME
maybe: maybe '-' | target
"""
self.assertEqual(self.calculate_first_sets(grammar), {"maybe": set(), "target": {"NAME"}, "start": {"NAME"}})
def test_nullable_rule(self) -> None:
grammar = """
start: sign thing $
sign: ['-']
thing: NUMBER
"""
self.assertEqual(self.calculate_first_sets(grammar), {
"sign": {"", "'-'"},
"thing": {"NUMBER"},
"start": {"NUMBER", "'-'"},
})
def test_epsilon_production_in_start_rule(self) -> None:
grammar = """
start: ['-'] $
"""
self.assertEqual(self.calculate_first_sets(grammar), {"start": {"ENDMARKER", "'-'"}})
def test_multiple_nullable_rules(self) -> None:
grammar = """
start: sign thing other another $
sign: ['-']
thing: ['+']
other: '*'
another: '/'
"""
self.assertEqual(self.calculate_first_sets(grammar), {
"sign": {"", "'-'"},
"thing": {"'+'", ""},
"start": {"'+'", "'-'", "'*'"},
"other": {"'*'"},
"another": {"'/'"},
})
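The FirstSetCalculator behavior pinned down above computes, for each rule, the terminals that can begin it. A minimal fixed-point sketch of the idea, assuming no nullable items (not the tool's actual implementation):

from typing import Dict, List, Set

def first_sets(rules: Dict[str, List[List[str]]]) -> Dict[str, Set[str]]:
    # rules maps a name to its alternatives; an alternative is a list of
    # items, each either another rule name or a terminal string.
    first: Dict[str, Set[str]] = {name: set() for name in rules}
    changed = True
    while changed:  # iterate until no set grows (a fixed point)
        changed = False
        for name, alts in rules.items():
            for alt in alts:
                if not alt:
                    continue
                head = alt[0]  # with nothing nullable, only the first item matters
                new = first[head] if head in rules else {head}
                if not new <= first[name]:
                    first[name] |= new
                    changed = True
    return first

print(first_sets({"start": [["expr", "NEWLINE"]], "expr": [["'a'"], ["'b'"]]}))
# both rules can begin with 'a' or 'b'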


@@ -0,0 +1,728 @@
import io
import textwrap
import unittest
from test import test_tools
from typing import Any, Dict, List, Type
from tokenize import TokenInfo, NAME, NEWLINE, NUMBER, OP
test_tools.skip_if_missing('peg_generator')
with test_tools.imports_under_tool('peg_generator'):
from pegen.grammar_parser import GeneratedParser as GrammarParser
from pegen.testutil import (
parse_string,
generate_parser,
make_parser
)
from pegen.grammar import GrammarVisitor, GrammarError, Grammar
from pegen.grammar_visualizer import ASTGrammarPrinter
from pegen.parser import Parser
from pegen.python_generator import PythonParserGenerator
class TestPegen(unittest.TestCase):
def test_parse_grammar(self) -> None:
grammar_source = """
start: sum NEWLINE
sum: t1=term '+' t2=term { action } | term
term: NUMBER
"""
expected = """
start: sum NEWLINE
sum: term '+' term | term
term: NUMBER
"""
grammar: Grammar = parse_string(grammar_source, GrammarParser)
rules = grammar.rules
self.assertEqual(str(grammar), textwrap.dedent(expected).strip())
# Check the str() and repr() of a few rules; AST nodes don't support ==.
self.assertEqual(str(rules["start"]), "start: sum NEWLINE")
self.assertEqual(str(rules["sum"]), "sum: term '+' term | term")
expected_repr = "Rule('term', None, Rhs([Alt([NamedItem(None, NameLeaf('NUMBER'))])]))"
self.assertEqual(repr(rules["term"]), expected_repr)
def test_long_rule_str(self) -> None:
grammar_source = """
start: zero | one | one zero | one one | one zero zero | one zero one | one one zero | one one one
"""
expected = """
start:
| zero
| one
| one zero
| one one
| one zero zero
| one zero one
| one one zero
| one one one
"""
grammar: Grammar = parse_string(grammar_source, GrammarParser)
self.assertEqual(str(grammar.rules["start"]), textwrap.dedent(expected).strip())
def test_typed_rules(self) -> None:
grammar = """
start[int]: sum NEWLINE
sum[int]: t1=term '+' t2=term { action } | term
term[int]: NUMBER
"""
rules = parse_string(grammar, GrammarParser).rules
# Check the str() and repr() of a few rules; AST nodes don't support ==.
self.assertEqual(str(rules["start"]), "start: sum NEWLINE")
self.assertEqual(str(rules["sum"]), "sum: term '+' term | term")
self.assertEqual(
repr(rules["term"]),
"Rule('term', 'int', Rhs([Alt([NamedItem(None, NameLeaf('NUMBER'))])]))"
)
def test_repeat_with_separator_rules(self) -> None:
grammar = """
start: ','.thing+ NEWLINE
thing: NUMBER
"""
rules = parse_string(grammar, GrammarParser).rules
self.assertEqual(str(rules["start"]), "start: ','.thing+ NEWLINE")
self.assertTrue(repr(rules["start"]).startswith(
"Rule('start', None, Rhs([Alt([NamedItem(None, Gather(StringLeaf(\"','\"), NameLeaf('thing'"
))
self.assertEqual(str(rules["thing"]), "thing: NUMBER")
def test_expr_grammar(self) -> None:
grammar = """
start: sum NEWLINE
sum: term '+' term | term
term: NUMBER
"""
parser_class = make_parser(grammar)
node = parse_string("42\n", parser_class)
self.assertEqual(node, [
[[TokenInfo(NUMBER, string="42", start=(1, 0), end=(1, 2), line="42\n")]],
TokenInfo(NEWLINE, string="\n", start=(1, 2), end=(1, 3), line="42\n"),
])
def test_optional_operator(self) -> None:
grammar = """
start: sum NEWLINE
sum: term ('+' term)?
term: NUMBER
"""
parser_class = make_parser(grammar)
node = parse_string("1+2\n", parser_class)
self.assertEqual(node, [
[
[TokenInfo(NUMBER, string="1", start=(1, 0), end=(1, 1), line="1+2\n")],
[
TokenInfo(OP, string="+", start=(1, 1), end=(1, 2), line="1+2\n"),
[TokenInfo(NUMBER, string="2", start=(1, 2), end=(1, 3), line="1+2\n")],
],
],
TokenInfo(NEWLINE, string="\n", start=(1, 3), end=(1, 4), line="1+2\n"),
])
node = parse_string("1\n", parser_class)
self.assertEqual(node, [
[[TokenInfo(NUMBER, string="1", start=(1, 0), end=(1, 1), line="1\n")], None],
TokenInfo(NEWLINE, string="\n", start=(1, 1), end=(1, 2), line="1\n"),
])
def test_optional_literal(self) -> None:
grammar = """
start: sum NEWLINE
sum: term '+' ?
term: NUMBER
"""
parser_class = make_parser(grammar)
node = parse_string("1+\n", parser_class)
self.assertEqual(node, [
[
[TokenInfo(NUMBER, string="1", start=(1, 0), end=(1, 1), line="1+\n")],
TokenInfo(OP, string="+", start=(1, 1), end=(1, 2), line="1+\n"),
],
TokenInfo(NEWLINE, string="\n", start=(1, 2), end=(1, 3), line="1+\n"),
])
node = parse_string("1\n", parser_class)
self.assertEqual(node, [
[[TokenInfo(NUMBER, string="1", start=(1, 0), end=(1, 1), line="1\n")], None],
TokenInfo(NEWLINE, string="\n", start=(1, 1), end=(1, 2), line="1\n"),
])
def test_alt_optional_operator(self) -> None:
grammar = """
start: sum NEWLINE
sum: term ['+' term]
term: NUMBER
"""
parser_class = make_parser(grammar)
node = parse_string("1 + 2\n", parser_class)
self.assertEqual(node, [
[
[TokenInfo(NUMBER, string="1", start=(1, 0), end=(1, 1), line="1 + 2\n")],
[
TokenInfo(OP, string="+", start=(1, 2), end=(1, 3), line="1 + 2\n"),
[TokenInfo(NUMBER, string="2", start=(1, 4), end=(1, 5), line="1 + 2\n")],
],
],
TokenInfo(NEWLINE, string="\n", start=(1, 5), end=(1, 6), line="1 + 2\n"),
])
node = parse_string("1\n", parser_class)
self.assertEqual(node, [
[[TokenInfo(NUMBER, string="1", start=(1, 0), end=(1, 1), line="1\n")], None],
TokenInfo(NEWLINE, string="\n", start=(1, 1), end=(1, 2), line="1\n"),
])
def test_repeat_0_simple(self) -> None:
grammar = """
start: thing thing* NEWLINE
thing: NUMBER
"""
parser_class = make_parser(grammar)
node = parse_string("1 2 3\n", parser_class)
self.assertEqual(node, [
[TokenInfo(NUMBER, string="1", start=(1, 0), end=(1, 1), line="1 2 3\n")],
[
[[TokenInfo(NUMBER, string="2", start=(1, 2), end=(1, 3), line="1 2 3\n")]],
[[TokenInfo(NUMBER, string="3", start=(1, 4), end=(1, 5), line="1 2 3\n")]],
],
TokenInfo(NEWLINE, string="\n", start=(1, 5), end=(1, 6), line="1 2 3\n"),
])
node = parse_string("1\n", parser_class)
self.assertEqual(node, [
[TokenInfo(NUMBER, string="1", start=(1, 0), end=(1, 1), line="1\n")],
[],
TokenInfo(NEWLINE, string="\n", start=(1, 1), end=(1, 2), line="1\n"),
])
def test_repeat_0_complex(self) -> None:
grammar = """
start: term ('+' term)* NEWLINE
term: NUMBER
"""
parser_class = make_parser(grammar)
node = parse_string("1 + 2 + 3\n", parser_class)
self.assertEqual(node, [
[TokenInfo(NUMBER, string="1", start=(1, 0), end=(1, 1), line="1 + 2 + 3\n")],
[
[
[
TokenInfo(OP, string="+", start=(1, 2), end=(1, 3), line="1 + 2 + 3\n"),
[TokenInfo(NUMBER, string="2", start=(1, 4), end=(1, 5), line="1 + 2 + 3\n")],
]
],
[
[
TokenInfo(OP, string="+", start=(1, 6), end=(1, 7), line="1 + 2 + 3\n"),
[TokenInfo(NUMBER, string="3", start=(1, 8), end=(1, 9), line="1 + 2 + 3\n")],
]
],
],
TokenInfo(NEWLINE, string="\n", start=(1, 9), end=(1, 10), line="1 + 2 + 3\n"),
])
def test_repeat_1_simple(self) -> None:
grammar = """
start: thing thing+ NEWLINE
thing: NUMBER
"""
parser_class = make_parser(grammar)
node = parse_string("1 2 3\n", parser_class)
self.assertEqual(node, [
[TokenInfo(NUMBER, string="1", start=(1, 0), end=(1, 1), line="1 2 3\n")],
[
[[TokenInfo(NUMBER, string="2", start=(1, 2), end=(1, 3), line="1 2 3\n")]],
[[TokenInfo(NUMBER, string="3", start=(1, 4), end=(1, 5), line="1 2 3\n")]],
],
TokenInfo(NEWLINE, string="\n", start=(1, 5), end=(1, 6), line="1 2 3\n"),
])
with self.assertRaises(SyntaxError):
parse_string("1\n", parser_class)
def test_repeat_1_complex(self) -> None:
grammar = """
start: term ('+' term)+ NEWLINE
term: NUMBER
"""
parser_class = make_parser(grammar)
node = parse_string("1 + 2 + 3\n", parser_class)
self.assertEqual(node, [
[TokenInfo(NUMBER, string="1", start=(1, 0), end=(1, 1), line="1 + 2 + 3\n")],
[
[
[
TokenInfo(OP, string="+", start=(1, 2), end=(1, 3), line="1 + 2 + 3\n"),
[TokenInfo(NUMBER, string="2", start=(1, 4), end=(1, 5), line="1 + 2 + 3\n")],
]
],
[
[
TokenInfo(OP, string="+", start=(1, 6), end=(1, 7), line="1 + 2 + 3\n"),
[TokenInfo(NUMBER, string="3", start=(1, 8), end=(1, 9), line="1 + 2 + 3\n")],
]
],
],
TokenInfo(NEWLINE, string="\n", start=(1, 9), end=(1, 10), line="1 + 2 + 3\n"),
])
with self.assertRaises(SyntaxError):
parse_string("1\n", parser_class)
def test_repeat_with_sep_simple(self) -> None:
grammar = """
start: ','.thing+ NEWLINE
thing: NUMBER
"""
parser_class = make_parser(grammar)
node = parse_string("1, 2, 3\n", parser_class)
self.assertEqual(node, [
[
[TokenInfo(NUMBER, string="1", start=(1, 0), end=(1, 1), line="1, 2, 3\n")],
[TokenInfo(NUMBER, string="2", start=(1, 3), end=(1, 4), line="1, 2, 3\n")],
[TokenInfo(NUMBER, string="3", start=(1, 6), end=(1, 7), line="1, 2, 3\n")],
],
TokenInfo(NEWLINE, string="\n", start=(1, 7), end=(1, 8), line="1, 2, 3\n"),
])
def test_left_recursive(self) -> None:
grammar_source = """
start: expr NEWLINE
expr: ('-' term | expr '+' term | term)
term: NUMBER
foo: NAME+
bar: NAME*
baz: NAME?
"""
grammar: Grammar = parse_string(grammar_source, GrammarParser)
parser_class = generate_parser(grammar)
rules = grammar.rules
self.assertFalse(rules["start"].left_recursive)
self.assertTrue(rules["expr"].left_recursive)
self.assertFalse(rules["term"].left_recursive)
self.assertFalse(rules["foo"].left_recursive)
self.assertFalse(rules["bar"].left_recursive)
self.assertFalse(rules["baz"].left_recursive)
node = parse_string("1 + 2 + 3\n", parser_class)
self.assertEqual(node, [
[
[
[[TokenInfo(NUMBER, string="1", start=(1, 0), end=(1, 1), line="1 + 2 + 3\n")]],
TokenInfo(OP, string="+", start=(1, 2), end=(1, 3), line="1 + 2 + 3\n"),
[TokenInfo(NUMBER, string="2", start=(1, 4), end=(1, 5), line="1 + 2 + 3\n")],
],
TokenInfo(OP, string="+", start=(1, 6), end=(1, 7), line="1 + 2 + 3\n"),
[TokenInfo(NUMBER, string="3", start=(1, 8), end=(1, 9), line="1 + 2 + 3\n")],
],
TokenInfo(NEWLINE, string="\n", start=(1, 9), end=(1, 10), line="1 + 2 + 3\n"),
])
def test_python_expr(self) -> None:
grammar = """
start: expr NEWLINE? $ { ast.Expression(expr, lineno=1, col_offset=0) }
expr: ( expr '+' term { ast.BinOp(expr, ast.Add(), term, lineno=expr.lineno, col_offset=expr.col_offset, end_lineno=term.end_lineno, end_col_offset=term.end_col_offset) }
| expr '-' term { ast.BinOp(expr, ast.Sub(), term, lineno=expr.lineno, col_offset=expr.col_offset, end_lineno=term.end_lineno, end_col_offset=term.end_col_offset) }
| term { term }
)
term: ( l=term '*' r=factor { ast.BinOp(l, ast.Mult(), r, lineno=l.lineno, col_offset=l.col_offset, end_lineno=r.end_lineno, end_col_offset=r.end_col_offset) }
| l=term '/' r=factor { ast.BinOp(l, ast.Div(), r, lineno=l.lineno, col_offset=l.col_offset, end_lineno=r.end_lineno, end_col_offset=r.end_col_offset) }
| factor { factor }
)
factor: ( '(' expr ')' { expr }
| atom { atom }
)
atom: ( n=NAME { ast.Name(id=n.string, ctx=ast.Load(), lineno=n.start[0], col_offset=n.start[1], end_lineno=n.end[0], end_col_offset=n.end[1]) }
| n=NUMBER { ast.Constant(value=ast.literal_eval(n.string), lineno=n.start[0], col_offset=n.start[1], end_lineno=n.end[0], end_col_offset=n.end[1]) }
)
"""
parser_class = make_parser(grammar)
node = parse_string("(1 + 2*3 + 5)/(6 - 2)\n", parser_class)
code = compile(node, "", "eval")
val = eval(code)
self.assertEqual(val, 3.0)
def test_nullable(self) -> None:
grammar_source = """
start: sign NUMBER
sign: ['-' | '+']
"""
grammar: Grammar = parse_string(grammar_source, GrammarParser)
out = io.StringIO()
genr = PythonParserGenerator(grammar, out)
rules = grammar.rules
self.assertFalse(rules["start"].nullable) # Not None!
self.assertTrue(rules["sign"].nullable)
def test_advanced_left_recursive(self) -> None:
grammar_source = """
start: NUMBER | sign start
sign: ['-']
"""
grammar: Grammar = parse_string(grammar_source, GrammarParser)
out = io.StringIO()
genr = PythonParserGenerator(grammar, out)
rules = grammar.rules
self.assertFalse(rules["start"].nullable) # Not None!
self.assertTrue(rules["sign"].nullable)
self.assertTrue(rules["start"].left_recursive)
self.assertFalse(rules["sign"].left_recursive)
def test_mutually_left_recursive(self) -> None:
grammar_source = """
start: foo 'E'
foo: bar 'A' | 'B'
bar: foo 'C' | 'D'
"""
grammar: Grammar = parse_string(grammar_source, GrammarParser)
out = io.StringIO()
genr = PythonParserGenerator(grammar, out)
rules = grammar.rules
self.assertFalse(rules["start"].left_recursive)
self.assertTrue(rules["foo"].left_recursive)
self.assertTrue(rules["bar"].left_recursive)
genr.generate("<string>")
ns: Dict[str, Any] = {}
exec(out.getvalue(), ns)
parser_class: Type[Parser] = ns["GeneratedParser"]
node = parse_string("D A C A E", parser_class)
self.assertEqual(node, [
[
[
[
[TokenInfo(type=NAME, string="D", start=(1, 0), end=(1, 1), line="D A C A E")],
TokenInfo(type=NAME, string="A", start=(1, 2), end=(1, 3), line="D A C A E"),
],
TokenInfo(type=NAME, string="C", start=(1, 4), end=(1, 5), line="D A C A E"),
],
TokenInfo(type=NAME, string="A", start=(1, 6), end=(1, 7), line="D A C A E"),
],
TokenInfo(type=NAME, string="E", start=(1, 8), end=(1, 9), line="D A C A E"),
])
node = parse_string("B C A E", parser_class)
self.assertIsNotNone(node)
self.assertEqual(node, [
[
[
[TokenInfo(type=NAME, string="B", start=(1, 0), end=(1, 1), line="B C A E")],
TokenInfo(type=NAME, string="C", start=(1, 2), end=(1, 3), line="B C A E"),
],
TokenInfo(type=NAME, string="A", start=(1, 4), end=(1, 5), line="B C A E"),
],
TokenInfo(type=NAME, string="E", start=(1, 6), end=(1, 7), line="B C A E"),
])
def test_nasty_mutually_left_recursive(self) -> None:
# This grammar does not recognize 'x - + =', much to my chagrin.
# But that's the way PEG works.
# [Breathlessly]
# The problem is that the toplevel target call
# recurses into maybe, which recognizes 'x - +',
# and then the toplevel target looks for another '+',
# which fails, so it retreats to NAME,
# which succeeds, so we end up just recognizing 'x',
# and then start fails because there's no '=' after that.
grammar_source = """
start: target '='
target: maybe '+' | NAME
maybe: maybe '-' | target
"""
grammar: Grammar = parse_string(grammar_source, GrammarParser)
out = io.StringIO()
genr = PythonParserGenerator(grammar, out)
genr.generate("<string>")
ns: Dict[str, Any] = {}
exec(out.getvalue(), ns)
parser_class = ns["GeneratedParser"]
with self.assertRaises(SyntaxError):
parse_string("x - + =", parser_class)
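The chagrin above is ordered choice at work: a PEG alternative that matches is committed to, even when a later alternative would have made the whole parse succeed. The classic standalone illustration (hand-rolled; nothing here is pegen's code):

def parse_A(s: str):
    # A <- 'a' / 'a' 'b'  -- alternatives are tried strictly in order
    if s.startswith("a"):
        return 1          # matches one character and commits
    if s.startswith("ab"):
        return 2          # unreachable: ordered choice never reconsiders
    return None

def parse_start(s: str) -> bool:
    # start <- A !.  (A followed by end of input)
    n = parse_A(s)
    return n is not None and n == len(s)

print(parse_start("a"))   # True
print(parse_start("ab"))  # False: the trailing 'b' is never consumed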
def test_lookahead(self) -> None:
grammar = """
start: (expr_stmt | assign_stmt) &'.'
expr_stmt: !(target '=') expr
assign_stmt: target '=' expr
expr: term ('+' term)*
target: NAME
term: NUMBER
"""
parser_class = make_parser(grammar)
node = parse_string("foo = 12 + 12 .", parser_class)
self.assertEqual(node, [
[
[
[TokenInfo(NAME, string="foo", start=(1, 0), end=(1, 3), line="foo = 12 + 12 .")],
TokenInfo(OP, string="=", start=(1, 4), end=(1, 5), line="foo = 12 + 12 ."),
[
[
TokenInfo(
NUMBER, string="12", start=(1, 6), end=(1, 8), line="foo = 12 + 12 ."
)
],
[
[
[
TokenInfo(
OP,
string="+",
start=(1, 9),
end=(1, 10),
line="foo = 12 + 12 .",
),
[
TokenInfo(
NUMBER,
string="12",
start=(1, 11),
end=(1, 13),
line="foo = 12 + 12 .",
)
],
]
]
],
],
]
]
])
def test_named_lookahead_error(self) -> None:
grammar = """
start: foo=!'x' NAME
"""
with self.assertRaises(SyntaxError):
make_parser(grammar)
def test_start_leader(self) -> None:
grammar = """
start: attr | NAME
attr: start '.' NAME
"""
# Would assert False without a special case in compute_left_recursives().
make_parser(grammar)
def test_left_recursion_too_complex(self) -> None:
grammar = """
start: foo
foo: bar '+' | baz '+' | '+'
bar: baz '-' | foo '-' | '-'
baz: foo '*' | bar '*' | '*'
"""
with self.assertRaises(ValueError) as errinfo:
make_parser(grammar)
self.assertTrue("no leader" in str(errinfo.exception.value))
def test_cut(self) -> None:
grammar = """
start: '(' ~ expr ')'
expr: NUMBER
"""
parser_class = make_parser(grammar)
node = parse_string("(1)", parser_class, verbose=True)
self.assertEqual(node, [
TokenInfo(OP, string="(", start=(1, 0), end=(1, 1), line="(1)"),
[TokenInfo(NUMBER, string="1", start=(1, 1), end=(1, 2), line="(1)")],
TokenInfo(OP, string=")", start=(1, 2), end=(1, 3), line="(1)"),
])
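A sketch of what the cut in "'(' ~ expr ')'" buys: past the cut, failure in the same alternative raises immediately instead of backtracking into other alternatives (hypothetical token-list parser, not pegen's machinery):

def parse_expr(tokens, i):
    # expr: NUMBER
    if i < len(tokens) and tokens[i].isdigit():
        return i + 1
    return None

def parse_start(tokens, i=0):
    # start: '(' ~ expr ')' | expr   -- second alternative added for illustration
    if i < len(tokens) and tokens[i] == "(":
        # past the cut: failing now is a hard error, not a backtrack
        j = parse_expr(tokens, i + 1)
        if j is None or j >= len(tokens) or tokens[j] != ")":
            raise SyntaxError("expected expr and ')' after '('")
        return j + 1
    return parse_expr(tokens, i)

print(parse_start(["(", "1", ")"]))  # 3: all three tokens consumed
# parse_start(["(", "1"]) raises SyntaxError rather than retrying bare expr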
def test_dangling_reference(self) -> None:
grammar = """
start: foo ENDMARKER
foo: bar NAME
"""
with self.assertRaises(GrammarError):
parser_class = make_parser(grammar)
def test_bad_token_reference(self) -> None:
grammar = """
start: foo
foo: NAMEE
"""
with self.assertRaises(GrammarError):
parser_class = make_parser(grammar)
def test_missing_start(self) -> None:
grammar = """
foo: NAME
"""
with self.assertRaises(GrammarError):
parser_class = make_parser(grammar)
class TestGrammarVisitor(unittest.TestCase):
class Visitor(GrammarVisitor):
def __init__(self) -> None:
self.n_nodes = 0
def visit(self, node: Any, *args: Any, **kwargs: Any) -> None:
self.n_nodes += 1
super().visit(node, *args, **kwargs)
def test_parse_trivial_grammar(self) -> None:
grammar = """
start: 'a'
"""
rules = parse_string(grammar, GrammarParser)
visitor = self.Visitor()
visitor.visit(rules)
self.assertEqual(visitor.n_nodes, 6)
def test_parse_or_grammar(self) -> None:
grammar = """
start: rule
rule: 'a' | 'b'
"""
rules = parse_string(grammar, GrammarParser)
visitor = self.Visitor()
visitor.visit(rules)
# Grammar/Rule/Rhs/Alt/NamedItem/NameLeaf -> 6
# Rule/Rhs/ -> 2
# Alt/NamedItem/StringLeaf -> 3
# Alt/NamedItem/StringLeaf -> 3
self.assertEqual(visitor.n_nodes, 14)
def test_parse_repeat1_grammar(self) -> None:
grammar = """
start: 'a'+
"""
rules = parse_string(grammar, GrammarParser)
visitor = self.Visitor()
visitor.visit(rules)
# Grammar/Rule/Rhs/Alt/NamedItem/Repeat1/StringLeaf -> 6
self.assertEqual(visitor.n_nodes, 7)
def test_parse_repeat0_grammar(self) -> None:
grammar = """
start: 'a'*
"""
rules = parse_string(grammar, GrammarParser)
visitor = self.Visitor()
visitor.visit(rules)
# Grammar/Rule/Rhs/Alt/NamedItem/Repeat0/StringLeaf -> 6
self.assertEqual(visitor.n_nodes, 7)
def test_parse_optional_grammar(self) -> None:
grammar = """
start: 'a' ['b']
"""
rules = parse_string(grammar, GrammarParser)
visitor = self.Visitor()
visitor.visit(rules)
# Grammar/Rule/Rhs/Alt/NamedItem/StringLeaf -> 6
# NamedItem/Opt/Rhs/Alt/NamedItem/Stringleaf -> 6
self.assertEqual(visitor.n_nodes, 12)
class TestGrammarVisualizer(unittest.TestCase):
def test_simple_rule(self) -> None:
grammar = """
start: 'a' 'b'
"""
rules = parse_string(grammar, GrammarParser)
printer = ASTGrammarPrinter()
lines: List[str] = []
printer.print_grammar_ast(rules, printer=lines.append)
output = "\n".join(lines)
expected_output = textwrap.dedent(
"""\
Rule
Rhs
Alt
NamedItem
StringLeaf("'a'")
NamedItem
StringLeaf("'b'")
"""
)
self.assertEqual(output, expected_output)
def test_multiple_rules(self) -> None:
grammar = """
start: a b
a: 'a'
b: 'b'
"""
rules = parse_string(grammar, GrammarParser)
printer = ASTGrammarPrinter()
lines: List[str] = []
printer.print_grammar_ast(rules, printer=lines.append)
output = "\n".join(lines)
expected_output = textwrap.dedent(
"""\
Rule
Rhs
Alt
NamedItem
NameLeaf('a')
NamedItem
NameLeaf('b')
Rule
Rhs
Alt
NamedItem
StringLeaf("'a'")
Rule
Rhs
Alt
NamedItem
StringLeaf("'b'")
"""
)
self.assertEqual(output, expected_output)
def test_deep_nested_rule(self) -> None:
grammar = """
start: 'a' ['b'['c'['d']]]
"""
rules = parse_string(grammar, GrammarParser)
printer = ASTGrammarPrinter()
lines: List[str] = []
printer.print_grammar_ast(rules, printer=lines.append)
output = "\n".join(lines)
expected_output = textwrap.dedent(
"""\
Rule
Rhs
Alt
NamedItem
StringLeaf("'a'")
NamedItem
Opt
Rhs
Alt
NamedItem
StringLeaf("'b'")
NamedItem
Opt
Rhs
Alt
NamedItem
StringLeaf("'c'")
NamedItem
Opt
Rhs
Alt
NamedItem
StringLeaf("'d'")
"""
)
self.assertEqual(output, expected_output)

Lib/test/test_peg_parser.py (new file, 764 additions)

@@ -0,0 +1,764 @@
import ast
import os
import sys
import _peg_parser as peg_parser
import unittest
from pathlib import PurePath
from typing import Any, Union, Iterable, Tuple
from textwrap import dedent
TEST_CASES = [
('annotated_assignment', 'x: int = 42'),
('annotated_assignment_with_tuple', 'x: tuple = 1, 2'),
('annotated_assignment_with_parens', '(paren): int = 3+2'),
('annotated_assignment_with_yield', 'x: int = yield 42'),
('annotated_no_assignment', 'x: int'),
('annotation_with_multiple_parens', '((parens)): int'),
('annotation_with_parens', '(parens): int'),
('annotated_assignment_with_attr', 'a.b: int'),
('annotated_assignment_with_subscript', 'a[b]: int'),
('annotated_assignment_with_attr_and_parens', '(a.b): int'),
('annotated_assignment_with_subscript_and_parens', '(a[b]): int'),
('assert', 'assert a'),
('assert_message', 'assert a, b'),
('assignment_false', 'a = False'),
('assignment_none', 'a = None'),
('assignment_true', 'a = True'),
('assignment_paren', '(a) = 42'),
('assignment_paren_multiple', '(a, b) = (0, 1)'),
('asyncfor',
'''
async for i in a:
pass
'''),
('attribute_call', 'a.b()'),
('attribute_multiple_names', 'abcd.efg.hij'),
('attribute_simple', 'a.b'),
('attributes_subscript', 'a.b[0]'),
('augmented_assignment', 'x += 42'),
('binop_add', '1 + 1'),
('binop_add_multiple', '1 + 1 + 1 + 1'),
('binop_all', '1 + 2 * 5 + 3 ** 2 - -3'),
('binop_boolop_comp', '1 + 1 == 2 or 1 + 1 == 3 and not b'),
('boolop_or', 'a or b'),
('boolop_or_multiple', 'a or b or c'),
('class_def_bases',
'''
class C(A, B):
pass
'''),
('class_def_decorators',
'''
@a
class C:
pass
'''),
('class_def_decorator_with_expression',
'''
@lambda x: 42
class C:
pass
'''),
('class_def_decorator_with_expression_and_walrus',
'''
@x:=lambda x: 42
class C:
pass
'''),
('class_def_keywords',
'''
class C(keyword=a+b, **c):
pass
'''),
('class_def_mixed',
'''
class C(A, B, keyword=0, **a):
pass
'''),
('class_def_simple',
'''
class C:
pass
'''),
('class_def_starred_and_kwarg',
'''
class C(A, B, *x, **y):
pass
'''),
('class_def_starred_in_kwargs',
'''
class C(A, x=2, *[B, C], y=3):
pass
'''),
('call_attribute', 'f().b'),
('call_genexp', 'f(i for i in a)'),
('call_mixed_args', 'f(a, b, *c, **d)'),
('call_mixed_args_named', 'f(a, b, *c, d=4, **v)'),
('call_one_arg', 'f(a)'),
('call_posarg_genexp', 'f(a, (i for i in a))'),
('call_simple', 'f()'),
('call_subscript', 'f()[0]'),
('comp', 'a == b'),
('comp_multiple', 'a == b == c'),
('comp_paren_end', 'a == (b-1)'),
('comp_paren_start', '(a-1) == b'),
('decorator',
'''
@a
def f():
pass
'''),
('decorator_async',
'''
@a
async def d():
pass
'''),
('decorator_with_expression',
'''
@lambda x: 42
def f():
pass
'''),
('decorator_with_expression_and_walrus',
'''
@x:=lambda x: 42
def f():
pass
'''),
('del_attribute', 'del a.b'),
('del_call_attribute', 'del a().c'),
('del_call_genexp_attribute', 'del a(i for i in b).c'),
('del_empty', 'del()'),
('del_list', 'del a, [b, c]'),
('del_mixed', 'del a[0].b().c'),
('del_multiple', 'del a, b'),
('del_multiple_calls_attribute', 'del a()().b'),
('del_paren', 'del(a,b)'),
('del_paren_single_target', 'del(a)'),
('del_subscript_attribute', 'del a[0].b'),
('del_tuple', 'del a, (b, c)'),
('delete', 'del a'),
('dict',
'''
{
a: 1,
b: 2,
c: 3
}
'''),
('dict_comp', '{x:1 for x in a}'),
('dict_comp_if', '{x:1+2 for x in a if b}'),
('dict_empty', '{}'),
('for',
'''
for i in a:
pass
'''),
('for_else',
'''
for i in a:
pass
else:
pass
'''),
('for_star_target_in_paren', 'for (a) in b: pass'),
('for_star_targets_attribute', 'for a.b in c: pass'),
('for_star_targets_call_attribute', 'for a().c in b: pass'),
('for_star_targets_empty', 'for () in a: pass'),
('for_star_targets_mixed', 'for a[0].b().c in d: pass'),
('for_star_targets_mixed_starred',
'''
for a, *b, (c, d) in e:
pass
'''),
('for_star_targets_multiple', 'for a, b in c: pass'),
('for_star_targets_nested_starred', 'for *[*a] in b: pass'),
('for_star_targets_starred', 'for *a in b: pass'),
('for_star_targets_subscript_attribute', 'for a[0].b in c: pass'),
('for_star_targets_trailing_comma',
'''
for a, (b, c), in d:
pass
'''),
('for_star_targets_tuple', 'for a, (b, c) in d: pass'),
('for_underscore',
'''
for _ in a:
pass
'''),
('function_return_type',
'''
def f() -> Any:
pass
'''),
('f-string_slice', "f'{x[2]}'"),
('f-string_slice_upper', "f'{x[2:3]}'"),
('f-string_slice_step', "f'{x[2:3:-2]}'"),
('f-string_constant', "f'{42}'"),
('f-string_boolop', "f'{x and y}'"),
('f-string_named_expr', "f'{(x:=42)}'"),
('f-string_binop', "f'{x+y}'"),
('f-string_unaryop', "f'{not x}'"),
('f-string_lambda', "f'{(lambda x, /, y, y2=42 , *z, k1, k2=34, **k3: 42)}'"),
('f-string_lambda_call', "f'{(lambda: 2)(2)}'"),
('f-string_ifexpr', "f'{x if y else z}'"),
('f-string_dict', "f'{ {2:34, 3:34} }'"),
('f-string_set', "f'{ {2,-45} }'"),
('f-string_list', "f'{ [2,-45] }'"),
('f-string_tuple', "f'{ (2,-45) }'"),
('f-string_listcomp', "f'{[x for x in y if z]}'"),
('f-string_setcomp', "f'{ {x for x in y if z} }'"),
('f-string_dictcomp', "f'{ {x:x for x in y if z} }'"),
('f-string_genexpr', "f'{ (x for x in y if z) }'"),
('f-string_yield', "f'{ (yield x) }'"),
('f-string_yieldfrom', "f'{ (yield from x) }'"),
('f-string_await', "f'{ await x }'"),
('f-string_compare', "f'{ x == y }'"),
('f-string_call', "f'{ f(x,y,z) }'"),
('f-string_attribute', "f'{ f.x.y.z }'"),
('f-string_starred', "f'{ *x, }'"),
('f-string_doublestarred', "f'{ {**x} }'"),
('f-string_escape_brace', "f'{{Escape'"),
('f-string_escape_closing_brace', "f'Escape}}'"),
('f-string_repr', "f'{a!r}'"),
('f-string_str', "f'{a!s}'"),
('f-string_ascii', "f'{a!a}'"),
('f-string_debug', "f'{a=}'"),
('f-string_padding', "f'{a:03d}'"),
('f-string_multiline',
"""
f'''
{hello}
'''
"""),
('f-string_multiline_in_expr',
"""
f'''
{
hello
}
'''
"""),
('f-string_multiline_in_call',
"""
f'''
{f(
a, b, c
)}
'''
"""),
('global', 'global a, b'),
('group', '(yield a)'),
('if_elif',
'''
if a:
pass
elif b:
pass
'''),
('if_elif_elif',
'''
if a:
pass
elif b:
pass
elif c:
pass
'''),
('if_elif_else',
'''
if a:
pass
elif b:
pass
else:
pass
'''),
('if_else',
'''
if a:
pass
else:
pass
'''),
('if_simple', 'if a: pass'),
('import', 'import a'),
('import_alias', 'import a as b'),
('import_dotted', 'import a.b'),
('import_dotted_alias', 'import a.b as c'),
('import_dotted_multichar', 'import ab.cd'),
('import_from', 'from a import b'),
('import_from_alias', 'from a import b as c'),
('import_from_dotted', 'from a.b import c'),
('import_from_dotted_alias', 'from a.b import c as d'),
('import_from_multiple_aliases', 'from a import b as c, d as e'),
('import_from_one_dot', 'from .a import b'),
('import_from_one_dot_alias', 'from .a import b as c'),
('import_from_star', 'from a import *'),
('import_from_three_dots', 'from ...a import b'),
('import_from_trailing_comma', 'from a import (b,)'),
('kwarg',
'''
def f(**a):
pass
'''),
('kwonly_args',
'''
def f(*, a, b):
pass
'''),
('kwonly_args_with_default',
'''
def f(*, a=2, b):
pass
'''),
('lambda_kwarg', 'lambda **a: 42'),
('lambda_kwonly_args', 'lambda *, a, b: 42'),
('lambda_kwonly_args_with_default', 'lambda *, a=2, b: 42'),
('lambda_mixed_args', 'lambda a, /, b, *, c: 42'),
('lambda_mixed_args_with_default', 'lambda a, b=2, /, c=3, *e, f, **g: 42'),
('lambda_no_args', 'lambda: 42'),
('lambda_pos_args', 'lambda a,b: 42'),
('lambda_pos_args_with_default', 'lambda a, b=2: 42'),
('lambda_pos_only_args', 'lambda a, /: 42'),
('lambda_pos_only_args_with_default', 'lambda a=0, /: 42'),
('lambda_pos_posonly_args', 'lambda a, b, /, c, d: 42'),
('lambda_pos_posonly_args_with_default', 'lambda a, b=0, /, c=2: 42'),
('lambda_vararg', 'lambda *a: 42'),
('lambda_vararg_kwonly_args', 'lambda *a, b: 42'),
('list', '[1, 2, a]'),
('list_comp', '[i for i in a]'),
('list_comp_if', '[i for i in a if b]'),
('list_trailing_comma', '[1+2, a, 3+4,]'),
('mixed_args',
'''
def f(a, /, b, *, c):
pass
'''),
('mixed_args_with_default',
'''
def f(a, b=2, /, c=3, *e, f, **g):
pass
'''),
('multipart_string_bytes', 'b"Hola" b"Hello" b"Bye"'),
('multipart_string_triple', '"""Something here""" "and now"'),
('multipart_string_different_prefixes', 'u"Something" "Other thing" r"last thing"'),
('multiple_assignments', 'x = y = z = 42'),
('multiple_assignments_with_yield', 'x = y = z = yield 42'),
('multiple_pass',
'''
pass; pass
pass
'''),
('namedexpr', '(x := [1, 2, 3])'),
('namedexpr_false', '(x := False)'),
('namedexpr_none', '(x := None)'),
('namedexpr_true', '(x := True)'),
('nonlocal', 'nonlocal a, b'),
('number_complex', '-2.234+1j'),
('number_float', '-34.2333'),
('number_imaginary_literal', '1.1234j'),
('number_integer', '-234'),
('number_underscores', '1_234_567'),
('pass', 'pass'),
('pos_args',
'''
def f(a, b):
pass
'''),
('pos_args_with_default',
'''
def f(a, b=2):
pass
'''),
('pos_only_args',
'''
def f(a, /):
pass
'''),
('pos_only_args_with_default',
'''
def f(a=0, /):
pass
'''),
('pos_posonly_args',
'''
def f(a, b, /, c, d):
pass
'''),
('pos_posonly_args_with_default',
'''
def f(a, b=0, /, c=2):
pass
'''),
('primary_mixed', 'a.b.c().d[0]'),
('raise', 'raise'),
('raise_ellipsis', 'raise ...'),
('raise_expr', 'raise a'),
('raise_from', 'raise a from b'),
('return', 'return'),
('return_expr', 'return a'),
('set', '{1, 2+4, 3+5}'),
('set_comp', '{i for i in a}'),
('set_trailing_comma', '{1, 2, 3,}'),
('simple_assignment', 'x = 42'),
('simple_assignment_with_yield', 'x = yield 42'),
('string_bytes', 'b"hello"'),
('string_concatenation_bytes', 'b"hello" b"world"'),
('string_concatenation_simple', '"abcd" "efgh"'),
('string_format_simple', 'f"hello"'),
('string_format_with_formatted_value', 'f"hello {world}"'),
('string_simple', '"hello"'),
('string_unicode', 'u"hello"'),
('subscript_attribute', 'a[0].b'),
('subscript_call', 'a[b]()'),
('subscript_multiple_slices', 'a[0:a:2, 1]'),
('subscript_simple', 'a[0]'),
('subscript_single_element_tuple', 'a[0,]'),
('subscript_trailing_comma', 'a[0, 1, 2,]'),
('subscript_tuple', 'a[0, 1, 2]'),
('subscript_whole_slice', 'a[0+1:b:c]'),
('try_except',
'''
try:
pass
except:
pass
'''),
('try_except_else',
'''
try:
pass
except:
pass
else:
pass
'''),
('try_except_else_finally',
'''
try:
pass
except:
pass
else:
pass
finally:
pass
'''),
('try_except_expr',
'''
try:
pass
except a:
pass
'''),
('try_except_expr_target',
'''
try:
pass
except a as b:
pass
'''),
('try_except_finally',
'''
try:
pass
except:
pass
finally:
pass
'''),
('try_finally',
'''
try:
pass
finally:
pass
'''),
('unpacking_binop', '[*([1, 2, 3] + [3, 4, 5])]'),
('unpacking_call', '[*b()]'),
('unpacking_compare', '[*(x < y)]'),
('unpacking_constant', '[*3]'),
('unpacking_dict', '[*{1: 2, 3: 4}]'),
('unpacking_dict_comprehension', '[*{x:y for x,y in z}]'),
('unpacking_ifexpr', '[*([1, 2, 3] if x else y)]'),
('unpacking_list', '[*[1,2,3]]'),
('unpacking_list_comprehension', '[*[x for x in y]]'),
('unpacking_namedexpr', '[*(x:=[1, 2, 3])]'),
('unpacking_set', '[*{1,2,3}]'),
('unpacking_set_comprehension', '[*{x for x in y}]'),
('unpacking_string', '[*"myvalue"]'),
('unpacking_tuple', '[*(1,2,3)]'),
('unpacking_unaryop', '[*(not [1, 2, 3])]'),
('unpacking_yield', '[*(yield 42)]'),
('unpacking_yieldfrom', '[*(yield from x)]'),
('tuple', '(1, 2, 3)'),
('vararg',
'''
def f(*a):
pass
'''),
('vararg_kwonly_args',
'''
def f(*a, b):
pass
'''),
('while',
'''
while a:
pass
'''),
('while_else',
'''
while a:
pass
else:
pass
'''),
('with',
'''
with a:
pass
'''),
('with_as',
'''
with a as b:
pass
'''),
('with_as_paren',
'''
with a as (b):
pass
'''),
('with_as_empty', 'with a as (): pass'),
('with_list_recursive',
'''
with a as [x, [y, z]]:
pass
'''),
('with_tuple_recursive',
'''
with a as ((x, y), z):
pass
'''),
('with_tuple_target',
'''
with a as (x, y):
pass
'''),
('yield', 'yield'),
('yield_expr', 'yield a'),
('yield_from', 'yield from a'),
]
FAIL_TEST_CASES = [
("annotation_multiple_targets", "(a, b): int = 42"),
("annotation_nested_tuple", "((a, b)): int"),
("annotation_list", "[a]: int"),
("annotation_lambda", "lambda: int = 42"),
("annotation_tuple", "(a,): int"),
("annotation_tuple_without_paren", "a,: int"),
("assignment_keyword", "a = if"),
("comprehension_lambda", "(a for a in lambda: b)"),
("comprehension_else", "(a for a in b if c else d"),
("del_call", "del a()"),
("del_call_genexp", "del a(i for i in b)"),
("del_subscript_call", "del a[b]()"),
("del_attribute_call", "del a.b()"),
("del_mixed_call", "del a[0].b().c.d()"),
("for_star_targets_call", "for a() in b: pass"),
("for_star_targets_subscript_call", "for a[b]() in c: pass"),
("for_star_targets_attribute_call", "for a.b() in c: pass"),
("for_star_targets_mixed_call", "for a[0].b().c.d() in e: pass"),
("for_star_targets_in", "for a, in in b: pass"),
("f-string_assignment", "f'{x = 42}'"),
("f-string_empty", "f'{}'"),
("f-string_function_def", "f'{def f(): pass}'"),
("f-string_lambda", "f'{lambda x: 42}'"),
("f-string_singe_brace", "f'{'"),
("f-string_single_closing_brace", "f'}'"),
("from_import_invalid", "from import import a"),
("from_import_trailing_comma", "from a import b,"),
# This test case checks error paths involving tokens with uninitialized
# values of col_offset and end_col_offset.
("invalid indentation",
"""
def f():
a
a
"""),
("not_terminated_string", "a = 'example"),
]
FAIL_SPECIALIZED_MESSAGE_CASES = [
("f(x, y, z=1, **b, *a", "iterable argument unpacking follows keyword argument unpacking"),
("f(x, y=1, *z, **a, b", "positional argument follows keyword argument unpacking"),
("f(x, y, z=1, a=2, b", "positional argument follows keyword argument"),
("True = 1", "cannot assign to True"),
("a() = 1", "cannot assign to function call"),
("(a, b): int", "only single target (not tuple) can be annotated"),
("[a, b]: int", "only single target (not list) can be annotated"),
("a(): int", "illegal target for annotation"),
("1 += 1", "cannot assign to literal"),
("pass\n pass", "unexpected indent"),
("def f():\npass", "expected an indented block"),
]
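# Illustrative note (not part of the original test data): each pair above is
# exercised by test_incorrect_ast_generation_with_specialized_errors below,
# e.g. parsing "True = 1" must raise a SyntaxError whose .msg contains
# "cannot assign to True".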
GOOD_BUT_FAIL_TEST_CASES = [
('string_concatenation_format', 'f"{hello} world" f"again {and_again}"'),
('string_concatenation_multiple',
'''
f"hello" f"{world} again" f"and_again"
'''),
('f-string_multiline_comp',
"""
f'''
{(i for i in a
if b)}
'''
"""),
]
FSTRINGS_TRACEBACKS = {
'multiline_fstrings_same_line_with_brace': (
"""
f'''
{a$b}
'''
""",
'(a$b)',
),
'multiline_fstring_brace_on_next_line': (
"""
f'''
{a$b
}'''
""",
'(a$b',
),
'multiline_fstring_brace_on_previous_line': (
"""
f'''
{
a$b}'''
""",
'a$b)',
),
}
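# Illustrative note: each value above is (source, expected SyntaxError.text);
# test_fstring_parse_error_tracebacks below parses the dedented source and
# compares the line reported in the traceback against the second element,
# e.g. '(a$b)'.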
EXPRESSIONS_TEST_CASES = [
("expression_add", "1+1"),
("expression_add_2", "a+b"),
("expression_call", "f(a, b=2, **kw)"),
("expression_tuple", "1, 2, 3"),
("expression_tuple_one_value", "1,")
]
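# Illustrative sketch: these sources are parsed in "eval" mode (see
# test_correct_ast_generation_eval below); for example,
# ast.parse("1, 2, 3", mode="eval") wraps the Tuple node in an ast.Expression.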
def cleanup_source(source: Any) -> str:
if isinstance(source, str):
result = dedent(source)
elif isinstance(source, (list, tuple)):
result = "\n".join(source)
else:
raise TypeError(f"Invalid type for test source: {source}")
return result
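# Minimal usage sketch (illustrative only, not part of the original module):
# a plain str is dedented, while a list/tuple of lines is joined with newlines.
assert cleanup_source("    pass\n") == "pass\n"
assert cleanup_source(["a = 1", "b = 2"]) == "a = 1\nb = 2"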
def prepare_test_cases(
test_cases: Iterable[Tuple[str, Union[str, Iterable[str]]]]
) -> Tuple[Iterable[str], Iterable[str]]:
test_ids, _test_sources = zip(*test_cases)
test_sources = list(_test_sources)
for index, source in enumerate(test_sources):
result = cleanup_source(source)
test_sources[index] = result
return test_ids, test_sources
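# Illustrative sketch (an assumption, not original code): the ids and the
# normalized sources come back as parallel sequences.
assert prepare_test_cases(
    [("one", "x = 1"), ("two", ["a = 1", "b = 2"])]
) == (("one", "two"), ["x = 1", "a = 1\nb = 2"])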
TEST_IDS, TEST_SOURCES = prepare_test_cases(TEST_CASES)
GOOD_BUT_FAIL_TEST_IDS, GOOD_BUT_FAIL_SOURCES = prepare_test_cases(
GOOD_BUT_FAIL_TEST_CASES
)
FAIL_TEST_IDS, FAIL_SOURCES = prepare_test_cases(FAIL_TEST_CASES)
EXPRESSIONS_TEST_IDS, EXPRESSIONS_TEST_SOURCES = prepare_test_cases(
EXPRESSIONS_TEST_CASES
)
class ASTGenerationTest(unittest.TestCase):
def test_correct_ast_generation_on_source_files(self) -> None:
self.maxDiff = None
for source in TEST_SOURCES:
actual_ast = peg_parser.parse_string(source)
expected_ast = ast.parse(source)
self.assertEqual(
ast.dump(actual_ast, include_attributes=True),
ast.dump(expected_ast, include_attributes=True),
f"Wrong AST generation for source: {source}",
)
def test_incorrect_ast_generation_on_source_files(self) -> None:
for source in FAIL_SOURCES:
with self.assertRaises(SyntaxError, msg=f"Parsing {source} did not raise an exception"):
peg_parser.parse_string(source)
def test_incorrect_ast_generation_with_specialized_errors(self) -> None:
for source, error_text in FAIL_SPECIALIZED_MESSAGE_CASES:
exc = IndentationError if "indent" in error_text else SyntaxError
with self.assertRaises(exc) as se:
peg_parser.parse_string(source)
self.assertIn(
error_text, se.exception.msg,
f"Actual error message does not match expected for {source}"
)
@unittest.skipIf(sys.flags.use_peg, "This tests nothing for now, since compile uses pegen as well")
@unittest.expectedFailure
def test_correct_but_known_to_fail_ast_generation_on_source_files(self) -> None:
for source in GOOD_BUT_FAIL_SOURCES:
actual_ast = peg_parser.parse_string(source)
expected_ast = ast.parse(source)
self.assertEqual(
ast.dump(actual_ast, include_attributes=True),
ast.dump(expected_ast, include_attributes=True),
f"Wrong AST generation for source: {source}",
)
def test_correct_ast_generation_without_pos_info(self) -> None:
for source in GOOD_BUT_FAIL_SOURCES:
actual_ast = peg_parser.parse_string(source)
expected_ast = ast.parse(source)
self.assertEqual(
ast.dump(actual_ast),
ast.dump(expected_ast),
f"Wrong AST generation for source: {source}",
)
def test_fstring_parse_error_tracebacks(self) -> None:
for source, error_text in FSTRINGS_TRACEBACKS.values():
with self.assertRaises(SyntaxError) as se:
peg_parser.parse_string(dedent(source))
self.assertEqual(error_text, se.exception.text)
def test_correct_ast_generation_eval(self) -> None:
for source in EXPRESSIONS_TEST_SOURCES:
actual_ast = peg_parser.parse_string(source, mode='eval')
expected_ast = ast.parse(source, mode='eval')
self.assertEqual(
ast.dump(actual_ast, include_attributes=True),
ast.dump(expected_ast, include_attributes=True),
f"Wrong AST generation for source: {source}",
)
def test_tokenizer_errors_are_propagated(self) -> None:
n = 201  # deep enough to exceed the tokenizer's paren nesting limit
with self.assertRaisesRegex(SyntaxError, "too many nested parentheses"):
peg_parser.parse_string(n*'(' + ')'*n)
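# One common way to run just this suite (illustrative; assumes a built tree):
#   ./python -m unittest -v test.test_peg_parser
if __name__ == "__main__":
    unittest.main()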

View file

@@ -3,6 +3,7 @@
import dis
import pickle
import unittest
import sys
from test.support import check_syntax_error
@@ -23,10 +24,12 @@ class PositionalOnlyTestCase(unittest.TestCase):
compile(codestr + "\n", "<test>", "single")
def test_invalid_syntax_errors(self):
check_syntax_error(self, "def f(a, b = 5, /, c): pass", "non-default argument follows default argument")
check_syntax_error(self, "def f(a = 5, b, /, c): pass", "non-default argument follows default argument")
check_syntax_error(self, "def f(a = 5, b=1, /, c, *, d=2): pass", "non-default argument follows default argument")
check_syntax_error(self, "def f(a = 5, b, /): pass", "non-default argument follows default argument")
if not sys.flags.use_peg:
check_syntax_error(self, "def f(a, b = 5, /, c): pass", "non-default argument follows default argument")
check_syntax_error(self, "def f(a = 5, b, /, c): pass", "non-default argument follows default argument")
check_syntax_error(self, "def f(a = 5, b=1, /, c, *, d=2): pass", "non-default argument follows default argument")
check_syntax_error(self, "def f(a = 5, b, /): pass", "non-default argument follows default argument")
check_syntax_error(self, "def f(*args, /): pass")
check_syntax_error(self, "def f(*args, a, /): pass")
check_syntax_error(self, "def f(**kwargs, /): pass")
@@ -44,10 +47,12 @@ class PositionalOnlyTestCase(unittest.TestCase):
check_syntax_error(self, "def f(a, *, c, /, d, e): pass")
def test_invalid_syntax_errors_async(self):
check_syntax_error(self, "async def f(a, b = 5, /, c): pass", "non-default argument follows default argument")
check_syntax_error(self, "async def f(a = 5, b, /, c): pass", "non-default argument follows default argument")
check_syntax_error(self, "async def f(a = 5, b=1, /, c, d=2): pass", "non-default argument follows default argument")
check_syntax_error(self, "async def f(a = 5, b, /): pass", "non-default argument follows default argument")
if not sys.flags.use_peg:
check_syntax_error(self, "async def f(a, b = 5, /, c): pass", "non-default argument follows default argument")
check_syntax_error(self, "async def f(a = 5, b, /, c): pass", "non-default argument follows default argument")
check_syntax_error(self, "async def f(a = 5, b=1, /, c, d=2): pass", "non-default argument follows default argument")
check_syntax_error(self, "async def f(a = 5, b, /): pass", "non-default argument follows default argument")
check_syntax_error(self, "async def f(*args, /): pass")
check_syntax_error(self, "async def f(*args, a, /): pass")
check_syntax_error(self, "async def f(**kwargs, /): pass")
@@ -231,9 +236,11 @@ class PositionalOnlyTestCase(unittest.TestCase):
self.assertEqual(x(1, 2), 3)
def test_invalid_syntax_lambda(self):
check_syntax_error(self, "lambda a, b = 5, /, c: None", "non-default argument follows default argument")
check_syntax_error(self, "lambda a = 5, b, /, c: None", "non-default argument follows default argument")
check_syntax_error(self, "lambda a = 5, b, /: None", "non-default argument follows default argument")
if not sys.flags.use_peg:
check_syntax_error(self, "lambda a, b = 5, /, c: None", "non-default argument follows default argument")
check_syntax_error(self, "lambda a = 5, b, /, c: None", "non-default argument follows default argument")
check_syntax_error(self, "lambda a = 5, b, /: None", "non-default argument follows default argument")
check_syntax_error(self, "lambda *args, /: None")
check_syntax_error(self, "lambda *args, a, /: None")
check_syntax_error(self, "lambda **kwargs, /: None")

View file

@@ -119,7 +119,8 @@ class TestLiterals(unittest.TestCase):
eval("'''\n\\z'''")
self.assertEqual(len(w), 1)
self.assertEqual(w[0].filename, '<string>')
self.assertEqual(w[0].lineno, 1)
if not sys.flags.use_peg:
self.assertEqual(w[0].lineno, 1)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('error', category=DeprecationWarning)
@@ -128,7 +129,8 @@ class TestLiterals(unittest.TestCase):
exc = cm.exception
self.assertEqual(w, [])
self.assertEqual(exc.filename, '<string>')
self.assertEqual(exc.lineno, 1)
if not sys.flags.use_peg:
self.assertEqual(exc.lineno, 1)
def test_eval_str_raw(self):
self.assertEqual(eval(""" r'x' """), 'x')
@@ -168,7 +170,8 @@ class TestLiterals(unittest.TestCase):
eval("b'''\n\\z'''")
self.assertEqual(len(w), 1)
self.assertEqual(w[0].filename, '<string>')
self.assertEqual(w[0].lineno, 1)
if not sys.flags.use_peg:
self.assertEqual(w[0].lineno, 1)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('error', category=DeprecationWarning)
@@ -177,7 +180,8 @@ class TestLiterals(unittest.TestCase):
exc = cm.exception
self.assertEqual(w, [])
self.assertEqual(exc.filename, '<string>')
self.assertEqual(exc.lineno, 1)
if not sys.flags.use_peg:
self.assertEqual(exc.lineno, 1)
def test_eval_bytes_raw(self):
self.assertEqual(eval(""" br'x' """), b'x')

View file

@@ -63,9 +63,10 @@ SyntaxError: cannot assign to __debug__
Traceback (most recent call last):
SyntaxError: cannot assign to function call
>>> del f()
Traceback (most recent call last):
SyntaxError: cannot delete function call
# Pegen does not support this yet
# >>> del f()
# Traceback (most recent call last):
# SyntaxError: cannot delete function call
>>> a + 1 = 2
Traceback (most recent call last):
@@ -100,29 +101,30 @@ expression inside that container should still cause a syntax error.
This test just checks a couple of cases rather than enumerating all of
them.
>>> (a, "b", c) = (1, 2, 3)
Traceback (most recent call last):
SyntaxError: cannot assign to literal
# All of the following also produce different error messages with pegen
# >>> (a, "b", c) = (1, 2, 3)
# Traceback (most recent call last):
# SyntaxError: cannot assign to literal
>>> (a, True, c) = (1, 2, 3)
Traceback (most recent call last):
SyntaxError: cannot assign to True
# >>> (a, True, c) = (1, 2, 3)
# Traceback (most recent call last):
# SyntaxError: cannot assign to True
>>> (a, __debug__, c) = (1, 2, 3)
Traceback (most recent call last):
SyntaxError: cannot assign to __debug__
>>> (a, *True, c) = (1, 2, 3)
Traceback (most recent call last):
SyntaxError: cannot assign to True
# >>> (a, *True, c) = (1, 2, 3)
# Traceback (most recent call last):
# SyntaxError: cannot assign to True
>>> (a, *__debug__, c) = (1, 2, 3)
Traceback (most recent call last):
SyntaxError: cannot assign to __debug__
>>> [a, b, c + 1] = [1, 2, 3]
Traceback (most recent call last):
SyntaxError: cannot assign to operator
# >>> [a, b, c + 1] = [1, 2, 3]
# Traceback (most recent call last):
# SyntaxError: cannot assign to operator
>>> a if 1 else b = 1
Traceback (most recent call last):
@@ -186,9 +188,11 @@ SyntaxError: Generator expression must be parenthesized
>>> f(x for x in L, **{})
Traceback (most recent call last):
SyntaxError: Generator expression must be parenthesized
>>> f(L, x for x in L)
Traceback (most recent call last):
SyntaxError: Generator expression must be parenthesized
# >>> f(L, x for x in L)
# Traceback (most recent call last):
# SyntaxError: Generator expression must be parenthesized
>>> f(x for x in L, y for y in L)
Traceback (most recent call last):
SyntaxError: Generator expression must be parenthesized
@@ -297,31 +301,34 @@ SyntaxError: invalid syntax
... 290, 291, 292, 293, 294, 295, 296, 297, 298, 299) # doctest: +ELLIPSIS
(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, ..., 297, 298, 299)
>>> f(lambda x: x[0] = 3)
Traceback (most recent call last):
SyntaxError: expression cannot contain assignment, perhaps you meant "=="?
# >>> f(lambda x: x[0] = 3)
# Traceback (most recent call last):
# SyntaxError: expression cannot contain assignment, perhaps you meant "=="?
The grammar accepts any test (basically, any expression) in the
keyword slot of a call site. Test a few different options.
>>> f(x()=2)
Traceback (most recent call last):
SyntaxError: expression cannot contain assignment, perhaps you meant "=="?
>>> f(a or b=1)
Traceback (most recent call last):
SyntaxError: expression cannot contain assignment, perhaps you meant "=="?
>>> f(x.y=1)
Traceback (most recent call last):
SyntaxError: expression cannot contain assignment, perhaps you meant "=="?
>>> f((x)=2)
Traceback (most recent call last):
SyntaxError: expression cannot contain assignment, perhaps you meant "=="?
>>> f(True=2)
Traceback (most recent call last):
SyntaxError: cannot assign to True
# >>> f(x()=2)
# Traceback (most recent call last):
# SyntaxError: expression cannot contain assignment, perhaps you meant "=="?
# >>> f(a or b=1)
# Traceback (most recent call last):
# SyntaxError: expression cannot contain assignment, perhaps you meant "=="?
# >>> f(x.y=1)
# Traceback (most recent call last):
# SyntaxError: expression cannot contain assignment, perhaps you meant "=="?
# >>> f((x)=2)
# Traceback (most recent call last):
# SyntaxError: expression cannot contain assignment, perhaps you meant "=="?
# >>> f(True=2)
# Traceback (most recent call last):
# SyntaxError: cannot assign to True
>>> f(__debug__=1)
Traceback (most recent call last):
SyntaxError: cannot assign to __debug__
>>> __debug__: int
Traceback (most recent call last):
SyntaxError: cannot assign to __debug__
More set_context():
@@ -620,9 +627,9 @@ Corner-cases that used to fail to raise the correct error:
Traceback (most recent call last):
SyntaxError: cannot assign to __debug__
>>> with (lambda *:0): pass
Traceback (most recent call last):
SyntaxError: named arguments must follow bare *
# >>> with (lambda *:0): pass
# Traceback (most recent call last):
# SyntaxError: named arguments must follow bare *
Corner-cases that used to crash:
@@ -637,6 +644,7 @@ Corner-cases that used to crash:
"""
import re
import sys
import unittest
from test import support
@@ -670,6 +678,8 @@ class SyntaxTestCase(unittest.TestCase):
def test_assign_call(self):
self._check_error("f() = 1", "assign")
@unittest.skipIf(sys.flags.use_peg, "Pegen does not produce a specialized error "
"message yet")
def test_assign_del(self):
self._check_error("del f()", "delete")

View file

@@ -545,10 +545,10 @@ class SysModuleTest(unittest.TestCase):
def test_sys_flags(self):
self.assertTrue(sys.flags)
attrs = ("debug",
"inspect", "interactive", "optimize", "dont_write_bytecode",
"no_user_site", "no_site", "ignore_environment", "verbose",
"bytes_warning", "quiet", "hash_randomization", "isolated",
"dev_mode", "utf8_mode")
"inspect", "interactive", "optimize", "use_peg",
"dont_write_bytecode", "no_user_site", "no_site",
"ignore_environment", "verbose", "bytes_warning", "quiet",
"hash_randomization", "isolated", "dev_mode", "utf8_mode")
for attr in attrs:
self.assertTrue(hasattr(sys.flags, attr), attr)
attr_type = bool if attr == "dev_mode" else int

View file

@@ -656,6 +656,8 @@ class BaseExceptionReportingTests:
self.assertIn('inner_raise() # Marker', blocks[2])
self.check_zero_div(blocks[2])
@unittest.skipIf(sys.flags.use_peg,
"Pegen is arguably better here, so no need to fix this")
def test_syntax_error_offset_at_eol(self):
# See #10186.
def e():

View file

@@ -218,6 +218,7 @@ def favk(
"""
@unittest.skipIf(sys.flags.use_peg, "Pegen does not support type comments yet")
class TypeCommentTests(unittest.TestCase):
lowest = 4 # Lowest minor version supported

View file

@@ -158,14 +158,15 @@ List comprehension element unpacking
...
SyntaxError: iterable unpacking cannot be used in comprehension
Generator expression in function arguments
# Pegen is better here.
# Generator expression in function arguments
>>> list(*x for x in (range(5) for i in range(3)))
Traceback (most recent call last):
...
list(*x for x in (range(5) for i in range(3)))
^
SyntaxError: invalid syntax
# >>> list(*x for x in (range(5) for i in range(3)))
# Traceback (most recent call last):
# ...
# list(*x for x in (range(5) for i in range(3)))
# ^
# SyntaxError: invalid syntax
>>> dict(**x for x in [{1:2}])
Traceback (most recent call last):

View file

@@ -6,6 +6,7 @@ import pathlib
import random
import tokenize
import ast
import sys
def read_pyfile(filename):
@@ -327,6 +328,7 @@ class UnparseTestCase(ASTTestCase):
ast.Constant(value=(1, 2, 3), kind=None), "(1, 2, 3)"
)
@unittest.skipIf(sys.flags.use_peg, "Pegen does not support type annotation yet")
def test_function_type(self):
for function_type in (
"() -> int",