regrtest computes statistics (#108793)

test_netrc, test_pep646_syntax and test_xml_etree now return results
in the test_main() function.

Changes:

* Rewrite TestResult as a dataclass with a new State class.
* Add test.support.TestStats class and Regrtest.stats_dict attribute.
* libregrtest.runtest functions now modify a TestResult instance
  in-place.
* libregrtest summary lists the number of tests run, tests skipped,
  and resources denied.
* Add TestResult.has_meaningful_duration() method.
* Compute TestResult duration in the upper function.
* Use time.perf_counter() instead of time.monotonic().
* Regrtest: rename 'resource_denieds' attribute to 'resource_denied'.
* Rename CHILD_ERROR to MULTIPROCESSING_ERROR.
* Use match/case syntax to have different code depending on the
  test state.

Co-authored-by: Alex Waygood <Alex.Waygood@Gmail.com>
This commit is contained in:
Victor Stinner 2023-09-02 18:09:36 +02:00 committed by GitHub
parent e7de0c5901
commit d4e534cbb3
No known key found for this signature in database
GPG key ID: 4AEE18F83AFDEB23
10 changed files with 507 additions and 303 deletions

View file

@ -19,7 +19,7 @@ import textwrap
import unittest
from test import libregrtest
from test import support
from test.support import os_helper
from test.support import os_helper, TestStats
from test.libregrtest import utils, setup
if not support.has_subprocess_support:
@ -409,7 +409,9 @@ class BaseTestCase(unittest.TestCase):
self.fail("%r not found in %r" % (regex, output))
return match
def check_line(self, output, regex):
def check_line(self, output, regex, full=False):
if full:
regex += '\n'
regex = re.compile(r'^' + regex, re.MULTILINE)
self.assertRegex(output, regex)
@ -421,21 +423,27 @@ class BaseTestCase(unittest.TestCase):
def check_executed_tests(self, output, tests, skipped=(), failed=(),
env_changed=(), omitted=(),
rerun={}, no_test_ran=(),
rerun={}, run_no_tests=(),
resource_denied=(),
randomize=False, interrupted=False,
fail_env_changed=False):
fail_env_changed=False,
*, stats):
if isinstance(tests, str):
tests = [tests]
if isinstance(skipped, str):
skipped = [skipped]
if isinstance(resource_denied, str):
resource_denied = [resource_denied]
if isinstance(failed, str):
failed = [failed]
if isinstance(env_changed, str):
env_changed = [env_changed]
if isinstance(omitted, str):
omitted = [omitted]
if isinstance(no_test_ran, str):
no_test_ran = [no_test_ran]
if isinstance(run_no_tests, str):
run_no_tests = [run_no_tests]
if isinstance(stats, int):
stats = TestStats(stats)
executed = self.parse_executed_tests(output)
if randomize:
@ -479,12 +487,12 @@ class BaseTestCase(unittest.TestCase):
regex = LOG_PREFIX + f"Re-running {name} in verbose mode \\(matching: {match}\\)"
self.check_line(output, regex)
if no_test_ran:
regex = list_regex('%s test%s run no tests', no_test_ran)
if run_no_tests:
regex = list_regex('%s test%s run no tests', run_no_tests)
self.check_line(output, regex)
good = (len(tests) - len(skipped) - len(failed)
- len(omitted) - len(env_changed) - len(no_test_ran))
- len(omitted) - len(env_changed) - len(run_no_tests))
if good:
regex = r'%s test%s OK\.$' % (good, plural(good))
if not skipped and not failed and good > 1:
@ -494,6 +502,33 @@ class BaseTestCase(unittest.TestCase):
if interrupted:
self.check_line(output, 'Test suite interrupted by signal SIGINT.')
# Total tests
parts = [f'run={stats.tests_run:,}']
if stats.failures:
parts.append(f'failures={stats.failures:,}')
if stats.skipped:
parts.append(f'skipped={stats.skipped:,}')
line = fr'Total tests: {" ".join(parts)}'
self.check_line(output, line, full=True)
# Total test files
report = [f'success={good}']
if failed:
report.append(f'failed={len(failed)}')
if env_changed:
report.append(f'env_changed={len(env_changed)}')
if skipped:
report.append(f'skipped={len(skipped)}')
if resource_denied:
report.append(f'resource_denied={len(resource_denied)}')
if rerun:
report.append(f'rerun={len(rerun)}')
if run_no_tests:
report.append(f'run_no_tests={len(run_no_tests)}')
line = fr'Total test files: {" ".join(report)}'
self.check_line(output, line, full=True)
# Result
result = []
if failed:
result.append('FAILURE')
@ -508,10 +543,8 @@ class BaseTestCase(unittest.TestCase):
result.append('SUCCESS')
result = ', '.join(result)
if rerun:
self.check_line(output, 'Tests result: FAILURE')
result = 'FAILURE then %s' % result
self.check_line(output, 'Tests result: %s' % result)
self.check_line(output, f'Result: {result}', full=True)
def parse_random_seed(self, output):
match = self.regex_search(r'Using random seed ([0-9]+)', output)
@ -604,7 +637,8 @@ class ProgramsTestCase(BaseTestCase):
def check_output(self, output):
self.parse_random_seed(output)
self.check_executed_tests(output, self.tests, randomize=True)
self.check_executed_tests(output, self.tests,
randomize=True, stats=len(self.tests))
def run_tests(self, args):
output = self.run_python(args)
@ -718,7 +752,8 @@ class ArgsTestCase(BaseTestCase):
tests = [test_ok, test_failing]
output = self.run_tests(*tests, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, tests, failed=test_failing)
self.check_executed_tests(output, tests, failed=test_failing,
stats=TestStats(2, 1))
def test_resources(self):
# test -u command line option
@ -737,17 +772,21 @@ class ArgsTestCase(BaseTestCase):
# -u all: 2 resources enabled
output = self.run_tests('-u', 'all', *test_names)
self.check_executed_tests(output, test_names)
self.check_executed_tests(output, test_names, stats=2)
# -u audio: 1 resource enabled
output = self.run_tests('-uaudio', *test_names)
self.check_executed_tests(output, test_names,
skipped=tests['network'])
skipped=tests['network'],
resource_denied=tests['network'],
stats=1)
# no option: 0 resources enabled
output = self.run_tests(*test_names)
self.check_executed_tests(output, test_names,
skipped=test_names)
skipped=test_names,
resource_denied=test_names,
stats=0)
def test_random(self):
# test -r and --randseed command line option
@ -795,7 +834,8 @@ class ArgsTestCase(BaseTestCase):
previous = name
output = self.run_tests('--fromfile', filename)
self.check_executed_tests(output, tests)
stats = len(tests)
self.check_executed_tests(output, tests, stats=stats)
# test format '[2/7] test_opcodes'
with open(filename, "w") as fp:
@ -803,7 +843,7 @@ class ArgsTestCase(BaseTestCase):
print("[%s/%s] %s" % (index, len(tests), name), file=fp)
output = self.run_tests('--fromfile', filename)
self.check_executed_tests(output, tests)
self.check_executed_tests(output, tests, stats=stats)
# test format 'test_opcodes'
with open(filename, "w") as fp:
@ -811,7 +851,7 @@ class ArgsTestCase(BaseTestCase):
print(name, file=fp)
output = self.run_tests('--fromfile', filename)
self.check_executed_tests(output, tests)
self.check_executed_tests(output, tests, stats=stats)
# test format 'Lib/test/test_opcodes.py'
with open(filename, "w") as fp:
@ -819,20 +859,20 @@ class ArgsTestCase(BaseTestCase):
print('Lib/test/%s.py' % name, file=fp)
output = self.run_tests('--fromfile', filename)
self.check_executed_tests(output, tests)
self.check_executed_tests(output, tests, stats=stats)
def test_interrupted(self):
code = TEST_INTERRUPTED
test = self.create_test('sigint', code=code)
output = self.run_tests(test, exitcode=EXITCODE_INTERRUPTED)
self.check_executed_tests(output, test, omitted=test,
interrupted=True)
interrupted=True, stats=0)
def test_slowest(self):
# test --slowest
tests = [self.create_test() for index in range(3)]
output = self.run_tests("--slowest", *tests)
self.check_executed_tests(output, tests)
self.check_executed_tests(output, tests, stats=len(tests))
regex = ('10 slowest tests:\n'
'(?:- %s: .*\n){%s}'
% (self.TESTNAME_REGEX, len(tests)))
@ -851,7 +891,8 @@ class ArgsTestCase(BaseTestCase):
args = ("--slowest", test)
output = self.run_tests(*args, exitcode=EXITCODE_INTERRUPTED)
self.check_executed_tests(output, test,
omitted=test, interrupted=True)
omitted=test, interrupted=True,
stats=0)
regex = ('10 slowest tests:\n')
self.check_line(output, regex)
@ -860,7 +901,7 @@ class ArgsTestCase(BaseTestCase):
# test --coverage
test = self.create_test('coverage')
output = self.run_tests("--coverage", test)
self.check_executed_tests(output, [test])
self.check_executed_tests(output, [test], stats=1)
regex = (r'lines +cov% +module +\(path\)\n'
r'(?: *[0-9]+ *[0-9]{1,2}% *[^ ]+ +\([^)]+\)+)+')
self.check_line(output, regex)
@ -890,7 +931,8 @@ class ArgsTestCase(BaseTestCase):
""")
test = self.create_test('forever', code=code)
output = self.run_tests('--forever', test, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, [test]*3, failed=test)
self.check_executed_tests(output, [test]*3, failed=test,
stats=TestStats(1, 1))
def check_leak(self, code, what):
test = self.create_test('huntrleaks', code=code)
@ -900,7 +942,7 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests('--huntrleaks', '6:3:', test,
exitcode=EXITCODE_BAD_TEST,
stderr=subprocess.STDOUT)
self.check_executed_tests(output, [test], failed=test)
self.check_executed_tests(output, [test], failed=test, stats=1)
line = 'beginning 9 repetitions\n123456789\n.........\n'
self.check_line(output, re.escape(line))
@ -982,7 +1024,7 @@ class ArgsTestCase(BaseTestCase):
tests = [crash_test]
output = self.run_tests("-j2", *tests, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, tests, failed=crash_test,
randomize=True)
randomize=True, stats=0)
def parse_methods(self, output):
regex = re.compile("^(test[^ ]+).*ok$", flags=re.MULTILINE)
@ -1077,13 +1119,14 @@ class ArgsTestCase(BaseTestCase):
# don't fail by default
output = self.run_tests(testname)
self.check_executed_tests(output, [testname], env_changed=testname)
self.check_executed_tests(output, [testname],
env_changed=testname, stats=1)
# fail with --fail-env-changed
output = self.run_tests("--fail-env-changed", testname,
exitcode=EXITCODE_ENV_CHANGED)
self.check_executed_tests(output, [testname], env_changed=testname,
fail_env_changed=True)
fail_env_changed=True, stats=1)
def test_rerun_fail(self):
# FAILURE then FAILURE
@ -1102,7 +1145,9 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, [testname],
failed=testname, rerun={testname: "test_fail_always"})
failed=testname,
rerun={testname: "test_fail_always"},
stats=TestStats(1, 1))
def test_rerun_success(self):
# FAILURE then SUCCESS
@ -1123,7 +1168,8 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests("-w", testname, exitcode=0)
self.check_executed_tests(output, [testname],
rerun={testname: "test_fail_once"})
rerun={testname: "test_fail_once"},
stats=1)
def test_rerun_setup_class_hook_failure(self):
# FAILURE then FAILURE
@ -1143,7 +1189,8 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, testname,
failed=[testname],
rerun={testname: "ExampleTests"})
rerun={testname: "ExampleTests"},
stats=0)
def test_rerun_teardown_class_hook_failure(self):
# FAILURE then FAILURE
@ -1163,7 +1210,8 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, testname,
failed=[testname],
rerun={testname: "ExampleTests"})
rerun={testname: "ExampleTests"},
stats=1)
def test_rerun_setup_module_hook_failure(self):
# FAILURE then FAILURE
@ -1182,7 +1230,8 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, testname,
failed=[testname],
rerun={testname: testname})
rerun={testname: testname},
stats=0)
def test_rerun_teardown_module_hook_failure(self):
# FAILURE then FAILURE
@ -1201,7 +1250,8 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, testname,
failed=[testname],
rerun={testname: testname})
rerun={testname: testname},
stats=1)
def test_rerun_setup_hook_failure(self):
# FAILURE then FAILURE
@ -1220,7 +1270,8 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, testname,
failed=[testname],
rerun={testname: "test_success"})
rerun={testname: "test_success"},
stats=1)
def test_rerun_teardown_hook_failure(self):
# FAILURE then FAILURE
@ -1239,7 +1290,8 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, testname,
failed=[testname],
rerun={testname: "test_success"})
rerun={testname: "test_success"},
stats=1)
def test_rerun_async_setup_hook_failure(self):
# FAILURE then FAILURE
@ -1258,7 +1310,8 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, testname,
failed=[testname],
rerun={testname: "test_success"})
rerun={testname: "test_success"},
stats=1)
def test_rerun_async_teardown_hook_failure(self):
# FAILURE then FAILURE
@ -1277,7 +1330,8 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, testname,
failed=[testname],
rerun={testname: "test_success"})
rerun={testname: "test_success"},
stats=1)
def test_no_tests_ran(self):
code = textwrap.dedent("""
@ -1291,7 +1345,9 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests(testname, "-m", "nosuchtest",
exitcode=EXITCODE_NO_TESTS_RAN)
self.check_executed_tests(output, [testname], no_test_ran=testname)
self.check_executed_tests(output, [testname],
run_no_tests=testname,
stats=0)
def test_no_tests_ran_skip(self):
code = textwrap.dedent("""
@ -1304,7 +1360,8 @@ class ArgsTestCase(BaseTestCase):
testname = self.create_test(code=code)
output = self.run_tests(testname)
self.check_executed_tests(output, [testname])
self.check_executed_tests(output, [testname],
stats=TestStats(1, skipped=1))
def test_no_tests_ran_multiple_tests_nonexistent(self):
code = textwrap.dedent("""
@ -1320,7 +1377,8 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests(testname, testname2, "-m", "nosuchtest",
exitcode=EXITCODE_NO_TESTS_RAN)
self.check_executed_tests(output, [testname, testname2],
no_test_ran=[testname, testname2])
run_no_tests=[testname, testname2],
stats=0)
def test_no_test_ran_some_test_exist_some_not(self):
code = textwrap.dedent("""
@ -1343,7 +1401,8 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests(testname, testname2, "-m", "nosuchtest",
"-m", "test_other_bug", exitcode=0)
self.check_executed_tests(output, [testname, testname2],
no_test_ran=[testname])
run_no_tests=[testname],
stats=1)
@support.cpython_only
def test_uncollectable(self):
@ -1370,7 +1429,8 @@ class ArgsTestCase(BaseTestCase):
exitcode=EXITCODE_ENV_CHANGED)
self.check_executed_tests(output, [testname],
env_changed=[testname],
fail_env_changed=True)
fail_env_changed=True,
stats=1)
def test_multiprocessing_timeout(self):
code = textwrap.dedent(r"""
@ -1396,7 +1456,7 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests("-j2", "--timeout=1.0", testname,
exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, [testname],
failed=testname)
failed=testname, stats=0)
self.assertRegex(output,
re.compile('%s timed out' % testname, re.MULTILINE))
@ -1430,7 +1490,8 @@ class ArgsTestCase(BaseTestCase):
exitcode=EXITCODE_ENV_CHANGED)
self.check_executed_tests(output, [testname],
env_changed=[testname],
fail_env_changed=True)
fail_env_changed=True,
stats=1)
self.assertIn("Warning -- Unraisable exception", output)
self.assertIn("Exception: weakref callback bug", output)
@ -1462,7 +1523,8 @@ class ArgsTestCase(BaseTestCase):
exitcode=EXITCODE_ENV_CHANGED)
self.check_executed_tests(output, [testname],
env_changed=[testname],
fail_env_changed=True)
fail_env_changed=True,
stats=1)
self.assertIn("Warning -- Uncaught thread exception", output)
self.assertIn("Exception: bug in thread", output)
@ -1503,7 +1565,8 @@ class ArgsTestCase(BaseTestCase):
output = self.run_tests(*cmd, exitcode=EXITCODE_ENV_CHANGED)
self.check_executed_tests(output, [testname],
env_changed=[testname],
fail_env_changed=True)
fail_env_changed=True,
stats=1)
self.assertRegex(output, regex)
def test_unicode_guard_env(self):
@ -1550,7 +1613,8 @@ class ArgsTestCase(BaseTestCase):
self.check_executed_tests(output, testnames,
env_changed=testnames,
fail_env_changed=True,
randomize=True)
randomize=True,
stats=len(testnames))
for testname in testnames:
self.assertIn(f"Warning -- {testname} leaked temporary "
f"files (1): mytmpfile",
@ -1589,7 +1653,47 @@ class ArgsTestCase(BaseTestCase):
exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, [testname],
failed=[testname],
randomize=True)
randomize=True,
stats=0)
def test_doctest(self):
code = textwrap.dedent(fr'''
import doctest
import sys
from test import support
def my_function():
"""
Pass:
>>> 1 + 1
2
Failure:
>>> 2 + 3
23
>>> 1 + 1
11
Skipped test (ignored):
>>> id(1.0) # doctest: +SKIP
7948648
"""
def test_main():
testmod = sys.modules[__name__]
return support.run_doctest(testmod)
''')
testname = self.create_test(code=code)
output = self.run_tests("--fail-env-changed", "-v", "-j1", testname,
exitcode=EXITCODE_BAD_TEST)
self.check_executed_tests(output, [testname],
failed=[testname],
randomize=True,
stats=TestStats(4, 2, 1))
class TestUtils(unittest.TestCase):