mirror of
https://github.com/python/cpython.git
synced 2025-08-04 17:08:35 +00:00
regrtest computes statistics (#108793)
test_netrc, test_pep646_syntax and test_xml_etree now return results in the test_main() function. Changes: * Rewrite TestResult as a dataclass with a new State class. * Add test.support.TestStats class and Regrtest.stats_dict attribute. * libregrtest.runtest functions now modify a TestResult instance in-place. * libregrtest summary lists the number of run tests and skipped tests, and denied resources. * Add TestResult.has_meaningful_duration() method. * Compute TestResult duration in the upper function. * Use time.perf_counter() instead of time.monotonic(). * Regrtest: rename 'resource_denieds' attribute to 'resource_denied'. * Rename CHILD_ERROR to MULTIPROCESSING_ERROR. * Use match/case syntax to have different code depending on the test state. Co-authored-by: Alex Waygood <Alex.Waygood@Gmail.com>
This commit is contained in:
parent
e7de0c5901
commit
d4e534cbb3
10 changed files with 507 additions and 303 deletions
|
@ -4,6 +4,7 @@ if __name__ != 'test.support':
|
|||
raise ImportError('support must be imported from the test package')
|
||||
|
||||
import contextlib
|
||||
import dataclasses
|
||||
import functools
|
||||
import getpass
|
||||
import _opcode
|
||||
|
@ -118,18 +119,21 @@ class Error(Exception):
|
|||
|
||||
class TestFailed(Error):
|
||||
"""Test failed."""
|
||||
|
||||
class TestFailedWithDetails(TestFailed):
|
||||
"""Test failed."""
|
||||
def __init__(self, msg, errors, failures):
|
||||
def __init__(self, msg, *args, stats=None):
|
||||
self.msg = msg
|
||||
self.errors = errors
|
||||
self.failures = failures
|
||||
super().__init__(msg, errors, failures)
|
||||
self.stats = stats
|
||||
super().__init__(msg, *args)
|
||||
|
||||
def __str__(self):
|
||||
return self.msg
|
||||
|
||||
class TestFailedWithDetails(TestFailed):
|
||||
"""Test failed."""
|
||||
def __init__(self, msg, errors, failures, stats):
|
||||
self.errors = errors
|
||||
self.failures = failures
|
||||
super().__init__(msg, errors, failures, stats=stats)
|
||||
|
||||
class TestDidNotRun(Error):
|
||||
"""Test did not run any subtests."""
|
||||
|
||||
|
@ -1105,6 +1109,30 @@ def _filter_suite(suite, pred):
|
|||
newtests.append(test)
|
||||
suite._tests = newtests
|
||||
|
||||
@dataclasses.dataclass(slots=True)
|
||||
class TestStats:
|
||||
tests_run: int = 0
|
||||
failures: int = 0
|
||||
skipped: int = 0
|
||||
|
||||
@staticmethod
|
||||
def from_unittest(result):
|
||||
return TestStats(result.testsRun,
|
||||
len(result.failures),
|
||||
len(result.skipped))
|
||||
|
||||
@staticmethod
|
||||
def from_doctest(results):
|
||||
return TestStats(results.attempted,
|
||||
results.failed,
|
||||
results.skipped)
|
||||
|
||||
def accumulate(self, stats):
|
||||
self.tests_run += stats.tests_run
|
||||
self.failures += stats.failures
|
||||
self.skipped += stats.skipped
|
||||
|
||||
|
||||
def _run_suite(suite):
|
||||
"""Run tests from a unittest.TestSuite-derived class."""
|
||||
runner = get_test_runner(sys.stdout,
|
||||
|
@ -1119,6 +1147,7 @@ def _run_suite(suite):
|
|||
if not result.testsRun and not result.skipped and not result.errors:
|
||||
raise TestDidNotRun
|
||||
if not result.wasSuccessful():
|
||||
stats = TestStats.from_unittest(result)
|
||||
if len(result.errors) == 1 and not result.failures:
|
||||
err = result.errors[0][1]
|
||||
elif len(result.failures) == 1 and not result.errors:
|
||||
|
@ -1128,7 +1157,8 @@ def _run_suite(suite):
|
|||
if not verbose: err += "; run in verbose mode for details"
|
||||
errors = [(str(tc), exc_str) for tc, exc_str in result.errors]
|
||||
failures = [(str(tc), exc_str) for tc, exc_str in result.failures]
|
||||
raise TestFailedWithDetails(err, errors, failures)
|
||||
raise TestFailedWithDetails(err, errors, failures, stats=stats)
|
||||
return result
|
||||
|
||||
|
||||
# By default, don't filter tests
|
||||
|
@ -1237,7 +1267,7 @@ def run_unittest(*classes):
|
|||
else:
|
||||
suite.addTest(loader.loadTestsFromTestCase(cls))
|
||||
_filter_suite(suite, match_test)
|
||||
_run_suite(suite)
|
||||
return _run_suite(suite)
|
||||
|
||||
#=======================================================================
|
||||
# Check for the presence of docstrings.
|
||||
|
@ -1277,13 +1307,18 @@ def run_doctest(module, verbosity=None, optionflags=0):
|
|||
else:
|
||||
verbosity = None
|
||||
|
||||
f, t = doctest.testmod(module, verbose=verbosity, optionflags=optionflags)
|
||||
if f:
|
||||
raise TestFailed("%d of %d doctests failed" % (f, t))
|
||||
results = doctest.testmod(module,
|
||||
verbose=verbosity,
|
||||
optionflags=optionflags)
|
||||
if results.failed:
|
||||
stats = TestStats.from_doctest(results)
|
||||
raise TestFailed(f"{results.failed} of {results.attempted} "
|
||||
f"doctests failed",
|
||||
stats=stats)
|
||||
if verbose:
|
||||
print('doctest (%s) ... %d tests with zero failures' %
|
||||
(module.__name__, t))
|
||||
return f, t
|
||||
(module.__name__, results.attempted))
|
||||
return results
|
||||
|
||||
|
||||
#=======================================================================
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue