Mirror of https://github.com/python/cpython.git, synced 2025-08-03 16:39:00 +00:00.
bpo-44708: Only re-run test methods that match names of previously failing test methods (GH-27287)
* Move to a static argparse.Namespace subclass
* Roughly annotate runtest.py
* Refactor libregrtest to use lossless test result objects
* Only re-run test methods that match names of previously failing test methods
* Adopt tests to cover test method name matching

Co-authored-by: Pablo Galindo Salgado <Pablogsal@gmail.com>
parent 50ffbe3daf
commit f1afef5e0d

7 changed files with 341 additions and 174 deletions. Only the hunks touching Lib/test/libregrtest/main.py are reproduced below.
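The core trick is small and is visible in the rerun hunk further down: unittest reports each failing test as a (test_full_name, traceback) pair whose name part looks like "test_method (package.module.ClassName)", so splitting on the first space yields the bare method name, which regrtest can feed back into its --match filtering on the re-run. A standalone sketch of just that step (the example data is invented, not from the patch):

```python
# unittest renders a failing test's id as "test_method (package.module.Class)",
# so splitting on the first space recovers just the method name.
failures = [
    ("test_add (test.test_example.MathTests)", "Traceback (most recent call last): ..."),
    ("test_sub (test.test_example.MathTests)", "Traceback (most recent call last): ..."),
]
failure_names = [full_name.split(" ")[0] for (full_name, *_) in failures]
assert failure_names == ["test_add", "test_sub"]
```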
--- a/Lib/test/libregrtest/main.py
+++ b/Lib/test/libregrtest/main.py
@@ -11,10 +11,10 @@ import time
 import unittest
 from test.libregrtest.cmdline import _parse_args
 from test.libregrtest.runtest import (
-    findtests, runtest, get_abs_module,
-    STDTESTS, NOTTESTS, PASSED, FAILED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED,
-    INTERRUPTED, CHILD_ERROR, TEST_DID_NOT_RUN, TIMEOUT,
-    PROGRESS_MIN_TIME, format_test_result, is_failed)
+    findtests, runtest, get_abs_module, is_failed,
+    STDTESTS, NOTTESTS, PROGRESS_MIN_TIME,
+    Passed, Failed, EnvChanged, Skipped, ResourceDenied, Interrupted,
+    ChildError, DidNotRun)
 from test.libregrtest.setup import setup_tests
 from test.libregrtest.pgo import setup_pgo_tests
 from test.libregrtest.utils import removepy, count, format_duration, printlist
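The removed names were string outcome constants; their replacements are result classes defined in Lib/test/libregrtest/runtest.py, which this excerpt does not show. As a rough sketch only, the hunks below are consistent with a hierarchy along these lines (the attribute names .name, .duration_sec, .xml_data, .errors, and .failures appear in the hunks; the exact subclass relationships are assumptions):

```python
# Hypothetical sketch of the result-object hierarchy these imports imply;
# the real classes live in Lib/test/libregrtest/runtest.py and differ in detail.
class TestResult:
    def __init__(self, name, duration_sec=0.0, xml_data=None):
        self.name = name
        self.duration_sec = duration_sec
        self.xml_data = xml_data

class Passed(TestResult): pass
class Skipped(TestResult): pass
class ResourceDenied(Skipped): pass      # a special kind of skip (assumed)
class EnvChanged(TestResult): pass
class DidNotRun(TestResult): pass
class Interrupted(TestResult): pass

class Failed(TestResult):
    def __init__(self, name, errors=None, failures=None, **kwargs):
        super().__init__(name, **kwargs)
        self.errors = errors             # list of (test_full_name, traceback) pairs
        self.failures = failures

class ChildError(Failed): pass           # worker crash counted as a failure (assumed)
```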
@@ -99,34 +99,32 @@ class Regrtest:
                 | set(self.run_no_tests))
 
     def accumulate_result(self, result, rerun=False):
-        test_name = result.test_name
-        ok = result.result
+        test_name = result.name
 
-        if ok not in (CHILD_ERROR, INTERRUPTED) and not rerun:
-            self.test_times.append((result.test_time, test_name))
+        if not isinstance(result, (ChildError, Interrupted)) and not rerun:
+            self.test_times.append((result.duration_sec, test_name))
 
-        if ok == PASSED:
+        if isinstance(result, Passed):
             self.good.append(test_name)
-        elif ok in (FAILED, CHILD_ERROR):
-            if not rerun:
-                self.bad.append(test_name)
-        elif ok == ENV_CHANGED:
-            self.environment_changed.append(test_name)
-        elif ok == SKIPPED:
-            self.skipped.append(test_name)
-        elif ok == RESOURCE_DENIED:
+        elif isinstance(result, ResourceDenied):
             self.skipped.append(test_name)
             self.resource_denieds.append(test_name)
-        elif ok == TEST_DID_NOT_RUN:
+        elif isinstance(result, Skipped):
+            self.skipped.append(test_name)
+        elif isinstance(result, EnvChanged):
+            self.environment_changed.append(test_name)
+        elif isinstance(result, Failed):
+            if not rerun:
+                self.bad.append(test_name)
+                self.rerun.append(result)
+        elif isinstance(result, DidNotRun):
             self.run_no_tests.append(test_name)
-        elif ok == INTERRUPTED:
+        elif isinstance(result, Interrupted):
             self.interrupted = True
-        elif ok == TIMEOUT:
-            self.bad.append(test_name)
         else:
-            raise ValueError("invalid test result: %r" % ok)
+            raise ValueError("invalid test result: %r" % result)
 
-        if rerun and ok not in {FAILED, CHILD_ERROR, INTERRUPTED}:
+        if rerun and not isinstance(result, (Failed, Interrupted)):
             self.bad.remove(test_name)
 
         xml_data = result.xml_data
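Two details of the rewritten chain are worth noting. First, a Failed result is now also stashed in self.rerun, so the re-run pass later has the full result object (including its errors and failures lists) rather than just a name. Second, ResourceDenied is tested before Skipped, and the old TIMEOUT branch is gone; both make sense if, as the branch order suggests, ResourceDenied subclasses Skipped and timeouts are represented as a kind of Failed. A minimal demonstration of why the subclass must be checked first (stand-in classes, not the real ones):

```python
class Skipped: pass
class ResourceDenied(Skipped): pass      # stand-ins only

result = ResourceDenied()
if isinstance(result, ResourceDenied):   # must precede the Skipped check
    outcome = "skipped and resource-denied"
elif isinstance(result, Skipped):
    outcome = "skipped"
assert outcome == "skipped and resource-denied"
```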
@@ -314,15 +312,31 @@ class Regrtest:
 
         self.log()
         self.log("Re-running failed tests in verbose mode")
-        self.rerun = self.bad[:]
-        for test_name in self.rerun:
-            self.log(f"Re-running {test_name} in verbose mode")
+        rerun_list = self.rerun[:]
+        self.rerun = []
+        for result in rerun_list:
+            test_name = result.name
+            errors = result.errors or []
+            failures = result.failures or []
+            error_names = [test_full_name.split(" ")[0] for (test_full_name, *_) in errors]
+            failure_names = [test_full_name.split(" ")[0] for (test_full_name, *_) in failures]
             self.ns.verbose = True
+            orig_match_tests = self.ns.match_tests
+            if errors or failures:
+                if self.ns.match_tests is None:
+                    self.ns.match_tests = []
+                self.ns.match_tests.extend(error_names)
+                self.ns.match_tests.extend(failure_names)
+                matching = "matching: " + ", ".join(self.ns.match_tests)
+                self.log(f"Re-running {test_name} in verbose mode ({matching})")
+            else:
+                self.log(f"Re-running {test_name} in verbose mode")
             result = runtest(self.ns, test_name)
+            self.ns.match_tests = orig_match_tests
 
             self.accumulate_result(result, rerun=True)
 
-            if result.result == INTERRUPTED:
+            if isinstance(result, Interrupted):
                 break
 
         if self.bad:
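The loop above widens self.ns.match_tests only for the duration of one file's re-run and then restores it, so method names collected from one failing file do not filter the next. The save/extend/restore pattern in isolation, with argparse.Namespace standing in for regrtest's parsed-options object:

```python
from argparse import Namespace

ns = Namespace(match_tests=None, verbose=False)   # stand-in for regrtest's ns
failure_names = ["test_add", "test_sub"]          # invented example names

orig_match_tests = ns.match_tests
if ns.match_tests is None:
    ns.match_tests = []
ns.match_tests.extend(failure_names)
# ... runtest(ns, test_name) would run here, restricted to those methods ...
ns.match_tests = orig_match_tests                 # restore for the next file

assert ns.match_tests is None
```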
@@ -383,7 +397,7 @@ class Regrtest:
         if self.rerun:
             print()
             print("%s:" % count(len(self.rerun), "re-run test"))
-            printlist(self.rerun)
+            printlist(r.name for r in self.rerun)
 
         if self.run_no_tests:
             print()
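Since self.rerun now holds result objects rather than bare test names, the summary pulls the names out explicitly. In miniature (stand-in class, invented names):

```python
class Failed:
    def __init__(self, name):
        self.name = name

rerun = [Failed("test_os"), Failed("test_io")]
print(", ".join(r.name for r in rerun))   # -> test_os, test_io
```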
@@ -423,14 +437,14 @@ class Regrtest:
             result = runtest(self.ns, test_name)
             self.accumulate_result(result)
 
-            if result.result == INTERRUPTED:
+            if isinstance(result, Interrupted):
                 break
 
-            previous_test = format_test_result(result)
+            previous_test = str(result)
             test_time = time.monotonic() - start_time
             if test_time >= PROGRESS_MIN_TIME:
                 previous_test = "%s in %s" % (previous_test, format_duration(test_time))
-            elif result.result == PASSED:
+            elif isinstance(result, Passed):
                 # be quiet: say nothing if the test passed shortly
                 previous_test = None
 
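Replacing format_test_result(result) with str(result) moves formatting onto the result objects themselves. A hypothetical sketch of the idea; the real __str__ implementations in runtest.py will differ:

```python
# Hypothetical: each result class renders itself, so callers just use str().
class TestResult:
    def __init__(self, name):
        self.name = name

class Passed(TestResult):
    def __str__(self):
        return f"{self.name} passed"

print(str(Passed("test_os")))   # -> test_os passed
```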