mirror of
https://github.com/python/cpython.git
synced 2025-09-30 12:21:51 +00:00
Synchronize libregrtest from master to 3.6 (#2244)
* bpo-30523: regrtest: Add --list-cases option (#2238)
* bpo-30284: Fix regrtest for out of tree build (#1481)
* bpo-30540: regrtest: add --matchfile option (#1909)
* bpo-30258: regrtest: Fix run_tests_multiprocess() (#1479)
* bpo-30263: regrtest: log system load (#1452)
parent 085a57a8d6
commit a0ccc54e6d
7 changed files with 228 additions and 58 deletions
Lib/test/libregrtest/cmdline.py
@@ -117,6 +117,13 @@ resources to test. Currently only the following are defined:
 To enable all resources except one, use '-uall,-<resource>'. For
 example, to run all the tests except for the gui tests, give the
 option '-uall,-gui'.
+
+--matchfile filters tests using a text file, one pattern per line.
+Pattern examples:
+
+- test method: test_stat_attributes
+- test class: FileTests
+- test identifier: test_os.FileTests.test_stat_attributes
 """
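Illustration, not part of the diff: a matchfile mixing the three pattern kinds above could look like the following (file name and invocation are examples; regrtest would be run as './python -m test test_os --matchfile patterns.txt'):

test_stat_attributes
FileTests
test_os.FileTests.test_stat_attributes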
@@ -189,8 +196,12 @@ def _create_parser():
                        help='single step through a set of tests.' +
                             more_details)
     group.add_argument('-m', '--match', metavar='PAT',
-                       dest='match_tests',
+                       dest='match_tests', action='append',
                        help='match test cases and methods with glob pattern PAT')
+    group.add_argument('--matchfile', metavar='FILENAME',
+                       dest='match_filename',
+                       help='similar to --match but get patterns from a '
+                            'text file, one pattern per line')
     group.add_argument('-G', '--failfast', action='store_true',
                        help='fail as soon as a test fails (only with -v or -W)')
     group.add_argument('-u', '--use', metavar='RES1,RES2,...',
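Illustration, not part of the diff: the switch to action='append' is what makes repeated -m options accumulate into a list (asserted by the test_regrtest changes below). A minimal argparse sketch:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-m', '--match', metavar='PAT',
                    dest='match_tests', action='append')

ns = parser.parse_args(['-m', 'pattern1', '-m', 'pattern2'])
assert ns.match_tests == ['pattern1', 'pattern2']
ns = parser.parse_args([])
assert ns.match_tests is None   # default stays None when -m is absent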
@@ -239,6 +250,9 @@ def _create_parser():
     group.add_argument('--list-tests', action='store_true',
                        help="only write the name of tests that will be run, "
                             "don't execute them")
+    group.add_argument('--list-cases', action='store_true',
+                       help='only write the name of test cases that will be run'
+                            ' , don\'t execute them')
     group.add_argument('-P', '--pgo', dest='pgo', action='store_true',
                        help='enable Profile Guided Optimization training')
@@ -343,10 +357,19 @@ def _parse_args(args, **kwargs):
             ns.use_resources.append(r)
     if ns.random_seed is not None:
         ns.randomize = True
+    if ns.verbose:
+        ns.header = True
     if ns.huntrleaks and ns.verbose3:
         ns.verbose3 = False
         print("WARNING: Disable --verbose3 because it's incompatible with "
               "--huntrleaks: see http://bugs.python.org/issue27103",
               file=sys.stderr)
+    if ns.match_filename:
+        if ns.match_tests is None:
+            ns.match_tests = []
+        filename = os.path.join(support.SAVEDCWD, ns.match_filename)
+        with open(filename) as fp:
+            for line in fp:
+                ns.match_tests.append(line.strip())

     return ns
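Illustration, not part of the diff: end-to-end sketch of the block above (the temporary file name is arbitrary; since the path is absolute, the os.path.join(support.SAVEDCWD, ...) call leaves it unchanged):

import tempfile
from test.libregrtest.cmdline import _parse_args

with tempfile.NamedTemporaryFile('w', suffix='.txt', delete=False) as fp:
    print('matchfile1', file=fp)
    print('matchfile2', file=fp)

ns = _parse_args(['-m', 'match', '--matchfile', fp.name])
assert ns.match_tests == ['match', 'matchfile1', 'matchfile2']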
Lib/test/libregrtest/main.py
@@ -10,9 +10,10 @@ import sysconfig
 import tempfile
 import textwrap
 import time
+import unittest
 from test.libregrtest.cmdline import _parse_args
 from test.libregrtest.runtest import (
-    findtests, runtest,
+    findtests, runtest, get_abs_module,
     STDTESTS, NOTTESTS, PASSED, FAILED, ENV_CHANGED, SKIPPED, RESOURCE_DENIED,
     INTERRUPTED, CHILD_ERROR,
     PROGRESS_MIN_TIME, format_test_result)
@@ -28,7 +29,13 @@ except ImportError:
 # to keep the test files in a subfolder. This eases the cleanup of leftover
 # files using the "make distclean" command.
 if sysconfig.is_python_build():
-    TEMPDIR = os.path.join(sysconfig.get_config_var('srcdir'), 'build')
+    TEMPDIR = sysconfig.get_config_var('abs_builddir')
+    if TEMPDIR is None:
+        # bpo-30284: On Windows, only srcdir is available. Using abs_builddir
+        # mostly matters on UNIX when building Python out of the source tree,
+        # especially when the source tree is read only.
+        TEMPDIR = sysconfig.get_config_var('srcdir')
+    TEMPDIR = os.path.join(TEMPDIR, 'build')
 else:
     TEMPDIR = tempfile.gettempdir()
 TEMPDIR = os.path.abspath(TEMPDIR)
@@ -107,7 +114,7 @@ class Regrtest:
         self.test_times.append((test_time, test))
         if ok == PASSED:
             self.good.append(test)
-        elif ok == FAILED:
+        elif ok in (FAILED, CHILD_ERROR):
             self.bad.append(test)
         elif ok == ENV_CHANGED:
             self.environment_changed.append(test)
@@ -116,22 +123,28 @@
         elif ok == RESOURCE_DENIED:
             self.skipped.append(test)
             self.resource_denieds.append(test)
         elif ok != INTERRUPTED:
             raise ValueError("invalid test result: %r" % ok)

     def display_progress(self, test_index, test):
         if self.ns.quiet:
             return
+
+        # "[ 51/405/1] test_tcl passed"
+        line = f"{test_index:{self.test_count_width}}{self.test_count}"
         if self.bad and not self.ns.pgo:
-            fmt = "{time} [{test_index:{count_width}}{test_count}/{nbad}] {test_name}"
-        else:
-            fmt = "{time} [{test_index:{count_width}}{test_count}] {test_name}"
+            line = f"{line}/{len(self.bad)}"
+        line = f"[{line}] {test}"
+
+        # add the system load prefix: "load avg: 1.80 "
+        if hasattr(os, 'getloadavg'):
+            load_avg_1min = os.getloadavg()[0]
+            line = f"load avg: {load_avg_1min:.2f} {line}"
+
+        # add the timestamp prefix: "0:01:05 "
         test_time = time.monotonic() - self.start_time
         test_time = datetime.timedelta(seconds=int(test_time))
-        line = fmt.format(count_width=self.test_count_width,
-                          test_index=test_index,
-                          test_count=self.test_count,
-                          nbad=len(self.bad),
-                          test_name=test,
-                          time=test_time)
+        line = f"{test_time} {line}"
         print(line, flush=True)

     def parse_args(self, kwargs):
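Illustration, not part of the diff: how the three prefixes compose, with made-up values (os.getloadavg() is not available on every platform, hence the hasattr() guard above):

import datetime
import os

line = f"{51:3}/405/1"                    # ' 51/405/1'
line = f"[{line}] test_tcl"
if hasattr(os, 'getloadavg'):
    line = f"load avg: {os.getloadavg()[0]:.2f} {line}"
line = f"{datetime.timedelta(seconds=65)} {line}"
print(line, flush=True)                   # e.g. '0:01:05 load avg: 1.80 [ 51/405/1] test_tcl'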
@@ -179,19 +192,14 @@
         self.tests = []
         # regex to match 'test_builtin' in line:
         # '0:00:00 [ 4/400] test_builtin -- test_dict took 1 sec'
-        regex = (r'^(?:[0-9]+:[0-9]+:[0-9]+ *)?'
-                 r'(?:\[[0-9/ ]+\] *)?'
-                 r'(test_[a-zA-Z0-9_]+)')
-        regex = re.compile(regex)
+        regex = re.compile(r'\btest_[a-zA-Z0-9_]+\b')
         with open(os.path.join(support.SAVEDCWD, self.ns.fromfile)) as fp:
             for line in fp:
+                line = line.split('#', 1)[0]
                 line = line.strip()
-                if line.startswith('#'):
-                    continue
-                match = regex.match(line)
-                if match is None:
-                    continue
-                self.tests.append(match.group(1))
+                match = regex.search(line)
+                if match is not None:
+                    self.tests.append(match.group())

         removepy(self.tests)
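Illustration, not part of the diff: the relaxed regex plus the '#' split accepts every input format exercised by the tests further down:

import re

regex = re.compile(r'\btest_[a-zA-Z0-9_]+\b')
for line in ('test_builtin',
             '0:00:00 [ 4/400] test_builtin -- test_dict took 1 sec',
             'Lib/test/test_opcodes.py',
             'test_os   # comments are stripped first'):
    line = line.split('#', 1)[0].strip()
    match = regex.search(line)
    assert match is not None and match.group().startswith('test_')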
@@ -241,6 +249,29 @@
         for name in self.selected:
             print(name)

+    def _list_cases(self, suite):
+        for test in suite:
+            if isinstance(test, unittest.loader._FailedTest):
+                continue
+            if isinstance(test, unittest.TestSuite):
+                self._list_cases(test)
+            elif isinstance(test, unittest.TestCase):
+                print(test.id())
+
+    def list_cases(self):
+        for test in self.selected:
+            abstest = get_abs_module(self.ns, test)
+            try:
+                suite = unittest.defaultTestLoader.loadTestsFromName(abstest)
+                self._list_cases(suite)
+            except unittest.SkipTest:
+                self.skipped.append(test)
+
+        if self.skipped:
+            print(file=sys.stderr)
+            print(count(len(self.skipped), "test"), "skipped:", file=sys.stderr)
+            printlist(self.skipped, file=sys.stderr)
+
     def rerun_failed_tests(self):
         self.ns.verbose = True
         self.ns.failfast = False
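Illustration, not part of the diff: the recursion in _list_cases() flattens nested TestSuites into test ids. A self-contained version (the Tests class is a stand-in):

import unittest

class Tests(unittest.TestCase):
    def test_method1(self): pass
    def test_method2(self): pass

def list_cases(suite):
    for test in suite:
        if isinstance(test, unittest.TestSuite):
            list_cases(test)       # recurse into nested suites
        elif isinstance(test, unittest.TestCase):
            print(test.id())       # e.g. '__main__.Tests.test_method1'

list_cases(unittest.defaultTestLoader.loadTestsFromTestCase(Tests))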
@@ -381,23 +412,28 @@
         if self.bad:
             return

+    def display_header(self):
+        # Print basic platform information
+        print("==", platform.python_implementation(), *sys.version.split())
+        print("==", platform.platform(aliased=True),
+              "%s-endian" % sys.byteorder)
+        print("== hash algorithm:", sys.hash_info.algorithm,
+              "64bit" if sys.maxsize > 2**32 else "32bit")
+        print("== cwd:", os.getcwd())
+        cpu_count = os.cpu_count()
+        if cpu_count:
+            print("== CPU count:", cpu_count)
+        print("== encodings: locale=%s, FS=%s"
+              % (locale.getpreferredencoding(False),
+                 sys.getfilesystemencoding()))
+        print("Testing with flags:", sys.flags)
+
     def run_tests(self):
         # For a partial run, we do not need to clutter the output.
-        if (self.ns.verbose
-                or self.ns.header
-                or not (self.ns.pgo or self.ns.quiet or self.ns.single
-                        or self.tests or self.ns.args)):
-            # Print basic platform information
-            print("==", platform.python_implementation(), *sys.version.split())
-            print("== ", platform.platform(aliased=True),
-                  "%s-endian" % sys.byteorder)
-            print("== ", "hash algorithm:", sys.hash_info.algorithm,
-                  "64bit" if sys.maxsize > 2**32 else "32bit")
-            print("== cwd:", os.getcwd())
-            print("== encodings: locale=%s, FS=%s"
-                  % (locale.getpreferredencoding(False),
-                     sys.getfilesystemencoding()))
-            print("Testing with flags:", sys.flags)
+        if (self.ns.header
+                or not(self.ns.pgo or self.ns.quiet or self.ns.single
+                       or self.tests or self.ns.args)):
+            self.display_header()

         if self.ns.randomize:
             print("Using random seed", self.ns.random_seed)
@@ -487,6 +523,10 @@
             self.list_tests()
             sys.exit(0)

+        if self.ns.list_cases:
+            self.list_cases()
+            sys.exit(0)
+
         self.run_tests()
         self.display_result()
@@ -513,7 +553,7 @@ def count(n, word):
     return "%d %ss" % (n, word)


-def printlist(x, width=70, indent=4):
+def printlist(x, width=70, indent=4, file=None):
     """Print the elements of iterable x to stdout.

     Optional arg width (default 70) is the maximum line length.
@@ -524,7 +564,8 @@ def printlist(x, width=70, indent=4):
     blanks = ' ' * indent
     # Print the sorted list: 'x' may be a '--random' list or a set()
     print(textwrap.fill(' '.join(str(elt) for elt in sorted(x)), width,
-                        initial_indent=blanks, subsequent_indent=blanks))
+                        initial_indent=blanks, subsequent_indent=blanks),
+          file=file)


 def main(tests=None, **kwargs):
Lib/test/libregrtest/refleak.py
@@ -84,7 +84,7 @@ def dash_R(the_module, test, indirect_test, huntrleaks):
         indirect_test()
         alloc_after, rc_after, fd_after = dash_R_cleanup(fs, ps, pic, zdc,
                                                          abcs)
-        print('.', end='', flush=True)
+        print('.', end='', file=sys.stderr, flush=True)
         if i >= nwarmup:
             rc_deltas[i] = rc_after - rc_before
             alloc_deltas[i] = alloc_after - alloc_before
Lib/test/libregrtest/runtest.py
@@ -71,6 +71,14 @@ def findtests(testdir=None, stdtests=STDTESTS, nottests=NOTTESTS):
     return stdtests + sorted(tests)


+def get_abs_module(ns, test):
+    if test.startswith('test.') or ns.testdir:
+        return test
+    else:
+        # Always import it from the test package
+        return 'test.' + test
+
+
 def runtest(ns, test):
     """Run a single test.

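Illustration, not part of the diff: usage sketch of the new helper (SimpleNamespace stands in for the parsed regrtest namespace):

from types import SimpleNamespace
from test.libregrtest.runtest import get_abs_module

ns = SimpleNamespace(testdir=None)
assert get_abs_module(ns, 'test_os') == 'test.test_os'
assert get_abs_module(ns, 'test.test_os') == 'test.test_os'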
@@ -141,11 +149,7 @@ def runtest_inner(ns, test, display_failure=True):
     test_time = 0.0
     refleak = False  # True if the test leaked references.
     try:
-        if test.startswith('test.') or ns.testdir:
-            abstest = test
-        else:
-            # Always import it from the test package
-            abstest = 'test.' + test
+        abstest = get_abs_module(ns, test)
         clear_caches()
         with saved_test_environment(test, ns.verbose, ns.quiet, pgo=ns.pgo) as environment:
             start_time = time.time()
Lib/test/libregrtest/runtest_mp.py
@@ -41,7 +41,7 @@ def run_test_in_subprocess(testname, ns):
     slaveargs = json.dumps(slaveargs)

     cmd = [sys.executable, *support.args_from_interpreter_flags(),
            '-X', 'faulthandler',
+           '-u',    # Unbuffered stdout and stderr
            '-m', 'test.regrtest',
            '--slaveargs', slaveargs]
     if ns.pgo:
@@ -124,13 +124,13 @@ class MultiprocessThread(threading.Thread):
             finally:
                 self.current_test = None

-            stdout, _, result = stdout.strip().rpartition("\n")
             if retcode != 0:
                 result = (CHILD_ERROR, "Exit code %s" % retcode)
                 self.output.put((test, stdout.rstrip(), stderr.rstrip(),
                                  result))
-                return True
+                return False

+            stdout, _, result = stdout.strip().rpartition("\n")
             if not result:
                 self.output.put((None, None, None, None))
                 return True
@@ -203,6 +203,8 @@ def run_tests_multiprocess(regrtest):
                 and test_time >= PROGRESS_MIN_TIME
                 and not regrtest.ns.pgo):
             text += ' (%.0f sec)' % test_time
+        elif ok == CHILD_ERROR:
+            text = '%s (%s)' % (text, test_time)
         running = get_running(workers)
         if running and not regrtest.ns.pgo:
             text += ' -- running: %s' % ', '.join(running)
@@ -216,9 +218,6 @@ def run_tests_multiprocess(regrtest):

             if result[0] == INTERRUPTED:
                 raise KeyboardInterrupt
-            if result[0] == CHILD_ERROR:
-                msg = "Child error on {}: {}".format(test, result[1])
-                raise Exception(msg)
             test_index += 1
     except KeyboardInterrupt:
         regrtest.interrupted = True
Lib/test/support/__init__.py
@@ -1915,9 +1915,15 @@ def run_unittest(*classes):
     def case_pred(test):
         if match_tests is None:
             return True
-        for name in test.id().split("."):
-            if fnmatch.fnmatchcase(name, match_tests):
+        test_id = test.id()
+
+        for match_test in match_tests:
+            if fnmatch.fnmatchcase(test_id, match_test):
                 return True
+
+            for name in test_id.split("."):
+                if fnmatch.fnmatchcase(name, match_test):
+                    return True
         return False
     _filter_suite(suite, case_pred)
     _run_suite(suite)
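Illustration, not part of the diff: the rewritten predicate tries each accumulated pattern against the full test id first, then against every dotted component. A standalone check (test ids are examples):

import fnmatch

def matches(test_id, match_tests):
    # mirrors case_pred() above, outside of unittest
    for match_test in match_tests:
        if fnmatch.fnmatchcase(test_id, match_test):
            return True
        for name in test_id.split("."):
            if fnmatch.fnmatchcase(name, match_test):
                return True
    return False

test_id = 'test_os.FileTests.test_stat_attributes'
assert matches(test_id, ['FileTests'])       # bare class name
assert matches(test_id, ['test_stat*'])      # glob against one component
assert matches(test_id, [test_id])           # full identifier
assert not matches(test_id, ['OtherTests'])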
Lib/test/test_regrtest.py
@@ -118,6 +118,9 @@ class ParseArgsTestCase(unittest.TestCase):
         ns = libregrtest._parse_args(['--header'])
         self.assertTrue(ns.header)

+        ns = libregrtest._parse_args(['--verbose'])
+        self.assertTrue(ns.header)
+
     def test_randomize(self):
         for opt in '-r', '--randomize':
             with self.subTest(opt=opt):
@@ -156,9 +159,24 @@
         for opt in '-m', '--match':
             with self.subTest(opt=opt):
                 ns = libregrtest._parse_args([opt, 'pattern'])
-                self.assertEqual(ns.match_tests, 'pattern')
+                self.assertEqual(ns.match_tests, ['pattern'])
                 self.checkError([opt], 'expected one argument')

+        ns = libregrtest._parse_args(['-m', 'pattern1',
+                                      '-m', 'pattern2'])
+        self.assertEqual(ns.match_tests, ['pattern1', 'pattern2'])
+
+        self.addCleanup(support.unlink, support.TESTFN)
+        with open(support.TESTFN, "w") as fp:
+            print('matchfile1', file=fp)
+            print('matchfile2', file=fp)
+
+        filename = os.path.abspath(support.TESTFN)
+        ns = libregrtest._parse_args(['-m', 'match',
+                                      '--matchfile', filename])
+        self.assertEqual(ns.match_tests,
+                         ['match', 'matchfile1', 'matchfile2'])
+
     def test_failfast(self):
         for opt in '-G', '--failfast':
             with self.subTest(opt=opt):
@@ -272,7 +290,6 @@ class ParseArgsTestCase(unittest.TestCase):
                 ns = libregrtest._parse_args([opt])
                 self.assertTrue(ns.forever)

-
     def test_unrecognized_argument(self):
         self.checkError(['--xxx'], 'usage:')

@@ -354,7 +371,7 @@ class BaseTestCase(unittest.TestCase):
         self.assertRegex(output, regex)

     def parse_executed_tests(self, output):
-        regex = (r'^[0-9]+:[0-9]+:[0-9]+ \[ *[0-9]+(?:/ *[0-9]+)?\] (%s)'
+        regex = (r'^[0-9]+:[0-9]+:[0-9]+ (?:load avg: [0-9]+\.[0-9]{2} )?\[ *[0-9]+(?:/ *[0-9]+)*\] (%s)'
                  % self.TESTNAME_REGEX)
         parser = re.finditer(regex, output, re.MULTILINE)
         return list(match.group(1) for match in parser)
@@ -454,7 +471,6 @@ class BaseTestCase(unittest.TestCase):
             self.fail(msg)
         return proc

-
     def run_python(self, args, **kw):
         args = [sys.executable, '-X', 'faulthandler', '-I', *args]
         proc = self.run_command(args, **kw)
@@ -676,6 +692,14 @@ class ArgsTestCase(BaseTestCase):
         output = self.run_tests('--fromfile', filename)
         self.check_executed_tests(output, tests)

+        # test format 'Lib/test/test_opcodes.py'
+        with open(filename, "w") as fp:
+            for name in tests:
+                print('Lib/test/%s.py' % name, file=fp)
+
+        output = self.run_tests('--fromfile', filename)
+        self.check_executed_tests(output, tests)
+
     def test_interrupted(self):
         code = TEST_INTERRUPTED
         test = self.create_test('sigint', code=code)
@@ -801,6 +825,79 @@ class ArgsTestCase(BaseTestCase):
         self.assertEqual(output.rstrip().splitlines(),
                          tests)

+    def test_list_cases(self):
+        # test --list-cases
+        code = textwrap.dedent("""
+            import unittest
+
+            class Tests(unittest.TestCase):
+                def test_method1(self):
+                    pass
+                def test_method2(self):
+                    pass
+        """)
+        testname = self.create_test(code=code)
+        all_methods = ['%s.Tests.test_method1' % testname,
+                       '%s.Tests.test_method2' % testname]
+        output = self.run_tests('--list-cases', testname)
+        self.assertEqual(output.splitlines(), all_methods)
+
+    def test_crashed(self):
+        # Any code which causes a crash
+        code = 'import faulthandler; faulthandler._sigsegv()'
+        crash_test = self.create_test(name="crash", code=code)
+        ok_test = self.create_test(name="ok")
+
+        tests = [crash_test, ok_test]
+        output = self.run_tests("-j2", *tests, exitcode=1)
+        self.check_executed_tests(output, tests, failed=crash_test,
+                                  randomize=True)
+
+    def parse_methods(self, output):
+        regex = re.compile("^(test[^ ]+).*ok$", flags=re.MULTILINE)
+        return [match.group(1) for match in regex.finditer(output)]
+
+    def test_matchfile(self):
+        code = textwrap.dedent("""
+            import unittest
+
+            class Tests(unittest.TestCase):
+                def test_method1(self):
+                    pass
+                def test_method2(self):
+                    pass
+                def test_method3(self):
+                    pass
+                def test_method4(self):
+                    pass
+        """)
+        all_methods = ['test_method1', 'test_method2',
+                       'test_method3', 'test_method4']
+        testname = self.create_test(code=code)
+
+        # by default, all methods should be run
+        output = self.run_tests("-v", testname)
+        methods = self.parse_methods(output)
+        self.assertEqual(methods, all_methods)
+
+        # only run a subset
+        filename = support.TESTFN
+        self.addCleanup(support.unlink, filename)
+
+        subset = [
+            # only match the method name
+            'test_method1',
+            # match the full identifier
+            '%s.Tests.test_method3' % testname]
+        with open(filename, "w") as fp:
+            for name in subset:
+                print(name, file=fp)
+
+        output = self.run_tests("-v", "--matchfile", filename, testname)
+        methods = self.parse_methods(output)
+        subset = ['test_method1', 'test_method3']
+        self.assertEqual(methods, subset)
+
+
 if __name__ == '__main__':
     unittest.main()