[3.6] bpo-30523, bpo-30764, bpo-30776: Sync regrtest from master (#2441)

* bpo-30523: regrtest --list-cases --match (#2401)
  * regrtest --list-cases now supports the --match and --match-file options.
    Example: ./python -m test --list-cases -m FileTests test_os
  * --list-cases now also sets support.verbose to False to prevent messages
    to stdout when loading test modules.
  * Add the support._match_test() private function.
  (cherry picked from commit ace56d5836)

* bpo-30764: regrtest: add --fail-env-changed option (#2402)
  * bpo-30764: regrtest: change exit code on failure
    * Exit code 2 if failed tests ("bad")
    * Exit code 3 if interrupted
  * bpo-30764: regrtest: add --fail-env-changed option.
    If the option is set, mark a test as failed if it alters the environment,
    for example if it creates a file without removing it.
  (cherry picked from commit 63f54c6893)

* bpo-30776: reduce regrtest -R false positives (#2422)
  * Change how the regrtest --huntrleaks checker decides whether a test file
    leaks: require that each run leaks at least 1 reference.
  * Warmup runs are now completely ignored: ignored by the checker and no
    longer used to compute the sum.
  * Add a unit test for a reference leak.
  * Example of reference-count differences previously considered a failure
    (leak) and now considered a success (no leak):
    [3, 0, 0]
    [0, 1, 0]
    [8, -8, 1]
  (cherry picked from commit 48b5c422ff)
parent 39e501a291
commit 35d2ca2b94

5 changed files with 140 additions and 49 deletions
Lib/test/libregrtest/cmdline.py

@@ -255,6 +255,9 @@ def _create_parser():
                             ' , don\'t execute them')
     group.add_argument('-P', '--pgo', dest='pgo', action='store_true',
                        help='enable Profile Guided Optimization training')
+    group.add_argument('--fail-env-changed', action='store_true',
+                       help='if a test file alters the environment, mark '
+                            'the test as failed')
 
     return parser
 
Lib/test/libregrtest/main.py

@@ -256,9 +256,13 @@ class Regrtest:
             if isinstance(test, unittest.TestSuite):
                 self._list_cases(test)
             elif isinstance(test, unittest.TestCase):
-                print(test.id())
+                if support._match_test(test):
+                    print(test.id())
 
     def list_cases(self):
+        support.verbose = False
+        support.match_tests = self.ns.match_tests
+
         for test in self.selected:
             abstest = get_abs_module(self.ns, test)
             try:
@@ -474,6 +478,8 @@ class Regrtest:
             result = "FAILURE"
         elif self.interrupted:
             result = "INTERRUPTED"
+        elif self.environment_changed and self.ns.fail_env_changed:
+            result = "ENV CHANGED"
         else:
             result = "SUCCESS"
         print("Tests result: %s" % result)
@@ -534,7 +540,13 @@ class Regrtest:
             self.rerun_failed_tests()
 
         self.finalize()
-        sys.exit(len(self.bad) > 0 or self.interrupted)
+        if self.bad:
+            sys.exit(2)
+        if self.interrupted:
+            sys.exit(130)
+        if self.ns.fail_env_changed and self.environment_changed:
+            sys.exit(3)
+        sys.exit(0)
 
 
 def removepy(names):
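The exit codes set above replace the old boolean status. As a purely illustrative driver (not part of the patch; the choice of test_os as the target module and the label mapping are the editor's own), a wrapper script could interpret them like this:

    # Illustrative only: run one test module through regrtest and map the
    # exit codes introduced by bpo-30764 back to regrtest's result labels.
    import subprocess
    import sys

    proc = subprocess.run([sys.executable, "-m", "test",
                           "--fail-env-changed", "test_os"])
    labels = {0: "SUCCESS", 2: "FAILURE", 3: "ENV CHANGED", 130: "INTERRUPTED"}
    print("regrtest exit code:", proc.returncode,
          "->", labels.get(proc.returncode, "unexpected"))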
Lib/test/libregrtest/refleak.py

@@ -93,9 +93,21 @@ def dash_R(the_module, test, indirect_test, huntrleaks):
         rc_before = rc_after
         fd_before = fd_after
     print(file=sys.stderr)
+
     # These checkers return False on success, True on failure
     def check_rc_deltas(deltas):
-        return any(deltas)
+        # bpo-30776: Try to ignore false positives:
+        #
+        # [3, 0, 0]
+        # [0, 1, 0]
+        # [8, -8, 1]
+        #
+        # Expected leaks:
+        #
+        # [5, 5, 6]
+        # [10, 1, 1]
+        return all(delta >= 1 for delta in deltas)
+
     def check_alloc_deltas(deltas):
         # At least 1/3rd of 0s
         if 3 * deltas.count(0) < len(deltas):
@@ -104,14 +116,21 @@ def dash_R(the_module, test, indirect_test, huntrleaks):
         if not set(deltas) <= {1,0,-1}:
             return True
         return False
+
+    def check_fd_deltas(deltas):
+        return any(deltas)
+
     failed = False
     for deltas, item_name, checker in [
         (rc_deltas, 'references', check_rc_deltas),
         (alloc_deltas, 'memory blocks', check_alloc_deltas),
-        (fd_deltas, 'file descriptors', check_rc_deltas)]:
+        (fd_deltas, 'file descriptors', check_fd_deltas)
+    ]:
+        # ignore warmup runs
+        deltas = deltas[nwarmup:]
         if checker(deltas):
             msg = '%s leaked %s %s, sum=%s' % (
-                test, deltas[nwarmup:], item_name, sum(deltas))
+                test, deltas, item_name, sum(deltas))
             print(msg, file=sys.stderr, flush=True)
             with open(fname, "a") as refrep:
                 print(msg, file=refrep)
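To make the new check_rc_deltas() rule concrete, here is a small standalone sketch: the function body mirrors the patch, while the surrounding script is illustrative and reuses the delta sequences quoted in the commit message.

    # bpo-30776 rule: only report a leak when every run (warmup runs already
    # stripped) leaks at least one reference.
    def check_rc_deltas(deltas):
        return all(delta >= 1 for delta in deltas)

    noise = [[3, 0, 0], [0, 1, 0], [8, -8, 1]]   # previously reported, now ignored
    leaks = [[5, 5, 6], [10, 1, 1]]              # still reported as real leaks

    for deltas in noise + leaks:
        print(deltas, "->", "leak" if check_rc_deltas(deltas) else "no leak")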
Lib/test/support/__init__.py

@@ -1898,6 +1898,23 @@ def _run_suite(suite):
         raise TestFailed(err)
 
 
+def _match_test(test):
+    global match_tests
+
+    if match_tests is None:
+        return True
+    test_id = test.id()
+
+    for match_test in match_tests:
+        if fnmatch.fnmatchcase(test_id, match_test):
+            return True
+
+        for name in test_id.split("."):
+            if fnmatch.fnmatchcase(name, match_test):
+                return True
+    return False
+
+
 def run_unittest(*classes):
     """Run tests from unittest.TestCase-derived classes."""
     valid_types = (unittest.TestSuite, unittest.TestCase)
@@ -1912,20 +1929,7 @@ def run_unittest(*classes):
             suite.addTest(cls)
         else:
             suite.addTest(unittest.makeSuite(cls))
-    def case_pred(test):
-        if match_tests is None:
-            return True
-        test_id = test.id()
-
-        for match_test in match_tests:
-            if fnmatch.fnmatchcase(test_id, match_test):
-                return True
-
-            for name in test_id.split("."):
-                if fnmatch.fnmatchcase(name, match_test):
-                    return True
-        return False
-    _filter_suite(suite, case_pred)
+    _filter_suite(suite, _match_test)
     _run_suite(suite)
 
 #=======================================================================
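support._match_test() accepts a --match pattern if it matches either the full test ID or any dotted component of it, which is why "-m FileTests" selects every case of that class in the commit-message example. A self-contained sketch of just that matching logic (the test ID and patterns below are made-up illustrations, not taken from the patch):

    import fnmatch

    def matches(test_id, patterns):
        # Mirrors support._match_test(): a pattern may match the full dotted
        # test ID or any single component of it.
        for pattern in patterns:
            if fnmatch.fnmatchcase(test_id, pattern):
                return True
            for name in test_id.split("."):
                if fnmatch.fnmatchcase(name, pattern):
                    return True
        return False

    test_id = "test.test_os.FileTests.test_access"
    print(matches(test_id, ["FileTests"]))       # True: matches a class component
    print(matches(test_id, ["test_acc*"]))       # True: wildcard on the method name
    print(matches(test_id, ["test.test_os.*"]))  # True: matches the full test ID
    print(matches(test_id, ["WalkTests"]))       # False: nothing matches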
Lib/test/test_regrtest.py

@@ -377,19 +377,19 @@ class BaseTestCase(unittest.TestCase):
         return list(match.group(1) for match in parser)
 
     def check_executed_tests(self, output, tests, skipped=(), failed=(),
-                             omitted=(), randomize=False, interrupted=False):
+                             env_changed=(), omitted=(),
+                             randomize=False, interrupted=False,
+                             fail_env_changed=False):
         if isinstance(tests, str):
             tests = [tests]
         if isinstance(skipped, str):
             skipped = [skipped]
         if isinstance(failed, str):
             failed = [failed]
+        if isinstance(env_changed, str):
+            env_changed = [env_changed]
         if isinstance(omitted, str):
             omitted = [omitted]
-        ntest = len(tests)
-        nskipped = len(skipped)
-        nfailed = len(failed)
-        nomitted = len(omitted)
 
         executed = self.parse_executed_tests(output)
         if randomize:
@@ -415,11 +415,17 @@ class BaseTestCase(unittest.TestCase):
             regex = list_regex('%s test%s failed', failed)
             self.check_line(output, regex)
 
+        if env_changed:
+            regex = list_regex('%s test%s altered the execution environment',
+                               env_changed)
+            self.check_line(output, regex)
+
         if omitted:
             regex = list_regex('%s test%s omitted', omitted)
             self.check_line(output, regex)
 
-        good = ntest - nskipped - nfailed - nomitted
+        good = (len(tests) - len(skipped) - len(failed)
+                - len(omitted) - len(env_changed))
         if good:
             regex = r'%s test%s OK\.$' % (good, plural(good))
             if not skipped and not failed and good > 1:
@@ -429,10 +435,12 @@ class BaseTestCase(unittest.TestCase):
         if interrupted:
             self.check_line(output, 'Test suite interrupted by signal SIGINT.')
 
-        if nfailed:
+        if failed:
             result = 'FAILURE'
         elif interrupted:
             result = 'INTERRUPTED'
+        elif fail_env_changed and env_changed:
+            result = 'ENV CHANGED'
         else:
             result = 'SUCCESS'
         self.check_line(output, 'Tests result: %s' % result)
@@ -604,7 +612,7 @@ class ArgsTestCase(BaseTestCase):
         test_failing = self.create_test('failing', code=code)
         tests = [test_ok, test_failing]
 
-        output = self.run_tests(*tests, exitcode=1)
+        output = self.run_tests(*tests, exitcode=2)
         self.check_executed_tests(output, tests, failed=test_failing)
 
     def test_resources(self):
@@ -703,7 +711,7 @@ class ArgsTestCase(BaseTestCase):
     def test_interrupted(self):
         code = TEST_INTERRUPTED
         test = self.create_test('sigint', code=code)
-        output = self.run_tests(test, exitcode=1)
+        output = self.run_tests(test, exitcode=130)
         self.check_executed_tests(output, test, omitted=test,
                                   interrupted=True)
 
@@ -732,7 +740,7 @@ class ArgsTestCase(BaseTestCase):
             args = ("--slowest", "-j2", test)
         else:
             args = ("--slowest", test)
-        output = self.run_tests(*args, exitcode=1)
+        output = self.run_tests(*args, exitcode=130)
         self.check_executed_tests(output, test,
                                   omitted=test, interrupted=True)
 
@@ -772,9 +780,43 @@ class ArgsTestCase(BaseTestCase):
                     builtins.__dict__['RUN'] = 1
         """)
         test = self.create_test('forever', code=code)
-        output = self.run_tests('--forever', test, exitcode=1)
+        output = self.run_tests('--forever', test, exitcode=2)
         self.check_executed_tests(output, [test]*3, failed=test)
 
+    def check_leak(self, code, what):
+        test = self.create_test('huntrleaks', code=code)
+
+        filename = 'reflog.txt'
+        self.addCleanup(support.unlink, filename)
+        output = self.run_tests('--huntrleaks', '3:3:', test,
+                                exitcode=2,
+                                stderr=subprocess.STDOUT)
+        self.check_executed_tests(output, [test], failed=test)
+
+        line = 'beginning 6 repetitions\n123456\n......\n'
+        self.check_line(output, re.escape(line))
+
+        line2 = '%s leaked [1, 1, 1] %s, sum=3\n' % (test, what)
+        self.assertIn(line2, output)
+
+        with open(filename) as fp:
+            reflog = fp.read()
+            self.assertIn(line2, reflog)
+
+    @unittest.skipUnless(Py_DEBUG, 'need a debug build')
+    def test_huntrleaks(self):
+        # test --huntrleaks
+        code = textwrap.dedent("""
+            import unittest
+
+            GLOBAL_LIST = []
+
+            class RefLeakTest(unittest.TestCase):
+                def test_leak(self):
+                    GLOBAL_LIST.append(object())
+        """)
+        self.check_leak(code, 'references')
+
     @unittest.skipUnless(Py_DEBUG, 'need a debug build')
     def test_huntrleaks_fd_leak(self):
         # test --huntrleaks for file descriptor leak
@@ -799,24 +841,7 @@ class ArgsTestCase(BaseTestCase):
                     fd = os.open(__file__, os.O_RDONLY)
                     # bug: never cloes the file descriptor
         """)
-        test = self.create_test('huntrleaks', code=code)
-
-        filename = 'reflog.txt'
-        self.addCleanup(support.unlink, filename)
-        output = self.run_tests('--huntrleaks', '3:3:', test,
-                                exitcode=1,
-                                stderr=subprocess.STDOUT)
-        self.check_executed_tests(output, [test], failed=test)
-
-        line = 'beginning 6 repetitions\n123456\n......\n'
-        self.check_line(output, re.escape(line))
-
-        line2 = '%s leaked [1, 1, 1] file descriptors, sum=3\n' % test
-        self.assertIn(line2, output)
-
-        with open(filename) as fp:
-            reflog = fp.read()
-            self.assertIn(line2, reflog)
+        self.check_leak(code, 'file descriptors')
 
     def test_list_tests(self):
         # test --list-tests
@@ -837,11 +862,20 @@ class ArgsTestCase(BaseTestCase):
                     pass
         """)
         testname = self.create_test(code=code)
+
+        # Test --list-cases
         all_methods = ['%s.Tests.test_method1' % testname,
                        '%s.Tests.test_method2' % testname]
         output = self.run_tests('--list-cases', testname)
         self.assertEqual(output.splitlines(), all_methods)
 
+        # Test --list-cases with --match
+        all_methods = ['%s.Tests.test_method1' % testname]
+        output = self.run_tests('--list-cases',
+                                '-m', 'test_method1',
+                                testname)
+        self.assertEqual(output.splitlines(), all_methods)
+
     def test_crashed(self):
         # Any code which causes a crash
         code = 'import faulthandler; faulthandler._sigsegv()'
@@ -849,7 +883,7 @@ class ArgsTestCase(BaseTestCase):
         ok_test = self.create_test(name="ok")
 
         tests = [crash_test, ok_test]
-        output = self.run_tests("-j2", *tests, exitcode=1)
+        output = self.run_tests("-j2", *tests, exitcode=2)
         self.check_executed_tests(output, tests, failed=crash_test,
                                   randomize=True)
 
@@ -898,6 +932,25 @@ class ArgsTestCase(BaseTestCase):
         subset = ['test_method1', 'test_method3']
         self.assertEqual(methods, subset)
 
+    def test_env_changed(self):
+        code = textwrap.dedent("""
+            import unittest
+
+            class Tests(unittest.TestCase):
+                def test_env_changed(self):
+                    open("env_changed", "w").close()
+        """)
+        testname = self.create_test(code=code)
+
+        # don't fail by default
+        output = self.run_tests(testname)
+        self.check_executed_tests(output, [testname], env_changed=testname)
+
+        # fail with --fail-env-changed
+        output = self.run_tests("--fail-env-changed", testname, exitcode=3)
+        self.check_executed_tests(output, [testname], env_changed=testname,
+                                  fail_env_changed=True)
+
 
 if __name__ == '__main__':
     unittest.main()