gh-98903: Test suite fails with exit code 4 if no tests ran (#98904)
The Python test suite now fails with exit code 4 if no tests ran. It should
help detect typos in test names and test methods.

* Add "EXITCODE_" constants to Lib/test/libregrtest/main.py.
* Fix a typo: "NO TEST RUN" becomes "NO TESTS RAN".
Parent: 0689b99bb8
Commit: c76db37c0d
3 changed files with 52 additions and 26 deletions
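Before the diff, a minimal sketch (not part of the commit) of how a caller could observe the new behaviour; the "test_os" target and the "nosuchtest" selector are only illustrative:

    import subprocess
    import sys

    # Run regrtest with a method filter that matches nothing. After this
    # change, the suite is expected to exit with code 4 instead of 0.
    proc = subprocess.run(
        [sys.executable, "-m", "test", "test_os", "-m", "nosuchtest"],
    )
    if proc.returncode == 4:  # EXITCODE_NO_TESTS_RAN
        print("No tests ran: check the test selector for typos.")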
Lib/test/libregrtest/main.py

@@ -28,6 +28,11 @@ from test.support import threading_helper
 # Must be smaller than buildbot "1200 seconds without output" limit.
 EXIT_TIMEOUT = 120.0
 
+EXITCODE_BAD_TEST = 2
+EXITCODE_INTERRUPTED = 130
+EXITCODE_ENV_CHANGED = 3
+EXITCODE_NO_TESTS_RAN = 4
+
 
 class Regrtest:
     """Execute a test suite.
@@ -493,15 +498,18 @@ class Regrtest:
         print("== encodings: locale=%s, FS=%s"
               % (locale.getencoding(), sys.getfilesystemencoding()))
 
+    def no_tests_run(self):
+        return not any((self.good, self.bad, self.skipped, self.interrupted,
+                        self.environment_changed))
+
     def get_tests_result(self):
         result = []
         if self.bad:
             result.append("FAILURE")
         elif self.ns.fail_env_changed and self.environment_changed:
             result.append("ENV CHANGED")
-        elif not any((self.good, self.bad, self.skipped, self.interrupted,
-                      self.environment_changed)):
-            result.append("NO TEST RUN")
+        elif self.no_tests_run():
+            result.append("NO TESTS RAN")
 
         if self.interrupted:
             result.append("INTERRUPTED")
@@ -750,11 +758,13 @@ class Regrtest:
         self.save_xml_result()
 
         if self.bad:
-            sys.exit(2)
+            sys.exit(EXITCODE_BAD_TEST)
         if self.interrupted:
-            sys.exit(130)
+            sys.exit(EXITCODE_INTERRUPTED)
         if self.ns.fail_env_changed and self.environment_changed:
-            sys.exit(3)
+            sys.exit(EXITCODE_ENV_CHANGED)
+        if self.no_tests_run():
+            sys.exit(EXITCODE_NO_TESTS_RAN)
         sys.exit(0)
 
 
Lib/test/test_regrtest.py

@@ -28,6 +28,11 @@ ROOT_DIR = os.path.join(os.path.dirname(__file__), '..', '..')
 ROOT_DIR = os.path.abspath(os.path.normpath(ROOT_DIR))
 LOG_PREFIX = r'[0-9]+:[0-9]+:[0-9]+ (?:load avg: [0-9]+\.[0-9]{2} )?'
 
+EXITCODE_BAD_TEST = 2
+EXITCODE_ENV_CHANGED = 3
+EXITCODE_NO_TESTS_RAN = 4
+EXITCODE_INTERRUPTED = 130
+
 TEST_INTERRUPTED = textwrap.dedent("""
     from signal import SIGINT, raise_signal
     try:
@@ -497,7 +502,7 @@ class BaseTestCase(unittest.TestCase):
             result.append('INTERRUPTED')
         if not any((good, result, failed, interrupted, skipped,
                     env_changed, fail_env_changed)):
-            result.append("NO TEST RUN")
+            result.append("NO TESTS RAN")
         elif not result:
             result.append('SUCCESS')
         result = ', '.join(result)
@@ -707,7 +712,7 @@ class ArgsTestCase(BaseTestCase):
         test_failing = self.create_test('failing', code=code)
         tests = [test_ok, test_failing]
 
-        output = self.run_tests(*tests, exitcode=2)
+        output = self.run_tests(*tests, exitcode=EXITCODE_BAD_TEST)
         self.check_executed_tests(output, tests, failed=test_failing)
 
     def test_resources(self):
@@ -748,13 +753,14 @@ class ArgsTestCase(BaseTestCase):
         test = self.create_test('random', code)
 
         # first run to get the output with the random seed
-        output = self.run_tests('-r', test)
+        output = self.run_tests('-r', test, exitcode=EXITCODE_NO_TESTS_RAN)
         randseed = self.parse_random_seed(output)
         match = self.regex_search(r'TESTRANDOM: ([0-9]+)', output)
         test_random = int(match.group(1))
 
         # try to reproduce with the random seed
-        output = self.run_tests('-r', '--randseed=%s' % randseed, test)
+        output = self.run_tests('-r', '--randseed=%s' % randseed, test,
+                                exitcode=EXITCODE_NO_TESTS_RAN)
         randseed2 = self.parse_random_seed(output)
         self.assertEqual(randseed2, randseed)
 
@@ -813,7 +819,7 @@ class ArgsTestCase(BaseTestCase):
     def test_interrupted(self):
         code = TEST_INTERRUPTED
         test = self.create_test('sigint', code=code)
-        output = self.run_tests(test, exitcode=130)
+        output = self.run_tests(test, exitcode=EXITCODE_INTERRUPTED)
         self.check_executed_tests(output, test, omitted=test,
                                   interrupted=True)
 
@@ -838,7 +844,7 @@ class ArgsTestCase(BaseTestCase):
             args = ("--slowest", "-j2", test)
         else:
             args = ("--slowest", test)
-        output = self.run_tests(*args, exitcode=130)
+        output = self.run_tests(*args, exitcode=EXITCODE_INTERRUPTED)
         self.check_executed_tests(output, test,
                                   omitted=test, interrupted=True)
 
@@ -878,7 +884,7 @@ class ArgsTestCase(BaseTestCase):
                 builtins.__dict__['RUN'] = 1
         """)
         test = self.create_test('forever', code=code)
-        output = self.run_tests('--forever', test, exitcode=2)
+        output = self.run_tests('--forever', test, exitcode=EXITCODE_BAD_TEST)
         self.check_executed_tests(output, [test]*3, failed=test)
 
     def check_leak(self, code, what):
@@ -887,7 +893,7 @@ class ArgsTestCase(BaseTestCase):
         filename = 'reflog.txt'
         self.addCleanup(os_helper.unlink, filename)
         output = self.run_tests('--huntrleaks', '3:3:', test,
-                                exitcode=2,
+                                exitcode=EXITCODE_BAD_TEST,
                                 stderr=subprocess.STDOUT)
         self.check_executed_tests(output, [test], failed=test)
 
@@ -969,7 +975,7 @@ class ArgsTestCase(BaseTestCase):
         crash_test = self.create_test(name="crash", code=code)
 
         tests = [crash_test]
-        output = self.run_tests("-j2", *tests, exitcode=2)
+        output = self.run_tests("-j2", *tests, exitcode=EXITCODE_BAD_TEST)
         self.check_executed_tests(output, tests, failed=crash_test,
                                   randomize=True)
 
@@ -1069,7 +1075,8 @@ class ArgsTestCase(BaseTestCase):
         self.check_executed_tests(output, [testname], env_changed=testname)
 
         # fail with --fail-env-changed
-        output = self.run_tests("--fail-env-changed", testname, exitcode=3)
+        output = self.run_tests("--fail-env-changed", testname,
+                                exitcode=EXITCODE_ENV_CHANGED)
         self.check_executed_tests(output, [testname], env_changed=testname,
                                   fail_env_changed=True)
 
@@ -1088,7 +1095,7 @@ class ArgsTestCase(BaseTestCase):
         """)
         testname = self.create_test(code=code)
 
-        output = self.run_tests("-w", testname, exitcode=2)
+        output = self.run_tests("-w", testname, exitcode=EXITCODE_BAD_TEST)
         self.check_executed_tests(output, [testname],
                                   failed=testname, rerun={testname: "test_fail_always"})
 
@@ -1123,7 +1130,8 @@ class ArgsTestCase(BaseTestCase):
         """)
         testname = self.create_test(code=code)
 
-        output = self.run_tests(testname, "-m", "nosuchtest", exitcode=0)
+        output = self.run_tests(testname, "-m", "nosuchtest",
+                                exitcode=EXITCODE_NO_TESTS_RAN)
         self.check_executed_tests(output, [testname], no_test_ran=testname)
 
     def test_no_tests_ran_skip(self):
@@ -1136,7 +1144,7 @@ class ArgsTestCase(BaseTestCase):
         """)
         testname = self.create_test(code=code)
 
-        output = self.run_tests(testname, exitcode=0)
+        output = self.run_tests(testname)
         self.check_executed_tests(output, [testname])
 
     def test_no_tests_ran_multiple_tests_nonexistent(self):
@@ -1150,7 +1158,8 @@ class ArgsTestCase(BaseTestCase):
         testname = self.create_test(code=code)
         testname2 = self.create_test(code=code)
 
-        output = self.run_tests(testname, testname2, "-m", "nosuchtest", exitcode=0)
+        output = self.run_tests(testname, testname2, "-m", "nosuchtest",
+                                exitcode=EXITCODE_NO_TESTS_RAN)
         self.check_executed_tests(output, [testname, testname2],
                                   no_test_ran=[testname, testname2])
 
@@ -1198,7 +1207,8 @@ class ArgsTestCase(BaseTestCase):
         """)
         testname = self.create_test(code=code)
 
-        output = self.run_tests("--fail-env-changed", testname, exitcode=3)
+        output = self.run_tests("--fail-env-changed", testname,
+                                exitcode=EXITCODE_ENV_CHANGED)
         self.check_executed_tests(output, [testname],
                                   env_changed=[testname],
                                   fail_env_changed=True)
@@ -1224,7 +1234,8 @@ class ArgsTestCase(BaseTestCase):
         """)
         testname = self.create_test(code=code)
 
-        output = self.run_tests("-j2", "--timeout=1.0", testname, exitcode=2)
+        output = self.run_tests("-j2", "--timeout=1.0", testname,
+                                exitcode=EXITCODE_BAD_TEST)
         self.check_executed_tests(output, [testname],
                                   failed=testname)
         self.assertRegex(output,
@@ -1256,7 +1267,8 @@ class ArgsTestCase(BaseTestCase):
         """)
         testname = self.create_test(code=code)
 
-        output = self.run_tests("--fail-env-changed", "-v", testname, exitcode=3)
+        output = self.run_tests("--fail-env-changed", "-v", testname,
+                                exitcode=EXITCODE_ENV_CHANGED)
         self.check_executed_tests(output, [testname],
                                   env_changed=[testname],
                                   fail_env_changed=True)
@@ -1287,7 +1299,8 @@ class ArgsTestCase(BaseTestCase):
         """)
         testname = self.create_test(code=code)
 
-        output = self.run_tests("--fail-env-changed", "-v", testname, exitcode=3)
+        output = self.run_tests("--fail-env-changed", "-v", testname,
+                                exitcode=EXITCODE_ENV_CHANGED)
         self.check_executed_tests(output, [testname],
                                   env_changed=[testname],
                                   fail_env_changed=True)
@@ -1328,7 +1341,7 @@ class ArgsTestCase(BaseTestCase):
         for option in ("-v", "-W"):
             with self.subTest(option=option):
                 cmd = ["--fail-env-changed", option, testname]
-                output = self.run_tests(*cmd, exitcode=3)
+                output = self.run_tests(*cmd, exitcode=EXITCODE_ENV_CHANGED)
                 self.check_executed_tests(output, [testname],
                                           env_changed=[testname],
                                           fail_env_changed=True)
@@ -1373,7 +1386,8 @@ class ArgsTestCase(BaseTestCase):
         """)
        testnames = [self.create_test(code=code) for _ in range(3)]
 
-        output = self.run_tests("--fail-env-changed", "-v", "-j2", *testnames, exitcode=3)
+        output = self.run_tests("--fail-env-changed", "-v", "-j2", *testnames,
+                                exitcode=EXITCODE_ENV_CHANGED)
         self.check_executed_tests(output, testnames,
                                   env_changed=testnames,
                                   fail_env_changed=True,
NEWS entry (new file)

@@ -0,0 +1,2 @@
+The Python test suite now fails with exit code 4 if no tests ran. It should
+help detect typos in test names and test methods.
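For reference, the regrtest exit statuses after this change, summarized as a small illustrative snippet (the dictionary name is invented for this note; the values come from the constants in the diff above):

    # Exit codes used by Lib/test/libregrtest/main.py after this change.
    REGRTEST_EXIT_CODES = {
        0: "success",
        2: "EXITCODE_BAD_TEST (at least one test failed)",
        3: "EXITCODE_ENV_CHANGED (with --fail-env-changed)",
        4: "EXITCODE_NO_TESTS_RAN",
        130: "EXITCODE_INTERRUPTED (SIGINT)",
    }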