Mirror of https://github.com/python/cpython.git, synced 2025-08-30 21:48:47 +00:00.
bpo-34279, regrtest: Issue a warning if no tests have been executed (GH-10150)
This commit (9724348b43) has parent b2774c8e91.
5 changed files with 107 additions and 7 deletions
|
@ -351,11 +351,20 @@ class BaseTestCase(unittest.TestCase):
|
|||
self.tmptestdir = tempfile.mkdtemp()
|
||||
self.addCleanup(support.rmtree, self.tmptestdir)
|
||||
|
||||
def create_test(self, name=None, code=''):
|
||||
def create_test(self, name=None, code=None):
|
||||
if not name:
|
||||
name = 'noop%s' % BaseTestCase.TEST_UNIQUE_ID
|
||||
BaseTestCase.TEST_UNIQUE_ID += 1
|
||||
|
||||
if code is None:
|
||||
code = textwrap.dedent("""
|
||||
import unittest
|
||||
|
||||
class Tests(unittest.TestCase):
|
||||
def test_empty_test(self):
|
||||
pass
|
||||
""")
|
||||
|
||||
# test_regrtest cannot be run twice in parallel because
|
||||
# of setUp() and create_test()
|
||||
name = self.TESTNAME_PREFIX + name
|
||||
|
@ -390,7 +399,7 @@ class BaseTestCase(unittest.TestCase):
|
|||
|
||||
def check_executed_tests(self, output, tests, skipped=(), failed=(),
|
||||
env_changed=(), omitted=(),
|
||||
rerun=(),
|
||||
rerun=(), no_test_ran=(),
|
||||
randomize=False, interrupted=False,
|
||||
fail_env_changed=False):
|
||||
if isinstance(tests, str):
|
||||
|
@ -405,6 +414,8 @@ class BaseTestCase(unittest.TestCase):
|
|||
omitted = [omitted]
|
||||
if isinstance(rerun, str):
|
||||
rerun = [rerun]
|
||||
if isinstance(no_test_ran, str):
|
||||
no_test_ran = [no_test_ran]
|
||||
|
||||
executed = self.parse_executed_tests(output)
|
||||
if randomize:
|
||||
|
@ -447,8 +458,12 @@ class BaseTestCase(unittest.TestCase):
|
|||
regex = "Re-running test %r in verbose mode" % name
|
||||
self.check_line(output, regex)
|
||||
|
||||
if no_test_ran:
|
||||
regex = list_regex('%s test%s run no tests', no_test_ran)
|
||||
self.check_line(output, regex)
|
||||
|
||||
good = (len(tests) - len(skipped) - len(failed)
|
||||
- len(omitted) - len(env_changed))
|
||||
- len(omitted) - len(env_changed) - len(no_test_ran))
|
||||
if good:
|
||||
regex = r'%s test%s OK\.$' % (good, plural(good))
|
||||
if not skipped and not failed and good > 1:
|
||||
|
@ -465,12 +480,16 @@ class BaseTestCase(unittest.TestCase):
|
|||
result.append('ENV CHANGED')
|
||||
if interrupted:
|
||||
result.append('INTERRUPTED')
|
||||
if not result:
|
||||
if not any((good, result, failed, interrupted, skipped,
|
||||
env_changed, fail_env_changed)):
|
||||
result.append("NO TEST RUN")
|
||||
elif not result:
|
||||
result.append('SUCCESS')
|
||||
result = ', '.join(result)
|
||||
if rerun:
|
||||
self.check_line(output, 'Tests result: %s' % result)
|
||||
result = 'FAILURE then %s' % result
|
||||
|
||||
self.check_line(output, 'Tests result: %s' % result)
|
||||
|
||||
def parse_random_seed(self, output):
|
||||
|
@ -649,7 +668,14 @@ class ArgsTestCase(BaseTestCase):
|
|||
# test -u command line option
|
||||
tests = {}
|
||||
for resource in ('audio', 'network'):
|
||||
code = 'from test import support\nsupport.requires(%r)' % resource
|
||||
code = textwrap.dedent("""
|
||||
from test import support; support.requires(%r)
|
||||
import unittest
|
||||
class PassingTest(unittest.TestCase):
|
||||
def test_pass(self):
|
||||
pass
|
||||
""" % resource)
|
||||
|
||||
tests[resource] = self.create_test(resource, code)
|
||||
test_names = sorted(tests.values())
|
||||
|
||||
|
@ -978,6 +1004,56 @@ class ArgsTestCase(BaseTestCase):
|
|||
output = self.run_tests("-w", testname, exitcode=2)
|
||||
self.check_executed_tests(output, [testname],
|
||||
failed=testname, rerun=testname)
|
||||
def test_no_tests_ran(self):
|
||||
code = textwrap.dedent("""
|
||||
import unittest
|
||||
|
||||
class Tests(unittest.TestCase):
|
||||
def test_bug(self):
|
||||
pass
|
||||
""")
|
||||
testname = self.create_test(code=code)
|
||||
|
||||
output = self.run_tests(testname, "-m", "nosuchtest", exitcode=0)
|
||||
self.check_executed_tests(output, [testname], no_test_ran=testname)
|
||||
|
||||
def test_no_tests_ran_multiple_tests_nonexistent(self):
|
||||
code = textwrap.dedent("""
|
||||
import unittest
|
||||
|
||||
class Tests(unittest.TestCase):
|
||||
def test_bug(self):
|
||||
pass
|
||||
""")
|
||||
testname = self.create_test(code=code)
|
||||
testname2 = self.create_test(code=code)
|
||||
|
||||
output = self.run_tests(testname, testname2, "-m", "nosuchtest", exitcode=0)
|
||||
self.check_executed_tests(output, [testname, testname2],
|
||||
no_test_ran=[testname, testname2])
|
||||
|
||||
def test_no_test_ran_some_test_exist_some_not(self):
|
||||
code = textwrap.dedent("""
|
||||
import unittest
|
||||
|
||||
class Tests(unittest.TestCase):
|
||||
def test_bug(self):
|
||||
pass
|
||||
""")
|
||||
testname = self.create_test(code=code)
|
||||
other_code = textwrap.dedent("""
|
||||
import unittest
|
||||
|
||||
class Tests(unittest.TestCase):
|
||||
def test_other_bug(self):
|
||||
pass
|
||||
""")
|
||||
testname2 = self.create_test(code=other_code)
|
||||
|
||||
output = self.run_tests(testname, testname2, "-m", "nosuchtest",
|
||||
"-m", "test_other_bug", exitcode=0)
|
||||
self.check_executed_tests(output, [testname, testname2],
|
||||
no_test_ran=[testname])
|
||||
|
||||
|
||||
class TestUtils(unittest.TestCase):
|
||||
|
|
Loading…
Add table
Add a link
Reference in a new issue