Issue #25220: Fix "-m test --forever"
* Fix "-m test --forever": replace _test_forever() with self._test_forever() * Add unit test for --forever * Add unit test for a failing test * Fix also some pyflakes warnings in libregrtest
parent 38031143fb
commit 9a14214aee

4 changed files with 99 additions and 43 deletions
Lib/test/libregrtest/main.py

@@ -319,7 +319,7 @@ class Regrtest:
 
     def run_tests(self):
         if self.ns.forever:
-            self.tests = _test_forever(list(self.selected))
+            self.tests = self._test_forever(list(self.selected))
             self.test_count = ''
             self.test_count_width = 3
         else:
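
A note on the bug itself: inside a method, a bare name is looked up in the
module globals, not on the instance, so once _test_forever() became a method
of Regrtest, calling it by its bare name raised NameError at runtime. A
minimal sketch of the trap, using a hypothetical class:

    class Runner:
        def _repeat(self, tests):
            return tests * 2

        def run_broken(self, tests):
            # NameError: name '_repeat' is not defined; methods are not
            # visible as bare names inside other methods.
            return _repeat(tests)

        def run_fixed(self, tests):
            # Correct: resolve the method through the instance.
            return self._repeat(tests)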

Lib/test/libregrtest/refleak.py

@@ -46,6 +46,8 @@ def dash_R(the_module, test, indirect_test, huntrleaks):
     print("beginning", repcount, "repetitions", file=sys.stderr)
     print(("1234567890"*(repcount//10 + 1))[:repcount], file=sys.stderr,
           flush=True)
+    # initialize variables to make pyflakes quiet
+    rc_before = alloc_before = 0
     for i in range(repcount):
         indirect_test()
         alloc_after, rc_after = dash_R_cleanup(fs, ps, pic, zdc, abcs)
@@ -158,6 +160,6 @@ def warm_caches():
     for i in range(256):
         s[i:i+1]
     # unicode cache
-    x = [chr(i) for i in range(256)]
+    [chr(i) for i in range(256)]
     # int cache
-    x = list(range(-5, 257))
+    list(range(-5, 257))
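
The pyflakes warnings fixed here follow two patterns: in dash_R(), names such
as rc_before are read early in the loop body but only assigned later in it,
so a static checker cannot prove they are bound on the first iteration; in
warm_caches(), results were assigned to a local x that is never read.
Pre-initializing the names and dropping the dead assignments silences both. A
standalone sketch of the first pattern, with hypothetical names:

    def measure(repcount, nwarmup, run_once):
        # Pre-initialize so checkers (and readers) can see the name is
        # always bound; this mirrors the rc_before/alloc_before fix.
        before = 0
        deltas = []
        for i in range(repcount):
            after = run_once()
            if i >= nwarmup:
                # With nwarmup >= 1 this branch is skipped on iteration 0,
                # but a checker cannot prove that statically.
                deltas.append(after - before)
            before = after
        return deltas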

Lib/test/libregrtest/runtest_mp.py

@@ -5,7 +5,6 @@ import sys
 import time
 import traceback
 import types
-import unittest
 from test import support
 try:
     import threading
@@ -173,7 +172,7 @@ def run_tests_multiprocess(regrtest):
     try:
         while finished < regrtest.ns.use_mp:
             try:
-                item = output.get(timeout=PROGRESS_UPDATE)
+                item = output.get(timeout=timeout)
             except queue.Empty:
                 running = get_running(workers)
                 if running:
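
For the timeout change just above: queue.Queue.get(timeout=...) blocks for at
most that many seconds and raises queue.Empty on expiry, which the
multiprocessing runner uses as a periodic wake-up to report which workers are
still busy. A self-contained sketch of the pattern:

    import queue
    import threading
    import time

    def worker(out):
        time.sleep(2.5)
        out.put("done")

    output = queue.Queue()
    threading.Thread(target=worker, args=(output,)).start()

    while True:
        try:
            item = output.get(timeout=1.0)  # raises queue.Empty after 1 s
        except queue.Empty:
            print("still running...")       # periodic progress report
            continue
        print("got:", item)
        break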

Lib/test/test_regrtest.py

@@ -330,33 +330,52 @@ class BaseTestCase(unittest.TestCase):
         self.assertRegex(output, regex)
 
     def parse_executed_tests(self, output):
-        parser = re.finditer(r'^\[[0-9]+/[0-9]+\] (%s)$' % self.TESTNAME_REGEX,
-                             output,
-                             re.MULTILINE)
-        return set(match.group(1) for match in parser)
+        regex = r'^\[ *[0-9]+(?:/ *[0-9]+)?\] (%s)$' % self.TESTNAME_REGEX
+        parser = re.finditer(regex, output, re.MULTILINE)
+        return list(match.group(1) for match in parser)
 
-    def check_executed_tests(self, output, tests, skipped=None):
+    def check_executed_tests(self, output, tests, skipped=(), failed=(),
+                             randomize=False):
         if isinstance(tests, str):
             tests = [tests]
-        executed = self.parse_executed_tests(output)
-        self.assertEqual(executed, set(tests), output)
+        if isinstance(skipped, str):
+            skipped = [skipped]
+        if isinstance(failed, str):
+            failed = [failed]
         ntest = len(tests)
-        if skipped:
-            if isinstance(skipped, str):
-                skipped = [skipped]
-            nskipped = len(skipped)
-
-            plural = 's' if nskipped != 1 else ''
-            names = ' '.join(sorted(skipped))
-            expected = (r'%s test%s skipped:\n    %s$'
-                        % (nskipped, plural, names))
-            self.check_line(output, expected)
-
-            ok = ntest - nskipped
-            if ok:
-                self.check_line(output, r'%s test OK\.$' % ok)
-        else:
-            self.check_line(output, r'All %s tests OK\.$' % ntest)
+        nskipped = len(skipped)
+        nfailed = len(failed)
+
+        executed = self.parse_executed_tests(output)
+        if randomize:
+            self.assertEqual(set(executed), set(tests), output)
+        else:
+            self.assertEqual(executed, tests, output)
+
+        def plural(count):
+            return 's' if count != 1 else ''
+
+        def list_regex(line_format, tests):
+            count = len(tests)
+            names = ' '.join(sorted(tests))
+            regex = line_format % (count, plural(count))
+            regex = r'%s:\n    %s$' % (regex, names)
+            return regex
+
+        if skipped:
+            regex = list_regex('%s test%s skipped', skipped)
+            self.check_line(output, regex)
+
+        if failed:
+            regex = list_regex('%s test%s failed', failed)
+            self.check_line(output, regex)
+
+        good = ntest - nskipped - nfailed
+        if good:
+            regex = r'%s test%s OK\.$' % (good, plural(good))
+            if not skipped and not failed and good > 1:
+                regex = 'All %s' % regex
+            self.check_line(output, regex)
 
     def parse_random_seed(self, output):
         match = self.regex_search(r'Using random seed ([0-9]+)', output)
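
To make the new assertion helpers concrete, here is list_regex() lifted from
the hunk above together with a usage example; the sample output string is
illustrative, not taken from a real regrtest run:

    import re

    def plural(count):
        return 's' if count != 1 else ''

    def list_regex(line_format, tests):
        count = len(tests)
        names = ' '.join(sorted(tests))
        regex = line_format % (count, plural(count))
        regex = r'%s:\n    %s$' % (regex, names)
        return regex

    # Matches regrtest's summary line for two skipped tests.
    regex = list_regex('%s test%s skipped', ['test_b', 'test_a'])
    output = '2 tests skipped:\n    test_a test_b'
    assert re.search(regex, output, re.MULTILINE)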

@@ -364,24 +383,28 @@ class BaseTestCase(unittest.TestCase):
         self.assertTrue(0 <= randseed <= 10000000, randseed)
         return randseed
 
-    def run_command(self, args, input=None):
+    def run_command(self, args, input=None, exitcode=0):
         if not input:
             input = ''
-        try:
-            return subprocess.run(args,
-                                  check=True, universal_newlines=True,
-                                  input=input,
-                                  stdout=subprocess.PIPE,
-                                  stderr=subprocess.PIPE)
-        except subprocess.CalledProcessError as exc:
-            self.fail("%s\n"
-                      "\n"
-                      "stdout:\n"
-                      "%s\n"
-                      "\n"
-                      "stderr:\n"
-                      "%s"
-                      % (str(exc), exc.stdout, exc.stderr))
+        proc = subprocess.run(args,
+                              universal_newlines=True,
+                              input=input,
+                              stdout=subprocess.PIPE,
+                              stderr=subprocess.PIPE)
+        if proc.returncode != exitcode:
+            self.fail("Command %s failed with exit code %s\n"
+                      "\n"
+                      "stdout:\n"
+                      "---\n"
+                      "%s\n"
+                      "---\n"
+                      "\n"
+                      "stderr:\n"
+                      "---\n"
+                      "%s"
+                      "---\n"
+                      % (str(args), proc.returncode, proc.stdout, proc.stderr))
+        return proc
 
 
     def run_python(self, args, **kw):
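
The run_command() rewrite trades check=True for an explicit comparison of
proc.returncode against an expected exit code, so tests can assert on runs
that are supposed to fail instead of treating any nonzero status as an error.
A standalone illustration of the pattern (the helper name here is ours, not
the test suite's):

    import subprocess
    import sys

    def run_command(args, expected_exitcode=0):
        # Capture output and compare the status ourselves instead of
        # letting check=True raise CalledProcessError on nonzero exits.
        proc = subprocess.run(args,
                              universal_newlines=True,
                              stdout=subprocess.PIPE,
                              stderr=subprocess.PIPE)
        if proc.returncode != expected_exitcode:
            raise AssertionError("command %s exited with %s, expected %s"
                                 % (args, proc.returncode, expected_exitcode))
        return proc

    # A run that is expected to fail with exit code 2:
    run_command([sys.executable, '-c', 'import sys; sys.exit(2)'],
                expected_exitcode=2)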

@@ -411,11 +434,11 @@ class ProgramsTestCase(BaseTestCase):
 
     def check_output(self, output):
         self.parse_random_seed(output)
-        self.check_executed_tests(output, self.tests)
+        self.check_executed_tests(output, self.tests, randomize=True)
 
     def run_tests(self, args):
-        stdout = self.run_python(args)
-        self.check_output(stdout)
+        output = self.run_python(args)
+        self.check_output(output)
 
     def test_script_regrtest(self):
         # Lib/test/regrtest.py

@@ -492,8 +515,24 @@ class ArgsTestCase(BaseTestCase):
     Test arguments of the Python test suite.
     """
 
-    def run_tests(self, *args, input=None):
-        return self.run_python(['-m', 'test', *args], input=input)
+    def run_tests(self, *args, **kw):
+        return self.run_python(['-m', 'test', *args], **kw)
+
+    def test_failing_test(self):
+        # test a failing test
+        code = textwrap.dedent("""
+            import unittest
+
+            class FailingTest(unittest.TestCase):
+                def test_failing(self):
+                    self.fail("bug")
+        """)
+        test_ok = self.create_test()
+        test_failing = self.create_test(code=code)
+        tests = [test_ok, test_failing]
+
+        output = self.run_tests(*tests, exitcode=1)
+        self.check_executed_tests(output, tests, failed=test_failing)
 
     def test_resources(self):
         # test -u command line option

@@ -572,8 +611,7 @@ class ArgsTestCase(BaseTestCase):
         # test --coverage
         test = self.create_test()
         output = self.run_tests("--coverage", test)
-        executed = self.parse_executed_tests(output)
-        self.assertEqual(executed, {test}, output)
+        self.check_executed_tests(output, [test])
         regex = ('lines +cov% +module +\(path\)\n'
                  '(?: *[0-9]+ *[0-9]{1,2}% *[^ ]+ +\([^)]+\)+)+')
         self.check_line(output, regex)

@@ -584,6 +622,23 @@ class ArgsTestCase(BaseTestCase):
         output = self.run_tests("--wait", test, input='key')
         self.check_line(output, 'Press any key to continue')
 
+    def test_forever(self):
+        # test --forever
+        code = textwrap.dedent("""
+            import unittest
+
+            class ForeverTester(unittest.TestCase):
+                RUN = 1
+
+                def test_run(self):
+                    ForeverTester.RUN += 1
+                    if ForeverTester.RUN > 3:
+                        self.fail("fail at the 3rd runs")
+        """)
+        test = self.create_test(code=code)
+        output = self.run_tests('--forever', test, exitcode=1)
+        self.check_executed_tests(output, [test]*3, failed=test)
+
 
 if __name__ == '__main__':
     unittest.main()
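
For context on the fixed call: in regrtest, _test_forever() is a generator
that re-yields the selected tests in an endless loop and stops once a test
fails, which is why test_forever above expects exactly three executions. A
simplified sketch of that shape (not the exact regrtest code):

    def test_forever(tests, run_test):
        # Yield the test names in an endless loop; stop after the first
        # failing run, like regrtest's --forever mode.
        while True:
            for name in tests:
                yield name
                if not run_test(name):
                    return

    # Two passing runs, then a failure on the third:
    runs = []
    def fake_run(name):
        runs.append(name)
        return len(runs) < 3

    assert list(test_forever(['test_x'], fake_run)) == ['test_x'] * 3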