bpo-30856: Update TestResult early, without buffering in _Outcome (GH-28180)

TestResult methods addFailure(), addError(), addSkip() and
addSubTest() are now called immediately after an exception is raised
in a test or a subtest finishes.  Previously they were called only
after the test clean-up had finished.
Serhiy Storchaka 2021-09-19 15:24:38 +03:00 committed by GitHub
parent dea59cf88a
commit 664448d81f
7 changed files with 76 additions and 65 deletions
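
For context, here is a minimal sketch of the observable change (it is not part of the patch, and the OrderLoggingResult and Example names are invented for illustration). With this commit, a result object receives addError() as soon as the test body raises, before tearDown() runs; previously the call arrived only after tear-down and clean-up had finished. The updated expected-event lists in the test files below assert the same ordering.

import unittest

class OrderLoggingResult(unittest.TestResult):
    # Hypothetical result class used only for this illustration.
    def __init__(self):
        super().__init__()
        self.events = []

    def addError(self, test, err):
        super().addError(test, err)
        self.events.append('addError')

class Example(unittest.TestCase):
    def test_boom(self):
        raise RuntimeError('boom')

    def tearDown(self):
        # After this change, addError() has already been delivered by the
        # time tearDown() runs; previously it was delivered afterwards.
        result.events.append('tearDown')

result = OrderLoggingResult()
Example('test_boom').run(result)
print(result.events)  # now: ['addError', 'tearDown']; before this commit: ['tearDown', 'addError']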

@@ -47,12 +47,10 @@ class _Outcome(object):
         self.result = result
         self.result_supports_subtests = hasattr(result, "addSubTest")
         self.success = True
-        self.skipped = []
         self.expectedFailure = None
-        self.errors = []
 
     @contextlib.contextmanager
-    def testPartExecutor(self, test_case, isTest=False):
+    def testPartExecutor(self, test_case, subTest=False):
         old_success = self.success
         self.success = True
         try:
@@ -61,7 +59,7 @@ class _Outcome(object):
             raise
         except SkipTest as e:
             self.success = False
-            self.skipped.append((test_case, str(e)))
+            _addSkip(self.result, test_case, str(e))
         except _ShouldStop:
             pass
         except:
@@ -70,17 +68,36 @@ class _Outcome(object):
                 self.expectedFailure = exc_info
             else:
                 self.success = False
-                self.errors.append((test_case, exc_info))
+                if subTest:
+                    self.result.addSubTest(test_case.test_case, test_case, exc_info)
+                else:
+                    _addError(self.result, test_case, exc_info)
             # explicitly break a reference cycle:
             # exc_info -> frame -> exc_info
             exc_info = None
         else:
-            if self.result_supports_subtests and self.success:
-                self.errors.append((test_case, None))
+            if subTest and self.success:
+                self.result.addSubTest(test_case.test_case, test_case, None)
         finally:
             self.success = self.success and old_success
 
+
+def _addSkip(result, test_case, reason):
+    addSkip = getattr(result, 'addSkip', None)
+    if addSkip is not None:
+        addSkip(test_case, reason)
+    else:
+        warnings.warn("TestResult has no addSkip method, skips not reported",
+                      RuntimeWarning, 2)
+        result.addSuccess(test_case)
+
+
+def _addError(result, test, exc_info):
+    if result is not None and exc_info is not None:
+        if issubclass(exc_info[0], test.failureException):
+            result.addFailure(test, exc_info)
+        else:
+            result.addError(test, exc_info)
+
+
 def _id(obj):
     return obj
@@ -467,15 +484,6 @@ class TestCase(object):
         return "<%s testMethod=%s>" % \
                (strclass(self.__class__), self._testMethodName)
 
-    def _addSkip(self, result, test_case, reason):
-        addSkip = getattr(result, 'addSkip', None)
-        if addSkip is not None:
-            addSkip(test_case, reason)
-        else:
-            warnings.warn("TestResult has no addSkip method, skips not reported",
-                          RuntimeWarning, 2)
-            result.addSuccess(test_case)
-
     @contextlib.contextmanager
     def subTest(self, msg=_subtest_msg_sentinel, **params):
         """Return a context manager that will return the enclosed block
@@ -494,7 +502,7 @@ class TestCase(object):
             params_map = parent.params.new_child(params)
         self._subtest = _SubTest(self, msg, params_map)
         try:
-            with self._outcome.testPartExecutor(self._subtest, isTest=True):
+            with self._outcome.testPartExecutor(self._subtest, subTest=True):
                 yield
             if not self._outcome.success:
                 result = self._outcome.result
@@ -507,16 +515,6 @@ class TestCase(object):
         finally:
             self._subtest = parent
 
-    def _feedErrorsToResult(self, result, errors):
-        for test, exc_info in errors:
-            if isinstance(test, _SubTest):
-                result.addSubTest(test.test_case, test, exc_info)
-            elif exc_info is not None:
-                if issubclass(exc_info[0], self.failureException):
-                    result.addFailure(test, exc_info)
-                else:
-                    result.addError(test, exc_info)
-
     def _addExpectedFailure(self, result, exc_info):
         try:
             addExpectedFailure = result.addExpectedFailure
@@ -574,7 +572,7 @@ class TestCase(object):
                 # If the class or method was skipped.
                 skip_why = (getattr(self.__class__, '__unittest_skip_why__', '')
                             or getattr(testMethod, '__unittest_skip_why__', ''))
-                self._addSkip(result, self, skip_why)
+                _addSkip(result, self, skip_why)
                 return result
 
             expecting_failure = (
@@ -589,16 +587,13 @@ class TestCase(object):
                     self._callSetUp()
                 if outcome.success:
                     outcome.expecting_failure = expecting_failure
-                    with outcome.testPartExecutor(self, isTest=True):
+                    with outcome.testPartExecutor(self):
                         self._callTestMethod(testMethod)
                     outcome.expecting_failure = False
                     with outcome.testPartExecutor(self):
                         self._callTearDown()
 
                 self.doCleanups()
-                for test, reason in outcome.skipped:
-                    self._addSkip(result, test, reason)
-                self._feedErrorsToResult(result, outcome.errors)
                 if outcome.success:
                     if expecting_failure:
                         if outcome.expectedFailure:
@@ -609,11 +604,10 @@ class TestCase(object):
                         result.addSuccess(self)
                 return result
             finally:
-                # explicitly break reference cycles:
-                # outcome.errors -> frame -> outcome -> outcome.errors
+                # explicitly break reference cycle:
                 # outcome.expectedFailure -> frame -> outcome -> outcome.expectedFailure
-                outcome.errors.clear()
                 outcome.expectedFailure = None
+                outcome = None
 
                 # clear the outcome, no more needed
                 self._outcome = None

@@ -197,8 +197,8 @@ class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
                 super(Foo, self).test()
                 raise RuntimeError('raised by Foo.test')
 
-        expected = ['startTest', 'setUp', 'test', 'tearDown',
-                    'addError', 'stopTest']
+        expected = ['startTest', 'setUp', 'test',
+                    'addError', 'tearDown', 'stopTest']
         Foo(events).run(result)
         self.assertEqual(events, expected)
@@ -216,7 +216,7 @@ class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
                 raise RuntimeError('raised by Foo.test')
 
         expected = ['startTestRun', 'startTest', 'setUp', 'test',
-                    'tearDown', 'addError', 'stopTest', 'stopTestRun']
+                    'addError', 'tearDown', 'stopTest', 'stopTestRun']
         Foo(events).run()
         self.assertEqual(events, expected)
@@ -236,8 +236,8 @@ class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
                 super(Foo, self).test()
                 self.fail('raised by Foo.test')
 
-        expected = ['startTest', 'setUp', 'test', 'tearDown',
-                    'addFailure', 'stopTest']
+        expected = ['startTest', 'setUp', 'test',
+                    'addFailure', 'tearDown', 'stopTest']
         Foo(events).run(result)
         self.assertEqual(events, expected)
@@ -252,7 +252,7 @@ class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
                 self.fail('raised by Foo.test')
 
         expected = ['startTestRun', 'startTest', 'setUp', 'test',
-                    'tearDown', 'addFailure', 'stopTest', 'stopTestRun']
+                    'addFailure', 'tearDown', 'stopTest', 'stopTestRun']
         events = []
         Foo(events).run()
         self.assertEqual(events, expected)
@@ -353,10 +353,10 @@ class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
     def test_run_call_order__subtests(self):
         events = []
         result = LoggingResult(events)
-        expected = ['startTest', 'setUp', 'test', 'tearDown',
+        expected = ['startTest', 'setUp', 'test',
                     'addSubTestFailure', 'addSubTestSuccess',
                     'addSubTestFailure', 'addSubTestFailure',
-                    'addSubTestSuccess', 'addError', 'stopTest']
+                    'addSubTestSuccess', 'addError', 'tearDown', 'stopTest']
         self._check_call_order__subtests(result, events, expected)
 
     def test_run_call_order__subtests_legacy(self):
@@ -364,8 +364,8 @@ class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
         # text execution stops after the first subtest failure.
         events = []
         result = LegacyLoggingResult(events)
-        expected = ['startTest', 'setUp', 'test', 'tearDown',
-                    'addFailure', 'stopTest']
+        expected = ['startTest', 'setUp', 'test',
+                    'addFailure', 'tearDown', 'stopTest']
         self._check_call_order__subtests(result, events, expected)
 
     def _check_call_order__subtests_success(self, result, events, expected_events):
@@ -386,9 +386,9 @@ class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
         result = LoggingResult(events)
         # The 6 subtest successes are individually recorded, in addition
         # to the whole test success.
-        expected = (['startTest', 'setUp', 'test', 'tearDown']
+        expected = (['startTest', 'setUp', 'test']
                     + 6 * ['addSubTestSuccess']
-                    + ['addSuccess', 'stopTest'])
+                    + ['tearDown', 'addSuccess', 'stopTest'])
         self._check_call_order__subtests_success(result, events, expected)
 
     def test_run_call_order__subtests_success_legacy(self):
@@ -413,8 +413,8 @@ class Test_TestCase(unittest.TestCase, TestEquality, TestHashing):
                     self.fail('failure')
                 self.fail('failure')
 
-        expected = ['startTest', 'setUp', 'test', 'tearDown',
-                    'addSubTestFailure', 'stopTest']
+        expected = ['startTest', 'setUp', 'test',
+                    'addSubTestFailure', 'tearDown', 'stopTest']
         Foo(events).run(result)
         self.assertEqual(events, expected)

@@ -58,8 +58,8 @@ class Test_FunctionTestCase(unittest.TestCase):
         def tearDown():
             events.append('tearDown')
 
-        expected = ['startTest', 'setUp', 'test', 'tearDown',
-                    'addError', 'stopTest']
+        expected = ['startTest', 'setUp', 'test',
+                    'addError', 'tearDown', 'stopTest']
         unittest.FunctionTestCase(test, setUp, tearDown).run(result)
         self.assertEqual(events, expected)
@@ -84,8 +84,8 @@ class Test_FunctionTestCase(unittest.TestCase):
         def tearDown():
             events.append('tearDown')
 
-        expected = ['startTest', 'setUp', 'test', 'tearDown',
-                    'addFailure', 'stopTest']
+        expected = ['startTest', 'setUp', 'test',
+                    'addFailure', 'tearDown', 'stopTest']
         unittest.FunctionTestCase(test, setUp, tearDown).run(result)
         self.assertEqual(events, expected)

@@ -816,7 +816,8 @@ class TestOutputBuffering(unittest.TestCase):
         self.assertEqual(str(test_case), description)
         self.assertIn('ValueError: bad cleanup2', formatted_exc)
         self.assertNotIn('TypeError', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
+        self.assertIn('\nStdout:\nset up\ndo cleanup2\n', formatted_exc)
+        self.assertNotIn('\ndo cleanup1\n', formatted_exc)
         test_case, formatted_exc = result.errors[1]
         self.assertEqual(str(test_case), description)
         self.assertIn('TypeError: bad cleanup1', formatted_exc)
@@ -847,13 +848,16 @@ class TestOutputBuffering(unittest.TestCase):
         self.assertIn('ZeroDivisionError: division by zero', formatted_exc)
         self.assertNotIn('ValueError', formatted_exc)
         self.assertNotIn('TypeError', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
+        self.assertIn('\nStdout:\nset up\n', formatted_exc)
+        self.assertNotIn('\ndo cleanup2\n', formatted_exc)
+        self.assertNotIn('\ndo cleanup1\n', formatted_exc)
         test_case, formatted_exc = result.errors[1]
         self.assertEqual(str(test_case), description)
         self.assertIn('ValueError: bad cleanup2', formatted_exc)
         self.assertNotIn('ZeroDivisionError', formatted_exc)
         self.assertNotIn('TypeError', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
+        self.assertIn('\nStdout:\nset up\ndo cleanup2\n', formatted_exc)
+        self.assertNotIn('\ndo cleanup1\n', formatted_exc)
         test_case, formatted_exc = result.errors[2]
         self.assertEqual(str(test_case), description)
         self.assertIn('TypeError: bad cleanup1', formatted_exc)
@@ -887,13 +891,16 @@ class TestOutputBuffering(unittest.TestCase):
         self.assertIn('ZeroDivisionError: division by zero', formatted_exc)
         self.assertNotIn('ValueError', formatted_exc)
         self.assertNotIn('TypeError', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
+        self.assertIn('\nStdout:\nset up\ntear down\n', formatted_exc)
+        self.assertNotIn('\ndo cleanup2\n', formatted_exc)
+        self.assertNotIn('\ndo cleanup1\n', formatted_exc)
         test_case, formatted_exc = result.errors[1]
         self.assertEqual(str(test_case), description)
         self.assertIn('ValueError: bad cleanup2', formatted_exc)
         self.assertNotIn('ZeroDivisionError', formatted_exc)
         self.assertNotIn('TypeError', formatted_exc)
-        self.assertIn(expected_out, formatted_exc)
+        self.assertIn('\nStdout:\nset up\ntear down\ndo cleanup2\n', formatted_exc)
+        self.assertNotIn('\ndo cleanup1\n', formatted_exc)
         test_case, formatted_exc = result.errors[2]
         self.assertEqual(str(test_case), description)
         self.assertIn('TypeError: bad cleanup1', formatted_exc)

@@ -78,7 +78,8 @@ class TestCleanUp(unittest.TestCase):
                 pass
 
         test = TestableTest('testNothing')
-        outcome = test._outcome = _Outcome()
+        result = unittest.TestResult()
+        outcome = test._outcome = _Outcome(result=result)
 
         CleanUpExc = Exception('foo')
         exc2 = Exception('bar')
@@ -94,10 +95,13 @@ class TestCleanUp(unittest.TestCase):
         self.assertFalse(test.doCleanups())
         self.assertFalse(outcome.success)
 
-        ((_, (Type1, instance1, _)),
-         (_, (Type2, instance2, _))) = reversed(outcome.errors)
-        self.assertEqual((Type1, instance1), (Exception, CleanUpExc))
-        self.assertEqual((Type2, instance2), (Exception, exc2))
+        (_, msg2), (_, msg1) = result.errors
+        self.assertIn('in cleanup1', msg1)
+        self.assertIn('raise CleanUpExc', msg1)
+        self.assertIn('Exception: foo', msg1)
+        self.assertIn('in cleanup2', msg2)
+        self.assertIn('raise exc2', msg2)
+        self.assertIn('Exception: bar', msg2)
 
     def testCleanupInRun(self):
         blowUp = False

@@ -197,7 +197,7 @@ class Test_TestSkipping(unittest.TestCase):
         result = LoggingResult(events)
         test = Foo("test_skip_me")
         self.assertIs(test.run(result), result)
-        self.assertEqual(events, ['startTest', 'addSkip', 'addFailure', 'stopTest'])
+        self.assertEqual(events, ['startTest', 'addFailure', 'addSkip', 'stopTest'])
         self.assertEqual(result.skipped, [(test, "skip")])
 
     def test_skipping_and_fail_in_cleanup(self):

@@ -0,0 +1,6 @@
+:class:`unittest.TestResult` methods
+:meth:`~unittest.TestResult.addFailure`,
+:meth:`~unittest.TestResult.addError`, :meth:`~unittest.TestResult.addSkip`
+and :meth:`~unittest.TestResult.addSubTest` are now called immediately after
+raising an exception in test or finishing a subtest. Previously they were
+called only after finishing the test clean up.