Mirror of https://github.com/python/cpython.git
gh-117225: Add color to doctest output (#117583)
Co-authored-by: Alex Waygood <Alex.Waygood@Gmail.com>
parent f6e5cc66be
commit 975081b11e
5 changed files with 92 additions and 15 deletions
Lib/doctest.py

@@ -104,6 +104,7 @@ import traceback
 import unittest
 from io import StringIO, IncrementalNewlineDecoder
 from collections import namedtuple
+from traceback import _ANSIColors, _can_colorize
 
 
 class TestResults(namedtuple('TestResults', 'failed attempted')):
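The new import pulls in two private helpers from traceback: _ANSIColors (a namespace of ANSI escape strings such as RED and RESET) and _can_colorize() (which decides whether output should be coloured, presumably honouring the NO_COLOR convention and whether the stream is a tty). A minimal sketch of the gating pattern the rest of the diff builds on; since these helpers are private, treat it as illustrative only:

    from traceback import _ANSIColors, _can_colorize

    def colorize(text: str, color: str) -> str:
        # Degrade to plain text when colour output is unavailable.
        if not _can_colorize():
            return text
        return f"{color}{text}{_ANSIColors.RESET}"

    print(colorize("ok", _ANSIColors.GREEN))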
@@ -1179,6 +1180,9 @@ class DocTestRunner:
     The `run` method is used to process a single DocTest case. It
     returns a TestResults instance.
 
+    >>> save_colorize = traceback._COLORIZE
+    >>> traceback._COLORIZE = False
+
     >>> tests = DocTestFinder().find(_TestClass)
     >>> runner = DocTestRunner(verbose=False)
     >>> tests.sort(key = lambda test: test.name)
@@ -1229,6 +1233,8 @@ class DocTestRunner:
     can be also customized by subclassing DocTestRunner, and
     overriding the methods `report_start`, `report_success`,
     `report_unexpected_exception`, and `report_failure`.
+
+    >>> traceback._COLORIZE = save_colorize
     """
     # This divider string is used to separate failure messages, and to
     # separate sections of the summary.
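These two docstring hunks bracket the embedded doctests: colourized output would inject ANSI escapes into the text being compared against the expected output, so colour is switched off before the examples run and restored afterwards. Outside a docstring the same pattern is safer with try/finally; _COLORIZE is a private module-level flag, shown here only because the diff relies on it:

    import traceback

    save_colorize = traceback._COLORIZE
    traceback._COLORIZE = False
    try:
        pass  # run code whose expected output must stay plain text
    finally:
        traceback._COLORIZE = save_colorize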
@@ -1307,7 +1313,10 @@ class DocTestRunner:
             'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
 
     def _failure_header(self, test, example):
-        out = [self.DIVIDER]
+        red, reset = (
+            (_ANSIColors.RED, _ANSIColors.RESET) if _can_colorize() else ("", "")
+        )
+        out = [f"{red}{self.DIVIDER}{reset}"]
         if test.filename:
             if test.lineno is not None and example.lineno is not None:
                 lineno = test.lineno + example.lineno + 1
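The conditional tuple keeps the formatting branch-free: with colour off, red and reset are empty strings and the f-string reproduces the old plain divider byte for byte. A standalone sketch, using the standard ANSI escape values that _ANSIColors presumably wraps:

    import sys

    DIVIDER = "*" * 70                  # doctest's failure divider
    RED, RESET = "\x1b[31m", "\x1b[0m"  # standard ANSI escapes (assumed)

    use_color = sys.stdout.isatty()     # simplified stand-in for _can_colorize()
    red, reset = (RED, RESET) if use_color else ("", "")
    print(f"{red}{DIVIDER}{reset}")     # identical to print(DIVIDER) when off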
@@ -1592,6 +1601,21 @@ class DocTestRunner:
             else:
                 failed.append((name, (failures, tries, skips)))
 
+        if _can_colorize():
+            bold_green = _ANSIColors.BOLD_GREEN
+            bold_red = _ANSIColors.BOLD_RED
+            green = _ANSIColors.GREEN
+            red = _ANSIColors.RED
+            reset = _ANSIColors.RESET
+            yellow = _ANSIColors.YELLOW
+        else:
+            bold_green = ""
+            bold_red = ""
+            green = ""
+            red = ""
+            reset = ""
+            yellow = ""
+
         if verbose:
             if notests:
                 print(f"{_n_items(notests)} had no tests:")
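Same idea scaled up: every colour name resolves to the empty string when colour is unavailable, so none of the print calls below needs its own branch. An equivalent, more compact formulation, as a sketch only (the flat assignments above are arguably clearer in a long method):

    from traceback import _ANSIColors, _can_colorize

    _NAMES = ("bold_green", "bold_red", "green", "red", "reset", "yellow")

    def _palette() -> dict[str, str]:
        # Map each name to its ANSI escape, or to "" when colour is off.
        if _can_colorize():
            return {name: getattr(_ANSIColors, name.upper()) for name in _NAMES}
        return dict.fromkeys(_NAMES, "")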
@@ -1600,13 +1624,13 @@ class DocTestRunner:
                 print(f" {name}")
 
             if passed:
-                print(f"{_n_items(passed)} passed all tests:")
+                print(f"{green}{_n_items(passed)} passed all tests:{reset}")
                 for name, count in sorted(passed):
                     s = "" if count == 1 else "s"
-                    print(f" {count:3d} test{s} in {name}")
+                    print(f" {green}{count:3d} test{s} in {name}{reset}")
 
         if failed:
-            print(self.DIVIDER)
+            print(f"{red}{self.DIVIDER}{reset}")
             print(f"{_n_items(failed)} had failures:")
             for name, (failures, tries, skips) in sorted(failed):
                 print(f" {failures:3d} of {tries:3d} in {name}")
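For context, a usage sketch that drives this summary path end to end through the public doctest API (the function name and expected console text are illustrative):

    import doctest

    def square(x):
        """
        >>> square(3)
        9
        """
        return x * x

    runner = doctest.DocTestRunner(verbose=False)
    for test in doctest.DocTestFinder().find(square):
        runner.run(test)
    runner.summarize(verbose=True)
    # With this change, lines such as "1 item passed all tests:" are
    # printed in green on colour-capable terminals.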
@@ -1615,18 +1639,21 @@ class DocTestRunner:
             s = "" if total_tries == 1 else "s"
             print(f"{total_tries} test{s} in {_n_items(self._stats)}.")
 
-            and_f = f" and {total_failures} failed" if total_failures else ""
-            print(f"{total_tries - total_failures} passed{and_f}.")
+            and_f = (
+                f" and {red}{total_failures} failed{reset}"
+                if total_failures else ""
+            )
+            print(f"{green}{total_tries - total_failures} passed{reset}{and_f}.")
 
         if total_failures:
             s = "" if total_failures == 1 else "s"
-            msg = f"***Test Failed*** {total_failures} failure{s}"
+            msg = f"{bold_red}***Test Failed*** {total_failures} failure{s}{reset}"
             if total_skips:
                 s = "" if total_skips == 1 else "s"
-                msg = f"{msg} and {total_skips} skipped test{s}"
+                msg = f"{msg} and {yellow}{total_skips} skipped test{s}{reset}"
             print(f"{msg}.")
         elif verbose:
-            print("Test passed.")
+            print(f"{bold_green}Test passed.{reset}")
 
         return TestResults(total_failures, total_tries, skipped=total_skips)
 
@@ -1644,7 +1671,7 @@ class DocTestRunner:
             d[name] = (failures, tries, skips)
 
 
-def _n_items(items: list) -> str:
+def _n_items(items: list | dict) -> str:
     """
     Helper to pluralise the number of items in a list.
     """
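The annotation widens because summarize() passes self._stats, a dict, as well as lists; len() handles both. A hedged reconstruction of the helper's body, inferred from its docstring and the call sites visible above:

    def _n_items(items: list | dict) -> str:
        # len() works for both containers, which is all the helper needs.
        n = len(items)
        s = "" if n == 1 else "s"
        return f"{n} item{s}"

    assert _n_items([1]) == "1 item"
    assert _n_items({"a": 1, "b": 2}) == "2 items"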
@@ -1655,7 +1682,7 @@ def _n_items(items: list) -> str:
 
 class OutputChecker:
     """
-    A class used to check the whether the actual output from a doctest
+    A class used to check whether the actual output from a doctest
     example matches the expected output.  `OutputChecker` defines two
     methods: `check_output`, which compares a given pair of outputs,
     and returns true if they match; and `output_difference`, which
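A drive-by typo fix in OutputChecker's docstring ("check the whether"). For reference, a minimal use of the public API the docstring describes:

    import doctest

    checker = doctest.OutputChecker()
    # check_output(want, got, optionflags) -> bool
    print(checker.check_output("4\n", "4\n", 0))           # True
    print(checker.check_output("[0, ...]\n", "[0, 1, 2]\n",
                               doctest.ELLIPSIS))          # True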