feat(cli/tools): add a flag --hide-stacktraces for test (#24095)

This commit is contained in:
Hajime-san 2024-08-20 10:27:36 +09:00 committed by GitHub
parent 4f49f703c1
commit 19bcb40059
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
21 changed files with 300 additions and 58 deletions

View file

@ -403,6 +403,7 @@ pub struct TestFlags {
pub watch: Option<WatchFlagsWithPaths>, pub watch: Option<WatchFlagsWithPaths>,
pub reporter: TestReporterConfig, pub reporter: TestReporterConfig,
pub junit_path: Option<String>, pub junit_path: Option<String>,
pub hide_stacktraces: bool,
} }
#[derive(Clone, Debug, Eq, PartialEq)] #[derive(Clone, Debug, Eq, PartialEq)]
@ -2999,6 +3000,12 @@ Directory arguments are expanded to all contained files matching the glob
.value_parser(["pretty", "dot", "junit", "tap"]) .value_parser(["pretty", "dot", "junit", "tap"])
.help_heading(TEST_HEADING) .help_heading(TEST_HEADING)
) )
.arg(
Arg::new("hide-stacktraces")
.long("hide-stacktraces")
.help("Hide stack traces for errors in failure test results.")
.action(ArgAction::SetTrue)
)
.arg(env_file_arg()) .arg(env_file_arg())
) )
} }
@ -4920,6 +4927,8 @@ fn test_parse(flags: &mut Flags, matches: &mut ArgMatches) {
flags.log_level = Some(Level::Error); flags.log_level = Some(Level::Error);
} }
let hide_stacktraces = matches.get_flag("hide-stacktraces");
flags.subcommand = DenoSubcommand::Test(TestFlags { flags.subcommand = DenoSubcommand::Test(TestFlags {
no_run, no_run,
doc, doc,
@ -4935,6 +4944,7 @@ fn test_parse(flags: &mut Flags, matches: &mut ArgMatches) {
watch: watch_arg_parse_with_paths(matches), watch: watch_arg_parse_with_paths(matches),
reporter, reporter,
junit_path, junit_path,
hide_stacktraces,
}); });
} }
@ -9015,6 +9025,7 @@ mod tests {
watch: Default::default(), watch: Default::default(),
reporter: Default::default(), reporter: Default::default(),
junit_path: None, junit_path: None,
hide_stacktraces: false,
}), }),
unstable_config: UnstableConfig { unstable_config: UnstableConfig {
legacy_flag_enabled: true, legacy_flag_enabled: true,
@ -9102,6 +9113,7 @@ mod tests {
clean: false, clean: false,
watch: Default::default(), watch: Default::default(),
junit_path: None, junit_path: None,
hide_stacktraces: false,
}), }),
type_check_mode: TypeCheckMode::Local, type_check_mode: TypeCheckMode::Local,
permissions: PermissionFlags { permissions: PermissionFlags {
@ -9140,6 +9152,7 @@ mod tests {
watch: Default::default(), watch: Default::default(),
reporter: Default::default(), reporter: Default::default(),
junit_path: None, junit_path: None,
hide_stacktraces: false,
}), }),
type_check_mode: TypeCheckMode::Local, type_check_mode: TypeCheckMode::Local,
permissions: PermissionFlags { permissions: PermissionFlags {
@ -9182,6 +9195,7 @@ mod tests {
watch: Default::default(), watch: Default::default(),
reporter: Default::default(), reporter: Default::default(),
junit_path: None, junit_path: None,
hide_stacktraces: false,
}), }),
permissions: PermissionFlags { permissions: PermissionFlags {
no_prompt: true, no_prompt: true,
@ -9318,6 +9332,7 @@ mod tests {
watch: Default::default(), watch: Default::default(),
reporter: Default::default(), reporter: Default::default(),
junit_path: None, junit_path: None,
hide_stacktraces: false,
}), }),
permissions: PermissionFlags { permissions: PermissionFlags {
no_prompt: true, no_prompt: true,
@ -9353,6 +9368,7 @@ mod tests {
watch: Some(Default::default()), watch: Some(Default::default()),
reporter: Default::default(), reporter: Default::default(),
junit_path: None, junit_path: None,
hide_stacktraces: false,
}), }),
permissions: PermissionFlags { permissions: PermissionFlags {
no_prompt: true, no_prompt: true,
@ -9387,6 +9403,7 @@ mod tests {
watch: Some(Default::default()), watch: Some(Default::default()),
reporter: Default::default(), reporter: Default::default(),
junit_path: None, junit_path: None,
hide_stacktraces: false,
}), }),
permissions: PermissionFlags { permissions: PermissionFlags {
no_prompt: true, no_prompt: true,
@ -9428,6 +9445,7 @@ mod tests {
}), }),
reporter: Default::default(), reporter: Default::default(),
junit_path: None, junit_path: None,
hide_stacktraces: false,
}), }),
type_check_mode: TypeCheckMode::Local, type_check_mode: TypeCheckMode::Local,
permissions: PermissionFlags { permissions: PermissionFlags {
@ -9624,6 +9642,26 @@ mod tests {
); );
} }
#[test]
// Verifies that passing `--hide-stacktraces` on the CLI sets
// `TestFlags::hide_stacktraces` to true while leaving every other
// flag at its default value.
fn test_hide_stacktraces() {
let r = flags_from_vec(svec!["deno", "test", "--hide-stacktraces"]);
assert_eq!(
r.unwrap(),
Flags {
subcommand: DenoSubcommand::Test(TestFlags {
hide_stacktraces: true,
..TestFlags::default()
}),
// NOTE(review): `deno test` appears to default to local type-checking
// and no-prompt permissions, matching the other test-flag cases above.
type_check_mode: TypeCheckMode::Local,
permissions: PermissionFlags {
no_prompt: true,
..Default::default()
},
..Flags::default()
}
);
}
#[test] #[test]
fn bundle_with_cafile() { fn bundle_with_cafile() {
let r = flags_from_vec(svec![ let r = flags_from_vec(svec![

View file

@ -377,6 +377,7 @@ pub struct WorkspaceTestOptions {
pub trace_leaks: bool, pub trace_leaks: bool,
pub reporter: TestReporterConfig, pub reporter: TestReporterConfig,
pub junit_path: Option<String>, pub junit_path: Option<String>,
pub hide_stacktraces: bool,
} }
impl WorkspaceTestOptions { impl WorkspaceTestOptions {
@ -394,6 +395,7 @@ impl WorkspaceTestOptions {
trace_leaks: test_flags.trace_leaks, trace_leaks: test_flags.trace_leaks,
reporter: test_flags.reporter, reporter: test_flags.reporter,
junit_path: test_flags.junit_path.clone(), junit_path: test_flags.junit_path.clone(),
hide_stacktraces: test_flags.hide_stacktraces,
} }
} }
} }

View file

@ -15,6 +15,7 @@ use crate::lsp::logging::lsp_log;
use crate::tools::test; use crate::tools::test;
use crate::tools::test::create_test_event_channel; use crate::tools::test::create_test_event_channel;
use crate::tools::test::FailFastTracker; use crate::tools::test::FailFastTracker;
use crate::tools::test::TestFailureFormatOptions;
use deno_core::anyhow::anyhow; use deno_core::anyhow::anyhow;
use deno_core::error::AnyError; use deno_core::error::AnyError;
@ -655,7 +656,10 @@ impl LspTestReporter {
let desc = self.tests.get(&desc.id).unwrap(); let desc = self.tests.get(&desc.id).unwrap();
self.progress(lsp_custom::TestRunProgressMessage::Failed { self.progress(lsp_custom::TestRunProgressMessage::Failed {
test: desc.as_test_identifier(&self.tests), test: desc.as_test_identifier(&self.tests),
messages: as_test_messages(failure.to_string(), false), messages: as_test_messages(
failure.format(&TestFailureFormatOptions::default()),
false,
),
duration: Some(elapsed as u32), duration: Some(elapsed as u32),
}) })
} }
@ -675,7 +679,7 @@ impl LspTestReporter {
let err_string = format!( let err_string = format!(
"Uncaught error from {}: {}\nThis error was not caught from a test and caused the test runner to fail on the referenced module.\nIt most likely originated from a dangling promise, event/timeout handler or top-level code.", "Uncaught error from {}: {}\nThis error was not caught from a test and caused the test runner to fail on the referenced module.\nIt most likely originated from a dangling promise, event/timeout handler or top-level code.",
origin, origin,
test::fmt::format_test_error(js_error) test::fmt::format_test_error(js_error, &TestFailureFormatOptions::default())
); );
let messages = as_test_messages(err_string, false); let messages = as_test_messages(err_string, false);
for desc in self.tests.values().filter(|d| d.origin() == origin) { for desc in self.tests.values().filter(|d| d.origin() == origin) {
@ -751,7 +755,10 @@ impl LspTestReporter {
test::TestStepResult::Failed(failure) => { test::TestStepResult::Failed(failure) => {
self.progress(lsp_custom::TestRunProgressMessage::Failed { self.progress(lsp_custom::TestRunProgressMessage::Failed {
test: desc.as_test_identifier(&self.tests), test: desc.as_test_identifier(&self.tests),
messages: as_test_messages(failure.to_string(), false), messages: as_test_messages(
failure.format(&TestFailureFormatOptions::default()),
false,
),
duration: Some(elapsed as u32), duration: Some(elapsed as u32),
}) })
} }

View file

@ -2,6 +2,7 @@
use serde::Serialize; use serde::Serialize;
use crate::tools::test::TestFailureFormatOptions;
use crate::version; use crate::version;
use super::*; use super::*;
@ -243,7 +244,10 @@ impl BenchReporter for ConsoleReporter {
&desc.name, &desc.name,
&mitata::reporter::Error { &mitata::reporter::Error {
stack: None, stack: None,
message: format_test_error(js_error), message: format_test_error(
js_error,
&TestFailureFormatOptions::default()
),
}, },
options options
) )
@ -298,7 +302,7 @@ impl BenchReporter for ConsoleReporter {
println!( println!(
"{}: {}", "{}: {}",
colors::red_bold("error"), colors::red_bold("error"),
format_test_error(&error) format_test_error(&error, &TestFailureFormatOptions::default())
); );
println!("This error was not caught from a benchmark and caused the bench runner to fail on the referenced module."); println!("This error was not caught from a benchmark and caused the bench runner to fail on the referenced module.");
println!("It most likely originated from a dangling promise, event/timeout handler or top-level code."); println!("It most likely originated from a dangling promise, event/timeout handler or top-level code.");

View file

@ -11,6 +11,7 @@ use crate::tools::repl;
use crate::tools::test::create_single_test_event_channel; use crate::tools::test::create_single_test_event_channel;
use crate::tools::test::reporters::PrettyTestReporter; use crate::tools::test::reporters::PrettyTestReporter;
use crate::tools::test::TestEventWorkerSender; use crate::tools::test::TestEventWorkerSender;
use crate::tools::test::TestFailureFormatOptions;
use crate::CliFactory; use crate::CliFactory;
use deno_core::anyhow::bail; use deno_core::anyhow::bail;
use deno_core::anyhow::Context; use deno_core::anyhow::Context;
@ -142,8 +143,15 @@ pub async fn kernel(
})?; })?;
repl_session.set_test_reporter_factory(Box::new(move || { repl_session.set_test_reporter_factory(Box::new(move || {
Box::new( Box::new(
PrettyTestReporter::new(false, true, false, true, cwd_url.clone()) PrettyTestReporter::new(
.with_writer(Box::new(TestWriter(stdio_tx.clone()))), false,
true,
false,
true,
cwd_url.clone(),
TestFailureFormatOptions::default(),
)
.with_writer(Box::new(TestWriter(stdio_tx.clone()))),
) )
})); }));

View file

@ -16,6 +16,7 @@ use crate::tools::test::send_test_event;
use crate::tools::test::worker_has_tests; use crate::tools::test::worker_has_tests;
use crate::tools::test::TestEvent; use crate::tools::test::TestEvent;
use crate::tools::test::TestEventReceiver; use crate::tools::test::TestEventReceiver;
use crate::tools::test::TestFailureFormatOptions;
use deno_ast::diagnostics::Diagnostic; use deno_ast::diagnostics::Diagnostic;
use deno_ast::swc::ast as swc_ast; use deno_ast::swc::ast as swc_ast;
@ -276,6 +277,7 @@ impl ReplSession {
false, false,
true, true,
cwd_url.clone(), cwd_url.clone(),
TestFailureFormatOptions::default(),
)) ))
}), }),
main_module, main_module,

View file

@ -72,16 +72,24 @@ fn abbreviate_test_error(js_error: &JsError) -> JsError {
// This function prettifies `JsError` and applies some changes specifically for // This function prettifies `JsError` and applies some changes specifically for
// test runner purposes: // test runner purposes:
// //
// - hide stack traces if `options.hide_stacktraces` is set to `true`
//
// - filter out stack frames: // - filter out stack frames:
// - if stack trace consists of mixed user and internal code, the frames // - if stack trace consists of mixed user and internal code, the frames
// below the first user code frame are filtered out // below the first user code frame are filtered out
// - if stack trace consists only of internal code it is preserved as is // - if stack trace consists only of internal code it is preserved as is
pub fn format_test_error(js_error: &JsError) -> String { pub fn format_test_error(
js_error: &JsError,
options: &TestFailureFormatOptions,
) -> String {
let mut js_error = abbreviate_test_error(js_error); let mut js_error = abbreviate_test_error(js_error);
js_error.exception_message = js_error js_error.exception_message = js_error
.exception_message .exception_message
.trim_start_matches("Uncaught ") .trim_start_matches("Uncaught ")
.to_string(); .to_string();
if options.hide_stacktraces {
return js_error.exception_message;
}
format_js_error(&js_error) format_js_error(&js_error)
} }

View file

@ -288,6 +288,11 @@ impl From<&TestDescription> for TestFailureDescription {
} }
} }
/// Options that control how a test failure is rendered by the reporters.
#[derive(Debug, Default, Clone, PartialEq)]
pub struct TestFailureFormatOptions {
/// When `true`, stack traces are omitted and only the exception
/// message is emitted (see `format_test_error`, which returns early
/// with `exception_message` when this is set).
pub hide_stacktraces: bool,
}
#[allow(clippy::derive_partial_eq_without_eq)] #[allow(clippy::derive_partial_eq_without_eq)]
#[derive(Debug, Clone, PartialEq, Deserialize)] #[derive(Debug, Clone, PartialEq, Deserialize)]
#[serde(rename_all = "camelCase")] #[serde(rename_all = "camelCase")]
@ -302,52 +307,55 @@ pub enum TestFailure {
HasSanitizersAndOverlaps(IndexSet<String>), // Long names of overlapped tests HasSanitizersAndOverlaps(IndexSet<String>), // Long names of overlapped tests
} }
impl std::fmt::Display for TestFailure { impl TestFailure {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { pub fn format(
&self,
options: &TestFailureFormatOptions,
) -> Cow<'static, str> {
match self { match self {
TestFailure::JsError(js_error) => { TestFailure::JsError(js_error) => {
write!(f, "{}", format_test_error(js_error)) Cow::Owned(format_test_error(js_error, options))
}
TestFailure::FailedSteps(1) => Cow::Borrowed("1 test step failed."),
TestFailure::FailedSteps(n) => {
Cow::Owned(format!("{} test steps failed.", n))
} }
TestFailure::FailedSteps(1) => write!(f, "1 test step failed."),
TestFailure::FailedSteps(n) => write!(f, "{n} test steps failed."),
TestFailure::IncompleteSteps => { TestFailure::IncompleteSteps => {
write!(f, "Completed while steps were still running. Ensure all steps are awaited with `await t.step(...)`.") Cow::Borrowed("Completed while steps were still running. Ensure all steps are awaited with `await t.step(...)`.")
} }
TestFailure::Incomplete => { TestFailure::Incomplete => {
write!( Cow::Borrowed("Didn't complete before parent. Await step with `await t.step(...)`.")
f,
"Didn't complete before parent. Await step with `await t.step(...)`."
)
} }
TestFailure::Leaked(details, trailer_notes) => { TestFailure::Leaked(details, trailer_notes) => {
write!(f, "Leaks detected:")?; let mut f = String::new();
write!(f, "Leaks detected:").unwrap();
for detail in details { for detail in details {
write!(f, "\n - {}", detail)?; write!(f, "\n - {}", detail).unwrap();
} }
for trailer in trailer_notes { for trailer in trailer_notes {
write!(f, "\n{}", trailer)?; write!(f, "\n{}", trailer).unwrap();
} }
Ok(()) Cow::Owned(f)
} }
TestFailure::OverlapsWithSanitizers(long_names) => { TestFailure::OverlapsWithSanitizers(long_names) => {
write!(f, "Started test step while another test step with sanitizers was running:")?; let mut f = String::new();
write!(f, "Started test step while another test step with sanitizers was running:").unwrap();
for long_name in long_names { for long_name in long_names {
write!(f, "\n * {}", long_name)?; write!(f, "\n * {}", long_name).unwrap();
} }
Ok(()) Cow::Owned(f)
} }
TestFailure::HasSanitizersAndOverlaps(long_names) => { TestFailure::HasSanitizersAndOverlaps(long_names) => {
write!(f, "Started test step with sanitizers while another test step was running:")?; let mut f = String::new();
write!(f, "Started test step with sanitizers while another test step was running:").unwrap();
for long_name in long_names { for long_name in long_names {
write!(f, "\n * {}", long_name)?; write!(f, "\n * {}", long_name).unwrap();
} }
Ok(()) Cow::Owned(f)
} }
} }
} }
}
impl TestFailure {
pub fn overview(&self) -> String { pub fn overview(&self) -> String {
match self { match self {
TestFailure::JsError(js_error) => js_error.exception_message.clone(), TestFailure::JsError(js_error) => js_error.exception_message.clone(),
@ -369,10 +377,6 @@ impl TestFailure {
} }
} }
pub fn detail(&self) -> String {
self.to_string()
}
fn format_label(&self) -> String { fn format_label(&self) -> String {
match self { match self {
TestFailure::Incomplete => colors::gray("INCOMPLETE").to_string(), TestFailure::Incomplete => colors::gray("INCOMPLETE").to_string(),
@ -512,6 +516,7 @@ struct TestSpecifiersOptions {
specifier: TestSpecifierOptions, specifier: TestSpecifierOptions,
reporter: TestReporterConfig, reporter: TestReporterConfig,
junit_path: Option<String>, junit_path: Option<String>,
hide_stacktraces: bool,
} }
#[derive(Debug, Default, Clone)] #[derive(Debug, Default, Clone)]
@ -545,23 +550,31 @@ impl TestSummary {
fn get_test_reporter(options: &TestSpecifiersOptions) -> Box<dyn TestReporter> { fn get_test_reporter(options: &TestSpecifiersOptions) -> Box<dyn TestReporter> {
let parallel = options.concurrent_jobs.get() > 1; let parallel = options.concurrent_jobs.get() > 1;
let failure_format_options = TestFailureFormatOptions {
hide_stacktraces: options.hide_stacktraces,
};
let reporter: Box<dyn TestReporter> = match &options.reporter { let reporter: Box<dyn TestReporter> = match &options.reporter {
TestReporterConfig::Dot => { TestReporterConfig::Dot => Box::new(DotTestReporter::new(
Box::new(DotTestReporter::new(options.cwd.clone())) options.cwd.clone(),
} failure_format_options,
)),
TestReporterConfig::Pretty => Box::new(PrettyTestReporter::new( TestReporterConfig::Pretty => Box::new(PrettyTestReporter::new(
parallel, parallel,
options.log_level != Some(Level::Error), options.log_level != Some(Level::Error),
options.filter, options.filter,
false, false,
options.cwd.clone(), options.cwd.clone(),
failure_format_options,
)),
TestReporterConfig::Junit => Box::new(JunitTestReporter::new(
options.cwd.clone(),
"-".to_string(),
failure_format_options,
)), )),
TestReporterConfig::Junit => {
Box::new(JunitTestReporter::new(options.cwd.clone(), "-".to_string()))
}
TestReporterConfig::Tap => Box::new(TapTestReporter::new( TestReporterConfig::Tap => Box::new(TapTestReporter::new(
options.cwd.clone(), options.cwd.clone(),
options.concurrent_jobs > NonZeroUsize::new(1).unwrap(), options.concurrent_jobs > NonZeroUsize::new(1).unwrap(),
failure_format_options,
)), )),
}; };
@ -569,6 +582,9 @@ fn get_test_reporter(options: &TestSpecifiersOptions) -> Box<dyn TestReporter> {
let junit = Box::new(JunitTestReporter::new( let junit = Box::new(JunitTestReporter::new(
options.cwd.clone(), options.cwd.clone(),
junit_path.to_string(), junit_path.to_string(),
TestFailureFormatOptions {
hide_stacktraces: options.hide_stacktraces,
},
)); ));
return Box::new(CompoundTestReporter::new(vec![reporter, junit])); return Box::new(CompoundTestReporter::new(vec![reporter, junit]));
} }
@ -1807,6 +1823,7 @@ pub async fn run_tests(
filter: workspace_test_options.filter.is_some(), filter: workspace_test_options.filter.is_some(),
reporter: workspace_test_options.reporter, reporter: workspace_test_options.reporter,
junit_path: workspace_test_options.junit_path, junit_path: workspace_test_options.junit_path,
hide_stacktraces: workspace_test_options.hide_stacktraces,
specifier: TestSpecifierOptions { specifier: TestSpecifierOptions {
filter: TestFilter::from_flag(&workspace_test_options.filter), filter: TestFilter::from_flag(&workspace_test_options.filter),
shuffle: workspace_test_options.shuffle, shuffle: workspace_test_options.shuffle,
@ -1973,6 +1990,7 @@ pub async fn run_tests_with_watch(
filter: workspace_test_options.filter.is_some(), filter: workspace_test_options.filter.is_some(),
reporter: workspace_test_options.reporter, reporter: workspace_test_options.reporter,
junit_path: workspace_test_options.junit_path, junit_path: workspace_test_options.junit_path,
hide_stacktraces: workspace_test_options.hide_stacktraces,
specifier: TestSpecifierOptions { specifier: TestSpecifierOptions {
filter: TestFilter::from_flag(&workspace_test_options.filter), filter: TestFilter::from_flag(&workspace_test_options.filter),
shuffle: workspace_test_options.shuffle, shuffle: workspace_test_options.shuffle,

View file

@ -105,6 +105,7 @@ pub(super) fn report_summary(
cwd: &Url, cwd: &Url,
summary: &TestSummary, summary: &TestSummary,
elapsed: &Duration, elapsed: &Duration,
options: &TestFailureFormatOptions,
) { ) {
if !summary.failures.is_empty() || !summary.uncaught_errors.is_empty() { if !summary.failures.is_empty() || !summary.uncaught_errors.is_empty() {
#[allow(clippy::type_complexity)] // Type alias doesn't look better here #[allow(clippy::type_complexity)] // Type alias doesn't look better here
@ -136,8 +137,13 @@ pub(super) fn report_summary(
if !failure.hide_in_summary() { if !failure.hide_in_summary() {
let failure_title = format_test_for_summary(cwd, description); let failure_title = format_test_for_summary(cwd, description);
writeln!(writer, "{}", &failure_title).unwrap(); writeln!(writer, "{}", &failure_title).unwrap();
writeln!(writer, "{}: {}", colors::red_bold("error"), failure) writeln!(
.unwrap(); writer,
"{}: {}",
colors::red_bold("error"),
failure.format(options)
)
.unwrap();
writeln!(writer).unwrap(); writeln!(writer).unwrap();
failure_titles.push(failure_title); failure_titles.push(failure_title);
} }
@ -152,7 +158,7 @@ pub(super) fn report_summary(
writer, writer,
"{}: {}", "{}: {}",
colors::red_bold("error"), colors::red_bold("error"),
format_test_error(js_error) format_test_error(js_error, options)
) )
.unwrap(); .unwrap();
writeln!(writer, "This error was not caught from a test and caused the test runner to fail on the referenced module.").unwrap(); writeln!(writer, "This error was not caught from a test and caused the test runner to fail on the referenced module.").unwrap();

View file

@ -9,11 +9,15 @@ pub struct DotTestReporter {
width: usize, width: usize,
cwd: Url, cwd: Url,
summary: TestSummary, summary: TestSummary,
failure_format_options: TestFailureFormatOptions,
} }
#[allow(clippy::print_stdout)] #[allow(clippy::print_stdout)]
impl DotTestReporter { impl DotTestReporter {
pub fn new(cwd: Url) -> DotTestReporter { pub fn new(
cwd: Url,
failure_format_options: TestFailureFormatOptions,
) -> DotTestReporter {
let console_width = if let Some(size) = crate::util::console::console_size() let console_width = if let Some(size) = crate::util::console::console_size()
{ {
size.cols as usize size.cols as usize
@ -26,6 +30,7 @@ impl DotTestReporter {
width: console_width, width: console_width,
cwd, cwd,
summary: TestSummary::new(), summary: TestSummary::new(),
failure_format_options,
} }
} }
@ -190,6 +195,7 @@ impl TestReporter for DotTestReporter {
&self.cwd, &self.cwd,
&self.summary, &self.summary,
elapsed, elapsed,
&self.failure_format_options,
); );
println!(); println!();
} }

View file

@ -15,19 +15,28 @@ pub struct JunitTestReporter {
// from child to parent to build the full test name that reflects the test // from child to parent to build the full test name that reflects the test
// hierarchy. // hierarchy.
test_name_tree: TestNameTree, test_name_tree: TestNameTree,
failure_format_options: TestFailureFormatOptions,
} }
impl JunitTestReporter { impl JunitTestReporter {
pub fn new(cwd: Url, output_path: String) -> Self { pub fn new(
cwd: Url,
output_path: String,
failure_format_options: TestFailureFormatOptions,
) -> Self {
Self { Self {
cwd, cwd,
output_path, output_path,
cases: IndexMap::new(), cases: IndexMap::new(),
test_name_tree: TestNameTree::new(), test_name_tree: TestNameTree::new(),
failure_format_options,
} }
} }
fn convert_status(status: &TestResult) -> quick_junit::TestCaseStatus { fn convert_status(
status: &TestResult,
failure_format_options: &TestFailureFormatOptions,
) -> quick_junit::TestCaseStatus {
match status { match status {
TestResult::Ok => quick_junit::TestCaseStatus::success(), TestResult::Ok => quick_junit::TestCaseStatus::success(),
TestResult::Ignored => quick_junit::TestCaseStatus::skipped(), TestResult::Ignored => quick_junit::TestCaseStatus::skipped(),
@ -35,7 +44,7 @@ impl JunitTestReporter {
kind: quick_junit::NonSuccessKind::Failure, kind: quick_junit::NonSuccessKind::Failure,
message: Some(failure.overview()), message: Some(failure.overview()),
ty: None, ty: None,
description: Some(failure.detail()), description: Some(failure.format(failure_format_options).into_owned()),
reruns: vec![], reruns: vec![],
}, },
TestResult::Cancelled => quick_junit::TestCaseStatus::NonSuccess { TestResult::Cancelled => quick_junit::TestCaseStatus::NonSuccess {
@ -50,6 +59,7 @@ impl JunitTestReporter {
fn convert_step_status( fn convert_step_status(
status: &TestStepResult, status: &TestStepResult,
failure_format_options: &TestFailureFormatOptions,
) -> quick_junit::TestCaseStatus { ) -> quick_junit::TestCaseStatus {
match status { match status {
TestStepResult::Ok => quick_junit::TestCaseStatus::success(), TestStepResult::Ok => quick_junit::TestCaseStatus::success(),
@ -59,7 +69,9 @@ impl JunitTestReporter {
kind: quick_junit::NonSuccessKind::Failure, kind: quick_junit::NonSuccessKind::Failure,
message: Some(failure.overview()), message: Some(failure.overview()),
ty: None, ty: None,
description: Some(failure.detail()), description: Some(
failure.format(failure_format_options).into_owned(),
),
reruns: vec![], reruns: vec![],
} }
} }
@ -111,7 +123,7 @@ impl TestReporter for JunitTestReporter {
elapsed: u64, elapsed: u64,
) { ) {
if let Some(case) = self.cases.get_mut(&description.id) { if let Some(case) = self.cases.get_mut(&description.id) {
case.status = Self::convert_status(result); case.status = Self::convert_status(result, &self.failure_format_options);
case.set_time(Duration::from_millis(elapsed)); case.set_time(Duration::from_millis(elapsed));
} }
} }
@ -153,7 +165,8 @@ impl TestReporter for JunitTestReporter {
_test_steps: &IndexMap<usize, TestStepDescription>, _test_steps: &IndexMap<usize, TestStepDescription>,
) { ) {
if let Some(case) = self.cases.get_mut(&description.id) { if let Some(case) = self.cases.get_mut(&description.id) {
case.status = Self::convert_step_status(result); case.status =
Self::convert_step_status(result, &self.failure_format_options);
case.set_time(Duration::from_millis(elapsed)); case.set_time(Duration::from_millis(elapsed));
} }
} }

View file

@ -20,6 +20,7 @@ pub struct PrettyTestReporter {
HashMap<usize, IndexMap<usize, (TestStepDescription, TestStepResult, u64)>>, HashMap<usize, IndexMap<usize, (TestStepDescription, TestStepResult, u64)>>,
summary: TestSummary, summary: TestSummary,
writer: Box<dyn std::io::Write>, writer: Box<dyn std::io::Write>,
failure_format_options: TestFailureFormatOptions,
} }
impl PrettyTestReporter { impl PrettyTestReporter {
@ -29,6 +30,7 @@ impl PrettyTestReporter {
filter: bool, filter: bool,
repl: bool, repl: bool,
cwd: Url, cwd: Url,
failure_format_options: TestFailureFormatOptions,
) -> PrettyTestReporter { ) -> PrettyTestReporter {
PrettyTestReporter { PrettyTestReporter {
parallel, parallel,
@ -45,6 +47,7 @@ impl PrettyTestReporter {
child_results_buffer: Default::default(), child_results_buffer: Default::default(),
summary: TestSummary::new(), summary: TestSummary::new(),
writer: Box::new(std::io::stdout()), writer: Box::new(std::io::stdout()),
failure_format_options,
} }
} }
@ -395,7 +398,13 @@ impl TestReporter for PrettyTestReporter {
_test_steps: &IndexMap<usize, TestStepDescription>, _test_steps: &IndexMap<usize, TestStepDescription>,
) { ) {
self.write_output_end(); self.write_output_end();
common::report_summary(&mut self.writer, &self.cwd, &self.summary, elapsed); common::report_summary(
&mut self.writer,
&self.cwd,
&self.summary,
elapsed,
&self.failure_format_options,
);
if !self.repl { if !self.repl {
writeln!(&mut self.writer).unwrap(); writeln!(&mut self.writer).unwrap();
} }

View file

@ -20,11 +20,16 @@ pub struct TapTestReporter {
n: usize, n: usize,
step_n: usize, step_n: usize,
step_results: HashMap<usize, Vec<(TestStepDescription, TestStepResult)>>, step_results: HashMap<usize, Vec<(TestStepDescription, TestStepResult)>>,
failure_format_options: TestFailureFormatOptions,
} }
#[allow(clippy::print_stdout)] #[allow(clippy::print_stdout)]
impl TapTestReporter { impl TapTestReporter {
pub fn new(cwd: Url, is_concurrent: bool) -> TapTestReporter { pub fn new(
cwd: Url,
is_concurrent: bool,
failure_format_options: TestFailureFormatOptions,
) -> TapTestReporter {
TapTestReporter { TapTestReporter {
cwd, cwd,
is_concurrent, is_concurrent,
@ -33,6 +38,7 @@ impl TapTestReporter {
n: 0, n: 0,
step_n: 0, step_n: 0,
step_results: HashMap::new(), step_results: HashMap::new(),
failure_format_options,
} }
} }
@ -45,6 +51,7 @@ impl TapTestReporter {
} }
fn print_diagnostic( fn print_diagnostic(
&self,
indent: usize, indent: usize,
failure: &TestFailure, failure: &TestFailure,
location: DiagnosticLocation, location: DiagnosticLocation,
@ -56,7 +63,7 @@ impl TapTestReporter {
// YAML is a superset of JSON, so we can avoid a YAML dependency here. // YAML is a superset of JSON, so we can avoid a YAML dependency here.
// This makes the output less readable though. // This makes the output less readable though.
let diagnostic = serde_json::to_string(&json!({ let diagnostic = serde_json::to_string(&json!({
"message": failure.to_string(), "message": failure.format(&self.failure_format_options),
"severity": "fail".to_string(), "severity": "fail".to_string(),
"at": location, "at": location,
})) }))
@ -102,7 +109,7 @@ impl TapTestReporter {
Self::print_line(4, status, self.step_n, &desc.name, directive); Self::print_line(4, status, self.step_n, &desc.name, directive);
if let TestStepResult::Failed(failure) = result { if let TestStepResult::Failed(failure) = result {
Self::print_diagnostic( self.print_diagnostic(
4, 4,
failure, failure,
DiagnosticLocation { DiagnosticLocation {
@ -171,7 +178,7 @@ impl TestReporter for TapTestReporter {
Self::print_line(0, status, self.n, &description.name, directive); Self::print_line(0, status, self.n, &description.name, directive);
if let TestResult::Failed(failure) = result { if let TestResult::Failed(failure) = result {
Self::print_diagnostic( self.print_diagnostic(
0, 0,
failure, failure,
DiagnosticLocation { DiagnosticLocation {

View file

@ -2,7 +2,7 @@
"exports": { "exports": {
".": "./mod.ts", ".": "./mod.ts",
"./assert": "./assert.ts", "./assert": "./assert.ts",
"./assert-equals": "./assert-equals.ts", "./assert-equals": "./assert_equals.ts",
"./fail": "./fail.ts" "./fail": "./fail.ts"
} }
} }

View file

@ -7,10 +7,10 @@
}, },
"jsr": { "jsr": {
"@std/assert@1.0.0": { "@std/assert@1.0.0": {
"integrity": "7ae268c58de9693b4997fd93d9b303a47df336664e2008378ccb93c3458d092a" "integrity": "[WILDLINE]"
}, },
"@std/http@1.0.0": { "@std/http@1.0.0": {
"integrity": "d75bd303c21123a9b58f7249e38b4c0aa3a09f7d76b13f9d7e7842d89052091a" "integrity": "[WILDLINE]"
} }
} }
}, },

View file

@ -0,0 +1,24 @@
{
"tests": {
"reporter_dot": {
"args": "test --hide-stacktraces --reporter=dot main.js",
"output": "dot.out",
"exitCode": 1
},
"reporter_junit": {
"args": "test --hide-stacktraces --reporter=junit main.js",
"output": "junit.out",
"exitCode": 1
},
"reporter_pretty": {
"args": "test --hide-stacktraces main.js",
"output": "pretty.out",
"exitCode": 1
},
"reporter_tap": {
"args": "test --hide-stacktraces --reporter=tap main.js",
"output": "tap.out",
"exitCode": 1
}
}
}

View file

@ -0,0 +1,23 @@
!
ERRORS
assert a b => ./main.js:1:6
error: AssertionError: Values are not equal.
[Diff] Actual / Expected
- foo
+ bar
FAILURES
assert a b => ./main.js:1:6
FAILED | 0 passed | 1 failed ([WILDCARD])
error: Test failed

View file

@ -0,0 +1,27 @@
<?xml version="1.0" encoding="UTF-8"?>
<testsuites name="deno test" tests="1" failures="1" errors="0" time="[WILDCARD]">
<testsuite name="./main.js" tests="1" disabled="0" errors="0" failures="1">
<testcase name="assert a b" classname="./main.js" time="[WILDCARD]" line="1" col="6">
<failure message="Uncaught AssertionError: Values are not equal.
[Diff] Actual / Expected
- foo
+ bar
">AssertionError: Values are not equal.
[Diff] Actual / Expected
- foo
+ bar
</failure>
</testcase>
</testsuite>
</testsuites>
error: Test failed

View file

@ -0,0 +1,8 @@
// Test fixture: always-failing test used to exercise `--hide-stacktraces`
// across the pretty/dot/junit/tap reporters.
Deno.test("assert a b", () => {
// Local stand-in for std's AssertionError so the fixture has no imports;
// only the `name` field matters for the reporter output.
class AssertionError extends Error {
name = "AssertionError";
}
throw new AssertionError(
"Values are not equal.\n\n\n    [Diff] Actual / Expected\n\n\n-   foo\n+   bar\n\n",
);
});

View file

@ -0,0 +1,24 @@
running 1 test from ./main.js
assert a b ... FAILED ([WILDCARD])
ERRORS
assert a b => ./main.js:1:6
error: AssertionError: Values are not equal.
[Diff] Actual / Expected
- foo
+ bar
FAILURES
assert a b => ./main.js:1:6
FAILED | 0 passed | 1 failed ([WILDCARD])
error: Test failed

View file

@ -0,0 +1,8 @@
TAP version 14
# ./main.js
not ok 1 - assert a b
---
{"message":"AssertionError: Values are not equal.\n\n\n [Diff] Actual / Expected\n\n\n- foo\n+ bar\n\n","severity":"fail","at":{"file":"./main.js","line":1}}
...
1..1
error: Test failed