mirror of
https://github.com/FuelLabs/sway.git
synced 2025-12-23 10:11:56 +00:00
Execute E2E tests in parallel (#7456)
Some checks failed
CI / cargo-fmt-check (push) Has been cancelled
Codspeed Benchmarks / benchmarks (push) Has been cancelled
CI / check-dependency-version-formats (push) Has been cancelled
CI / check-forc-manifest-version (push) Has been cancelled
CI / get-fuel-core-version (push) Has been cancelled
CI / build-sway-lib-std (push) Has been cancelled
CI / build-sway-examples (push) Has been cancelled
CI / build-reference-examples (push) Has been cancelled
CI / forc-fmt-check-sway-lib-std (push) Has been cancelled
CI / forc-fmt-check-sway-examples (push) Has been cancelled
CI / forc-fmt-check-panic (push) Has been cancelled
CI / check-sdk-harness-test-suite-compatibility (push) Has been cancelled
CI / build-mdbook (push) Has been cancelled
CI / build-forc-doc-sway-lib-std (push) Has been cancelled
CI / build-forc-test-project (push) Has been cancelled
CI / cargo-build-workspace (push) Has been cancelled
CI / cargo-clippy (push) Has been cancelled
CI / cargo-toml-fmt-check (push) Has been cancelled
CI / cargo-test-lib-std (push) Has been cancelled
CI / forc-run-benchmarks (push) Has been cancelled
CI / forc-unit-tests (push) Has been cancelled
CI / forc-pkg-fuels-deps-check (push) Has been cancelled
CI / cargo-unused-deps-check (push) Has been cancelled
CI / pre-publish-check (push) Has been cancelled
github pages / deploy (push) Has been cancelled
CI / verifications-complete (push) Has been cancelled
CI / cargo-run-e2e-test (push) Has been cancelled
CI / cargo-run-e2e-test-release (push) Has been cancelled
CI / Build and test various forc tools (push) Has been cancelled
CI / notify-slack-on-failure (push) Has been cancelled
CI / publish (push) Has been cancelled
CI / publish-sway-lib-std (push) Has been cancelled
CI / Build and upload forc binaries to release (push) Has been cancelled
Some checks failed
CI / cargo-fmt-check (push) Has been cancelled
Codspeed Benchmarks / benchmarks (push) Has been cancelled
CI / check-dependency-version-formats (push) Has been cancelled
CI / check-forc-manifest-version (push) Has been cancelled
CI / get-fuel-core-version (push) Has been cancelled
CI / build-sway-lib-std (push) Has been cancelled
CI / build-sway-examples (push) Has been cancelled
CI / build-reference-examples (push) Has been cancelled
CI / forc-fmt-check-sway-lib-std (push) Has been cancelled
CI / forc-fmt-check-sway-examples (push) Has been cancelled
CI / forc-fmt-check-panic (push) Has been cancelled
CI / check-sdk-harness-test-suite-compatibility (push) Has been cancelled
CI / build-mdbook (push) Has been cancelled
CI / build-forc-doc-sway-lib-std (push) Has been cancelled
CI / build-forc-test-project (push) Has been cancelled
CI / cargo-build-workspace (push) Has been cancelled
CI / cargo-clippy (push) Has been cancelled
CI / cargo-toml-fmt-check (push) Has been cancelled
CI / cargo-test-lib-std (push) Has been cancelled
CI / forc-run-benchmarks (push) Has been cancelled
CI / forc-unit-tests (push) Has been cancelled
CI / forc-pkg-fuels-deps-check (push) Has been cancelled
CI / cargo-unused-deps-check (push) Has been cancelled
CI / pre-publish-check (push) Has been cancelled
github pages / deploy (push) Has been cancelled
CI / verifications-complete (push) Has been cancelled
CI / cargo-run-e2e-test (push) Has been cancelled
CI / cargo-run-e2e-test-release (push) Has been cancelled
CI / Build and test various forc tools (push) Has been cancelled
CI / notify-slack-on-failure (push) Has been cancelled
CI / publish (push) Has been cancelled
CI / publish-sway-lib-std (push) Has been cancelled
CI / Build and upload forc binaries to release (push) Has been cancelled
## Description This PR implements parallel execution of E2E tests by using an approach similar to the one introduced by @JoshuaBatty for running LSP garbage collection tests in parallel. The approach is based on: - `test` binary getting an additional `--exact` option used by the parallel runner to run exactly one test passed as an argument to `--exact`. The argument is the full path to the test's `toml` file. - parallel runner that uses `rayon` and `std::process::Command` to spawn `test --exact <test toml path>` processes in parallel. The original command line arguments passed to `test` get forwarded to `test --exact` only if they are applicable while running tests in parallel. On my laptop with 20 cores, **the execution time of the complete E2E test suite reduces from reproducible 10:48 minutes to reproducible 1:47 minutes**. Parallel execution is set to be the default one. Sequential execution can still be used via the `--sequential` or `-s` flag: `cargo run -- -s` or `test -s`. Similar to parallel garbage collection tests, the parallel execution of E2E tests shows `stderr` output of the executed tests, but not the `stdout`. Being non-deterministic, the position of displayed errors will not come immediately after or before the failing test display, but will always be easy to relate to a concrete failing test. In practice, this will be perfectly enough for troubleshooting failing tests. If all the output is needed during troubleshooting, failing tests can always be executed sequentially, by using the `--sequential` or `-s` flag. Additionally the PR: - adds a `no_output` flag to `BuildOpts` to instruct `forc-pkg` not to output build artifacts like bytecode, ABI JSON, or storage slots JSON. This improves test execution speed in general because it skips writing to disk artifacts that are anyhow not used in tests. 
Also, it simplifies parallelization, because in the case of tests with several `test.<feature>.toml` files, we don't need to worry about race conditions when writing output files. If needed for troubleshooting, output can still be optionally generated in tests by passing the newly added `--write-output` CLI flag to the `test` binary. This flag is ignored when running tests in parallel. Support for the `no_output` option is added neither to the `forc` CLI, nor to the CLI of any other tools like, e.g., `forc deploy`. If it proves useful to those tools, we can always easily add it to their CLIs. - fixes #7449 by annotating the `test` binary's `main` function with `#[tokio::main(flavor = "current_thread")]`. - renames the `exclude_std` CLI option to `no_std_only` to follow the `_only` pattern in naming filters in `FilterConfig` that filter out only the tests with a certain property. Implementing parallel execution for snapshot and IR tests (if required) will be done in follow-up PRs. ## Checklist - [x] I have linked to any relevant issues. - [x] I have commented my code, particularly in hard-to-understand areas. - [ ] I have updated the documentation where relevant (API docs, the reference, and the Sway book). - [ ] If my change requires substantial documentation changes, I have [requested support from the DevRel team](https://github.com/FuelLabs/devrel-requests/issues/new/choose) - [ ] I have added tests that prove my fix is effective or that my feature works. - [ ] I have added (or requested a maintainer to add) the necessary `Breaking*` or `New Feature` labels where relevant. - [x] I have done my best to ensure that my PR adheres to [the Fuel Labs Code Review Standards](https://github.com/FuelLabs/rfcs/blob/master/text/code-standards/external-contributors.md). - [x] I have requested a review from the relevant team or maintainers.
This commit is contained in:
parent
a5654fc1af
commit
97efa221f0
17 changed files with 527 additions and 188 deletions
4
.gitignore
vendored
4
.gitignore
vendored
|
|
@ -1,9 +1,9 @@
|
|||
# Generated by Cargo
|
||||
# will have compiled files and executables
|
||||
**/*/target/
|
||||
**/*/json_abi_output.json
|
||||
**/*/json_abi_output*.json
|
||||
**/*/json_abi_output_flat.json
|
||||
**/*/json_storage_slots_output.json
|
||||
**/*/json_storage_slots_output*.json
|
||||
target
|
||||
|
||||
# These are backup files generated by rustfmt
|
||||
|
|
|
|||
1
Cargo.lock
generated
1
Cargo.lock
generated
|
|
@ -12163,6 +12163,7 @@ dependencies = [
|
|||
"normalize-path",
|
||||
"prettydiff",
|
||||
"rand 0.8.5",
|
||||
"rayon",
|
||||
"regex",
|
||||
"revm",
|
||||
"rhai",
|
||||
|
|
|
|||
|
|
@ -323,6 +323,8 @@ pub struct BuildOpts {
|
|||
pub experimental: Vec<sway_features::Feature>,
|
||||
/// Set of disabled experimental flags
|
||||
pub no_experimental: Vec<sway_features::Feature>,
|
||||
/// Do not output any build artifacts, e.g., bytecode, ABI JSON, etc.
|
||||
pub no_output: bool,
|
||||
}
|
||||
|
||||
/// The set of options to filter type of projects to build in a workspace.
|
||||
|
|
@ -2210,6 +2212,7 @@ pub fn build_with_options(
|
|||
member_filter,
|
||||
experimental,
|
||||
no_experimental,
|
||||
no_output,
|
||||
..
|
||||
} = &build_options;
|
||||
|
||||
|
|
@ -2301,7 +2304,10 @@ pub fn build_with_options(
|
|||
built_package.write_hexcode(&hexfile_path)?;
|
||||
}
|
||||
|
||||
built_package.write_output(minify, &pkg_manifest.project.name, &output_dir)?;
|
||||
if !no_output {
|
||||
built_package.write_output(minify, &pkg_manifest.project.name, &output_dir)?;
|
||||
}
|
||||
|
||||
built_workspace.push(Arc::new(built_package));
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -867,6 +867,7 @@ fn build_opts_from_cmd(cmd: &cmd::Deploy, member_filter: pkg::MemberFilter) -> p
|
|||
member_filter,
|
||||
experimental: cmd.experimental.experimental.clone(),
|
||||
no_experimental: cmd.experimental.no_experimental.clone(),
|
||||
no_output: false,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -339,5 +339,6 @@ fn build_opts_from_cmd(cmd: &cmd::Run) -> pkg::BuildOpts {
|
|||
member_filter: pkg::MemberFilter::only_scripts(),
|
||||
experimental: cmd.experimental.experimental.clone(),
|
||||
no_experimental: cmd.experimental.no_experimental.clone(),
|
||||
no_output: false,
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -164,6 +164,8 @@ pub struct TestOpts {
|
|||
pub experimental: Vec<sway_features::Feature>,
|
||||
/// Set of disabled experimental flags
|
||||
pub no_experimental: Vec<sway_features::Feature>,
|
||||
/// Do not output any build artifacts, e.g., bytecode, ABI JSON, etc.
|
||||
pub no_output: bool,
|
||||
}
|
||||
|
||||
/// The set of options provided for controlling logs printed for each test.
|
||||
|
|
@ -468,6 +470,7 @@ impl From<TestOpts> for pkg::BuildOpts {
|
|||
member_filter: Default::default(),
|
||||
experimental: val.experimental,
|
||||
no_experimental: val.no_experimental,
|
||||
no_output: val.no_output,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -494,6 +497,7 @@ impl TestOpts {
|
|||
member_filter: Default::default(),
|
||||
experimental: self.experimental,
|
||||
no_experimental: self.no_experimental,
|
||||
no_output: self.no_output,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -370,6 +370,7 @@ fn opts_from_cmd(cmd: Command) -> forc_test::TestOpts {
|
|||
build_target: cmd.build.build_target,
|
||||
experimental: cmd.experimental.experimental,
|
||||
no_experimental: cmd.experimental.no_experimental,
|
||||
no_output: false,
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -50,5 +50,6 @@ fn opts_from_cmd(cmd: BuildCommand) -> pkg::BuildOpts {
|
|||
member_filter: MemberFilter::default(),
|
||||
experimental: cmd.experimental.experimental,
|
||||
no_experimental: cmd.experimental.no_experimental,
|
||||
no_output: false,
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -82,5 +82,6 @@ fn build_opts_from_cmd(cmd: &ContractIdCommand) -> pkg::BuildOpts {
|
|||
member_filter: pkg::MemberFilter::only_contracts(),
|
||||
experimental: cmd.experimental.experimental.clone(),
|
||||
no_experimental: cmd.experimental.no_experimental.clone(),
|
||||
no_output: false,
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -51,5 +51,6 @@ fn build_opts_from_cmd(cmd: PredicateRootCommand) -> pkg::BuildOpts {
|
|||
member_filter: pkg::MemberFilter::only_predicates(),
|
||||
experimental: cmd.experimental.experimental,
|
||||
no_experimental: cmd.experimental.no_experimental,
|
||||
no_output: false,
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -82,6 +82,10 @@ impl PrintAsm {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn none() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
pub fn abstract_virtual() -> Self {
|
||||
Self {
|
||||
virtual_abstract: true,
|
||||
|
|
@ -146,6 +150,10 @@ impl PrintIr {
|
|||
}
|
||||
}
|
||||
|
||||
pub fn none() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
pub fn r#final() -> Self {
|
||||
Self {
|
||||
r#final: true,
|
||||
|
|
|
|||
|
|
@ -186,6 +186,30 @@ pub struct CliFields {
|
|||
pub no_experimental: Vec<Feature>,
|
||||
}
|
||||
|
||||
impl CliFields {
|
||||
pub fn experimental_as_cli_string(&self) -> Option<String> {
|
||||
Self::features_as_cli_string(&self.experimental)
|
||||
}
|
||||
|
||||
pub fn no_experimental_as_cli_string(&self) -> Option<String> {
|
||||
Self::features_as_cli_string(&self.no_experimental)
|
||||
}
|
||||
|
||||
fn features_as_cli_string(features: &[Feature]) -> Option<String> {
|
||||
if features.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(
|
||||
features
|
||||
.iter()
|
||||
.map(|f| f.name())
|
||||
.collect::<Vec<_>>()
|
||||
.join(","),
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
|
||||
pub enum Error {
|
||||
ParseError(String),
|
||||
|
|
|
|||
|
|
@ -28,6 +28,7 @@ libtest-mimic.workspace = true
|
|||
normalize-path.workspace = true
|
||||
prettydiff.workspace = true
|
||||
rand.workspace = true
|
||||
rayon.workspace = true
|
||||
regex.workspace = true
|
||||
revm.workspace = true
|
||||
rhai = "1.22.2"
|
||||
|
|
|
|||
|
|
@ -291,6 +291,7 @@ pub(crate) async fn compile_to_bytes(
|
|||
},
|
||||
experimental: run_config.experimental.experimental.clone(),
|
||||
no_experimental: run_config.experimental.no_experimental.clone(),
|
||||
no_output: !run_config.write_output,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
|
|
@ -351,6 +352,7 @@ pub(crate) async fn compile_and_run_unit_tests(
|
|||
..Default::default()
|
||||
},
|
||||
build_target: run_config.build_target,
|
||||
no_output: !run_config.write_output,
|
||||
..Default::default()
|
||||
})
|
||||
}) {
|
||||
|
|
@ -378,7 +380,6 @@ pub(crate) fn test_json_abi(
|
|||
has_experimental_field: bool,
|
||||
is_release: bool,
|
||||
) -> Result<()> {
|
||||
emit_json_abi(file_name, built_package)?;
|
||||
let manifest_dir = env!("CARGO_MANIFEST_DIR");
|
||||
|
||||
let experimental_suffix = match (has_experimental_field, experimental_new_encoding) {
|
||||
|
|
@ -403,10 +404,15 @@ pub(crate) fn test_json_abi(
|
|||
);
|
||||
|
||||
let output_path = format!(
|
||||
"{}/src/e2e_vm_tests/test_programs/{}/{}",
|
||||
manifest_dir, file_name, "json_abi_output.json"
|
||||
"{}/src/e2e_vm_tests/test_programs/{}/json_abi_output{}.{}.json",
|
||||
manifest_dir,
|
||||
file_name,
|
||||
experimental_suffix,
|
||||
if is_release { "release" } else { "debug" },
|
||||
);
|
||||
|
||||
emit_json_abi(file_name, &output_path, built_package)?;
|
||||
|
||||
// Update the oracle failing silently
|
||||
if update_output_files {
|
||||
let _ = std::fs::copy(&output_path, &oracle_path);
|
||||
|
|
@ -439,20 +445,19 @@ pub(crate) fn test_json_abi(
|
|||
Ok(())
|
||||
}
|
||||
|
||||
fn emit_json_abi(file_name: &str, built_package: &BuiltPackage) -> Result<()> {
|
||||
tracing::info!("ABI gen {} ...", file_name.bold());
|
||||
fn emit_json_abi(
|
||||
file_name: &str,
|
||||
json_abi_output_path: &str,
|
||||
built_package: &BuiltPackage,
|
||||
) -> Result<()> {
|
||||
tracing::info!("ABI JSON gen {} ...", file_name.bold());
|
||||
let json_abi = match &built_package.program_abi {
|
||||
ProgramABI::Fuel(abi) => serde_json::json!(abi),
|
||||
ProgramABI::Evm(abi) => serde_json::json!(abi),
|
||||
ProgramABI::MidenVM(_) => todo!(),
|
||||
};
|
||||
let manifest_dir = env!("CARGO_MANIFEST_DIR");
|
||||
let file = std::fs::File::create(format!(
|
||||
"{}/src/e2e_vm_tests/test_programs/{}/{}",
|
||||
manifest_dir, file_name, "json_abi_output.json"
|
||||
))?;
|
||||
let res = serde_json::to_writer_pretty(&file, &json_abi);
|
||||
res?;
|
||||
let file = std::fs::File::create(json_abi_output_path)?;
|
||||
serde_json::to_writer_pretty(&file, &json_abi)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
|
@ -461,25 +466,29 @@ pub(crate) fn test_json_storage_slots(
|
|||
built_package: &BuiltPackage,
|
||||
suffix: &Option<String>,
|
||||
) -> Result<()> {
|
||||
emit_json_storage_slots(file_name, built_package)?;
|
||||
let manifest_dir = env!("CARGO_MANIFEST_DIR");
|
||||
|
||||
let experimental_suffix = suffix
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.strip_prefix("test")
|
||||
.unwrap()
|
||||
.strip_suffix("toml")
|
||||
.unwrap()
|
||||
.trim_end_matches('.');
|
||||
|
||||
let oracle_path = format!(
|
||||
"{}/src/e2e_vm_tests/test_programs/{}/json_storage_slots_oracle.{}json",
|
||||
manifest_dir,
|
||||
file_name,
|
||||
suffix
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.strip_prefix("test")
|
||||
.unwrap()
|
||||
.strip_suffix("toml")
|
||||
.unwrap()
|
||||
.trim_start_matches('.')
|
||||
"{}/src/e2e_vm_tests/test_programs/{}/json_storage_slots_oracle{}.json",
|
||||
manifest_dir, file_name, experimental_suffix,
|
||||
);
|
||||
|
||||
let output_path = format!(
|
||||
"{}/src/e2e_vm_tests/test_programs/{}/{}",
|
||||
manifest_dir, file_name, "json_storage_slots_output.json"
|
||||
"{}/src/e2e_vm_tests/test_programs/{}/json_storage_slots_output{}.json",
|
||||
manifest_dir, file_name, experimental_suffix,
|
||||
);
|
||||
|
||||
emit_json_storage_slots(file_name, &output_path, built_package)?;
|
||||
|
||||
if fs::metadata(oracle_path.clone()).is_err() {
|
||||
bail!("JSON storage slots oracle file does not exist for this test.\nExpected oracle path: {}", &oracle_path);
|
||||
}
|
||||
|
|
@ -501,15 +510,14 @@ pub(crate) fn test_json_storage_slots(
|
|||
Ok(())
|
||||
}
|
||||
|
||||
fn emit_json_storage_slots(file_name: &str, built_package: &BuiltPackage) -> Result<()> {
|
||||
fn emit_json_storage_slots(
|
||||
file_name: &str,
|
||||
json_storage_slots_output_path: &str,
|
||||
built_package: &BuiltPackage,
|
||||
) -> Result<()> {
|
||||
tracing::info!("Storage slots JSON gen {} ...", file_name.bold());
|
||||
let json_storage_slots = serde_json::json!(built_package.storage_slots);
|
||||
let manifest_dir = env!("CARGO_MANIFEST_DIR");
|
||||
let file = std::fs::File::create(format!(
|
||||
"{}/src/e2e_vm_tests/test_programs/{}/{}",
|
||||
manifest_dir, file_name, "json_storage_slots_output.json"
|
||||
))?;
|
||||
let res = serde_json::to_writer_pretty(&file, &json_storage_slots);
|
||||
res?;
|
||||
let file = std::fs::File::create(json_storage_slots_output_path)?;
|
||||
serde_json::to_writer_pretty(&file, &json_storage_slots)?;
|
||||
Ok(())
|
||||
}
|
||||
|
|
|
|||
|
|
@ -17,12 +17,15 @@ use forc_util::tx_utils::decode_log_data;
|
|||
use fuel_vm::fuel_tx;
|
||||
use fuel_vm::fuel_types::canonical::Input;
|
||||
use fuel_vm::prelude::*;
|
||||
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
|
||||
use regex::Regex;
|
||||
use std::borrow::Cow;
|
||||
use std::collections::{BTreeMap, HashSet};
|
||||
use std::io::stdout;
|
||||
use std::io::Write;
|
||||
use std::process::{Command, Stdio};
|
||||
use std::str::FromStr;
|
||||
use std::time::Instant;
|
||||
use std::time::{Duration, Instant};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
path::{Path, PathBuf},
|
||||
|
|
@ -87,6 +90,7 @@ impl FileCheck {
|
|||
|
||||
#[derive(Clone)]
|
||||
struct TestDescription {
|
||||
test_toml_path: String,
|
||||
name: String,
|
||||
suffix: Option<String>,
|
||||
category: TestCategory,
|
||||
|
|
@ -109,6 +113,16 @@ struct TestDescription {
|
|||
logs: Option<String>,
|
||||
}
|
||||
|
||||
impl TestDescription {
|
||||
pub fn display_name(&self) -> Cow<str> {
|
||||
if let Some(suffix) = self.suffix.as_ref() {
|
||||
format!("{} ({})", self.name, suffix).into()
|
||||
} else {
|
||||
self.name.as_str().into()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(PartialEq, Eq, Hash)]
|
||||
struct DeployedContractKey {
|
||||
pub contract_path: String,
|
||||
|
|
@ -308,7 +322,7 @@ impl TestContext {
|
|||
})
|
||||
}
|
||||
|
||||
async fn run(&self, test: TestDescription, output: &mut String, verbose: bool) -> Result<()> {
|
||||
async fn run(&self, test: &TestDescription, output: &mut String, verbose: bool) -> Result<()> {
|
||||
let TestDescription {
|
||||
name,
|
||||
suffix,
|
||||
|
|
@ -347,10 +361,12 @@ impl TestContext {
|
|||
|
||||
match category {
|
||||
TestCategory::Runs => {
|
||||
let expected_result = expected_result.expect("No expected result found. This is likely because test.toml is missing either an \"expected_result_new_encoding\" or \"expected_result\" entry");
|
||||
let expected_result = expected_result
|
||||
.as_ref()
|
||||
.expect("No expected result found. This is likely because the `test.toml` is missing either an \"expected_result_new_encoding\" or \"expected_result\" entry.");
|
||||
|
||||
let (result, out) =
|
||||
run_and_capture_output(|| harness::compile_to_bytes(&name, &run_config, &logs))
|
||||
run_and_capture_output(|| harness::compile_to_bytes(name, run_config, logs))
|
||||
.await;
|
||||
*output = out;
|
||||
|
||||
|
|
@ -389,7 +405,7 @@ impl TestContext {
|
|||
}
|
||||
}
|
||||
|
||||
check_file_checker(checker, &name, output)?;
|
||||
check_file_checker(checker, name, output)?;
|
||||
|
||||
let compiled = result?;
|
||||
|
||||
|
|
@ -400,14 +416,18 @@ impl TestContext {
|
|||
}
|
||||
};
|
||||
|
||||
if compiled.warnings.len() > expected_warnings as usize {
|
||||
if compiled.warnings.len() > *expected_warnings as usize {
|
||||
return Err(anyhow::Error::msg(format!(
|
||||
"Expected warnings: {expected_warnings}\nActual number of warnings: {}",
|
||||
compiled.warnings.len()
|
||||
)));
|
||||
}
|
||||
|
||||
let result = harness::runs_in_vm(compiled.clone(), script_data, witness_data)?;
|
||||
let result = harness::runs_in_vm(
|
||||
compiled.clone(),
|
||||
script_data.clone(),
|
||||
witness_data.clone(),
|
||||
)?;
|
||||
let actual_result = match result {
|
||||
harness::VMExecutionResult::Fuel(state, receipts, ecal) => {
|
||||
print_receipts(output, &receipts);
|
||||
|
|
@ -462,20 +482,20 @@ impl TestContext {
|
|||
},
|
||||
};
|
||||
|
||||
if actual_result != expected_result {
|
||||
if &actual_result != expected_result {
|
||||
Err(anyhow::Error::msg(format!(
|
||||
"expected: {expected_result:?}\nactual: {actual_result:?}"
|
||||
)))
|
||||
} else {
|
||||
if validate_abi {
|
||||
if *validate_abi {
|
||||
let (result, out) = run_and_capture_output(|| async {
|
||||
harness::test_json_abi(
|
||||
&name,
|
||||
name,
|
||||
&compiled,
|
||||
experimental.new_encoding,
|
||||
run_config.update_output_files,
|
||||
&suffix,
|
||||
has_experimental_field,
|
||||
suffix,
|
||||
*has_experimental_field,
|
||||
run_config.release,
|
||||
)
|
||||
})
|
||||
|
|
@ -490,13 +510,13 @@ impl TestContext {
|
|||
|
||||
TestCategory::Compiles => {
|
||||
let (result, out) =
|
||||
run_and_capture_output(|| harness::compile_to_bytes(&name, &run_config, &logs))
|
||||
run_and_capture_output(|| harness::compile_to_bytes(name, run_config, logs))
|
||||
.await;
|
||||
*output = out;
|
||||
|
||||
let compiled_pkgs = match result? {
|
||||
forc_pkg::Built::Package(built_pkg) => {
|
||||
if built_pkg.warnings.len() > expected_warnings as usize {
|
||||
if built_pkg.warnings.len() > *expected_warnings as usize {
|
||||
return Err(anyhow::Error::msg(format!(
|
||||
"Expected warnings: {expected_warnings}\nActual number of warnings: {}",
|
||||
built_pkg.warnings.len()
|
||||
|
|
@ -515,9 +535,9 @@ impl TestContext {
|
|||
.collect(),
|
||||
};
|
||||
|
||||
check_file_checker(checker, &name, output)?;
|
||||
check_file_checker(checker, name, output)?;
|
||||
|
||||
if validate_abi {
|
||||
if *validate_abi {
|
||||
for (name, built_pkg) in &compiled_pkgs {
|
||||
let (result, out) = run_and_capture_output(|| async {
|
||||
harness::test_json_abi(
|
||||
|
|
@ -525,8 +545,8 @@ impl TestContext {
|
|||
built_pkg,
|
||||
experimental.new_encoding,
|
||||
run_config.update_output_files,
|
||||
&suffix,
|
||||
has_experimental_field,
|
||||
suffix,
|
||||
*has_experimental_field,
|
||||
run_config.release,
|
||||
)
|
||||
})
|
||||
|
|
@ -536,10 +556,10 @@ impl TestContext {
|
|||
}
|
||||
}
|
||||
|
||||
if validate_storage_slots {
|
||||
if *validate_storage_slots {
|
||||
for (name, built_pkg) in &compiled_pkgs {
|
||||
let (result, out) = run_and_capture_output(|| async {
|
||||
harness::test_json_storage_slots(name, built_pkg, &suffix)
|
||||
harness::test_json_storage_slots(name, built_pkg, suffix)
|
||||
})
|
||||
.await;
|
||||
result?;
|
||||
|
|
@ -551,7 +571,7 @@ impl TestContext {
|
|||
|
||||
TestCategory::FailsToCompile => {
|
||||
let (result, out) =
|
||||
run_and_capture_output(|| harness::compile_to_bytes(&name, &run_config, &logs))
|
||||
run_and_capture_output(|| harness::compile_to_bytes(name, run_config, logs))
|
||||
.await;
|
||||
|
||||
*output = out;
|
||||
|
|
@ -563,7 +583,7 @@ impl TestContext {
|
|||
|
||||
Err(anyhow::Error::msg("Test compiles but is expected to fail"))
|
||||
} else {
|
||||
check_file_checker(checker, &name, output)?;
|
||||
check_file_checker(checker, name, output)?;
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
|
@ -579,7 +599,7 @@ impl TestContext {
|
|||
let mut contract_ids = Vec::new();
|
||||
for contract_path in contract_paths.clone() {
|
||||
let (result, out) = run_and_capture_output(|| async {
|
||||
self.deploy_contract(&run_config, contract_path).await
|
||||
self.deploy_contract(run_config, contract_path).await
|
||||
})
|
||||
.await;
|
||||
output.push_str(&out);
|
||||
|
|
@ -587,7 +607,7 @@ impl TestContext {
|
|||
}
|
||||
let contract_ids = contract_ids.into_iter().collect::<Result<Vec<_>, _>>()?;
|
||||
|
||||
let (result, out) = harness::runs_on_node(&name, &run_config, &contract_ids).await;
|
||||
let (result, out) = harness::runs_on_node(name, run_config, &contract_ids).await;
|
||||
|
||||
output.push_str(&out);
|
||||
|
||||
|
|
@ -619,9 +639,9 @@ impl TestContext {
|
|||
}
|
||||
|
||||
match &receipts[receipts.len() - 2] {
|
||||
Receipt::Return { val, .. } => match expected_result.unwrap() {
|
||||
Receipt::Return { val, .. } => match expected_result.as_ref().unwrap() {
|
||||
TestResult::Result(v) => {
|
||||
if v != *val {
|
||||
if *v != *val {
|
||||
return Err(anyhow::Error::msg(format!(
|
||||
"return value does not match expected: {v:?}, {val:?}"
|
||||
)));
|
||||
|
|
@ -637,9 +657,9 @@ impl TestContext {
|
|||
todo!("Test result `Revert` is currently not implemented.")
|
||||
}
|
||||
},
|
||||
Receipt::ReturnData { data, .. } => match expected_result.unwrap() {
|
||||
Receipt::ReturnData { data, .. } => match expected_result.as_ref().unwrap() {
|
||||
TestResult::ReturnData(v) => {
|
||||
if v != *data.as_ref().unwrap() {
|
||||
if v != data.as_ref().unwrap() {
|
||||
return Err(anyhow::Error::msg(format!(
|
||||
"return value does not match expected: {v:?}, {data:?}"
|
||||
)));
|
||||
|
|
@ -663,7 +683,7 @@ impl TestContext {
|
|||
|
||||
TestCategory::UnitTestsPass => {
|
||||
let (result, out) =
|
||||
harness::compile_and_run_unit_tests(&name, &run_config, true).await;
|
||||
harness::compile_and_run_unit_tests(name, run_config, true).await;
|
||||
*output = out;
|
||||
|
||||
let mut decoded_logs = vec![];
|
||||
|
|
@ -725,7 +745,11 @@ impl TestContext {
|
|||
}
|
||||
}
|
||||
|
||||
let expected_decoded_test_logs = expected_decoded_test_logs.unwrap_or_default();
|
||||
let expected_decoded_test_logs = if let Some(expected_decoded_test_logs) = expected_decoded_test_logs.as_ref() {
|
||||
expected_decoded_test_logs
|
||||
} else {
|
||||
&vec![]
|
||||
};
|
||||
|
||||
if !failed.is_empty() {
|
||||
println!("FAILED!! output:\n{output}");
|
||||
|
|
@ -734,7 +758,7 @@ impl TestContext {
|
|||
failed.len(),
|
||||
failed.into_iter().collect::<String>()
|
||||
);
|
||||
} else if expected_decoded_test_logs != decoded_logs {
|
||||
} else if expected_decoded_test_logs != &decoded_logs {
|
||||
println!("FAILED!! output:\n{output}");
|
||||
panic!(
|
||||
"For {name}\ncollected decoded logs: {decoded_logs:?}\nexpected decoded logs: {expected_decoded_test_logs:?}"
|
||||
|
|
@ -750,92 +774,237 @@ impl TestContext {
|
|||
}
|
||||
}
|
||||
|
||||
pub async fn run(filter_config: &FilterConfig, run_config: &RunConfig) -> Result<()> {
|
||||
// Discover tests
|
||||
let mut tests = discover_test_tomls(run_config)?;
|
||||
let total_number_of_tests = tests.len();
|
||||
struct TestsInRun {
|
||||
total_number_of_tests: usize,
|
||||
skipped_tests: Vec<TestDescription>,
|
||||
disabled_tests: Vec<TestDescription>,
|
||||
included_tests: Vec<TestDescription>,
|
||||
excluded_tests: Vec<TestDescription>,
|
||||
tests_to_run: Vec<TestDescription>,
|
||||
}
|
||||
|
||||
// Filter tests
|
||||
let skipped_tests = filter_config
|
||||
.skip_until
|
||||
.as_ref()
|
||||
.map(|skip_until| {
|
||||
let mut found = false;
|
||||
tests.retained(|t| {
|
||||
found
|
||||
|| if skip_until.is_match(&t.name) {
|
||||
found = true;
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
impl TestsInRun {
|
||||
fn new(filter_config: &FilterConfig, run_config: &RunConfig) -> Result<Self> {
|
||||
let all_tests = discover_test_tomls(run_config)?;
|
||||
let total_number_of_tests = all_tests.len();
|
||||
|
||||
let mut tests_to_run = all_tests;
|
||||
let skipped_tests = filter_config
|
||||
.skip_until
|
||||
.as_ref()
|
||||
.map(|skip_until| {
|
||||
let mut found = false;
|
||||
tests_to_run.retain_and_get_removed(|t| {
|
||||
found
|
||||
|| if skip_until.is_match(&t.name) {
|
||||
found = true;
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
})
|
||||
})
|
||||
})
|
||||
.unwrap_or_default();
|
||||
let disabled_tests = tests.retained(|t| t.category != TestCategory::Disabled);
|
||||
let included_tests = filter_config
|
||||
.include
|
||||
.as_ref()
|
||||
.map(|include| tests.retained(|t| include.is_match(&t.name)))
|
||||
.unwrap_or_default();
|
||||
let excluded_tests = filter_config
|
||||
.exclude
|
||||
.as_ref()
|
||||
.map(|exclude| tests.retained(|t| !exclude.is_match(&t.name)))
|
||||
.unwrap_or_default();
|
||||
.unwrap_or_default();
|
||||
let disabled_tests =
|
||||
tests_to_run.retain_and_get_removed(|t| t.category != TestCategory::Disabled);
|
||||
let included_tests = filter_config
|
||||
.include
|
||||
.as_ref()
|
||||
.map(|include| tests_to_run.retain_and_get_removed(|t| include.is_match(&t.name)))
|
||||
.unwrap_or_default();
|
||||
let excluded_tests = filter_config
|
||||
.exclude
|
||||
.as_ref()
|
||||
.map(|exclude| tests_to_run.retain_and_get_removed(|t| !exclude.is_match(&t.name)))
|
||||
.unwrap_or_default();
|
||||
|
||||
if filter_config.exclude_std {
|
||||
tests.retain(|t| exclude_tests_dependency(t, "std"));
|
||||
}
|
||||
if filter_config.abi_only {
|
||||
tests.retain(|t| t.validate_abi);
|
||||
}
|
||||
if filter_config.contract_only {
|
||||
tests.retain(|t| t.category == TestCategory::RunsWithContract);
|
||||
}
|
||||
if filter_config.forc_test_only {
|
||||
tests.retain(|t| t.category == TestCategory::UnitTestsPass);
|
||||
}
|
||||
if filter_config.first_only && !tests.is_empty() {
|
||||
tests = vec![tests.remove(0)];
|
||||
}
|
||||
|
||||
// Run tests
|
||||
let context = TestContext {
|
||||
deployed_contracts: Default::default(),
|
||||
};
|
||||
let mut number_of_tests_executed = 0;
|
||||
let mut number_of_tests_failed = 0;
|
||||
let mut failed_tests = vec![];
|
||||
|
||||
let start_time = Instant::now();
|
||||
for (i, test) in tests.into_iter().enumerate() {
|
||||
if filter_config.no_std_only {
|
||||
tests_to_run.retain(|t| exclude_tests_dependency(t, "std"));
|
||||
}
|
||||
if filter_config.abi_only {
|
||||
tests_to_run.retain(|t| t.validate_abi);
|
||||
}
|
||||
if filter_config.contract_only {
|
||||
tests_to_run.retain(|t| t.category == TestCategory::RunsWithContract);
|
||||
}
|
||||
if filter_config.forc_test_only {
|
||||
tests_to_run.retain(|t| t.category == TestCategory::UnitTestsPass);
|
||||
}
|
||||
let cur_profile = if run_config.release {
|
||||
BuildProfile::RELEASE
|
||||
} else {
|
||||
BuildProfile::DEBUG
|
||||
};
|
||||
tests_to_run.retain(|test| !test.unsupported_profiles.contains(&cur_profile));
|
||||
tests_to_run.retain(|test| test.supported_targets.contains(&run_config.build_target));
|
||||
|
||||
if test.unsupported_profiles.contains(&cur_profile) {
|
||||
continue;
|
||||
if filter_config.first_only {
|
||||
tests_to_run.truncate(1);
|
||||
}
|
||||
|
||||
let name = if let Some(suffix) = test.suffix.as_ref() {
|
||||
format!("{} ({})", test.name, suffix)
|
||||
Ok(Self {
|
||||
total_number_of_tests,
|
||||
skipped_tests,
|
||||
disabled_tests,
|
||||
included_tests,
|
||||
excluded_tests,
|
||||
tests_to_run,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn run_exact(exact: &str, run_config: &RunConfig) -> Result<()> {
|
||||
let test = parse_test_toml(Path::new(exact), run_config)?;
|
||||
let context = TestContext {
|
||||
deployed_contracts: Default::default(),
|
||||
};
|
||||
let mut output = String::new();
|
||||
context.run(&test, &mut output, run_config.verbose).await
|
||||
}
|
||||
|
||||
pub async fn run_in_parallel(filter_config: &FilterConfig, run_config: &RunConfig) -> Result<()> {
|
||||
let mut tests_in_run = TestsInRun::new(filter_config, run_config)?;
|
||||
|
||||
// Build common CLI args to pass to each subprocess.
|
||||
let mut common_args = vec![];
|
||||
common_args.push("--build-target".to_string());
|
||||
common_args.push(run_config.build_target.to_string());
|
||||
if run_config.locked {
|
||||
common_args.push("--locked".to_string());
|
||||
}
|
||||
if run_config.release {
|
||||
common_args.push("--release".to_string());
|
||||
}
|
||||
if run_config.update_output_files {
|
||||
common_args.push("--update-output-files".to_string());
|
||||
}
|
||||
if let Some(experimental) = run_config.experimental.experimental_as_cli_string() {
|
||||
common_args.push("--experimental".to_string());
|
||||
common_args.push(experimental);
|
||||
}
|
||||
if let Some(no_experimental) = run_config.experimental.no_experimental_as_cli_string() {
|
||||
common_args.push("--no-experimental".to_string());
|
||||
common_args.push(no_experimental);
|
||||
}
|
||||
|
||||
// Running tests of all test categories in parallel is safe by default, except for "run_on_node" tests.
|
||||
//
|
||||
// All other test categories ("compiles", "fails_to_compile", "runs", "forc_test") do not produce
|
||||
// compiler output files that could interfere with each other when run in parallel (e.g., JSON ABI files,
|
||||
// storage slots files, etc) for different `test.<feature>.toml`s in the same test. Also, the
|
||||
// storage slots JSON and ABI JSON files created to compare with the corresponding oracles,
|
||||
// are given unique names based on the test name and the profile (debug/release).
|
||||
//
|
||||
// However, "run_on_node" tests deploy contracts to a local node, and if multiple such tests
|
||||
// are run in parallel, they will try to deploy a same contract multiple times. For sequential
|
||||
// execution, the `TestContext` caches deployed contracts by their path, so that each contract
|
||||
// is deployed only once per test run. But for parallel execution, this cache cannot be shared
|
||||
// across multiple processes. Therefore, to avoid multiple deployments of the same contract
|
||||
// from different processes, we run "run_on_node" tests in parallel only if they don't share
|
||||
// contracts. Those that share contracts are run sequentially.
|
||||
|
||||
// Maps contract path to the list of tests that deploy it.
|
||||
let mut contracts_deployed_in_tests = HashMap::<String, Vec<_>>::new();
|
||||
tests_in_run
|
||||
.tests_to_run
|
||||
.iter()
|
||||
.filter(|t| t.category == TestCategory::RunsWithContract)
|
||||
.for_each(|test| {
|
||||
test.contract_paths.iter().for_each(|contract_path| {
|
||||
contracts_deployed_in_tests
|
||||
.entry(contract_path.clone())
|
||||
.or_default()
|
||||
.push(test);
|
||||
});
|
||||
});
|
||||
let tests_sharing_contracts: HashSet<_> = contracts_deployed_in_tests
|
||||
.values()
|
||||
.filter(|tests| tests.len() > 1)
|
||||
.flat_map(|tests| tests.iter())
|
||||
.map(|t| t.test_toml_path.clone())
|
||||
.collect();
|
||||
|
||||
let failed_tests = std::sync::Mutex::new(Vec::<String>::new());
|
||||
let start_time = Instant::now();
|
||||
|
||||
// Splitting the tests into two groups, sequential and parallel, by keeping
|
||||
// in `tests_in_run.tests_to_run` only those that can be run in parallel.
|
||||
// It turns out that doing the explicit split upfront is **way faster**
|
||||
// than filtering inside the parallel iterator.
|
||||
let tests_to_run_sequentially = tests_in_run
|
||||
.tests_to_run
|
||||
.retain_and_get_removed(|test| !tests_sharing_contracts.contains(&test.test_toml_path));
|
||||
|
||||
// Run tests that can be safely run in parallel.
|
||||
tests_in_run.tests_to_run.par_iter().for_each(|test| {
|
||||
let name = test.display_name();
|
||||
|
||||
let status = Command::new(std::env::current_exe().unwrap())
|
||||
.args(common_args.clone())
|
||||
.args(vec!["--exact".to_string(), test.test_toml_path.clone()])
|
||||
.stdout(Stdio::null())
|
||||
.stdin(Stdio::null())
|
||||
.status()
|
||||
.unwrap();
|
||||
|
||||
if status.success() {
|
||||
println!(" ✅ Passed: {name}");
|
||||
} else {
|
||||
test.name.clone()
|
||||
};
|
||||
println!(" ❌ Failed: {name}");
|
||||
failed_tests.lock().unwrap().push(name.into());
|
||||
}
|
||||
});
|
||||
|
||||
// Run sequentially "run_on_node" tests that share contracts.
|
||||
let context = TestContext {
|
||||
deployed_contracts: Default::default(),
|
||||
};
|
||||
|
||||
for test in tests_to_run_sequentially.iter() {
|
||||
let name = test.display_name();
|
||||
|
||||
let mut output = String::new();
|
||||
let result = context.run(test, &mut output, run_config.verbose).await;
|
||||
|
||||
if result.is_ok() {
|
||||
println!(" ✅ Passed: {name}");
|
||||
} else {
|
||||
println!(" ❌ Failed: {name}");
|
||||
failed_tests.lock().unwrap().push(name.into());
|
||||
}
|
||||
}
|
||||
|
||||
let duration = Instant::now().duration_since(start_time);
|
||||
|
||||
// To ensure proper statistics printed in the results, get the list of tests that
|
||||
// were run sequentially and add them back to the list of tests to run.
|
||||
tests_in_run.tests_to_run.extend(tests_to_run_sequentially);
|
||||
|
||||
print_run_results(
|
||||
&tests_in_run,
|
||||
filter_config,
|
||||
&failed_tests.into_inner().unwrap(),
|
||||
&duration,
|
||||
)
|
||||
}
|
||||
|
||||
pub async fn run_sequentially(filter_config: &FilterConfig, run_config: &RunConfig) -> Result<()> {
|
||||
let tests_in_run = TestsInRun::new(filter_config, run_config)?;
|
||||
|
||||
let context = TestContext {
|
||||
deployed_contracts: Default::default(),
|
||||
};
|
||||
|
||||
let mut failed_tests = Vec::<String>::new();
|
||||
let start_time = Instant::now();
|
||||
for (i, test) in tests_in_run.tests_to_run.iter().enumerate() {
|
||||
let name = test.display_name();
|
||||
|
||||
print!("Testing {} ...", name.clone().bold());
|
||||
stdout().flush().unwrap();
|
||||
|
||||
let mut output = String::new();
|
||||
|
||||
// Skip the test if its not compatible with the current build target.
|
||||
if !test.supported_targets.contains(&run_config.build_target) {
|
||||
continue;
|
||||
}
|
||||
|
||||
use std::fmt::Write;
|
||||
let _ = writeln!(output, " {}", "Verbose Output".green().bold());
|
||||
let result = if !filter_config.first_only {
|
||||
|
|
@ -851,49 +1020,79 @@ pub async fn run(filter_config: &FilterConfig, run_config: &RunConfig) -> Result
|
|||
println!(" {}", "failed".red().bold());
|
||||
println!("{}", textwrap::indent(err.to_string().as_str(), " "));
|
||||
println!("{}", textwrap::indent(&output, " "));
|
||||
number_of_tests_failed += 1;
|
||||
failed_tests.push(name);
|
||||
failed_tests.push(name.into());
|
||||
} else {
|
||||
println!(" {}", "ok".green().bold());
|
||||
|
||||
// If verbosity is requested then print it out.
|
||||
if run_config.verbose && !output.is_empty() {
|
||||
println!("{}", textwrap::indent(&output, " "));
|
||||
}
|
||||
}
|
||||
|
||||
number_of_tests_executed += 1;
|
||||
}
|
||||
let duration = Instant::now().duration_since(start_time);
|
||||
|
||||
print_run_results(&tests_in_run, filter_config, &failed_tests, &duration)
|
||||
}
|
||||
|
||||
fn print_run_results(
|
||||
tests_in_run: &TestsInRun,
|
||||
filter_config: &FilterConfig,
|
||||
failed_tests: &[String],
|
||||
duration: &Duration,
|
||||
) -> Result<()> {
|
||||
let number_of_tests_executed = tests_in_run.tests_to_run.len();
|
||||
let number_of_tests_failed = failed_tests.len();
|
||||
|
||||
if number_of_tests_executed == 0 {
|
||||
if let Some(skip_until) = &filter_config.skip_until {
|
||||
tracing::info!(
|
||||
"Filtered {} tests with `skip-until` regex: {:?}",
|
||||
skipped_tests.len(),
|
||||
skip_until.to_string()
|
||||
"Filtered {} test{} with `skip-until` regex: {:?}",
|
||||
tests_in_run.skipped_tests.len(),
|
||||
if tests_in_run.skipped_tests.len() == 1 {
|
||||
""
|
||||
} else {
|
||||
"s"
|
||||
},
|
||||
skip_until.to_string(),
|
||||
);
|
||||
}
|
||||
if let Some(include) = &filter_config.include {
|
||||
tracing::info!(
|
||||
"Filtered {} tests with `include` regex: {:?}",
|
||||
included_tests.len(),
|
||||
include.to_string()
|
||||
"Filtered {} test{} with `include` regex: {:?}",
|
||||
tests_in_run.included_tests.len(),
|
||||
if tests_in_run.included_tests.len() == 1 {
|
||||
""
|
||||
} else {
|
||||
"s"
|
||||
},
|
||||
include.to_string(),
|
||||
);
|
||||
}
|
||||
if let Some(exclude) = &filter_config.exclude {
|
||||
tracing::info!(
|
||||
"Filtered {} tests with `exclude` regex: {:?}",
|
||||
excluded_tests.len(),
|
||||
exclude.to_string()
|
||||
"Filtered {} test{} with `exclude` regex: {:?}",
|
||||
tests_in_run.excluded_tests.len(),
|
||||
if tests_in_run.excluded_tests.len() == 1 {
|
||||
""
|
||||
} else {
|
||||
"s"
|
||||
},
|
||||
exclude.to_string(),
|
||||
);
|
||||
}
|
||||
if !disabled_tests.is_empty() {
|
||||
tracing::info!("{} tests were disabled.", disabled_tests.len());
|
||||
if !tests_in_run.disabled_tests.is_empty() {
|
||||
tracing::info!(
|
||||
"{} test{} disabled.",
|
||||
tests_in_run.disabled_tests.len(),
|
||||
if tests_in_run.disabled_tests.len() == 1 {
|
||||
" was"
|
||||
} else {
|
||||
"s were"
|
||||
},
|
||||
);
|
||||
}
|
||||
tracing::warn!(
|
||||
"No tests were run. Regex filters filtered out all {} tests.",
|
||||
total_number_of_tests
|
||||
"No tests were run. Provided test filters filtered out all {} tests.",
|
||||
tests_in_run.total_number_of_tests
|
||||
);
|
||||
} else {
|
||||
tracing::info!("_________________________________");
|
||||
|
|
@ -904,24 +1103,29 @@ pub async fn run(filter_config: &FilterConfig, run_config: &RunConfig) -> Result
|
|||
} else {
|
||||
"failed".red().bold()
|
||||
},
|
||||
total_number_of_tests,
|
||||
tests_in_run.total_number_of_tests,
|
||||
number_of_tests_executed - number_of_tests_failed,
|
||||
number_of_tests_failed,
|
||||
disabled_tests.len(),
|
||||
util::duration_to_str(&duration)
|
||||
tests_in_run.disabled_tests.len(),
|
||||
util::duration_to_str(duration),
|
||||
);
|
||||
if number_of_tests_failed > 0 {
|
||||
tracing::info!("{}", "Failing tests:".red().bold());
|
||||
tracing::info!(
|
||||
" {}",
|
||||
failed_tests
|
||||
.into_iter()
|
||||
.map(|test_name| format!("{} ... {}", test_name.bold(), "failed".red().bold()))
|
||||
.iter()
|
||||
.map(|failed_test| format!(
|
||||
"{} ... {}",
|
||||
failed_test.bold(),
|
||||
"failed".red().bold()
|
||||
))
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n ")
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
if number_of_tests_failed != 0 {
|
||||
Err(anyhow::Error::msg("Failed tests"))
|
||||
} else {
|
||||
|
|
@ -1283,6 +1487,7 @@ fn parse_test_toml(path: &Path, run_config: &RunConfig) -> Result<TestDescriptio
|
|||
.map(|x| x.to_string());
|
||||
|
||||
Ok(TestDescription {
|
||||
test_toml_path: path.to_str().unwrap().into(),
|
||||
name,
|
||||
suffix: path.file_name().unwrap().to_str().map(|x| x.to_string()),
|
||||
category,
|
||||
|
|
|
|||
|
|
@ -1,11 +1,13 @@
|
|||
pub trait VecExt<T> {
|
||||
fn retained<F>(&mut self, f: F) -> Vec<T>
|
||||
/// Retains the elements specified by the predicate `f`,
|
||||
/// and returns the elements that were removed.
|
||||
fn retain_and_get_removed<F>(&mut self, f: F) -> Vec<T>
|
||||
where
|
||||
F: FnMut(&T) -> bool;
|
||||
}
|
||||
|
||||
impl<T> VecExt<T> for Vec<T> {
|
||||
fn retained<F>(&mut self, mut f: F) -> Vec<T>
|
||||
fn retain_and_get_removed<F>(&mut self, mut f: F) -> Vec<T>
|
||||
where
|
||||
F: FnMut(&T) -> bool,
|
||||
{
|
||||
|
|
|
|||
126
test/src/main.rs
126
test/src/main.rs
|
|
@ -23,7 +23,7 @@ struct Cli {
|
|||
exclude: Option<regex::Regex>,
|
||||
|
||||
/// Skip all tests until a test matches this regex
|
||||
#[arg(long, short, value_name = "REGEX")]
|
||||
#[arg(long, value_name = "REGEX")]
|
||||
skip_until: Option<regex::Regex>,
|
||||
|
||||
/// Only run tests with ABI JSON output validation
|
||||
|
|
@ -31,8 +31,8 @@ struct Cli {
|
|||
abi_only: bool,
|
||||
|
||||
/// Only run tests with no `std` dependencies
|
||||
#[arg(long, visible_alias = "exclude_std")]
|
||||
exclude_std: bool,
|
||||
#[arg(long, visible_alias = "no_std")]
|
||||
no_std_only: bool,
|
||||
|
||||
/// Only run tests that deploy contracts
|
||||
#[arg(long, visible_alias = "contract")]
|
||||
|
|
@ -47,11 +47,13 @@ struct Cli {
|
|||
first_only: bool,
|
||||
|
||||
/// Print out warnings, errors, and output of print options
|
||||
///
|
||||
/// This option is ignored if tests are run in parallel.
|
||||
#[arg(long, env = "SWAY_TEST_VERBOSE")]
|
||||
verbose: bool,
|
||||
|
||||
/// Compile Sway code in release mode
|
||||
#[arg(long)]
|
||||
#[arg(long, short)]
|
||||
release: bool,
|
||||
|
||||
/// Intended for use in CI to ensure test lock files are up to date
|
||||
|
|
@ -67,14 +69,20 @@ struct Cli {
|
|||
update_output_files: bool,
|
||||
|
||||
/// Print out the specified IR (separate options with comma), if the verbose option is on
|
||||
///
|
||||
/// This option is ignored if tests are run in parallel.
|
||||
#[arg(long, num_args(1..=18), value_parser = clap::builder::PossibleValuesParser::new(PrintIrCliOpt::cli_options()))]
|
||||
print_ir: Option<Vec<String>>,
|
||||
|
||||
/// Print out the specified ASM (separate options with comma), if the verbose option is on
|
||||
///
|
||||
/// This option is ignored if tests are run in parallel.
|
||||
#[arg(long, num_args(1..=5), value_parser = clap::builder::PossibleValuesParser::new(&PrintAsmCliOpt::CLI_OPTIONS))]
|
||||
print_asm: Option<Vec<String>>,
|
||||
|
||||
/// Print out the final bytecode, if the verbose option is on
|
||||
///
|
||||
/// This option is ignored if tests are run in parallel.
|
||||
#[arg(long)]
|
||||
print_bytecode: bool,
|
||||
|
||||
|
|
@ -84,6 +92,25 @@ struct Cli {
|
|||
/// Only run tests of a particular kind
|
||||
#[arg(long, short, num_args(1..=4), value_parser = clap::builder::PossibleValuesParser::new(&TestKindOpt::CLI_OPTIONS))]
|
||||
kind: Option<Vec<String>>,
|
||||
|
||||
/// Run only the exact test provided by an absolute path to a `test.toml` or `test.<feature>.toml` file
|
||||
///
|
||||
/// This flag is used internally for parallel test execution, and is not intended for general use.
|
||||
#[arg(long, hide = true)]
|
||||
exact: Option<String>,
|
||||
|
||||
/// Run tests sequentially (not in parallel)
|
||||
#[arg(long, short)]
|
||||
sequential: bool,
|
||||
|
||||
/// Write compilation output (e.g., bytecode, ABI JSON, storage slots JSON, etc.) to the filesystem
|
||||
///
|
||||
/// This is primarily useful for troubleshooting test failures.
|
||||
/// Output files are written to the `out` directory within each test's directory.
|
||||
///
|
||||
/// This option is ignored if tests are run in parallel.
|
||||
#[arg(long)]
|
||||
write_output: bool,
|
||||
}
|
||||
|
||||
#[derive(Default, Debug, Clone)]
|
||||
|
|
@ -137,7 +164,7 @@ pub struct FilterConfig {
|
|||
pub exclude: Option<regex::Regex>,
|
||||
pub skip_until: Option<regex::Regex>,
|
||||
pub abi_only: bool,
|
||||
pub exclude_std: bool,
|
||||
pub no_std_only: bool,
|
||||
pub contract_only: bool,
|
||||
pub first_only: bool,
|
||||
pub forc_test_only: bool,
|
||||
|
|
@ -154,32 +181,76 @@ pub struct RunConfig {
|
|||
pub print_asm: PrintAsm,
|
||||
pub print_bytecode: bool,
|
||||
pub experimental: sway_features::CliFields,
|
||||
pub kind: TestKind,
|
||||
pub write_output: bool,
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct RunKindConfig {
|
||||
pub kind: TestKind,
|
||||
pub sequential: bool,
|
||||
}
|
||||
|
||||
// We want to use the "current_thread" flavor because running
|
||||
// Tokio runtime on another thread brings only overhead with
|
||||
// no benefits, especially when running tests in parallel.
|
||||
#[tokio::main(flavor = "current_thread")]
|
||||
async fn main() -> Result<()> {
|
||||
init_tracing_subscriber(Default::default());
|
||||
|
||||
// Parse args
|
||||
let cli = Cli::parse();
|
||||
|
||||
let build_target = match cli.build_target {
|
||||
Some(target) => match BuildTarget::from_str(target.as_str()) {
|
||||
Ok(target) => target,
|
||||
_ => panic!("Unexpected build target: {}", target),
|
||||
},
|
||||
None => BuildTarget::default(),
|
||||
};
|
||||
|
||||
if let Some(exact) = &cli.exact {
|
||||
if !std::fs::exists(exact).unwrap_or(false) {
|
||||
panic!("The --exact test path does not exist: {exact}\nThe --exact path must be an absolute path to an existing `test.toml` or `test.<feature>.toml` file");
|
||||
}
|
||||
|
||||
let run_config = RunConfig {
|
||||
// Take over options that are supported when running tests in parallel.
|
||||
locked: cli.locked,
|
||||
release: cli.release,
|
||||
build_target,
|
||||
experimental: cli.experimental,
|
||||
update_output_files: cli.update_output_files,
|
||||
// Ignore options that are not supported when running tests in parallel.
|
||||
print_ir: PrintIr::none(),
|
||||
print_asm: PrintAsm::none(),
|
||||
print_bytecode: false,
|
||||
write_output: false,
|
||||
verbose: false,
|
||||
};
|
||||
|
||||
e2e_vm_tests::run_exact(exact, &run_config).await?;
|
||||
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let run_kind_config = RunKindConfig {
|
||||
kind: cli
|
||||
.kind
|
||||
.as_ref()
|
||||
.map_or(TestKind::all(), |opts| TestKindOpt::from(opts).0),
|
||||
sequential: cli.sequential,
|
||||
};
|
||||
|
||||
let filter_config = FilterConfig {
|
||||
include: cli.include.clone(),
|
||||
exclude: cli.exclude,
|
||||
skip_until: cli.skip_until,
|
||||
abi_only: cli.abi_only,
|
||||
exclude_std: cli.exclude_std,
|
||||
no_std_only: cli.no_std_only,
|
||||
contract_only: cli.contract_only,
|
||||
forc_test_only: cli.forc_test_only,
|
||||
first_only: cli.first_only,
|
||||
};
|
||||
let build_target = match cli.build_target {
|
||||
Some(target) => match BuildTarget::from_str(target.as_str()) {
|
||||
Ok(target) => target,
|
||||
_ => panic!("unexpected build target"),
|
||||
},
|
||||
None => BuildTarget::default(),
|
||||
};
|
||||
|
||||
let run_config = RunConfig {
|
||||
locked: cli.locked,
|
||||
verbose: cli.verbose,
|
||||
|
|
@ -196,10 +267,7 @@ async fn main() -> Result<()> {
|
|||
.as_ref()
|
||||
.map_or(PrintAsm::default(), |opts| PrintAsmCliOpt::from(opts).0),
|
||||
print_bytecode: cli.print_bytecode,
|
||||
kind: cli
|
||||
.kind
|
||||
.as_ref()
|
||||
.map_or(TestKind::all(), |opts| TestKindOpt::from(opts).0),
|
||||
write_output: cli.write_output,
|
||||
};
|
||||
|
||||
// Check that the tests are consistent
|
||||
|
|
@ -209,14 +277,20 @@ async fn main() -> Result<()> {
|
|||
reduced_std_libs::create()?;
|
||||
|
||||
// Run E2E tests
|
||||
if run_config.kind.e2e {
|
||||
e2e_vm_tests::run(&filter_config, &run_config)
|
||||
.instrument(tracing::trace_span!("E2E"))
|
||||
.await?;
|
||||
if run_kind_config.kind.e2e {
|
||||
if run_kind_config.sequential {
|
||||
e2e_vm_tests::run_sequentially(&filter_config, &run_config)
|
||||
.instrument(tracing::trace_span!("E2E"))
|
||||
.await?;
|
||||
} else {
|
||||
e2e_vm_tests::run_in_parallel(&filter_config, &run_config)
|
||||
.instrument(tracing::trace_span!("E2E"))
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
|
||||
// Run IR tests
|
||||
if run_config.kind.ir && !filter_config.first_only {
|
||||
if run_kind_config.kind.ir && !filter_config.first_only {
|
||||
println!("\n");
|
||||
ir_generation::run(filter_config.include.as_ref(), cli.verbose, &run_config)
|
||||
.instrument(tracing::trace_span!("IR"))
|
||||
|
|
@ -224,7 +298,7 @@ async fn main() -> Result<()> {
|
|||
}
|
||||
|
||||
// Run snapshot tests
|
||||
if run_config.kind.snapshot && !filter_config.first_only {
|
||||
if run_kind_config.kind.snapshot && !filter_config.first_only {
|
||||
println!("\n");
|
||||
snapshot::run(filter_config.include.as_ref())
|
||||
.instrument(tracing::trace_span!("SNAPSHOT"))
|
||||
|
|
|
|||
Loading…
Add table
Add a link
Reference in a new issue