From d93824d973f31d241a91783d300e8b7809dd2296 Mon Sep 17 00:00:00 2001 From: Omar Date: Mon, 25 Aug 2025 14:16:09 +0300 Subject: [PATCH 1/4] Updated Reporting Infrastructure (#151) * Remove the old reporting infra * Use the Test struct more in the code * Implement the initial set of reporter events * Add more runner events to the reporter and refine the structure * Add reporting infra for reporting ignored tests * Update report to use better map data structures * Add case status information to the report * Integrate the reporting infrastructure with the CLI reporter used by the program. * Include contract compilation information in report * Cleanup report model * Add information on the deployed contracts --- Cargo.lock | 41 +- Cargo.toml | 2 + .../common/src/macros/define_wrapper_type.rs | 11 + crates/config/src/lib.rs | 8 + crates/core/src/cached_compiler.rs | 102 ++- crates/core/src/driver/mod.rs | 8 + crates/core/src/main.rs | 511 ++++++++++---- crates/format/src/case.rs | 5 +- crates/node/src/pool.rs | 11 + crates/report/Cargo.toml | 7 + crates/report/src/aggregator.rs | 550 +++++++++++++++ crates/report/src/analyzer.rs | 81 --- crates/report/src/common.rs | 43 ++ crates/report/src/lib.rs | 13 +- crates/report/src/reporter.rs | 234 ------- crates/report/src/reporter_event.rs | 22 + crates/report/src/runner_event.rs | 640 ++++++++++++++++++ 17 files changed, 1820 insertions(+), 469 deletions(-) create mode 100644 crates/report/src/aggregator.rs delete mode 100644 crates/report/src/analyzer.rs create mode 100644 crates/report/src/common.rs delete mode 100644 crates/report/src/reporter.rs create mode 100644 crates/report/src/reporter_event.rs create mode 100644 crates/report/src/runner_event.rs diff --git a/Cargo.lock b/Cargo.lock index a04261a..7e0f75d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4588,13 +4588,20 @@ dependencies = [ name = "revive-dt-report" version = "0.1.0" dependencies = [ + "alloy-primitives", "anyhow", + "indexmap 2.10.0", + "paste", "revive-dt-common", "revive-dt-compiler", "revive-dt-config", "revive-dt-format", + "semver 1.0.26", "serde", "serde_json", + "serde_with", + "tokio", + "tracing", ] [[package]] @@ -4845,6 +4852,30 @@ dependencies = [ "windows-sys 0.59.0", ] +[[package]] +name = "schemars" +version = "0.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4cd191f9397d57d581cddd31014772520aa448f65ef991055d7f61582c65165f" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + +[[package]] +name = "schemars" +version = "1.0.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "82d20c4491bc164fa2f6c5d44565947a52ad80b9505d8e36f8d54c27c739fcd0" +dependencies = [ + "dyn-clone", + "ref-cast", + "serde", + "serde_json", +] + [[package]] name = "schnellru" version = "0.2.4" @@ -5075,15 +5106,17 @@ dependencies = [ [[package]] name = "serde_with" -version = "3.12.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6b6f7f2fcb69f747921f79f3926bd1e203fce4fef62c268dd3abfb6d86029aa" +checksum = "f2c45cd61fefa9db6f254525d46e392b852e0e61d9a1fd36e5bd183450a556d5" dependencies = [ "base64 0.22.1", "chrono", "hex", "indexmap 1.9.3", "indexmap 2.10.0", + "schemars 0.9.0", + "schemars 1.0.4", "serde", "serde_derive", "serde_json", @@ -5093,9 +5126,9 @@ dependencies = [ [[package]] name = "serde_with_macros" -version = "3.12.0" +version = "3.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8d00caa5193a3c8362ac2b73be6b9e768aa5a4b2f721d8f4b339600c3cb51f8e" +checksum = "de90945e6565ce0d9a25098082ed4ee4002e047cb59892c318d66821e14bb30f" dependencies = [ "darling", "proc-macro2", diff --git a/Cargo.toml b/Cargo.toml index d8b4213..fb7cbcf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -34,6 +34,7 @@ futures = { version = "0.3.31" } hex = "0.4.3" regex = "1" moka = "0.12.10" +paste = "1.0.15" reqwest = { version = "0.12.15", features = ["json"] } once_cell = "1.21" semver = { version = "1.0", features = ["serde"] } @@ -43,6 +44,7 @@ serde_json = { version = "1.0", default-features = false, features = [ "std", "unbounded_depth", ] } +serde_with = { version = "3.14.0" } sha2 = { version = "0.10.9" } sp-core = "36.1.0" sp-runtime = "41.1.0" diff --git a/crates/common/src/macros/define_wrapper_type.rs b/crates/common/src/macros/define_wrapper_type.rs index 2196595..a5f1098 100644 --- a/crates/common/src/macros/define_wrapper_type.rs +++ b/crates/common/src/macros/define_wrapper_type.rs @@ -1,12 +1,23 @@ #[macro_export] macro_rules! impl_for_wrapper { (Display, $ident: ident) => { + #[automatically_derived] impl std::fmt::Display for $ident { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Display::fmt(&self.0, f) } } }; + (FromStr, $ident: ident) => { + #[automatically_derived] + impl std::str::FromStr for $ident { + type Err = anyhow::Error; + + fn from_str(s: &str) -> anyhow::Result { + s.parse().map(Self).map_err(Into::into) + } + } + }; } /// Defines wrappers around types. diff --git a/crates/config/src/lib.rs b/crates/config/src/lib.rs index 86ba19e..1f62dd1 100644 --- a/crates/config/src/lib.rs +++ b/crates/config/src/lib.rs @@ -124,6 +124,14 @@ pub struct Arguments { /// Controls if the compilation cache should be invalidated or not. #[arg(short, long)] pub invalidate_compilation_cache: bool, + + /// Controls if the compiler input is included in the final report. + #[clap(long = "report.include-compiler-input")] + pub report_include_compiler_input: bool, + + /// Controls if the compiler output is included in the final report. + #[clap(long = "report.include-compiler-output")] + pub report_include_compiler_output: bool, } impl Arguments { diff --git a/crates/core/src/cached_compiler.rs b/crates/core/src/cached_compiler.rs index b94b1f3..188befb 100644 --- a/crates/core/src/cached_compiler.rs +++ b/crates/core/src/cached_compiler.rs @@ -9,7 +9,7 @@ use std::{ use futures::FutureExt; use revive_dt_common::iterators::FilesWithExtensionIterator; -use revive_dt_compiler::{Compiler, CompilerOutput, Mode, SolidityCompiler}; +use revive_dt_compiler::{Compiler, CompilerInput, CompilerOutput, Mode, SolidityCompiler}; use revive_dt_config::Arguments; use revive_dt_format::metadata::{ContractIdent, ContractInstance, Metadata}; @@ -35,6 +35,7 @@ impl CachedCompiler { } /// Compiles or gets the compilation artifacts from the cache. 
+ #[allow(clippy::too_many_arguments)] #[instrument( level = "debug", skip_all, @@ -52,6 +53,19 @@ impl CachedCompiler { mode: &Mode, config: &Arguments, deployed_libraries: Option<&HashMap>, + compilation_success_report_callback: impl Fn( + Version, + PathBuf, + bool, + Option, + CompilerOutput, + ) + Clone, + compilation_failure_report_callback: impl Fn( + Option, + Option, + Option, + String, + ), ) -> Result<(CompilerOutput, Version)> { static CACHE_KEY_LOCK: Lazy>>>> = Lazy::new(Default::default); @@ -61,10 +75,21 @@ impl CachedCompiler { config, compiler_version_or_requirement, ) - .await?; + .await + .inspect_err(|err| { + compilation_failure_report_callback(None, None, None, err.to_string()) + })?; let compiler_version = ::new(compiler_path.clone()) .version() - .await?; + .await + .inspect_err(|err| { + compilation_failure_report_callback( + None, + Some(compiler_path.clone()), + None, + err.to_string(), + ) + })?; let cache_key = CacheKey { platform_key: P::config_id().to_string(), @@ -74,13 +99,19 @@ impl CachedCompiler { }; let compilation_callback = || { + let compiler_path = compiler_path.clone(); + let compiler_version = compiler_version.clone(); + let compilation_success_report_callback = compilation_success_report_callback.clone(); async move { compile_contracts::

( metadata.directory()?, compiler_path, + compiler_version, metadata.files_to_compile()?, mode, deployed_libraries, + compilation_success_report_callback, + compilation_failure_report_callback, ) .map(|compilation_result| compilation_result.map(CacheValue::new)) .await @@ -125,10 +156,19 @@ impl CachedCompiler { }; let _guard = mutex.lock().await; - self.0 - .get_or_insert_with(&cache_key, compilation_callback) - .await - .map(|value| value.compiler_output)? + match self.0.get(&cache_key).await { + Some(cache_value) => { + compilation_success_report_callback( + compiler_version.clone(), + compiler_path, + true, + None, + cache_value.compiler_output.clone(), + ); + cache_value.compiler_output + } + None => compilation_callback().await?.compiler_output, + } } }; @@ -136,19 +176,34 @@ impl CachedCompiler { } } +#[allow(clippy::too_many_arguments)] async fn compile_contracts( metadata_directory: impl AsRef, compiler_path: impl AsRef, + compiler_version: Version, mut files_to_compile: impl Iterator, mode: &Mode, deployed_libraries: Option<&HashMap>, + compilation_success_report_callback: impl Fn( + Version, + PathBuf, + bool, + Option, + CompilerOutput, + ), + compilation_failure_report_callback: impl Fn( + Option, + Option, + Option, + String, + ), ) -> Result { let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref()) .with_allowed_extension("sol") .with_use_cached_fs(true) .collect::>(); - Compiler::::new() + let compiler = Compiler::::new() .with_allow_path(metadata_directory) // Handling the modes .with_optimization(mode.optimize_setting) @@ -156,6 +211,14 @@ async fn compile_contracts( // Adding the contract sources to the compiler. .try_then(|compiler| { files_to_compile.try_fold(compiler, |compiler, path| compiler.with_source(path)) + }) + .inspect_err(|err| { + compilation_failure_report_callback( + Some(compiler_version.clone()), + Some(compiler_path.as_ref().to_path_buf()), + None, + err.to_string(), + ) })? // Adding the deployed libraries to the compiler. .then(|compiler| { @@ -171,9 +234,28 @@ async fn compile_contracts( .fold(compiler, |compiler, (ident, address, path)| { compiler.with_library(path, ident.as_str(), *address) }) - }) - .try_build(compiler_path) + }); + + let compiler_input = compiler.input(); + let compiler_output = compiler + .try_build(compiler_path.as_ref()) .await + .inspect_err(|err| { + compilation_failure_report_callback( + Some(compiler_version.clone()), + Some(compiler_path.as_ref().to_path_buf()), + Some(compiler_input.clone()), + err.to_string(), + ) + })?; + compilation_success_report_callback( + compiler_version, + compiler_path.as_ref().to_path_buf(), + false, + Some(compiler_input), + compiler_output.clone(), + ); + Ok(compiler_output) } struct ArtifactsCache { diff --git a/crates/core/src/driver/mod.rs b/crates/core/src/driver/mod.rs index 4912b9a..d06ea52 100644 --- a/crates/core/src/driver/mod.rs +++ b/crates/core/src/driver/mod.rs @@ -22,6 +22,7 @@ use anyhow::Context; use futures::TryStreamExt; use indexmap::IndexMap; use revive_dt_format::traits::{ResolutionContext, ResolverApi}; +use revive_dt_report::ExecutionSpecificReporter; use semver::Version; use revive_dt_format::case::Case; @@ -51,6 +52,9 @@ pub struct CaseState { /// Stores the version used for the current case. compiler_version: Version, + /// The execution reporter. 
+ execution_reporter: ExecutionSpecificReporter, + phantom: PhantomData, } @@ -62,12 +66,14 @@ where compiler_version: Version, compiled_contracts: HashMap>, deployed_contracts: HashMap, + execution_reporter: ExecutionSpecificReporter, ) -> Self { Self { compiled_contracts, deployed_contracts, variables: Default::default(), compiler_version, + execution_reporter, phantom: PhantomData, } } @@ -718,6 +724,8 @@ where instance_address = ?address, "Deployed contract" ); + self.execution_reporter + .report_contract_deployed_event(contract_instance.clone(), address)?; self.deployed_contracts.insert( contract_instance.clone(), diff --git a/crates/core/src/main.rs b/crates/core/src/main.rs index f74101d..43ae3be 100644 --- a/crates/core/src/main.rs +++ b/crates/core/src/main.rs @@ -18,8 +18,12 @@ use futures::stream; use futures::{Stream, StreamExt}; use indexmap::IndexMap; use revive_dt_node_interaction::EthereumNode; +use revive_dt_report::{ + NodeDesignation, ReportAggregator, Reporter, ReporterEvent, TestCaseStatus, + TestSpecificReporter, TestSpecifier, +}; use temp_dir::TempDir; -use tokio::{sync::mpsc, try_join}; +use tokio::{join, try_join}; use tracing::{debug, info, info_span, instrument}; use tracing_appender::non_blocking::WorkerGuard; use tracing_subscriber::{EnvFilter, FmtSubscriber}; @@ -39,7 +43,6 @@ use revive_dt_format::{ mode::ParsedMode, }; use revive_dt_node::{Node, pool::NodePool}; -use revive_dt_report::reporter::{Report, Span}; use crate::cached_compiler::CachedCompiler; @@ -53,11 +56,9 @@ struct Test<'a> { mode: Mode, case_idx: CaseIdx, case: &'a Case, + reporter: TestSpecificReporter, } -/// This represents the results that we gather from running test cases. -type CaseResult = Result; - fn main() -> anyhow::Result<()> { let (args, _guard) = init_cli()?; info!( @@ -69,20 +70,39 @@ fn main() -> anyhow::Result<()> { "Differential testing tool has been initialized" ); - let body = async { - for (corpus, tests) in collect_corpora(&args)? { - let span = Span::new(corpus, args.clone())?; - match &args.compile_only { - Some(platform) => compile_corpus(&args, &tests, platform, span).await, - None => execute_corpus(&args, &tests, span).await?, + let (reporter, report_aggregator_task) = ReportAggregator::new(args.clone()).into_task(); + + let number_of_threads = args.number_of_threads; + let body = async move { + let tests = collect_corpora(&args)? 
+ .into_iter() + .inspect(|(corpus, _)| { + reporter + .report_corpus_file_discovery_event(corpus.clone()) + .expect("Can't fail") + }) + .flat_map(|(_, files)| files.into_iter()) + .inspect(|metadata_file| { + reporter + .report_metadata_file_discovery_event( + metadata_file.metadata_file_path.clone(), + metadata_file.content.clone(), + ) + .expect("Can't fail") + }) + .collect::>(); + + match &args.compile_only { + Some(platform) => { + compile_corpus(&args, &tests, platform, reporter, report_aggregator_task).await } - Report::save()?; + None => execute_corpus(&args, &tests, reporter, report_aggregator_task).await?, } Ok(()) }; tokio::runtime::Builder::new_multi_thread() - .worker_threads(args.number_of_threads) + .worker_threads(number_of_threads) .enable_all() .build() .expect("Failed building the Runtime") @@ -153,7 +173,8 @@ fn collect_corpora(args: &Arguments) -> anyhow::Result( args: &Arguments, metadata_files: &[MetadataFile], - span: Span, + reporter: Reporter, + report_aggregator_task: impl Future>, ) -> anyhow::Result<()> where L: Platform, @@ -161,13 +182,12 @@ where L::Blockchain: revive_dt_node::Node + Send + Sync + 'static, F::Blockchain: revive_dt_node::Node + Send + Sync + 'static, { - let (report_tx, report_rx) = mpsc::unbounded_channel::<(Test<'_>, CaseResult)>(); + let tests = prepare_tests::(args, metadata_files, reporter.clone()); + let driver_task = start_driver_task::(args, tests).await?; + let cli_reporting_task = start_cli_reporting_task(reporter); - let tests = prepare_tests::(args, metadata_files); - let driver_task = start_driver_task::(args, tests, span, report_tx).await?; - let status_reporter_task = start_reporter_task(report_rx); - - tokio::join!(status_reporter_task, driver_task); + let (_, _, rtn) = tokio::join!(cli_reporting_task, driver_task, report_aggregator_task); + rtn?; Ok(()) } @@ -175,6 +195,7 @@ where fn prepare_tests<'a, L, F>( args: &Arguments, metadata_files: &'a [MetadataFile], + reporter: Reporter, ) -> impl Stream> where L: Platform, @@ -201,27 +222,25 @@ where .into_iter() .map(move |mode| (metadata_file, case_idx, case, mode)) }) - .fold( - IndexMap::<_, BTreeMap<_, Vec<_>>>::new(), - |mut map, (metadata_file, case_idx, case, mode)| { - let test = Test { - metadata: metadata_file, - metadata_file_path: metadata_file.metadata_file_path.as_path(), - mode: mode.clone(), - case_idx: CaseIdx::new(case_idx), - case, - }; - map.entry(mode) - .or_default() - .entry(test.case_idx) - .or_default() - .push(test); - map - }, - ) - .into_values() - .flatten() - .flat_map(|(_, value)| value.into_iter()) + .map(move |(metadata_file, case_idx, case, mode)| Test { + metadata: metadata_file, + metadata_file_path: metadata_file.metadata_file_path.as_path(), + mode: mode.clone(), + case_idx: CaseIdx::new(case_idx), + case, + reporter: reporter.test_specific_reporter(Arc::new(TestSpecifier { + solc_mode: mode.clone(), + metadata_file_path: metadata_file.metadata_file_path.clone(), + case_idx: CaseIdx::new(case_idx), + })), + }) + .inspect(|test| { + test.reporter + .report_test_case_discovery_event() + .expect("Can't fail") + }) + .collect::>() + .into_iter() // Filter the test out if the leader and follower do not support the target. 
.filter(|test| { let leader_support = @@ -236,7 +255,30 @@ where leader_support, follower_support, "Target is not supported, throwing metadata file out" - ) + ); + test + .reporter + .report_test_ignored_event( + "Either the leader or the follower do not support the target desired by the test", + IndexMap::from_iter([ + ( + "test_desired_targets".to_string(), + serde_json::to_value(test.metadata.targets.as_ref()) + .expect("Can't fail") + ), + ( + "leader_support".to_string(), + serde_json::to_value(leader_support) + .expect("Can't fail") + ), + ( + "follower_support".to_string(), + serde_json::to_value(follower_support) + .expect("Can't fail") + ) + ]) + ) + .expect("Can't fail"); } is_allowed @@ -248,6 +290,13 @@ where file_path = %test.metadata.relative_path().display(), "Metadata file is ignored, throwing case out" ); + test + .reporter + .report_test_ignored_event( + "Metadata file is ignored, therefore all cases are ignored", + IndexMap::new(), + ) + .expect("Can't fail"); false } else { true @@ -261,6 +310,13 @@ where case_idx = %test.case_idx, "Case is ignored, throwing case out" ); + test + .reporter + .report_test_ignored_event( + "Case is ignored", + IndexMap::new(), + ) + .expect("Can't fail"); false } else { true @@ -283,6 +339,29 @@ where follower_compatibility, "EVM Version is incompatible, throwing case out" ); + test + .reporter + .report_test_ignored_event( + "EVM version is incompatible with either the leader or the follower", + IndexMap::from_iter([ + ( + "test_desired_evm_version".to_string(), + serde_json::to_value(test.metadata.required_evm_version) + .expect("Can't fail") + ), + ( + "leader_compatibility".to_string(), + serde_json::to_value(leader_compatibility) + .expect("Can't fail") + ), + ( + "follower_compatibility".to_string(), + serde_json::to_value(follower_compatibility) + .expect("Can't fail") + ) + ]) + ) + .expect("Can't fail"); } is_allowed @@ -311,6 +390,24 @@ where follower_support, "Compilers do not support this, throwing case out" ); + test + .reporter + .report_test_ignored_event( + "Compilers do not support this mode either for the leader or for the follower.", + IndexMap::from_iter([ + ( + "leader_support".to_string(), + serde_json::to_value(leader_support) + .expect("Can't fail") + ), + ( + "follower_support".to_string(), + serde_json::to_value(follower_support) + .expect("Can't fail") + ) + ]) + ) + .expect("Can't fail"); } is_allowed.then_some(test) @@ -336,8 +433,6 @@ async fn does_compiler_support_mode( async fn start_driver_task<'a, L, F>( args: &Arguments, tests: impl Stream>, - span: Span, - report_tx: mpsc::UnboundedSender<(Test<'a>, CaseResult)>, ) -> anyhow::Result> where L: Platform, @@ -345,6 +440,8 @@ where L::Blockchain: revive_dt_node::Node + Send + Sync + 'static, F::Blockchain: revive_dt_node::Node + Send + Sync + 'static, { + info!("Starting driver task"); + let leader_nodes = Arc::new(NodePool::::new(args)?); let follower_nodes = Arc::new(NodePool::::new(args)?); let number_concurrent_tasks = args.number_of_concurrent_tasks(); @@ -368,103 +465,121 @@ where move |test| { let leader_nodes = leader_nodes.clone(); let follower_nodes = follower_nodes.clone(); - let report_tx = report_tx.clone(); let cached_compiler = cached_compiler.clone(); async move { let leader_node = leader_nodes.round_robbin(); let follower_node = follower_nodes.round_robbin(); + test.reporter + .report_leader_node_assigned_event( + leader_node.id(), + L::config_id(), + leader_node.connection_string(), + ) + .expect("Can't fail"); + test.reporter + 
.report_follower_node_assigned_event( + follower_node.id(), + F::config_id(), + follower_node.connection_string(), + ) + .expect("Can't fail"); + + let reporter = test.reporter.clone(); let result = handle_case_driver::( - test.metadata_file_path, - test.metadata, - test.case_idx, - test.case, - test.mode.clone(), + test, args, cached_compiler, leader_node, follower_node, - span, ) .await; - report_tx - .send((test, result)) - .expect("Failed to send report"); + match result { + Ok(steps_executed) => reporter + .report_test_succeeded_event(steps_executed) + .expect("Can't fail"), + Err(error) => reporter + .report_test_failed_event(error.to_string()) + .expect("Can't fail"), + } } }, )) } -async fn start_reporter_task(mut report_rx: mpsc::UnboundedReceiver<(Test<'_>, CaseResult)>) { +#[allow(clippy::uninlined_format_args)] +#[allow(irrefutable_let_patterns)] +async fn start_cli_reporting_task(reporter: Reporter) { + let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail"); + drop(reporter); + let start = Instant::now(); const GREEN: &str = "\x1B[32m"; const RED: &str = "\x1B[31m"; - const COLOUR_RESET: &str = "\x1B[0m"; + const GREY: &str = "\x1B[90m"; + const COLOR_RESET: &str = "\x1B[0m"; const BOLD: &str = "\x1B[1m"; const BOLD_RESET: &str = "\x1B[22m"; let mut number_of_successes = 0; let mut number_of_failures = 0; - let mut failures = vec![]; - // Wait for reports to come from our test runner. When the channel closes, this ends. let mut buf = BufWriter::new(stderr()); - while let Some((test, case_result)) = report_rx.recv().await { - let case_name = test.case.name.as_deref().unwrap_or("unnamed_case"); - let case_idx = test.case_idx; - let test_path = test.metadata_file_path.display(); - let test_mode = test.mode.clone(); + while let Ok(event) = aggregator_events_rx.recv().await { + let ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted { + metadata_file_path, + mode, + case_status, + } = event + else { + continue; + }; - match case_result { - Ok(_inputs) => { - number_of_successes += 1; - let _ = writeln!( + let _ = writeln!(buf, "{} - {}", mode, metadata_file_path.display()); + for (case_idx, case_status) in case_status.into_iter() { + let _ = write!(buf, "\tCase Index {case_idx:>3}: "); + let _ = match case_status { + TestCaseStatus::Succeeded { steps_executed } => { + number_of_successes += 1; + writeln!( + buf, + "{}{}Case Succeeded{}{} - Steps Executed: {}", + GREEN, BOLD, BOLD_RESET, COLOR_RESET, steps_executed + ) + } + TestCaseStatus::Failed { reason } => { + number_of_failures += 1; + writeln!( + buf, + "{}{}Case Failed{}{} - Reason: {}", + RED, BOLD, BOLD_RESET, COLOR_RESET, reason + ) + } + TestCaseStatus::Ignored { reason, .. } => writeln!( buf, - "{GREEN}Case Succeeded:{COLOUR_RESET} {test_path} -> {case_name}:{case_idx} (mode: {test_mode})" - ); - } - Err(err) => { - number_of_failures += 1; - let _ = writeln!( - buf, - "{RED}Case Failed:{COLOUR_RESET} {test_path} -> {case_name}:{case_idx} (mode: {test_mode})" - ); - failures.push((test, err)); - } - } - } - - let _ = writeln!(buf,); - let elapsed = start.elapsed(); - - // Now, log the failures with more complete errors at the bottom, like `cargo test` does, so - // that we don't have to scroll through the entire output to find them. 
- if !failures.is_empty() { - let _ = writeln!(buf, "{BOLD}Failures:{BOLD_RESET}\n"); - - for failure in failures { - let (test, err) = failure; - let case_name = test.case.name.as_deref().unwrap_or("unnamed_case"); - let case_idx = test.case_idx; - let test_path = test.metadata_file_path.display(); - let test_mode = test.mode.clone(); - - let _ = writeln!( - buf, - "---- {RED}Case Failed:{COLOUR_RESET} {test_path} -> {case_name}:{case_idx} (mode: {test_mode}) ----\n\n{err}\n" - ); + "{}{}Case Ignored{}{} - Reason: {}", + GREY, BOLD, BOLD_RESET, COLOR_RESET, reason + ), + }; } + let _ = writeln!(buf); } // Summary at the end. let _ = writeln!( buf, - "{} cases: {GREEN}{number_of_successes}{COLOUR_RESET} cases succeeded, {RED}{number_of_failures}{COLOUR_RESET} cases failed in {} seconds", + "{} cases: {}{}{} cases succeeded, {}{}{} cases failed in {} seconds", number_of_successes + number_of_failures, - elapsed.as_secs() + GREEN, + number_of_successes, + COLOR_RESET, + RED, + number_of_failures, + COLOR_RESET, + start.elapsed().as_secs() ); } @@ -474,25 +589,20 @@ async fn start_reporter_task(mut report_rx: mpsc::UnboundedReceiver<(Test<'_>, C name = "Handling Case" skip_all, fields( - metadata_file_path = %metadata.relative_path().display(), - mode = %mode, - %case_idx, - case_name = case.name.as_deref().unwrap_or("Unnamed Case"), + metadata_file_path = %test.metadata.relative_path().display(), + mode = %test.mode, + case_idx = %test.case_idx, + case_name = test.case.name.as_deref().unwrap_or("Unnamed Case"), leader_node = leader_node.id(), follower_node = follower_node.id(), ) )] async fn handle_case_driver( - metadata_file_path: &Path, - metadata: &MetadataFile, - case_idx: CaseIdx, - case: &Case, - mode: Mode, + test: Test<'_>, config: &Arguments, cached_compiler: Arc, leader_node: &L::Blockchain, follower_node: &F::Blockchain, - _: Span, ) -> anyhow::Result where L: Platform, @@ -500,6 +610,13 @@ where L::Blockchain: revive_dt_node::Node + Send + Sync + 'static, F::Blockchain: revive_dt_node::Node + Send + Sync + 'static, { + let leader_reporter = test + .reporter + .execution_specific_reporter(leader_node.id(), NodeDesignation::Leader); + let follower_reporter = test + .reporter + .execution_specific_reporter(follower_node.id(), NodeDesignation::Follower); + let ( ( CompilerOutput { @@ -514,14 +631,69 @@ where _, ), ) = try_join!( - cached_compiler.compile_contracts::(metadata, metadata_file_path, &mode, config, None), - cached_compiler.compile_contracts::(metadata, metadata_file_path, &mode, config, None) + cached_compiler.compile_contracts::( + test.metadata, + test.metadata_file_path, + &test.mode, + config, + None, + |compiler_version, compiler_path, is_cached, compiler_input, compiler_output| { + leader_reporter + .report_pre_link_contracts_compilation_succeeded_event( + compiler_version, + compiler_path, + is_cached, + compiler_input, + compiler_output, + ) + .expect("Can't fail") + }, + |compiler_version, compiler_path, compiler_input, failure_reason| { + leader_reporter + .report_pre_link_contracts_compilation_failed_event( + compiler_version, + compiler_path, + compiler_input, + failure_reason, + ) + .expect("Can't fail") + } + ), + cached_compiler.compile_contracts::( + test.metadata, + test.metadata_file_path, + &test.mode, + config, + None, + |compiler_version, compiler_path, is_cached, compiler_input, compiler_output| { + follower_reporter + .report_pre_link_contracts_compilation_succeeded_event( + compiler_version, + compiler_path, + is_cached, + compiler_input, + 
compiler_output, + ) + .expect("Can't fail") + }, + |compiler_version, compiler_path, compiler_input, failure_reason| { + follower_reporter + .report_pre_link_contracts_compilation_failed_event( + compiler_version, + compiler_path, + compiler_input, + failure_reason, + ) + .expect("Can't fail") + } + ) )?; let mut leader_deployed_libraries = None::>; let mut follower_deployed_libraries = None::>; - let mut contract_sources = metadata.contract_sources()?; - for library_instance in metadata + let mut contract_sources = test.metadata.contract_sources()?; + for library_instance in test + .metadata .libraries .iter() .flatten() @@ -561,7 +733,8 @@ where // Getting the deployer address from the cases themselves. This is to ensure that we're // doing the deployments from different accounts and therefore we're not slowed down by // the nonce. - let deployer_address = case + let deployer_address = test + .case .steps .iter() .filter_map(|step| match step { @@ -620,6 +793,24 @@ where ), ); } + if let Some(ref leader_deployed_libraries) = leader_deployed_libraries { + leader_reporter.report_libraries_deployed_event( + leader_deployed_libraries + .clone() + .into_iter() + .map(|(key, (_, address, _))| (key, address)) + .collect::>(), + )?; + } + if let Some(ref follower_deployed_libraries) = follower_deployed_libraries { + follower_reporter.report_libraries_deployed_event( + follower_deployed_libraries + .clone() + .into_iter() + .map(|(key, (_, address, _))| (key, address)) + .collect::>(), + )?; + } let ( ( @@ -636,18 +827,60 @@ where ), ) = try_join!( cached_compiler.compile_contracts::( - metadata, - metadata_file_path, - &mode, + test.metadata, + test.metadata_file_path, + &test.mode, config, - leader_deployed_libraries.as_ref() + leader_deployed_libraries.as_ref(), + |compiler_version, compiler_path, is_cached, compiler_input, compiler_output| { + leader_reporter + .report_post_link_contracts_compilation_succeeded_event( + compiler_version, + compiler_path, + is_cached, + compiler_input, + compiler_output, + ) + .expect("Can't fail") + }, + |compiler_version, compiler_path, compiler_input, failure_reason| { + leader_reporter + .report_post_link_contracts_compilation_failed_event( + compiler_version, + compiler_path, + compiler_input, + failure_reason, + ) + .expect("Can't fail") + } ), cached_compiler.compile_contracts::( - metadata, - metadata_file_path, - &mode, + test.metadata, + test.metadata_file_path, + &test.mode, config, - follower_deployed_libraries.as_ref() + follower_deployed_libraries.as_ref(), + |compiler_version, compiler_path, is_cached, compiler_input, compiler_output| { + follower_reporter + .report_post_link_contracts_compilation_succeeded_event( + compiler_version, + compiler_path, + is_cached, + compiler_input, + compiler_output, + ) + .expect("Can't fail") + }, + |compiler_version, compiler_path, compiler_input, failure_reason| { + follower_reporter + .report_post_link_contracts_compilation_failed_event( + compiler_version, + compiler_path, + compiler_input, + failure_reason, + ) + .expect("Can't fail") + } ) )?; @@ -655,16 +888,18 @@ where leader_compiler_version, leader_post_link_contracts, leader_deployed_libraries.unwrap_or_default(), + leader_reporter, ); let follower_state = CaseState::::new( follower_compiler_version, follower_post_link_contracts, follower_deployed_libraries.unwrap_or_default(), + follower_reporter, ); let mut driver = CaseDriver::::new( - metadata, - case, + test.metadata, + test.case, leader_node, follower_node, leader_state, @@ -679,14 +914,15 @@ 
where async fn execute_corpus( args: &Arguments, tests: &[MetadataFile], - span: Span, + reporter: Reporter, + report_aggregator_task: impl Future>, ) -> anyhow::Result<()> { match (&args.leader, &args.follower) { (TestingPlatform::Geth, TestingPlatform::Kitchensink) => { - run_driver::(args, tests, span).await? + run_driver::(args, tests, reporter, report_aggregator_task).await? } (TestingPlatform::Geth, TestingPlatform::Geth) => { - run_driver::(args, tests, span).await? + run_driver::(args, tests, reporter, report_aggregator_task).await? } _ => unimplemented!(), } @@ -698,7 +934,8 @@ async fn compile_corpus( config: &Arguments, tests: &[MetadataFile], platform: &TestingPlatform, - _: Span, + _: Reporter, + report_aggregator_task: impl Future>, ) { let tests = tests.iter().flat_map(|metadata| { metadata @@ -713,8 +950,8 @@ async fn compile_corpus( .map(Arc::new) .expect("Failed to create the cached compiler"); - futures::stream::iter(tests) - .for_each_concurrent(None, |(metadata, mode)| { + let compilation_task = + futures::stream::iter(tests).for_each_concurrent(None, |(metadata, mode)| { let cached_compiler = cached_compiler.clone(); async move { @@ -727,6 +964,8 @@ async fn compile_corpus( &mode, config, None, + |_, _, _, _, _| {}, + |_, _, _, _| {}, ) .await; } @@ -738,11 +977,13 @@ async fn compile_corpus( &mode, config, None, + |_, _, _, _, _| {}, + |_, _, _, _| {}, ) .await; } } } - }) - .await; + }); + let _ = join!(compilation_task, report_aggregator_task); } diff --git a/crates/format/src/case.rs b/crates/format/src/case.rs index 2ef9ead..c98ac46 100644 --- a/crates/format/src/case.rs +++ b/crates/format/src/case.rs @@ -71,6 +71,7 @@ impl Case { define_wrapper_type!( /// A wrapper type for the index of test cases found in metadata file. 
- #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] - pub struct CaseIdx(usize) impl Display; + #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] + #[serde(transparent)] + pub struct CaseIdx(usize) impl Display, FromStr; ); diff --git a/crates/node/src/pool.rs b/crates/node/src/pool.rs index 015c004..d195988 100644 --- a/crates/node/src/pool.rs +++ b/crates/node/src/pool.rs @@ -9,6 +9,7 @@ use revive_dt_common::cached_fs::read_to_string; use anyhow::Context; use revive_dt_config::Arguments; +use tracing::info; use crate::Node; @@ -63,6 +64,16 @@ where fn spawn_node(args: &Arguments, genesis: String) -> anyhow::Result { let mut node = T::new(args); + info!( + id = node.id(), + connection_string = node.connection_string(), + "Spawning node" + ); node.spawn(genesis)?; + info!( + id = node.id(), + connection_string = node.connection_string(), + "Spawned node" + ); Ok(node) } diff --git a/crates/report/Cargo.toml b/crates/report/Cargo.toml index 0e6e896..eae7fa7 100644 --- a/crates/report/Cargo.toml +++ b/crates/report/Cargo.toml @@ -13,9 +13,16 @@ revive-dt-config = { workspace = true } revive-dt-format = { workspace = true } revive-dt-compiler = { workspace = true } +alloy-primitives = { workspace = true } anyhow = { workspace = true } +paste = { workspace = true } +indexmap = { workspace = true, features = ["serde"] } +semver = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } +serde_with = { workspace = true } +tokio = { workspace = true } +tracing = { workspace = true } [lints] workspace = true diff --git a/crates/report/src/aggregator.rs b/crates/report/src/aggregator.rs new file mode 100644 index 0000000..1f7af7e --- /dev/null +++ b/crates/report/src/aggregator.rs @@ -0,0 +1,550 @@ +//! Implementation of the report aggregator task which consumes the events sent by the various +//! reporters and combines them into a single unified report. 
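As a sketch of what this module wires up (the event types below are hypothetical stand-ins for the `RunnerEvent` and `ReporterEvent` types this patch defines), the aggregator couples an unbounded mpsc channel carrying runner events in with a bounded broadcast channel publishing digested reporter events out, exactly as `ReportAggregator::new` does below:

```rust
// Sketch of the aggregator's channel topology; placeholder event types stand in
// for the real crate::RunnerEvent / crate::ReporterEvent.
use tokio::sync::{broadcast, mpsc};

#[derive(Debug)]
struct RunnerEvent; // stand-in

#[derive(Clone, Debug)]
struct ReporterEvent; // stand-in (broadcast requires Clone)

fn wire_up() -> (
    mpsc::UnboundedSender<RunnerEvent>,
    mpsc::UnboundedReceiver<RunnerEvent>,
    broadcast::Sender<ReporterEvent>,
) {
    // Runners -> aggregator: unbounded, so reporting never blocks a test task.
    let (runner_tx, runner_rx) = mpsc::unbounded_channel();
    // Aggregator -> listeners: receivers are created on demand via `subscribe()`;
    // a lagging subscriber loses the oldest of the 1024 buffered events rather
    // than blocking the sender.
    let (listener_tx, _discarded_rx) = broadcast::channel(1024);
    (runner_tx, runner_rx, listener_tx)
}
```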
+ +use std::{ + collections::{BTreeMap, BTreeSet, HashMap, HashSet}, + fs::OpenOptions, + path::PathBuf, + time::{SystemTime, UNIX_EPOCH}, +}; + +use alloy_primitives::Address; +use anyhow::Result; +use indexmap::IndexMap; +use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode}; +use revive_dt_config::{Arguments, TestingPlatform}; +use revive_dt_format::{case::CaseIdx, corpus::Corpus, metadata::ContractInstance}; +use semver::Version; +use serde::Serialize; +use serde_with::{DisplayFromStr, serde_as}; +use tokio::sync::{ + broadcast::{Sender, channel}, + mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel}, +}; +use tracing::debug; + +use crate::*; + +pub struct ReportAggregator { + /* Internal Report State */ + report: Report, + remaining_cases: HashMap>>, + /* Channels */ + runner_tx: Option>, + runner_rx: UnboundedReceiver, + listener_tx: Sender, +} + +impl ReportAggregator { + pub fn new(config: Arguments) -> Self { + let (runner_tx, runner_rx) = unbounded_channel::(); + let (listener_tx, _) = channel::(1024); + Self { + report: Report::new(config), + remaining_cases: Default::default(), + runner_tx: Some(runner_tx), + runner_rx, + listener_tx, + } + } + + pub fn into_task(mut self) -> (Reporter, impl Future>) { + let reporter = self + .runner_tx + .take() + .map(Into::into) + .expect("Can't fail since this can only be called once"); + (reporter, async move { self.aggregate().await }) + } + + async fn aggregate(mut self) -> Result<()> { + debug!("Starting to aggregate report"); + + while let Some(event) = self.runner_rx.recv().await { + debug!(?event, "Received Event"); + match event { + RunnerEvent::SubscribeToEvents(event) => { + self.handle_subscribe_to_events_event(*event); + } + RunnerEvent::CorpusFileDiscovery(event) => { + self.handle_corpus_file_discovered_event(*event) + } + RunnerEvent::MetadataFileDiscovery(event) => { + self.handle_metadata_file_discovery_event(*event); + } + RunnerEvent::TestCaseDiscovery(event) => { + self.handle_test_case_discovery(*event); + } + RunnerEvent::TestSucceeded(event) => { + self.handle_test_succeeded_event(*event); + } + RunnerEvent::TestFailed(event) => { + self.handle_test_failed_event(*event); + } + RunnerEvent::TestIgnored(event) => { + self.handle_test_ignored_event(*event); + } + RunnerEvent::LeaderNodeAssigned(event) => { + self.handle_leader_node_assigned_event(*event); + } + RunnerEvent::FollowerNodeAssigned(event) => { + self.handle_follower_node_assigned_event(*event); + } + RunnerEvent::PreLinkContractsCompilationSucceeded(event) => { + self.handle_pre_link_contracts_compilation_succeeded_event(*event) + } + RunnerEvent::PostLinkContractsCompilationSucceeded(event) => { + self.handle_post_link_contracts_compilation_succeeded_event(*event) + } + RunnerEvent::PreLinkContractsCompilationFailed(event) => { + self.handle_pre_link_contracts_compilation_failed_event(*event) + } + RunnerEvent::PostLinkContractsCompilationFailed(event) => { + self.handle_post_link_contracts_compilation_failed_event(*event) + } + RunnerEvent::LibrariesDeployed(event) => { + self.handle_libraries_deployed_event(*event); + } + RunnerEvent::ContractDeployed(event) => { + self.handle_contract_deployed_event(*event); + } + } + } + debug!("Report aggregation completed"); + + let file_name = { + let current_timestamp = SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs(); + let mut file_name = current_timestamp.to_string(); + file_name.push_str(".json"); + file_name + }; + let file_path = self.report.config.directory().join(file_name); + let 
file = OpenOptions::new() + .create(true) + .write(true) + .truncate(true) + .read(false) + .open(file_path)?; + serde_json::to_writer_pretty(file, &self.report)?; + + Ok(()) + } + + fn handle_subscribe_to_events_event(&self, event: SubscribeToEventsEvent) { + let _ = event.tx.send(self.listener_tx.subscribe()); + } + + fn handle_corpus_file_discovered_event(&mut self, event: CorpusFileDiscoveryEvent) { + self.report.corpora.push(event.corpus); + } + + fn handle_metadata_file_discovery_event(&mut self, event: MetadataFileDiscoveryEvent) { + self.report.metadata_files.insert(event.path.clone()); + } + + fn handle_test_case_discovery(&mut self, event: TestCaseDiscoveryEvent) { + self.remaining_cases + .entry(event.test_specifier.metadata_file_path.clone().into()) + .or_default() + .entry(event.test_specifier.solc_mode.clone()) + .or_default() + .insert(event.test_specifier.case_idx); + } + + fn handle_test_succeeded_event(&mut self, event: TestSucceededEvent) { + // Remove this from the set of cases we're tracking since it has completed. + self.remaining_cases + .entry(event.test_specifier.metadata_file_path.clone().into()) + .or_default() + .entry(event.test_specifier.solc_mode.clone()) + .or_default() + .remove(&event.test_specifier.case_idx); + + // Record in the report that the case succeeded. + let test_case_report = self.test_case_report(&event.test_specifier); + test_case_report.status = Some(TestCaseStatus::Succeeded { + steps_executed: event.steps_executed, + }); + self.handle_post_test_case_status_update(&event.test_specifier); + } + + fn handle_test_failed_event(&mut self, event: TestFailedEvent) { + // Remove this from the set of cases we're tracking since it has completed. + self.remaining_cases + .entry(event.test_specifier.metadata_file_path.clone().into()) + .or_default() + .entry(event.test_specifier.solc_mode.clone()) + .or_default() + .remove(&event.test_specifier.case_idx); + + // Record in the report that the case failed. + let test_case_report = self.test_case_report(&event.test_specifier); + test_case_report.status = Some(TestCaseStatus::Failed { + reason: event.reason, + }); + self.handle_post_test_case_status_update(&event.test_specifier); + } + + fn handle_test_ignored_event(&mut self, event: TestIgnoredEvent) { + // Remove this from the set of cases we're tracking since it has completed. + self.remaining_cases + .entry(event.test_specifier.metadata_file_path.clone().into()) + .or_default() + .entry(event.test_specifier.solc_mode.clone()) + .or_default() + .remove(&event.test_specifier.case_idx); + + // Record in the report that the case was ignored.
+ let test_case_report = self.test_case_report(&event.test_specifier); + test_case_report.status = Some(TestCaseStatus::Ignored { + reason: event.reason, + additional_fields: event.additional_fields, + }); + self.handle_post_test_case_status_update(&event.test_specifier); + } + + fn handle_post_test_case_status_update(&mut self, specifier: &TestSpecifier) { + let remaining_cases = self + .remaining_cases + .entry(specifier.metadata_file_path.clone().into()) + .or_default() + .entry(specifier.solc_mode.clone()) + .or_default(); + if !remaining_cases.is_empty() { + return; + } + + let case_status = self + .report + .test_case_information + .entry(specifier.metadata_file_path.clone().into()) + .or_default() + .entry(specifier.solc_mode.clone()) + .or_default() + .iter() + .map(|(case_idx, case_report)| { + ( + *case_idx, + case_report.status.clone().expect("Can't be uninitialized"), + ) + }) + .collect::>(); + let event = ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted { + metadata_file_path: specifier.metadata_file_path.clone().into(), + mode: specifier.solc_mode.clone(), + case_status, + }; + + // According to the documentation on send, the sending fails if there are no more receiver + // handles. Therefore, this isn't an error that we want to bubble up or anything. If we fail + // to send then we ignore the error. + let _ = self.listener_tx.send(event); + } + + fn handle_leader_node_assigned_event(&mut self, event: LeaderNodeAssignedEvent) { + let execution_information = self.execution_information(&ExecutionSpecifier { + test_specifier: event.test_specifier, + node_id: event.id, + node_designation: NodeDesignation::Leader, + }); + execution_information.node = Some(TestCaseNodeInformation { + id: event.id, + platform: event.platform, + connection_string: event.connection_string, + }); + } + + fn handle_follower_node_assigned_event(&mut self, event: FollowerNodeAssignedEvent) { + let execution_information = self.execution_information(&ExecutionSpecifier { + test_specifier: event.test_specifier, + node_id: event.id, + node_designation: NodeDesignation::Follower, + }); + execution_information.node = Some(TestCaseNodeInformation { + id: event.id, + platform: event.platform, + connection_string: event.connection_string, + }); + } + + fn handle_pre_link_contracts_compilation_succeeded_event( + &mut self, + event: PreLinkContractsCompilationSucceededEvent, + ) { + let include_input = self.report.config.report_include_compiler_input; + let include_output = self.report.config.report_include_compiler_output; + + let execution_information = self.execution_information(&event.execution_specifier); + + let compiler_input = if include_input { + event.compiler_input + } else { + None + }; + let compiler_output = if include_output { + Some(event.compiler_output) + } else { + None + }; + + execution_information.pre_link_compilation_status = Some(CompilationStatus::Success { + is_cached: event.is_cached, + compiler_version: event.compiler_version, + compiler_path: event.compiler_path, + compiler_input, + compiler_output, + }); + } + + fn handle_post_link_contracts_compilation_succeeded_event( + &mut self, + event: PostLinkContractsCompilationSucceededEvent, + ) { + let include_input = self.report.config.report_include_compiler_input; + let include_output = self.report.config.report_include_compiler_output; + + let execution_information = self.execution_information(&event.execution_specifier); + + let compiler_input = if include_input { + event.compiler_input + } else { + None + }; + let 
compiler_output = if include_output { + Some(event.compiler_output) + } else { + None + }; + + execution_information.post_link_compilation_status = Some(CompilationStatus::Success { + is_cached: event.is_cached, + compiler_version: event.compiler_version, + compiler_path: event.compiler_path, + compiler_input, + compiler_output, + }); + } + + fn handle_pre_link_contracts_compilation_failed_event( + &mut self, + event: PreLinkContractsCompilationFailedEvent, + ) { + let include_input = self.report.config.report_include_compiler_input; + + let execution_information = self.execution_information(&event.execution_specifier); + + let compiler_input = if include_input { + event.compiler_input + } else { + None + }; + + execution_information.pre_link_compilation_status = Some(CompilationStatus::Failure { + reason: event.reason, + compiler_version: event.compiler_version, + compiler_path: event.compiler_path, + compiler_input, + }); + } + + fn handle_post_link_contracts_compilation_failed_event( + &mut self, + event: PostLinkContractsCompilationFailedEvent, + ) { + let include_input = self.report.config.report_include_compiler_input; + + let execution_information = self.execution_information(&event.execution_specifier); + + let compiler_input = if include_input { + event.compiler_input + } else { + None + }; + + execution_information.post_link_compilation_status = Some(CompilationStatus::Failure { + reason: event.reason, + compiler_version: event.compiler_version, + compiler_path: event.compiler_path, + compiler_input, + }); + } + + fn handle_libraries_deployed_event(&mut self, event: LibrariesDeployedEvent) { + self.execution_information(&event.execution_specifier) + .deployed_libraries = Some(event.libraries); + } + + fn handle_contract_deployed_event(&mut self, event: ContractDeployedEvent) { + self.execution_information(&event.execution_specifier) + .deployed_contracts + .get_or_insert_default() + .insert(event.contract_instance, event.address); + } + + fn test_case_report(&mut self, specifier: &TestSpecifier) -> &mut TestCaseReport { + self.report + .test_case_information + .entry(specifier.metadata_file_path.clone().into()) + .or_default() + .entry(specifier.solc_mode.clone()) + .or_default() + .entry(specifier.case_idx) + .or_default() + } + + fn execution_information( + &mut self, + specifier: &ExecutionSpecifier, + ) -> &mut ExecutionInformation { + let test_case_report = self.test_case_report(&specifier.test_specifier); + match specifier.node_designation { + NodeDesignation::Leader => test_case_report + .leader_execution_information + .get_or_insert_default(), + NodeDesignation::Follower => test_case_report + .follower_execution_information + .get_or_insert_default(), + } + } +} + +#[serde_as] +#[derive(Clone, Debug, Serialize)] +pub struct Report { + /// The configuration that the tool was started up with. + pub config: Arguments, + /// The platform of the leader chain. + pub leader_platform: TestingPlatform, + /// The platform of the follower chain. + pub follower_platform: TestingPlatform, + /// The list of corpus files that the tool found. + pub corpora: Vec, + /// The list of metadata files that were found by the tool. + pub metadata_files: BTreeSet, + /// Information relating to each test case. 
+ #[serde_as(as = "BTreeMap<_, HashMap>>")] + pub test_case_information: + BTreeMap>>, +} + +impl Report { + pub fn new(config: Arguments) -> Self { + Self { + leader_platform: config.leader, + follower_platform: config.follower, + config, + corpora: Default::default(), + metadata_files: Default::default(), + test_case_information: Default::default(), + } + } +} + +#[derive(Clone, Debug, Serialize, Default)] +pub struct TestCaseReport { + /// Information on the status of the test case and whether it succeeded, failed, or was ignored. + #[serde(skip_serializing_if = "Option::is_none")] + pub status: Option, + /// Information related to the execution on the leader. + #[serde(skip_serializing_if = "Option::is_none")] + pub leader_execution_information: Option, + /// Information related to the execution on the follower. + #[serde(skip_serializing_if = "Option::is_none")] + pub follower_execution_information: Option, +} + +/// Information related to the status of the test. Could be that the test succeeded, failed, or that +/// it was ignored. +#[derive(Clone, Debug, Serialize)] +#[serde(tag = "status")] +pub enum TestCaseStatus { + /// The test case succeeded. + Succeeded { + /// The number of steps of the case that were executed. + steps_executed: usize, + }, + /// The test case failed. + Failed { + /// The reason for the failure of the test case. + reason: String, + }, + /// The test case was ignored. This variant carries information related to why it was ignored. + Ignored { + /// The reason behind the test case being ignored. + reason: String, + /// Additional fields that describe more information on why the test case is ignored. + #[serde(flatten)] + additional_fields: IndexMap, + }, +} + +/// Information related to the leader or follower node that's being used to execute the step. +#[derive(Clone, Debug, Serialize)] +pub struct TestCaseNodeInformation { + /// The ID of the node that this case is being executed on. + pub id: usize, + /// The platform of the node. + pub platform: TestingPlatform, + /// The connection string of the node. + pub connection_string: String, +} + +/// Execution information tied to the leader or the follower. +#[derive(Clone, Debug, Default, Serialize)] +pub struct ExecutionInformation { + /// Information related to the node assigned to this test case. + #[serde(skip_serializing_if = "Option::is_none")] + pub node: Option, + /// Information on the pre-link compiled contracts. + #[serde(skip_serializing_if = "Option::is_none")] + pub pre_link_compilation_status: Option, + /// Information on the post-link compiled contracts. + #[serde(skip_serializing_if = "Option::is_none")] + pub post_link_compilation_status: Option, + /// Information on the deployed libraries. + #[serde(skip_serializing_if = "Option::is_none")] + pub deployed_libraries: Option>, + /// Information on the deployed contracts. + #[serde(skip_serializing_if = "Option::is_none")] + pub deployed_contracts: Option>, +} + +/// Information related to compilation +#[derive(Clone, Debug, Serialize)] +#[serde(tag = "status")] +pub enum CompilationStatus { + /// The compilation was successful. + Success { + /// A flag with information on whether the compilation artifacts were cached or not. + is_cached: bool, + /// The version of the compiler used to compile the contracts. + compiler_version: Version, + /// The path of the compiler used to compile the contracts. + compiler_path: PathBuf, + /// The input provided to the compiler to compile the contracts. 
This is only included if + /// the appropriate flag is set in the CLI configuration and if the contracts were not + /// cached and the compiler was invoked. + #[serde(skip_serializing_if = "Option::is_none")] + compiler_input: Option, + /// The output of the compiler. This is only included if the appropriate flag is set in the + /// CLI configurations. + #[serde(skip_serializing_if = "Option::is_none")] + compiler_output: Option, + }, + /// The compilation failed. + Failure { + /// The failure reason. + reason: String, + /// The version of the compiler used to compile the contracts. + #[serde(skip_serializing_if = "Option::is_none")] + compiler_version: Option, + /// The path of the compiler used to compile the contracts. + #[serde(skip_serializing_if = "Option::is_none")] + compiler_path: Option, + /// The input provided to the compiler to compile the contracts. This is only included if + /// the appropriate flag is set in the CLI configuration and if the contracts were not + /// cached and the compiler was invoked. + #[serde(skip_serializing_if = "Option::is_none")] + compiler_input: Option, + }, +} diff --git a/crates/report/src/analyzer.rs b/crates/report/src/analyzer.rs deleted file mode 100644 index 52fd360..0000000 --- a/crates/report/src/analyzer.rs +++ /dev/null @@ -1,81 +0,0 @@ -//! The report analyzer enriches the raw report data. - -use revive_dt_compiler::CompilerOutput; -use serde::{Deserialize, Serialize}; - -use crate::reporter::CompilationTask; - -/// Provides insights into how well the compilers perform. -#[derive(Clone, Default, Debug, Serialize, Deserialize, PartialEq, PartialOrd)] -pub struct CompilerStatistics { - /// The sum of contracts observed. - pub n_contracts: usize, - /// The mean size of compiled contracts. - pub mean_code_size: usize, - /// The mean size of the optimized YUL IR. - pub mean_yul_size: usize, - /// Is a proxy because the YUL also contains a lot of comments. - pub yul_to_bytecode_size_ratio: f32, -} - -impl CompilerStatistics { - /// Cumulatively update the statistics with the next compiler task. - pub fn sample(&mut self, compilation_task: &CompilationTask) { - let Some(CompilerOutput { contracts }) = &compilation_task.json_output else { - return; - }; - - for (_solidity, contracts) in contracts.iter() { - for (_name, (bytecode, _)) in contracts.iter() { - // The EVM bytecode can be unlinked and thus is not necessarily a decodable hex - // string; for our statistics this is a good enough approximation. - let bytecode_size = bytecode.len() / 2; - - // TODO: for the time being we set the yul_size to be zero. We need to change this - // when we overhaul the reporting. - - self.update_sizes(bytecode_size, 0); - } - } - } - - /// Updates the size statistics cumulatively. 
- fn update_sizes(&mut self, bytecode_size: usize, yul_size: usize) { - let n_previous = self.n_contracts; - let n_current = self.n_contracts + 1; - - self.n_contracts = n_current; - - self.mean_code_size = (n_previous * self.mean_code_size + bytecode_size) / n_current; - self.mean_yul_size = (n_previous * self.mean_yul_size + yul_size) / n_current; - - if self.mean_code_size > 0 { - self.yul_to_bytecode_size_ratio = - self.mean_yul_size as f32 / self.mean_code_size as f32; - } - } -} - -#[cfg(test)] -mod tests { - use super::CompilerStatistics; - - #[test] - fn compiler_statistics() { - let mut received = CompilerStatistics::default(); - received.update_sizes(0, 0); - received.update_sizes(3, 37); - received.update_sizes(123, 456); - - let mean_code_size = 41; // rounding error from integer truncation - let mean_yul_size = 164; - let expected = CompilerStatistics { - n_contracts: 3, - mean_code_size, - mean_yul_size, - yul_to_bytecode_size_ratio: mean_yul_size as f32 / mean_code_size as f32, - }; - - assert_eq!(received, expected); - } -} diff --git a/crates/report/src/common.rs b/crates/report/src/common.rs new file mode 100644 index 0000000..5b6e3f1 --- /dev/null +++ b/crates/report/src/common.rs @@ -0,0 +1,43 @@ +//! Common types and functions used throughout the crate. + +use std::{path::PathBuf, sync::Arc}; + +use revive_dt_common::define_wrapper_type; +use revive_dt_compiler::Mode; +use revive_dt_format::{case::CaseIdx, input::StepIdx}; +use serde::{Deserialize, Serialize}; + +define_wrapper_type!( + #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] + #[serde(transparent)] + pub struct MetadataFilePath(PathBuf); +); + +/// An absolute specifier for a test. +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct TestSpecifier { + pub solc_mode: Mode, + pub metadata_file_path: PathBuf, + pub case_idx: CaseIdx, +} + +/// An absolute path for a test that also includes information about the node that it's assigned to +/// and whether it's the leader or follower. +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct ExecutionSpecifier { + pub test_specifier: Arc, + pub node_id: usize, + pub node_designation: NodeDesignation, +} + +#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] +pub enum NodeDesignation { + Leader, + Follower, +} + +#[derive(Clone, Debug, PartialEq, Eq, Hash)] +pub struct StepExecutionSpecifier { + pub execution_specifier: Arc, + pub step_idx: StepIdx, +} diff --git a/crates/report/src/lib.rs b/crates/report/src/lib.rs index 04ceeed..7a6aa78 100644 --- a/crates/report/src/lib.rs +++ b/crates/report/src/lib.rs @@ -1,4 +1,11 @@ -//! The revive differential tests reporting facility. +//! This crate implements the reporting infrastructure for the differential testing tool. -pub mod analyzer; -pub mod reporter; +mod aggregator; +mod common; +mod reporter_event; +mod runner_event; + +pub use aggregator::*; +pub use common::*; +pub use reporter_event::*; +pub use runner_event::*; diff --git a/crates/report/src/reporter.rs b/crates/report/src/reporter.rs deleted file mode 100644 index 5313ac7..0000000 --- a/crates/report/src/reporter.rs +++ /dev/null @@ -1,234 +0,0 @@ -//! The reporter is the central place observing test execution by collecting data. -//! -//! The data collected gives useful insights into the outcome of the test run -//! and helps identifying and reproducing failing cases. 
- -use std::{ - collections::HashMap, - fs::{self, File, create_dir_all}, - path::PathBuf, - sync::{Mutex, OnceLock}, - time::{SystemTime, UNIX_EPOCH}, -}; - -use anyhow::Context; -use serde::Serialize; - -use revive_dt_common::types::Mode; -use revive_dt_compiler::{CompilerInput, CompilerOutput}; -use revive_dt_config::{Arguments, TestingPlatform}; -use revive_dt_format::corpus::Corpus; - -use crate::analyzer::CompilerStatistics; - -pub(crate) static REPORTER: OnceLock> = OnceLock::new(); - -/// The `Report` datastructure stores all relevant inforamtion required for generating reports. -#[derive(Clone, Debug, Default, Serialize)] -pub struct Report { - /// The configuration used during the test. - pub config: Arguments, - /// The observed test corpora. - pub corpora: Vec, - /// The observed test definitions. - pub metadata_files: Vec, - /// The observed compilation results. - pub compiler_results: HashMap>, - /// The observed compilation statistics. - pub compiler_statistics: HashMap, - /// The file name this is serialized to. - #[serde(skip)] - directory: PathBuf, -} - -/// Contains a compiled contract. -#[derive(Clone, Debug, Serialize)] -pub struct CompilationTask { - /// The observed compiler input. - pub json_input: CompilerInput, - /// The observed compiler output. - pub json_output: Option, - /// The observed compiler mode. - pub mode: Mode, - /// The observed compiler version. - pub compiler_version: String, - /// The observed error, if any. - pub error: Option, -} - -/// Represents a report about a compilation task. -#[derive(Clone, Debug, Serialize)] -pub struct CompilationResult { - /// The observed compilation task. - pub compilation_task: CompilationTask, - /// The linked span. - pub span: Span, -} - -/// The [Span] struct indicates the context of what is being reported. -#[derive(Clone, Copy, Debug, Serialize)] -pub struct Span { - /// The corpus index this belongs to. - corpus: usize, - /// The metadata file this belongs to. - metadata_file: usize, - /// The index of the case definition this belongs to. - case: usize, - /// The index of the case input this belongs to. - input: usize, -} - -impl Report { - /// The file name where this report will be written to. - pub const FILE_NAME: &str = "report.json"; - - /// The [Span] is expected to initialize the reporter by providing the config. - const INITIALIZED_VIA_SPAN: &str = "requires a Span which initializes the reporter"; - - /// Create a new [Report]. - fn new(config: Arguments) -> anyhow::Result { - let now = SystemTime::now() - .duration_since(UNIX_EPOCH) - .unwrap() - .as_millis(); - - let directory = config.directory().join("report").join(format!("{now}")); - if !directory.exists() { - create_dir_all(&directory)?; - } - - Ok(Self { - config, - directory, - ..Default::default() - }) - } - - /// Add a compilation task to the report. - pub fn compilation(span: Span, platform: TestingPlatform, compilation_task: CompilationTask) { - let mut report = REPORTER - .get() - .expect(Report::INITIALIZED_VIA_SPAN) - .lock() - .unwrap(); - - report - .compiler_statistics - .entry(platform) - .or_default() - .sample(&compilation_task); - - report - .compiler_results - .entry(platform) - .or_default() - .push(CompilationResult { - compilation_task, - span, - }); - } - - /// Write the report to disk. 
- pub fn save() -> anyhow::Result<()> { - let Some(reporter) = REPORTER.get() else { - return Ok(()); - }; - let report = reporter.lock().unwrap(); - - if let Err(error) = report.write_to_file() { - anyhow::bail!("can not write report: {error}"); - } - - if report.config.extract_problems { - if let Err(error) = report.save_compiler_problems() { - anyhow::bail!("can not write compiler problems: {error}"); - } - } - - Ok(()) - } - - /// Write compiler problems to disk for later debugging. - pub fn save_compiler_problems(&self) -> anyhow::Result<()> { - for (platform, results) in self.compiler_results.iter() { - for result in results { - // ignore if there were no errors - if result.compilation_task.error.is_none() { - continue; - } - - let path = &self.metadata_files[result.span.metadata_file] - .parent() - .unwrap() - .join(format!("{platform}_errors")); - if !path.exists() { - create_dir_all(path)?; - } - - if let Some(error) = result.compilation_task.error.as_ref() { - fs::write(path.join("compiler_error.txt"), error)?; - } - - if let Some(errors) = result.compilation_task.json_output.as_ref() { - let file = File::create(path.join("compiler_output.txt"))?; - serde_json::to_writer_pretty(file, &errors)?; - } - } - } - - Ok(()) - } - - fn write_to_file(&self) -> anyhow::Result<()> { - let path = self.directory.join(Self::FILE_NAME); - - let file = File::create(&path).context(path.display().to_string())?; - serde_json::to_writer_pretty(file, &self)?; - - Ok(()) - } -} - -impl Span { - /// Create a new [Span] with case and input index at 0. - /// - /// Initializes the reporting facility on the first call. - pub fn new(corpus: Corpus, config: Arguments) -> anyhow::Result { - let report = Mutex::new(Report::new(config)?); - let mut reporter = REPORTER.get_or_init(|| report).lock().unwrap(); - reporter.corpora.push(corpus); - - Ok(Self { - corpus: reporter.corpora.len() - 1, - metadata_file: 0, - case: 0, - input: 0, - }) - } - - /// Advance to the next metadata file: Resets the case input index to 0. - pub fn next_metadata(&mut self, metadata_file: PathBuf) { - let mut reporter = REPORTER - .get() - .expect(Report::INITIALIZED_VIA_SPAN) - .lock() - .unwrap(); - - reporter.metadata_files.push(metadata_file); - - self.metadata_file = reporter.metadata_files.len() - 1; - self.case = 0; - self.input = 0; - } - - /// Advance to the next case: Increas the case index by one and resets the input index to 0. - pub fn next_case(&mut self) { - self.case += 1; - self.input = 0; - } - - /// Advance to the next input. - pub fn next_input(&mut self) { - self.input += 1; - } -} diff --git a/crates/report/src/reporter_event.rs b/crates/report/src/reporter_event.rs new file mode 100644 index 0000000..0211e64 --- /dev/null +++ b/crates/report/src/reporter_event.rs @@ -0,0 +1,22 @@ +//! A reporter event sent by the report aggregator to the various listeners. + +use std::collections::BTreeMap; + +use revive_dt_compiler::Mode; +use revive_dt_format::case::CaseIdx; + +use crate::{MetadataFilePath, TestCaseStatus}; + +#[derive(Clone, Debug)] +pub enum ReporterEvent { + /// An event sent by the reporter once an entire metadata file and solc mode combination has + /// finished execution. + MetadataFileSolcModeCombinationExecutionCompleted { + /// The path of the metadata file. + metadata_file_path: MetadataFilePath, + /// The Solc mode that this metadata file was executed in. + mode: Mode, + /// The status of each one of the cases. 
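+ /// The map is keyed by the index of the case within the metadata file.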
+ case_status: BTreeMap<CaseIdx, TestCaseStatus>,
+ },
+}
diff --git a/crates/report/src/runner_event.rs b/crates/report/src/runner_event.rs
new file mode 100644
index 0000000..ddb67f9
--- /dev/null
+++ b/crates/report/src/runner_event.rs
@@ -0,0 +1,640 @@
+//! The types associated with the events sent by the runner to the reporter.
+#![allow(dead_code)]
+
+use std::{collections::BTreeMap, path::PathBuf, sync::Arc};
+
+use alloy_primitives::Address;
+use indexmap::IndexMap;
+use revive_dt_compiler::{CompilerInput, CompilerOutput};
+use revive_dt_config::TestingPlatform;
+use revive_dt_format::metadata::Metadata;
+use revive_dt_format::{corpus::Corpus, metadata::ContractInstance};
+use semver::Version;
+use tokio::sync::{broadcast, oneshot};
+
+use crate::{ExecutionSpecifier, ReporterEvent, TestSpecifier, common::MetadataFilePath};
+
+macro_rules! __report_gen_emit_test_specific {
+ (
+ $ident:ident,
+ $variant_ident:ident,
+ $skip_field:ident;
+ $( $bname:ident : $bty:ty, )*
+ ;
+ $( $aname:ident : $aty:ty, )*
+ ) => {
+ paste::paste! {
+ pub fn [< report_ $variant_ident:snake _event >](
+ &self
+ $(, $bname: impl Into<$bty> )*
+ $(, $aname: impl Into<$aty> )*
+ ) -> anyhow::Result<()> {
+ self.report([< $variant_ident Event >] {
+ $skip_field: self.test_specifier.clone()
+ $(, $bname: $bname.into() )*
+ $(, $aname: $aname.into() )*
+ })
+ }
+ }
+ };
+}
+
+macro_rules! __report_gen_emit_test_specific_by_parse {
+ (
+ $ident:ident,
+ $variant_ident:ident,
+ $skip_field:ident;
+ $( $bname:ident : $bty:ty, )* ; $( $aname:ident : $aty:ty, )*
+ ) => {
+ __report_gen_emit_test_specific!(
+ $ident, $variant_ident, $skip_field;
+ $( $bname : $bty, )* ; $( $aname : $aty, )*
+ );
+ };
+}
+
+macro_rules! __report_gen_scan_before {
+ (
+ $ident:ident, $variant_ident:ident;
+ $( $before:ident : $bty:ty, )*
+ ;
+ test_specifier : $skip_ty:ty,
+ $( $after:ident : $aty:ty, )*
+ ;
+ ) => {
+ __report_gen_emit_test_specific_by_parse!(
+ $ident, $variant_ident, test_specifier;
+ $( $before : $bty, )* ; $( $after : $aty, )*
+ );
+ };
+ (
+ $ident:ident, $variant_ident:ident;
+ $( $before:ident : $bty:ty, )*
+ ;
+ $name:ident : $ty:ty, $( $after:ident : $aty:ty, )*
+ ;
+ ) => {
+ __report_gen_scan_before!(
+ $ident, $variant_ident;
+ $( $before : $bty, )* $name : $ty,
+ ;
+ $( $after : $aty, )*
+ ;
+ );
+ };
+ (
+ $ident:ident, $variant_ident:ident;
+ $( $before:ident : $bty:ty, )*
+ ;
+ ;
+ ) => {};
+}
+
+macro_rules! __report_gen_for_variant {
+ (
+ $ident:ident,
+ $variant_ident:ident;
+ ) => {};
+ (
+ $ident:ident,
+ $variant_ident:ident;
+ $( $field_ident:ident : $field_ty:ty ),+ $(,)?
+ ) => {
+ __report_gen_scan_before!(
+ $ident, $variant_ident;
+ ;
+ $( $field_ident : $field_ty, )*
+ ;
+ );
+ };
+}
+
+macro_rules! __report_gen_emit_execution_specific {
+ (
+ $ident:ident,
+ $variant_ident:ident,
+ $skip_field:ident;
+ $( $bname:ident : $bty:ty, )*
+ ;
+ $( $aname:ident : $aty:ty, )*
+ ) => {
+ paste::paste! {
+ pub fn [< report_ $variant_ident:snake _event >](
+ &self
+ $(, $bname: impl Into<$bty> )*
+ $(, $aname: impl Into<$aty> )*
+ ) -> anyhow::Result<()> {
+ self.report([< $variant_ident Event >] {
+ $skip_field: self.execution_specifier.clone()
+ $(, $bname: $bname.into() )*
+ $(, $aname: $aname.into() )*
+ })
+ }
+ }
+ };
+}
+
+macro_rules!
__report_gen_emit_execution_specific_by_parse { + ( + $ident:ident, + $variant_ident:ident, + $skip_field:ident; + $( $bname:ident : $bty:ty, )* ; $( $aname:ident : $aty:ty, )* + ) => { + __report_gen_emit_execution_specific!( + $ident, $variant_ident, $skip_field; + $( $bname : $bty, )* ; $( $aname : $aty, )* + ); + }; +} + +macro_rules! __report_gen_scan_before_exec { + ( + $ident:ident, $variant_ident:ident; + $( $before:ident : $bty:ty, )* + ; + execution_specifier : $skip_ty:ty, + $( $after:ident : $aty:ty, )* + ; + ) => { + __report_gen_emit_execution_specific_by_parse!( + $ident, $variant_ident, execution_specifier; + $( $before : $bty, )* ; $( $after : $aty, )* + ); + }; + ( + $ident:ident, $variant_ident:ident; + $( $before:ident : $bty:ty, )* + ; + $name:ident : $ty:ty, $( $after:ident : $aty:ty, )* + ; + ) => { + __report_gen_scan_before_exec!( + $ident, $variant_ident; + $( $before : $bty, )* $name : $ty, + ; + $( $after : $aty, )* + ; + ); + }; + ( + $ident:ident, $variant_ident:ident; + $( $before:ident : $bty:ty, )* + ; + ; + ) => {}; +} + +macro_rules! __report_gen_for_variant_exec { + ( + $ident:ident, + $variant_ident:ident; + ) => {}; + ( + $ident:ident, + $variant_ident:ident; + $( $field_ident:ident : $field_ty:ty ),+ $(,)? + ) => { + __report_gen_scan_before_exec!( + $ident, $variant_ident; + ; + $( $field_ident : $field_ty, )* + ; + ); + }; +} + +macro_rules! __report_gen_emit_step_execution_specific { + ( + $ident:ident, + $variant_ident:ident, + $skip_field:ident; + $( $bname:ident : $bty:ty, )* + ; + $( $aname:ident : $aty:ty, )* + ) => { + paste::paste! { + pub fn [< report_ $variant_ident:snake _event >]( + &self + $(, $bname: impl Into<$bty> )* + $(, $aname: impl Into<$aty> )* + ) -> anyhow::Result<()> { + self.report([< $variant_ident Event >] { + $skip_field: self.step_specifier.clone() + $(, $bname: $bname.into() )* + $(, $aname: $aname.into() )* + }) + } + } + }; +} + +macro_rules! __report_gen_emit_step_execution_specific_by_parse { + ( + $ident:ident, + $variant_ident:ident, + $skip_field:ident; + $( $bname:ident : $bty:ty, )* ; $( $aname:ident : $aty:ty, )* + ) => { + __report_gen_emit_step_execution_specific!( + $ident, $variant_ident, $skip_field; + $( $bname : $bty, )* ; $( $aname : $aty, )* + ); + }; +} + +macro_rules! __report_gen_scan_before_step { + ( + $ident:ident, $variant_ident:ident; + $( $before:ident : $bty:ty, )* + ; + step_specifier : $skip_ty:ty, + $( $after:ident : $aty:ty, )* + ; + ) => { + __report_gen_emit_step_execution_specific_by_parse!( + $ident, $variant_ident, step_specifier; + $( $before : $bty, )* ; $( $after : $aty, )* + ); + }; + ( + $ident:ident, $variant_ident:ident; + $( $before:ident : $bty:ty, )* + ; + $name:ident : $ty:ty, $( $after:ident : $aty:ty, )* + ; + ) => { + __report_gen_scan_before_step!( + $ident, $variant_ident; + $( $before : $bty, )* $name : $ty, + ; + $( $after : $aty, )* + ; + ); + }; + ( + $ident:ident, $variant_ident:ident; + $( $before:ident : $bty:ty, )* + ; + ; + ) => {}; +} + +macro_rules! __report_gen_for_variant_step { + ( + $ident:ident, + $variant_ident:ident; + ) => {}; + ( + $ident:ident, + $variant_ident:ident; + $( $field_ident:ident : $field_ty:ty ),+ $(,)? + ) => { + __report_gen_scan_before_step!( + $ident, $variant_ident; + ; + $( $field_ident : $field_ty, )* + ; + ); + }; +} + +/// Defines the runner-event which is sent from the test runners to the report aggregator. +/// +/// This macro defines a number of things related to the reporting infrastructure and the interface +/// used. 
First of all, it defines the enum of all of the possible events that the runners can send +/// to the aggregator. For each one of the variants it defines a separate struct for it to allow the +/// variant field in the enum to be put in a [`Box`]. +/// +/// In addition to the above, it defines [`From`] implementations for the various event types for +/// the [`RunnerEvent`] enum essentially allowing for events such as [`CorpusFileDiscoveryEvent`] to +/// be converted into a [`RunnerEvent`]. +/// +/// In addition to the above, it also defines the [`RunnerEventReporter`] which is a wrapper around +/// an [`UnboundedSender`] allowing for events to be sent to the report aggregator. +/// +/// With the above description, we can see that this macro defines almost all of the interface of +/// the reporting infrastructure, from the enum itself, to its associated types, and also to the +/// reporter that's used to report events to the aggregator. +/// +/// [`UnboundedSender`]: tokio::sync::mpsc::UnboundedSender +macro_rules! define_event { + ( + $(#[$enum_meta: meta])* + $vis: vis enum $ident: ident { + $( + $(#[$variant_meta: meta])* + $variant_ident: ident { + $( + $(#[$field_meta: meta])* + $field_ident: ident: $field_ty: ty + ),* $(,)? + } + ),* $(,)? + } + ) => { + paste::paste! { + $(#[$enum_meta])* + #[derive(Debug)] + $vis enum $ident { + $( + $(#[$variant_meta])* + $variant_ident(Box<[<$variant_ident Event>]>) + ),* + } + + $( + #[derive(Debug)] + $(#[$variant_meta])* + $vis struct [<$variant_ident Event>] { + $( + $(#[$field_meta])* + $vis $field_ident: $field_ty + ),* + } + )* + + $( + impl From<[<$variant_ident Event>]> for $ident { + fn from(value: [<$variant_ident Event>]) -> Self { + Self::$variant_ident(Box::new(value)) + } + } + )* + + /// Provides a way to report events to the aggregator. + /// + /// Under the hood, this is a wrapper around an [`UnboundedSender`] which abstracts away + /// the fact that channels are used and that implements high-level methods for reporting + /// various events to the aggregator. + #[derive(Clone, Debug)] + pub struct [< $ident Reporter >]($vis tokio::sync::mpsc::UnboundedSender<$ident>); + + impl From> for [< $ident Reporter >] { + fn from(value: tokio::sync::mpsc::UnboundedSender<$ident>) -> Self { + Self(value) + } + } + + impl [< $ident Reporter >] { + pub fn test_specific_reporter( + &self, + test_specifier: impl Into> + ) -> [< $ident TestSpecificReporter >] { + [< $ident TestSpecificReporter >] { + reporter: self.clone(), + test_specifier: test_specifier.into(), + } + } + + fn report(&self, event: impl Into<$ident>) -> anyhow::Result<()> { + self.0.send(event.into()).map_err(Into::into) + } + + $( + pub fn [< report_ $variant_ident:snake _event >](&self, $($field_ident: impl Into<$field_ty>),*) -> anyhow::Result<()> { + self.report([< $variant_ident Event >] { + $($field_ident: $field_ident.into()),* + }) + } + )* + } + + /// A reporter that's tied to a specific test case. 
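+ ///
+ /// A sketch of the intended usage (the surrounding runner wiring is assumed): a runner
+ /// narrows the base reporter down once a test is discovered, then narrows it again once
+ /// a node is assigned, so every event it emits carries the right specifier:
+ ///
+ /// ```ignore
+ /// let test_reporter = reporter.test_specific_reporter(test_specifier);
+ /// let exec_reporter =
+ ///     test_reporter.execution_specific_reporter(node_id, NodeDesignation::Leader);
+ /// ```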
+ #[derive(Clone, Debug)]
+ pub struct [< $ident TestSpecificReporter >] {
+ $vis reporter: [< $ident Reporter >],
+ $vis test_specifier: std::sync::Arc<$crate::common::TestSpecifier>,
+ }
+
+ impl [< $ident TestSpecificReporter >] {
+ pub fn execution_specific_reporter(
+ &self,
+ node_id: impl Into<usize>,
+ node_designation: impl Into<$crate::common::NodeDesignation>
+ ) -> [< $ident ExecutionSpecificReporter >] {
+ [< $ident ExecutionSpecificReporter >] {
+ reporter: self.reporter.clone(),
+ execution_specifier: Arc::new($crate::common::ExecutionSpecifier {
+ test_specifier: self.test_specifier.clone(),
+ node_id: node_id.into(),
+ node_designation: node_designation.into(),
+ })
+ }
+ }
+
+ fn report(&self, event: impl Into<$ident>) -> anyhow::Result<()> {
+ self.reporter.report(event)
+ }
+
+ $(
+ __report_gen_for_variant! { $ident, $variant_ident; $( $field_ident : $field_ty ),* }
+ )*
+ }
+
+ /// A reporter that's tied to a specific execution of the test case such as execution on
+ /// a specific node like the leader or follower.
+ #[derive(Clone, Debug)]
+ pub struct [< $ident ExecutionSpecificReporter >] {
+ $vis reporter: [< $ident Reporter >],
+ $vis execution_specifier: std::sync::Arc<$crate::common::ExecutionSpecifier>,
+ }
+
+ impl [< $ident ExecutionSpecificReporter >] {
+ fn report(&self, event: impl Into<$ident>) -> anyhow::Result<()> {
+ self.reporter.report(event)
+ }
+
+ $(
+ __report_gen_for_variant_exec! { $ident, $variant_ident; $( $field_ident : $field_ty ),* }
+ )*
+ }
+
+ /// A reporter that's tied to a specific step execution.
+ #[derive(Clone, Debug)]
+ pub struct [< $ident StepExecutionSpecificReporter >] {
+ $vis reporter: [< $ident Reporter >],
+ $vis step_specifier: std::sync::Arc<$crate::common::StepExecutionSpecifier>,
+ }
+
+ impl [< $ident StepExecutionSpecificReporter >] {
+ fn report(&self, event: impl Into<$ident>) -> anyhow::Result<()> {
+ self.reporter.report(event)
+ }
+
+ $(
+ __report_gen_for_variant_step! { $ident, $variant_ident; $( $field_ident : $field_ty ),* }
+ )*
+ }
+ }
+ };
+}
+
+define_event! {
+ /// An event type that's sent by the test runners/drivers to the report aggregator.
+ pub(crate) enum RunnerEvent {
+ /// An event emitted by the reporter when it wishes to listen to events emitted by the
+ /// aggregator.
+ SubscribeToEvents {
+ /// The channel on which the aggregator sends back the receive side of its event channel.
+ tx: oneshot::Sender<broadcast::Receiver<ReporterEvent>>
+ },
+ /// An event emitted by runners when they've discovered a corpus file.
+ CorpusFileDiscovery {
+ /// The contents of the corpus file.
+ corpus: Corpus
+ },
+ /// An event emitted by runners when they've discovered a metadata file.
+ MetadataFileDiscovery {
+ /// The path of the metadata file discovered.
+ path: MetadataFilePath,
+ /// The content of the metadata file.
+ metadata: Metadata
+ },
+ /// An event emitted by the runners when they discover a test case.
+ TestCaseDiscovery {
+ /// A specifier for the test that was discovered.
+ test_specifier: Arc<TestSpecifier>,
+ },
+ /// An event emitted by the runners when a test case is ignored.
+ TestIgnored {
+ /// A specifier for the test that's been ignored.
+ test_specifier: Arc<TestSpecifier>,
+ /// A reason for the test to be ignored.
+ reason: String,
+ /// Additional fields that describe more information on why the test was ignored.
+ additional_fields: IndexMap
+ },
+ /// An event emitted by the runners when a test case has succeeded.
+ TestSucceeded {
+ /// A specifier for the test that succeeded.
+ test_specifier: Arc<TestSpecifier>,
+ /// The number of steps of the case that were executed by the driver.
+ steps_executed: usize,
+ },
+ /// An event emitted by the runners when a test case has failed.
+ TestFailed {
+ /// A specifier for the test that failed.
+ test_specifier: Arc<TestSpecifier>,
+ /// A reason for the failure of the test.
+ reason: String,
+ },
+ /// An event emitted when the test case is assigned a leader node.
+ LeaderNodeAssigned {
+ /// A specifier for the test that the assignment is for.
+ test_specifier: Arc<TestSpecifier>,
+ /// The ID of the node that this case is being executed on.
+ id: usize,
+ /// The platform of the node.
+ platform: TestingPlatform,
+ /// The connection string of the node.
+ connection_string: String,
+ },
+ /// An event emitted when the test case is assigned a follower node.
+ FollowerNodeAssigned {
+ /// A specifier for the test that the assignment is for.
+ test_specifier: Arc<TestSpecifier>,
+ /// The ID of the node that this case is being executed on.
+ id: usize,
+ /// The platform of the node.
+ platform: TestingPlatform,
+ /// The connection string of the node.
+ connection_string: String,
+ },
+ /// An event emitted by the runners when the compilation of the contracts has succeeded
+ /// on the pre-link contracts.
+ PreLinkContractsCompilationSucceeded {
+ /// A specifier for the execution that's taking place.
+ execution_specifier: Arc<ExecutionSpecifier>,
+ /// The version of the compiler used to compile the contracts.
+ compiler_version: Version,
+ /// The path of the compiler used to compile the contracts.
+ compiler_path: PathBuf,
+ /// A flag of whether the contract bytecode and ABI were cached or if they were compiled
+ /// anew.
+ is_cached: bool,
+ /// The input provided to the compiler - this is optional and not provided if the
+ /// contracts were obtained from the cache.
+ compiler_input: Option<CompilerInput>,
+ /// The output of the compiler.
+ compiler_output: CompilerOutput
+ },
+ /// An event emitted by the runners when the compilation of the contracts has succeeded
+ /// on the post-link contracts.
+ PostLinkContractsCompilationSucceeded {
+ /// A specifier for the execution that's taking place.
+ execution_specifier: Arc<ExecutionSpecifier>,
+ /// The version of the compiler used to compile the contracts.
+ compiler_version: Version,
+ /// The path of the compiler used to compile the contracts.
+ compiler_path: PathBuf,
+ /// A flag of whether the contract bytecode and ABI were cached or if they were compiled
+ /// anew.
+ is_cached: bool,
+ /// The input provided to the compiler - this is optional and not provided if the
+ /// contracts were obtained from the cache.
+ compiler_input: Option<CompilerInput>,
+ /// The output of the compiler.
+ compiler_output: CompilerOutput
+ },
+ /// An event emitted by the runners when the compilation of the pre-link contracts has
+ /// failed.
+ PreLinkContractsCompilationFailed {
+ /// A specifier for the execution that's taking place.
+ execution_specifier: Arc<ExecutionSpecifier>,
+ /// The version of the compiler used to compile the contracts.
+ compiler_version: Option<Version>,
+ /// The path of the compiler used to compile the contracts.
+ compiler_path: Option<PathBuf>,
+ /// The input provided to the compiler - this is optional and not provided if the
+ /// contracts were obtained from the cache.
+ compiler_input: Option<CompilerInput>,
+ /// The failure reason.
+ reason: String,
+ },
+ /// An event emitted by the runners when the compilation of the post-link contracts has
+ /// failed.
+ PostLinkContractsCompilationFailed {
+ /// A specifier for the execution that's taking place.
+ execution_specifier: Arc<ExecutionSpecifier>,
+ /// The version of the compiler used to compile the contracts.
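+ /// This is `None` if the failure happened before the compiler version could be
+ /// determined (for example, when resolving the compiler executable itself failed).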
+ compiler_version: Option<Version>,
+ /// The path of the compiler used to compile the contracts.
+ compiler_path: Option<PathBuf>,
+ /// The input provided to the compiler - this is optional and not provided if the
+ /// contracts were obtained from the cache.
+ compiler_input: Option<CompilerInput>,
+ /// The failure reason.
+ reason: String,
+ },
+ /// An event emitted by the runners when a library has been deployed.
+ LibrariesDeployed {
+ /// A specifier for the execution that's taking place.
+ execution_specifier: Arc<ExecutionSpecifier>,
+ /// The addresses of the libraries that were deployed.
+ libraries: BTreeMap
+ },
+ /// An event emitted by the runners when they've deployed a new contract.
+ ContractDeployed {
+ /// A specifier for the execution that's taking place.
+ execution_specifier: Arc<ExecutionSpecifier>,
+ /// The instance name of the contract.
+ contract_instance: ContractInstance,
+ /// The address of the contract.
+ address: Address
+ },
+ }
+}
+
+/// An extension to the [`Reporter`] implemented by the macro.
+impl RunnerEventReporter {
+ pub async fn subscribe(&self) -> anyhow::Result<broadcast::Receiver<ReporterEvent>> {
+ let (tx, rx) = oneshot::channel::<broadcast::Receiver<ReporterEvent>>();
+ self.report_subscribe_to_events_event(tx)?;
+ rx.await.map_err(Into::into)
+ }
+}
+
+pub type Reporter = RunnerEventReporter;
+pub type TestSpecificReporter = RunnerEventTestSpecificReporter;
+pub type ExecutionSpecificReporter = RunnerEventExecutionSpecificReporter;
From 84b139d3b4d1deee72f124a55d1998cb08f20d42 Mon Sep 17 00:00:00 2001
From: Omar
Date: Mon, 25 Aug 2025 18:46:06 +0300
Subject: [PATCH 2/4] Configure kitchensink to use devnode by default (#153)

* Configure kitchensink to use devnode by default
* Update the kitchensink tests
---
 crates/config/src/lib.rs       | 12 ++++++++++++
 crates/node/src/kitchensink.rs | 32 ++++++++++++++++++++++++++------
 2 files changed, 38 insertions(+), 6 deletions(-)

diff --git a/crates/config/src/lib.rs b/crates/config/src/lib.rs
index 1f62dd1..21f84a9 100644
--- a/crates/config/src/lib.rs
+++ b/crates/config/src/lib.rs
@@ -115,6 +115,18 @@ pub struct Arguments {
 #[arg(short, long = "kitchensink", default_value = "substrate-node")]
 pub kitchensink: PathBuf,
+ /// The path to the `revive-dev-node` executable.
+ ///
+ /// By default it uses the `revive-dev-node` binary found in `$PATH`.
+ #[arg(long = "revive-dev-node", default_value = "revive-dev-node")]
+ pub revive_dev_node: PathBuf,
+
+ /// By default the tool uses the revive-dev-node when running differential tests against
+ /// PolkaVM, since the dev-node is much faster than kitchensink. This flag allows the caller
+ /// to configure the tool to use kitchensink rather than the dev-node.
+ #[arg(long)]
+ pub use_kitchensink_not_dev_node: bool,
+
 /// The path to the `eth_proxy` executable.
 ///
 /// By default it uses `eth-rpc` binary found in `$PATH`.
diff --git a/crates/node/src/kitchensink.rs b/crates/node/src/kitchensink.rs
index ce4d30b..e97c060 100644
--- a/crates/node/src/kitchensink.rs
+++ b/crates/node/src/kitchensink.rs
@@ -54,6 +54,7 @@ static NODE_COUNT: AtomicU32 = AtomicU32::new(0);
 pub struct KitchensinkNode {
 id: u32,
 substrate_binary: PathBuf,
+ dev_node_binary: PathBuf,
 eth_proxy_binary: PathBuf,
 rpc_url: String,
 base_directory: PathBuf,
@@ -63,6 +64,7 @@ pub struct KitchensinkNode {
 wallet: Arc,
 nonce_manager: CachedNonceManager,
 chain_id_filler: ChainIdFiller,
+ use_kitchensink_not_dev_node: bool,
 /// This vector stores [`File`] objects that we use for logging which we want to flush when the
 /// node object is dropped.
We do not store them in a structured fashion at the moment (in /// separate fields) as the logic that we need to apply to them is all the same regardless of @@ -101,11 +103,21 @@ impl KitchensinkNode { // Note: we do not pipe the logs of this process to a separate file since this is just a // once-off export of the default chain spec and not part of the long-running node process. - let output = Command::new(&self.substrate_binary) - .arg("export-chain-spec") - .arg("--chain") - .arg("dev") - .output()?; + let output = if self.use_kitchensink_not_dev_node { + Command::new(&self.substrate_binary) + .arg("export-chain-spec") + .arg("--chain") + .arg("dev") + .output() + .context("Failed to export the chain-spec")? + } else { + Command::new(&self.dev_node_binary) + .arg("build-spec") + .arg("--chain") + .arg("dev") + .output() + .context("Failed to export the chain-spec")? + }; if !output.status.success() { anyhow::bail!( @@ -188,7 +200,12 @@ impl KitchensinkNode { let kitchensink_stderr_logs_file = open_options .clone() .open(self.kitchensink_stderr_log_file_path())?; - self.process_substrate = Command::new(&self.substrate_binary) + let node_binary_path = if self.use_kitchensink_not_dev_node { + self.substrate_binary.as_path() + } else { + self.dev_node_binary.as_path() + }; + self.process_substrate = Command::new(node_binary_path) .arg("--dev") .arg("--chain") .arg(chainspec_path) @@ -533,6 +550,7 @@ impl Node for KitchensinkNode { Self { id, substrate_binary: config.kitchensink.clone(), + dev_node_binary: config.revive_dev_node.clone(), eth_proxy_binary: config.eth_proxy.clone(), rpc_url: String::new(), base_directory, @@ -542,6 +560,7 @@ impl Node for KitchensinkNode { wallet: Arc::new(wallet), chain_id_filler: Default::default(), nonce_manager: Default::default(), + use_kitchensink_not_dev_node: config.use_kitchensink_not_dev_node, // We know that we only need to be storing 4 files so we can specify that when creating // the vector. It's the stdout and stderr of the substrate-node and the eth-rpc. 
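+ // The same four files apply when the dev-node is used in place of kitchensink, since
+ // we still spawn exactly one node process and one eth-rpc process.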
logs_file_to_flush: Vec::with_capacity(4),
@@ -1059,6 +1078,7 @@ mod tests {
 Arguments {
 kitchensink: PathBuf::from("substrate-node"),
 eth_proxy: PathBuf::from("eth-rpc"),
+ use_kitchensink_not_dev_node: true,
 ..Default::default()
 }
 }
From eb264fcc7b388344deb556985f5c3475349fbbe3 Mon Sep 17 00:00:00 2001
From: Omar
Date: Mon, 25 Aug 2025 23:47:29 +0300
Subject: [PATCH 3/4] feature/fix abi finding resolc (#154)

* Configure kitchensink to use devnode by default
* Update the kitchensink tests
* Fix the logic for finding the ABI in resolc
* Edit how CLI reporter prints
---
 crates/compiler/src/revive_resolc.rs | 52 +++++++++++++++++++---------
 crates/core/src/main.rs              | 20 +++++++----
 2 files changed, 49 insertions(+), 23 deletions(-)

diff --git a/crates/compiler/src/revive_resolc.rs b/crates/compiler/src/revive_resolc.rs
index 8579456..87b9c86 100644
--- a/crates/compiler/src/revive_resolc.rs
+++ b/crates/compiler/src/revive_resolc.rs
@@ -181,23 +181,41 @@ impl SolidityCompiler for Resolc {
 .evm
 .and_then(|evm| evm.bytecode.clone())
 .context("Unexpected - Contract compiled with resolc has no bytecode")?;
- let abi = contract_information
- .metadata
- .as_ref()
- .and_then(|metadata| metadata.as_object())
- .and_then(|metadata| metadata.get("solc_metadata"))
- .and_then(|solc_metadata| solc_metadata.as_str())
- .and_then(|metadata| serde_json::from_str::<serde_json::Value>(metadata).ok())
- .and_then(|metadata| {
- metadata.get("output").and_then(|output| {
- output
- .get("abi")
- .and_then(|abi| serde_json::from_value::<JsonAbi>(abi.clone()).ok())
- })
- })
- .context(
- "Unexpected - Failed to get the ABI for a contract compiled with resolc",
- )?;
+ let abi = {
+ let metadata = contract_information
+ .metadata
+ .as_ref()
+ .context("No metadata found for the contract")?;
+ let solc_metadata_str = match metadata {
+ serde_json::Value::String(solc_metadata_str) => solc_metadata_str.as_str(),
+ serde_json::Value::Object(metadata_object) => {
+ let solc_metadata_value = metadata_object
+ .get("solc_metadata")
+ .context("Contract doesn't have a 'solc_metadata' field")?;
+ solc_metadata_value
+ .as_str()
+ .context("The 'solc_metadata' field is not a string")?
+ }
+ serde_json::Value::Null
+ | serde_json::Value::Bool(_)
+ | serde_json::Value::Number(_)
+ | serde_json::Value::Array(_) => {
+ anyhow::bail!("Unsupported type of metadata {metadata:?}")
+ }
+ };
+ let solc_metadata =
+ serde_json::from_str::<serde_json::Value>(solc_metadata_str).context(
+ "Failed to deserialize the solc_metadata as a serde_json generic value",
+ )?;
+ let output_value = solc_metadata
+ .get("output")
+ .context("solc_metadata doesn't have an output field")?;
+ let abi_value = output_value
+ .get("abi")
+ .context("solc_metadata output doesn't contain an abi field")?;
+ serde_json::from_value::<JsonAbi>(abi_value.clone())
+ .context("ABI found in solc_metadata output is not valid ABI")?
+ }; map.insert(contract_name, (bytecode.object, abi)); } } diff --git a/crates/core/src/main.rs b/crates/core/src/main.rs index 43ae3be..dfbe641 100644 --- a/crates/core/src/main.rs +++ b/crates/core/src/main.rs @@ -546,22 +546,30 @@ async fn start_cli_reporting_task(reporter: Reporter) { number_of_successes += 1; writeln!( buf, - "{}{}Case Succeeded{}{} - Steps Executed: {}", - GREEN, BOLD, BOLD_RESET, COLOR_RESET, steps_executed + "{}{}Case Succeeded{} - Steps Executed: {}{}", + GREEN, BOLD, BOLD_RESET, steps_executed, COLOR_RESET ) } TestCaseStatus::Failed { reason } => { number_of_failures += 1; writeln!( buf, - "{}{}Case Failed{}{} - Reason: {}", - RED, BOLD, BOLD_RESET, COLOR_RESET, reason + "{}{}Case Failed{} - Reason: {}{}", + RED, + BOLD, + BOLD_RESET, + reason.trim(), + COLOR_RESET, ) } TestCaseStatus::Ignored { reason, .. } => writeln!( buf, - "{}{}Case Ignored{}{} - Reason: {}", - GREY, BOLD, BOLD_RESET, COLOR_RESET, reason + "{}{}Case Ignored{} - Reason: {}{}", + GREY, + BOLD, + BOLD_RESET, + reason.trim(), + COLOR_RESET, ), }; } From 60328cd49377889d133b1764ed6ec1fc1a0e8279 Mon Sep 17 00:00:00 2001 From: Omar Date: Tue, 26 Aug 2025 00:03:28 +0300 Subject: [PATCH 4/4] Add a Quick Run Script (#152) * Add a quick run script * Add more context to errors * Fix the issue with corpus directory canonicalization * Update the quick run script * Edit the runner script * Support specifying the path of the polkadot sdk --- .gitignore | 4 +- crates/common/src/fs/clear_dir.rs | 19 +++- crates/common/src/futures/poll.rs | 7 +- crates/compiler/src/lib.rs | 8 +- crates/compiler/src/revive_resolc.rs | 59 +++++++--- crates/compiler/src/solc.rs | 70 +++++++++--- crates/core/src/cached_compiler.rs | 51 ++++++--- crates/core/src/driver/mod.rs | 61 ++++++---- crates/core/src/main.rs | 45 +++++--- crates/format/src/corpus.rs | 32 +++--- crates/format/src/input.rs | 52 ++++++--- crates/format/src/metadata.rs | 10 +- crates/format/src/mode.rs | 24 +++- crates/node/src/geth.rs | 151 +++++++++++++++++-------- crates/node/src/kitchensink.rs | 160 ++++++++++++++++++--------- crates/node/src/pool.rs | 9 +- crates/report/src/aggregator.rs | 19 +++- crates/report/src/runner_event.rs | 4 +- crates/solc-binaries/src/cache.rs | 53 +++++++-- crates/solc-binaries/src/download.rs | 32 +++++- crates/solc-binaries/src/lib.rs | 4 +- run_tests.sh | 102 +++++++++++++++++ 22 files changed, 730 insertions(+), 246 deletions(-) create mode 100755 run_tests.sh diff --git a/.gitignore b/.gitignore index 505fb8b..594333a 100644 --- a/.gitignore +++ b/.gitignore @@ -8,4 +8,6 @@ node_modules # added to the .gitignore file. *.log -profile.json.gz \ No newline at end of file +profile.json.gz +resolc-compiler-tests +workdir \ No newline at end of file diff --git a/crates/common/src/fs/clear_dir.rs b/crates/common/src/fs/clear_dir.rs index 1e6c83d..387c134 100644 --- a/crates/common/src/fs/clear_dir.rs +++ b/crates/common/src/fs/clear_dir.rs @@ -3,19 +3,28 @@ use std::{ path::Path, }; -use anyhow::Result; +use anyhow::{Context, Result}; /// This method clears the passed directory of all of the files and directories contained within /// without deleting the directory. pub fn clear_directory(path: impl AsRef) -> Result<()> { - for entry in read_dir(path.as_ref())? { - let entry = entry?; + for entry in read_dir(path.as_ref()) + .with_context(|| format!("Failed to read directory: {}", path.as_ref().display()))? 
+ { + let entry = entry.with_context(|| { + format!( + "Failed to read an entry in directory: {}", + path.as_ref().display() + ) + })?; let entry_path = entry.path(); if entry_path.is_file() { - remove_file(entry_path)? + remove_file(&entry_path) + .with_context(|| format!("Failed to remove file: {}", entry_path.display()))? } else { - remove_dir_all(entry_path)? + remove_dir_all(&entry_path) + .with_context(|| format!("Failed to remove directory: {}", entry_path.display()))? } } Ok(()) diff --git a/crates/common/src/futures/poll.rs b/crates/common/src/futures/poll.rs index 2697541..8551cb7 100644 --- a/crates/common/src/futures/poll.rs +++ b/crates/common/src/futures/poll.rs @@ -1,7 +1,7 @@ use std::ops::ControlFlow; use std::time::Duration; -use anyhow::{Result, anyhow}; +use anyhow::{Context as _, Result, anyhow}; const EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION: Duration = Duration::from_secs(60); @@ -38,7 +38,10 @@ where )); } - match future().await? { + match future() + .await + .context("Polled future returned an error during polling loop")? + { ControlFlow::Continue(()) => { let next_wait_duration = match polling_wait_behavior { PollingWaitBehavior::Constant(duration) => duration, diff --git a/crates/compiler/src/lib.rs b/crates/compiler/src/lib.rs index 05d9868..3c3e7c9 100644 --- a/crates/compiler/src/lib.rs +++ b/crates/compiler/src/lib.rs @@ -13,6 +13,7 @@ use std::{ use alloy::json_abi::JsonAbi; use alloy_primitives::Address; +use anyhow::Context; use semver::Version; use serde::{Deserialize, Serialize}; @@ -136,9 +137,10 @@ where } pub fn with_source(mut self, path: impl AsRef) -> anyhow::Result { - self.input - .sources - .insert(path.as_ref().to_path_buf(), read_to_string(path.as_ref())?); + self.input.sources.insert( + path.as_ref().to_path_buf(), + read_to_string(path.as_ref()).context("Failed to read the contract source")?, + ); Ok(self) } diff --git a/crates/compiler/src/revive_resolc.rs b/crates/compiler/src/revive_resolc.rs index 87b9c86..3a2012f 100644 --- a/crates/compiler/src/revive_resolc.rs +++ b/crates/compiler/src/revive_resolc.rs @@ -119,18 +119,28 @@ impl SolidityCompiler for Resolc { .join(","), ); } - let mut child = command.spawn()?; + let mut child = command + .spawn() + .with_context(|| format!("Failed to spawn resolc at {}", self.resolc_path.display()))?; let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped"); - let serialized_input = serde_json::to_vec(&input)?; - stdin_pipe.write_all(&serialized_input).await?; + let serialized_input = serde_json::to_vec(&input) + .context("Failed to serialize Standard JSON input for resolc")?; + stdin_pipe + .write_all(&serialized_input) + .await + .context("Failed to write Standard JSON to resolc stdin")?; - let output = child.wait_with_output().await?; + let output = child + .wait_with_output() + .await + .context("Failed while waiting for resolc process to finish")?; let stdout = output.stdout; let stderr = output.stderr; if !output.status.success() { - let json_in = serde_json::to_string_pretty(&input)?; + let json_in = serde_json::to_string_pretty(&input) + .context("Failed to pretty-print Standard JSON input for logging")?; let message = String::from_utf8_lossy(&stderr); tracing::error!( status = %output.status, @@ -141,12 +151,14 @@ impl SolidityCompiler for Resolc { anyhow::bail!("Compilation failed with an error: {message}"); } - let parsed = serde_json::from_slice::(&stdout).map_err(|e| { - anyhow::anyhow!( - "failed to parse resolc JSON output: {e}\nstderr: {}", - String::from_utf8_lossy(&stderr) - 
) - })?; + let parsed = serde_json::from_slice::(&stdout) + .map_err(|e| { + anyhow::anyhow!( + "failed to parse resolc JSON output: {e}\nstderr: {}", + String::from_utf8_lossy(&stderr) + ) + }) + .context("Failed to parse resolc standard JSON output")?; tracing::debug!( output = %serde_json::to_string(&parsed).unwrap(), @@ -173,7 +185,10 @@ impl SolidityCompiler for Resolc { let mut compiler_output = CompilerOutput::default(); for (source_path, contracts) in contracts.into_iter() { - let source_path = PathBuf::from(source_path).canonicalize()?; + let src_for_msg = source_path.clone(); + let source_path = PathBuf::from(source_path) + .canonicalize() + .with_context(|| format!("Failed to canonicalize path {src_for_msg}"))?; let map = compiler_output.contracts.entry(source_path).or_default(); for (contract_name, contract_information) in contracts.into_iter() { @@ -251,8 +266,20 @@ impl SolidityCompiler for Resolc { let output = Command::new(self.resolc_path.as_path()) .arg("--version") .stdout(Stdio::piped()) - .spawn()? - .wait_with_output()? + .spawn() + .with_context(|| { + format!( + "Failed to spawn resolc at {} to get version", + self.resolc_path.display() + ) + })? + .wait_with_output() + .with_context(|| { + format!( + "Failed waiting for resolc at {} to finish --version", + self.resolc_path.display() + ) + })? .stdout; let output = String::from_utf8_lossy(&output); @@ -264,7 +291,9 @@ impl SolidityCompiler for Resolc { .next() .context("Version parsing failed")?; - let version = Version::parse(version_string)?; + let version = Version::parse(version_string).with_context(|| { + format!("Failed to parse resolc semver from '{version_string}'") + })?; vacant_entry.insert(version.clone()); diff --git a/crates/compiler/src/solc.rs b/crates/compiler/src/solc.rs index a7d8501..99b5a9a 100644 --- a/crates/compiler/src/solc.rs +++ b/crates/compiler/src/solc.rs @@ -49,7 +49,11 @@ impl SolidityCompiler for Solc { }: CompilerInput, _: Self::Options, ) -> anyhow::Result { - let compiler_supports_via_ir = self.version().await? >= SOLC_VERSION_SUPPORTING_VIA_YUL_IR; + let compiler_supports_via_ir = self + .version() + .await + .context("Failed to query solc version to determine via-ir support")? + >= SOLC_VERSION_SUPPORTING_VIA_YUL_IR; // Be careful to entirely omit the viaIR field if the compiler does not support it, // as it will error if you provide fields it does not know about. 
Because @@ -134,15 +138,25 @@ impl SolidityCompiler for Solc { .join(","), ); } - let mut child = command.spawn()?; + let mut child = command + .spawn() + .with_context(|| format!("Failed to spawn solc at {}", self.solc_path.display()))?; let stdin = child.stdin.as_mut().expect("should be piped"); - let serialized_input = serde_json::to_vec(&input)?; - stdin.write_all(&serialized_input).await?; - let output = child.wait_with_output().await?; + let serialized_input = serde_json::to_vec(&input) + .context("Failed to serialize Standard JSON input for solc")?; + stdin + .write_all(&serialized_input) + .await + .context("Failed to write Standard JSON to solc stdin")?; + let output = child + .wait_with_output() + .await + .context("Failed while waiting for solc process to finish")?; if !output.status.success() { - let json_in = serde_json::to_string_pretty(&input)?; + let json_in = serde_json::to_string_pretty(&input) + .context("Failed to pretty-print Standard JSON input for logging")?; let message = String::from_utf8_lossy(&output.stderr); tracing::error!( status = %output.status, @@ -153,12 +167,14 @@ impl SolidityCompiler for Solc { anyhow::bail!("Compilation failed with an error: {message}"); } - let parsed = serde_json::from_slice::(&output.stdout).map_err(|e| { - anyhow::anyhow!( - "failed to parse resolc JSON output: {e}\nstderr: {}", - String::from_utf8_lossy(&output.stdout) - ) - })?; + let parsed = serde_json::from_slice::(&output.stdout) + .map_err(|e| { + anyhow::anyhow!( + "failed to parse resolc JSON output: {e}\nstderr: {}", + String::from_utf8_lossy(&output.stdout) + ) + }) + .context("Failed to parse solc standard JSON output")?; // Detecting if the compiler output contained errors and reporting them through logs and // errors instead of returning the compiler output that might contain errors. @@ -178,7 +194,12 @@ impl SolidityCompiler for Solc { for (contract_path, contracts) in parsed.contracts { let map = compiler_output .contracts - .entry(contract_path.canonicalize()?) + .entry(contract_path.canonicalize().with_context(|| { + format!( + "Failed to canonicalize contract path {}", + contract_path.display() + ) + })?) 
.or_default(); for (contract_name, contract_info) in contracts.into_iter() { let source_code = contract_info @@ -207,7 +228,9 @@ impl SolidityCompiler for Solc { config: &Arguments, version: impl Into, ) -> anyhow::Result { - let path = download_solc(config.directory(), version, config.wasm).await?; + let path = download_solc(config.directory(), version, config.wasm) + .await + .context("Failed to download/get path to solc binary")?; Ok(path) } @@ -230,8 +253,19 @@ impl SolidityCompiler for Solc { let child = Command::new(self.solc_path.as_path()) .arg("--version") .stdout(Stdio::piped()) - .spawn()?; - let output = child.wait_with_output()?; + .spawn() + .with_context(|| { + format!( + "Failed to spawn solc at {} to get version", + self.solc_path.display() + ) + })?; + let output = child.wait_with_output().with_context(|| { + format!( + "Failed waiting for solc at {} to finish --version", + self.solc_path.display() + ) + })?; let output = String::from_utf8_lossy(&output.stdout); let version_line = output .split("Version: ") @@ -242,7 +276,9 @@ impl SolidityCompiler for Solc { .next() .context("Version parsing failed")?; - let version = Version::parse(version_string)?; + let version = Version::parse(version_string).with_context(|| { + format!("Failed to parse solc semver from '{version_string}'") + })?; vacant_entry.insert(version.clone()); diff --git a/crates/core/src/cached_compiler.rs b/crates/core/src/cached_compiler.rs index 188befb..14912d9 100644 --- a/crates/core/src/cached_compiler.rs +++ b/crates/core/src/cached_compiler.rs @@ -14,7 +14,7 @@ use revive_dt_config::Arguments; use revive_dt_format::metadata::{ContractIdent, ContractInstance, Metadata}; use alloy::{hex::ToHexExt, json_abi::JsonAbi, primitives::Address}; -use anyhow::{Error, Result}; +use anyhow::{Context as _, Error, Result}; use once_cell::sync::Lazy; use semver::Version; use serde::{Deserialize, Serialize}; @@ -29,7 +29,10 @@ impl CachedCompiler { pub async fn new(path: impl AsRef, invalidate_cache: bool) -> Result { let mut cache = ArtifactsCache::new(path); if invalidate_cache { - cache = cache.with_invalidated_cache().await?; + cache = cache + .with_invalidated_cache() + .await + .context("Failed to invalidate compilation cache directory")?; } Ok(Self(cache)) } @@ -76,9 +79,8 @@ impl CachedCompiler { compiler_version_or_requirement, ) .await - .inspect_err(|err| { - compilation_failure_report_callback(None, None, None, err.to_string()) - })?; + .inspect_err(|err| compilation_failure_report_callback(None, None, None, err.to_string())) + .context("Failed to obtain compiler executable path")?; let compiler_version = ::new(compiler_path.clone()) .version() .await @@ -89,7 +91,8 @@ impl CachedCompiler { None, err.to_string(), ) - })?; + }) + .context("Failed to query compiler version")?; let cache_key = CacheKey { platform_key: P::config_id().to_string(), @@ -104,10 +107,14 @@ impl CachedCompiler { let compilation_success_report_callback = compilation_success_report_callback.clone(); async move { compile_contracts::

( - metadata.directory()?, + metadata + .directory() + .context("Failed to get metadata directory while preparing compilation")?, compiler_path, compiler_version, - metadata.files_to_compile()?, + metadata + .files_to_compile() + .context("Failed to enumerate files to compile from metadata")?, mode, deployed_libraries, compilation_success_report_callback, @@ -131,7 +138,10 @@ impl CachedCompiler { Some(_) => { debug!("Deployed libraries defined, recompilation must take place"); debug!("Cache miss"); - compilation_callback().await?.compiler_output + compilation_callback() + .await + .context("Compilation callback for deployed libraries failed")? + .compiler_output } // If no deployed libraries are specified then we can follow the cached flow and attempt // to lookup the compilation artifacts in the cache. @@ -167,7 +177,12 @@ impl CachedCompiler { ); cache_value.compiler_output } - None => compilation_callback().await?.compiler_output, + None => { + compilation_callback() + .await + .context("Compilation callback failed (cache miss path)")? + .compiler_output + } } } }; @@ -247,7 +262,8 @@ async fn compile_contracts( Some(compiler_input.clone()), err.to_string(), ) - })?; + }) + .context("Failed to configure compiler with sources and options")?; compilation_success_report_callback( compiler_version, compiler_path.as_ref().to_path_buf(), @@ -273,15 +289,20 @@ impl ArtifactsCache { pub async fn with_invalidated_cache(self) -> Result { cacache::clear(self.path.as_path()) .await - .map_err(Into::::into)?; + .map_err(Into::::into) + .with_context(|| format!("Failed to clear cache at {}", self.path.display()))?; Ok(self) } #[instrument(level = "debug", skip_all, err)] pub async fn insert(&self, key: &CacheKey, value: &CacheValue) -> Result<()> { - let key = bson::to_vec(key)?; - let value = bson::to_vec(value)?; - cacache::write(self.path.as_path(), key.encode_hex(), value).await?; + let key = bson::to_vec(key).context("Failed to serialize cache key (bson)")?; + let value = bson::to_vec(value).context("Failed to serialize cache value (bson)")?; + cacache::write(self.path.as_path(), key.encode_hex(), value) + .await + .with_context(|| { + format!("Failed to write cache entry under {}", self.path.display()) + })?; Ok(()) } diff --git a/crates/core/src/driver/mod.rs b/crates/core/src/driver/mod.rs index d06ea52..e7c1766 100644 --- a/crates/core/src/driver/mod.rs +++ b/crates/core/src/driver/mod.rs @@ -86,18 +86,22 @@ where ) -> anyhow::Result { match step { Step::FunctionCall(input) => { - let (receipt, geth_trace, diff_mode) = - self.handle_input(metadata, input, node).await?; + let (receipt, geth_trace, diff_mode) = self + .handle_input(metadata, input, node) + .await + .context("Failed to handle function call step")?; Ok(StepOutput::FunctionCall(receipt, geth_trace, diff_mode)) } Step::BalanceAssertion(balance_assertion) => { self.handle_balance_assertion(metadata, balance_assertion, node) - .await?; + .await + .context("Failed to handle balance assertion step")?; Ok(StepOutput::BalanceAssertion) } Step::StorageEmptyAssertion(storage_empty) => { self.handle_storage_empty(metadata, storage_empty, node) - .await?; + .await + .context("Failed to handle storage empty assertion step")?; Ok(StepOutput::StorageEmptyAssertion) } } @@ -113,18 +117,23 @@ where ) -> anyhow::Result<(TransactionReceipt, GethTrace, DiffMode)> { let deployment_receipts = self .handle_input_contract_deployment(metadata, input, node) - .await?; + .await + .context("Failed during contract deployment phase of input handling")?; let 
execution_receipt = self .handle_input_execution(input, deployment_receipts, node) - .await?; + .await + .context("Failed during transaction execution phase of input handling")?; let tracing_result = self .handle_input_call_frame_tracing(&execution_receipt, node) - .await?; - self.handle_input_variable_assignment(input, &tracing_result)?; + .await + .context("Failed during callframe tracing phase of input handling")?; + self.handle_input_variable_assignment(input, &tracing_result) + .context("Failed to assign variables from callframe output")?; let (_, (geth_trace, diff_mode)) = try_join!( self.handle_input_expectations(input, &execution_receipt, node, &tracing_result), self.handle_input_diff(&execution_receipt, node) - )?; + ) + .context("Failed while evaluating expectations and diffs in parallel")?; Ok((execution_receipt, geth_trace, diff_mode)) } @@ -136,9 +145,11 @@ where node: &T::Blockchain, ) -> anyhow::Result<()> { self.handle_balance_assertion_contract_deployment(metadata, balance_assertion, node) - .await?; + .await + .context("Failed to deploy contract for balance assertion")?; self.handle_balance_assertion_execution(balance_assertion, node) - .await?; + .await + .context("Failed to execute balance assertion")?; Ok(()) } @@ -150,9 +161,11 @@ where node: &T::Blockchain, ) -> anyhow::Result<()> { self.handle_storage_empty_assertion_contract_deployment(metadata, storage_empty, node) - .await?; + .await + .context("Failed to deploy contract for storage empty assertion")?; self.handle_storage_empty_assertion_execution(storage_empty, node) - .await?; + .await + .context("Failed to execute storage empty assertion")?; Ok(()) } @@ -191,7 +204,8 @@ where value, node, ) - .await? + .await + .context("Failed to get or deploy contract instance during input execution")? { receipts.insert(instance.clone(), receipt); } @@ -213,7 +227,7 @@ where // lookup the transaction receipt in this case and continue on. Method::Deployer => deployment_receipts .remove(&input.instance) - .context("Failed to find deployment receipt"), + .context("Failed to find deployment receipt for constructor call"), Method::Fallback | Method::FunctionName(_) => { let tx = match input .legacy_transaction(node, self.default_resolution_context()) @@ -385,7 +399,8 @@ where let actual = &tracing_result.output.as_ref().unwrap_or_default(); if !expected .is_equivalent(actual, resolver, resolution_context) - .await? + .await + .context("Failed to resolve calldata equivalence for return data assertion")? { tracing::error!( ?execution_receipt, @@ -448,7 +463,8 @@ where let expected = Calldata::new_compound([expected]); if !expected .is_equivalent(&actual.0, resolver, resolution_context) - .await? + .await + .context("Failed to resolve event topic equivalence")? { tracing::error!( event_idx, @@ -468,7 +484,8 @@ where let actual = &actual_event.data().data; if !expected .is_equivalent(&actual.0, resolver, resolution_context) - .await? + .await + .context("Failed to resolve event value equivalence")? 
{ tracing::error!( event_idx, @@ -501,8 +518,12 @@ where let trace = node .trace_transaction(execution_receipt, trace_options) - .await?; - let diff = node.state_diff(execution_receipt).await?; + .await + .context("Failed to obtain geth prestate tracer output")?; + let diff = node + .state_diff(execution_receipt) + .await + .context("Failed to obtain state diff for transaction")?; Ok((trace, diff)) } diff --git a/crates/core/src/main.rs b/crates/core/src/main.rs index dfbe641..237628f 100644 --- a/crates/core/src/main.rs +++ b/crates/core/src/main.rs @@ -60,7 +60,7 @@ struct Test<'a> { } fn main() -> anyhow::Result<()> { - let (args, _guard) = init_cli()?; + let (args, _guard) = init_cli().context("Failed to initialize CLI and tracing subscriber")?; info!( leader = args.leader.to_string(), follower = args.follower.to_string(), @@ -74,7 +74,8 @@ fn main() -> anyhow::Result<()> { let number_of_threads = args.number_of_threads; let body = async move { - let tests = collect_corpora(&args)? + let tests = collect_corpora(&args) + .context("Failed to collect corpus files from provided arguments")? .into_iter() .inspect(|(corpus, _)| { reporter @@ -96,7 +97,9 @@ fn main() -> anyhow::Result<()> { Some(platform) => { compile_corpus(&args, &tests, platform, reporter, report_aggregator_task).await } - None => execute_corpus(&args, &tests, reporter, report_aggregator_task).await?, + None => execute_corpus(&args, &tests, reporter, report_aggregator_task) + .await + .context("Failed to execute corpus")?, } Ok(()) }; @@ -183,7 +186,9 @@ where F::Blockchain: revive_dt_node::Node + Send + Sync + 'static, { let tests = prepare_tests::(args, metadata_files, reporter.clone()); - let driver_task = start_driver_task::(args, tests).await?; + let driver_task = start_driver_task::(args, tests) + .await + .context("Failed to start driver task")?; let cli_reporting_task = start_cli_reporting_task(reporter); let (_, _, rtn) = tokio::join!(cli_reporting_task, driver_task, report_aggregator_task); @@ -419,9 +424,13 @@ async fn does_compiler_support_mode( mode: &Mode, ) -> anyhow::Result { let compiler_version_or_requirement = mode.compiler_version_to_use(args.solc.clone()); - let compiler_path = - P::Compiler::get_compiler_executable(args, compiler_version_or_requirement).await?; - let compiler_version = P::Compiler::new(compiler_path.clone()).version().await?; + let compiler_path = P::Compiler::get_compiler_executable(args, compiler_version_or_requirement) + .await + .context("Failed to obtain compiler executable path")?; + let compiler_version = P::Compiler::new(compiler_path.clone()) + .version() + .await + .context("Failed to query compiler version")?; Ok(P::Compiler::supports_mode( &compiler_version, @@ -442,15 +451,20 @@ where { info!("Starting driver task"); - let leader_nodes = Arc::new(NodePool::::new(args)?); - let follower_nodes = Arc::new(NodePool::::new(args)?); + let leader_nodes = Arc::new( + NodePool::::new(args).context("Failed to initialize leader node pool")?, + ); + let follower_nodes = Arc::new( + NodePool::::new(args).context("Failed to initialize follower node pool")?, + ); let number_concurrent_tasks = args.number_of_concurrent_tasks(); let cached_compiler = Arc::new( CachedCompiler::new( args.directory().join("compilation_cache"), args.invalidate_compilation_cache, ) - .await?, + .await + .context("Failed to initialize cached compiler")?, ); Ok(tests.for_each_concurrent( @@ -695,11 +709,15 @@ where .expect("Can't fail") } ) - )?; + ) + .context("Failed to compile pre-link contracts for 
leader/follower in parallel")?; let mut leader_deployed_libraries = None::>; let mut follower_deployed_libraries = None::>; - let mut contract_sources = test.metadata.contract_sources()?; + let mut contract_sources = test + .metadata + .contract_sources() + .context("Failed to retrieve contract sources from metadata")?; for library_instance in test .metadata .libraries @@ -890,7 +908,8 @@ where .expect("Can't fail") } ) - )?; + ) + .context("Failed to compile post-link contracts for leader/follower in parallel")?; let leader_state = CaseState::::new( leader_compiler_version, diff --git a/crates/format/src/corpus.rs b/crates/format/src/corpus.rs index 69921f0..62e81f6 100644 --- a/crates/format/src/corpus.rs +++ b/crates/format/src/corpus.rs @@ -8,6 +8,7 @@ use serde::{Deserialize, Serialize}; use tracing::{debug, info}; use crate::metadata::{Metadata, MetadataFile}; +use anyhow::Context as _; #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] #[serde(untagged)] @@ -20,23 +21,24 @@ impl Corpus { pub fn try_from_path(file_path: impl AsRef) -> anyhow::Result { let mut corpus = File::open(file_path.as_ref()) .map_err(anyhow::Error::from) - .and_then(|file| serde_json::from_reader::<_, Corpus>(file).map_err(Into::into))?; + .and_then(|file| serde_json::from_reader::<_, Corpus>(file).map_err(Into::into)) + .with_context(|| { + format!( + "Failed to open and deserialize corpus file at {}", + file_path.as_ref().display() + ) + })?; + + let corpus_directory = file_path + .as_ref() + .canonicalize() + .context("Failed to canonicalize the path to the corpus file")? + .parent() + .context("Corpus file has no parent")? + .to_path_buf(); for path in corpus.paths_iter_mut() { - *path = file_path - .as_ref() - .parent() - .ok_or_else(|| { - anyhow::anyhow!("Corpus path '{}' does not point to a file", path.display()) - })? - .canonicalize() - .map_err(|error| { - anyhow::anyhow!( - "Failed to canonicalize path to corpus '{}': {error}", - path.display() - ) - })? - .join(path.as_path()) + *path = corpus_directory.join(path.as_path()) } Ok(corpus) diff --git a/crates/format/src/input.rs b/crates/format/src/input.rs index 918c2d4..5852a63 100644 --- a/crates/format/src/input.rs +++ b/crates/format/src/input.rs @@ -268,7 +268,11 @@ impl Input { ) -> anyhow::Result { match self.method { Method::Deployer | Method::Fallback => { - let calldata = self.calldata.calldata(resolver, context).await?; + let calldata = self + .calldata + .calldata(resolver, context) + .await + .context("Failed to produce calldata for deployer/fallback method")?; Ok(calldata.into()) } @@ -283,14 +287,15 @@ impl Input { // Overloads are handled by providing the full function signature in the "function // name". // https://github.com/matter-labs/era-compiler-tester/blob/1dfa7d07cba0734ca97e24704f12dd57f6990c2c/compiler_tester/src/test/case/input/mod.rs#L158-L190 - let selector = if function_name.contains('(') && function_name.contains(')') { - Function::parse(function_name) + let selector = + if function_name.contains('(') && function_name.contains(')') { + Function::parse(function_name) .context( "Failed to parse the provided function name into a function signature", )? .selector() - } else { - abi.functions() + } else { + abi.functions() .find(|function| function.signature().starts_with(function_name)) .ok_or_else(|| { anyhow::anyhow!( @@ -298,9 +303,13 @@ impl Input { function_name, &self.instance ) - })? 
+ }) + .with_context(|| format!( + "Failed to resolve function selector for {:?} on instance {:?}", + function_name, &self.instance + ))? .selector() - }; + }; // Allocating a vector that we will be using for the calldata. The vector size will be: // 4 bytes for the function selector. @@ -312,7 +321,8 @@ impl Input { calldata.extend(selector.0); self.calldata .calldata_into_slice(&mut calldata, resolver, context) - .await?; + .await + .context("Failed to append encoded argument to calldata buffer")?; Ok(calldata.into()) } @@ -325,7 +335,10 @@ impl Input { resolver: &impl ResolverApi, context: ResolutionContext<'_>, ) -> anyhow::Result { - let input_data = self.encoded_input(resolver, context).await?; + let input_data = self + .encoded_input(resolver, context) + .await + .context("Failed to encode input bytes for transaction request")?; let transaction_request = TransactionRequest::default().from(self.caller).value( self.value .map(|value| value.into_inner()) @@ -437,7 +450,8 @@ impl Calldata { }) .buffered(0xFF) .try_collect::>() - .await?; + .await + .context("Failed to resolve one or more calldata arguments")?; buffer.extend(resolved.into_iter().flatten()); } @@ -478,7 +492,10 @@ impl Calldata { std::borrow::Cow::Borrowed(other) }; - let this = this.resolve(resolver, context).await?; + let this = this + .resolve(resolver, context) + .await + .context("Failed to resolve calldata item during equivalence check")?; let other = U256::from_be_slice(&other); Ok(this == other) }) @@ -664,17 +681,24 @@ impl> CalldataToken { let current_block_number = match context.tip_block_number() { Some(block_number) => *block_number, - None => resolver.last_block_number().await?, + None => resolver.last_block_number().await.context( + "Failed to query last block number while resolving $BLOCK_HASH", + )?, }; let desired_block_number = current_block_number.saturating_sub(offset); - let block_hash = resolver.block_hash(desired_block_number.into()).await?; + let block_hash = resolver + .block_hash(desired_block_number.into()) + .await + .context("Failed to resolve block hash for desired block number")?; Ok(U256::from_be_bytes(block_hash.0)) } else if item == Self::BLOCK_NUMBER_VARIABLE { let current_block_number = match context.tip_block_number() { Some(block_number) => *block_number, - None => resolver.last_block_number().await?, + None => resolver.last_block_number().await.context( + "Failed to query last block number while resolving $BLOCK_NUMBER", + )?, }; Ok(U256::from(current_block_number)) } else if item == Self::BLOCK_TIMESTAMP_VARIABLE { diff --git a/crates/format/src/metadata.rs b/crates/format/src/metadata.rs index 04411b9..66985a5 100644 --- a/crates/format/src/metadata.rs +++ b/crates/format/src/metadata.rs @@ -132,7 +132,15 @@ impl Metadata { ) in contracts { let alias = alias.clone(); - let absolute_path = directory.join(contract_source_path).canonicalize()?; + let absolute_path = directory + .join(contract_source_path) + .canonicalize() + .map_err(|error| { + anyhow::anyhow!( + "Failed to canonicalize contract source path '{}': {error}", + directory.join(contract_source_path).display() + ) + })?; let contract_ident = contract_ident.clone(); sources.insert( diff --git a/crates/format/src/mode.rs b/crates/format/src/mode.rs index 0476e4e..a89b2cb 100644 --- a/crates/format/src/mode.rs +++ b/crates/format/src/mode.rs @@ -1,3 +1,4 @@ +use anyhow::Context; use regex::Regex; use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline}; use serde::{Deserialize, Serialize}; @@ -44,21 +45,34 @@ 
impl FromStr for ParsedMode { }; let pipeline = match caps.name("pipeline") { - Some(m) => Some(ModePipeline::from_str(m.as_str())?), + Some(m) => Some( + ModePipeline::from_str(m.as_str()) + .context("Failed to parse mode pipeline from string")?, + ), None => None, }; let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+"); let optimize_setting = match caps.name("optimize_setting") { - Some(m) => Some(ModeOptimizerSetting::from_str(m.as_str())?), + Some(m) => Some( + ModeOptimizerSetting::from_str(m.as_str()) + .context("Failed to parse optimizer setting from string")?, + ), None => None, }; let version = match caps.name("version") { - Some(m) => Some(semver::VersionReq::parse(m.as_str()).map_err(|e| { - anyhow::anyhow!("Cannot parse the version requirement '{}': {e}", m.as_str()) - })?), + Some(m) => Some( + semver::VersionReq::parse(m.as_str()) + .map_err(|e| { + anyhow::anyhow!( + "Cannot parse the version requirement '{}': {e}", + m.as_str() + ) + }) + .context("Failed to parse semver requirement from mode string")?, + ), None => None, }; diff --git a/crates/node/src/geth.rs b/crates/node/src/geth.rs index e72a7ed..1d87a80 100644 --- a/crates/node/src/geth.rs +++ b/crates/node/src/geth.rs @@ -101,10 +101,13 @@ impl GethNode { let _ = clear_directory(&self.base_directory); let _ = clear_directory(&self.logs_directory); - create_dir_all(&self.base_directory)?; - create_dir_all(&self.logs_directory)?; + create_dir_all(&self.base_directory) + .context("Failed to create base directory for geth node")?; + create_dir_all(&self.logs_directory) + .context("Failed to create logs directory for geth node")?; - let mut genesis = serde_json::from_str::(&genesis)?; + let mut genesis = serde_json::from_str::(&genesis) + .context("Failed to deserialize geth genesis JSON")?; for signer_address in >::signer_addresses(&self.wallet) { @@ -116,7 +119,11 @@ impl GethNode { .or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE))); } let genesis_path = self.base_directory.join(Self::GENESIS_JSON_FILE); - serde_json::to_writer(File::create(&genesis_path)?, &genesis)?; + serde_json::to_writer( + File::create(&genesis_path).context("Failed to create geth genesis file")?, + &genesis, + ) + .context("Failed to serialize geth genesis JSON to file")?; let mut child = Command::new(&self.geth) .arg("--state.scheme") @@ -127,16 +134,22 @@ impl GethNode { .arg(genesis_path) .stderr(Stdio::piped()) .stdout(Stdio::null()) - .spawn()?; + .spawn() + .context("Failed to spawn geth --init process")?; let mut stderr = String::new(); child .stderr .take() .expect("should be piped") - .read_to_string(&mut stderr)?; + .read_to_string(&mut stderr) + .context("Failed to read geth --init stderr")?; - if !child.wait()?.success() { + if !child + .wait() + .context("Failed waiting for geth --init process to finish")? 
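// Ordering note for the `--init` handling above: the child's stderr is
// drained into a String before `wait()` is called. Reading the pipe first
// keeps a chatty initialization from blocking on a full stderr buffer; only
// after the pipe is empty is the exit status checked and reported.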
+ .success() + { anyhow::bail!("failed to initialize geth node #{:?}: {stderr}", &self.id); } @@ -161,8 +174,11 @@ impl GethNode { let stdout_logs_file = open_options .clone() - .open(self.geth_stdout_log_file_path())?; - let stderr_logs_file = open_options.open(self.geth_stderr_log_file_path())?; + .open(self.geth_stdout_log_file_path()) + .context("Failed to open geth stdout logs file")?; + let stderr_logs_file = open_options + .open(self.geth_stderr_log_file_path()) + .context("Failed to open geth stderr logs file")?; self.handle = Command::new(&self.geth) .arg("--dev") .arg("--datadir") @@ -182,14 +198,24 @@ impl GethNode { .arg("full") .arg("--gcmode") .arg("archive") - .stderr(stderr_logs_file.try_clone()?) - .stdout(stdout_logs_file.try_clone()?) - .spawn()? + .stderr( + stderr_logs_file + .try_clone() + .context("Failed to clone geth stderr log file handle")?, + ) + .stdout( + stdout_logs_file + .try_clone() + .context("Failed to clone geth stdout log file handle")?, + ) + .spawn() + .context("Failed to spawn geth node process")? .into(); if let Err(error) = self.wait_ready() { tracing::error!(?error, "Failed to start geth, shutting down gracefully"); - self.shutdown()?; + self.shutdown() + .context("Failed to gracefully shutdown after geth start error")?; return Err(error); } @@ -211,7 +237,8 @@ impl GethNode { .write(false) .append(false) .truncate(false) - .open(self.geth_stderr_log_file_path())?; + .open(self.geth_stderr_log_file_path()) + .context("Failed to open geth stderr logs file for readiness check")?; let maximum_wait_time = Duration::from_millis(self.start_timeout); let mut stderr = BufReader::new(logs_file).lines(); @@ -277,11 +304,18 @@ impl EthereumNode for GethNode { &self, transaction: TransactionRequest, ) -> anyhow::Result { - let provider = self.provider().await?; + let provider = self + .provider() + .await + .context("Failed to create provider for transaction submission")?; - let pending_transaction = provider.send_transaction(transaction).await.inspect_err( - |err| tracing::error!(%err, "Encountered an error when submitting the transaction"), - )?; + let pending_transaction = provider + .send_transaction(transaction) + .await + .inspect_err( + |err| tracing::error!(%err, "Encountered an error when submitting the transaction"), + ) + .context("Failed to submit transaction to geth node")?; let transaction_hash = *pending_transaction.tx_hash(); // The following is a fix for the "transaction indexing is in progress" error that we used @@ -335,7 +369,11 @@ impl EthereumNode for GethNode { transaction: &TransactionReceipt, trace_options: GethDebugTracingOptions, ) -> anyhow::Result { - let provider = Arc::new(self.provider().await?); + let provider = Arc::new( + self.provider() + .await + .context("Failed to create provider for tracing")?, + ); poll( Self::TRACE_POLLING_DURATION, PollingWaitBehavior::Constant(Duration::from_millis(200)), @@ -371,8 +409,10 @@ impl EthereumNode for GethNode { }); match self .trace_transaction(transaction, trace_options) - .await? - .try_into_pre_state_frame()? + .await + .context("Failed to trace transaction for prestate diff")? + .try_into_pre_state_frame() + .context("Failed to convert trace into pre-state frame")? { PreStateFrame::Diff(diff) => Ok(diff), _ => anyhow::bail!("expected a diff mode trace"), @@ -382,7 +422,8 @@ impl EthereumNode for GethNode { #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn balance_of(&self, address: Address) -> anyhow::Result { self.provider() - .await? 
+ .await + .context("Failed to get the Geth provider")? .get_balance(address) .await .map_err(Into::into) @@ -395,7 +436,8 @@ impl EthereumNode for GethNode { keys: Vec, ) -> anyhow::Result { self.provider() - .await? + .await + .context("Failed to get the Geth provider")? .get_proof(address, keys) .latest() .await @@ -407,7 +449,8 @@ impl ResolverApi for GethNode { #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn chain_id(&self) -> anyhow::Result { self.provider() - .await? + .await + .context("Failed to get the Geth provider")? .get_chain_id() .await .map_err(Into::into) @@ -416,7 +459,8 @@ impl ResolverApi for GethNode { #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn transaction_gas_price(&self, tx_hash: &TxHash) -> anyhow::Result { self.provider() - .await? + .await + .context("Failed to get the Geth provider")? .get_transaction_receipt(*tx_hash) .await? .context("Failed to get the transaction receipt") @@ -426,40 +470,48 @@ impl ResolverApi for GethNode { #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() - .await? + .await + .context("Failed to get the Geth provider")? .get_block_by_number(number) - .await? - .ok_or(anyhow::Error::msg("Blockchain has no blocks")) + .await + .context("Failed to get the geth block")? + .context("Failed to get the Geth block, perhaps there are no blocks?") .map(|block| block.header.gas_limit as _) } #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result
<Address>
{ self.provider() - .await? + .await + .context("Failed to get the Geth provider")? .get_block_by_number(number) - .await? - .ok_or(anyhow::Error::msg("Blockchain has no blocks")) + .await + .context("Failed to get the geth block")? + .context("Failed to get the Geth block, perhaps there are no blocks?") .map(|block| block.header.beneficiary) } #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() - .await? + .await + .context("Failed to get the Geth provider")? .get_block_by_number(number) - .await? - .ok_or(anyhow::Error::msg("Blockchain has no blocks")) + .await + .context("Failed to get the geth block")? + .context("Failed to get the Geth block, perhaps there are no blocks?") .map(|block| U256::from_be_bytes(block.header.mix_hash.0)) } #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn block_base_fee(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() - .await? + .await + .context("Failed to get the Geth provider")? .get_block_by_number(number) - .await? - .ok_or(anyhow::Error::msg("Blockchain has no blocks")) + .await + .context("Failed to get the geth block")? + .context("Failed to get the Geth block, perhaps there are no blocks?") .and_then(|block| { block .header @@ -471,27 +523,32 @@ impl ResolverApi for GethNode { #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() - .await? + .await + .context("Failed to get the Geth provider")? .get_block_by_number(number) - .await? - .ok_or(anyhow::Error::msg("Blockchain has no blocks")) + .await + .context("Failed to get the geth block")? + .context("Failed to get the Geth block, perhaps there are no blocks?") .map(|block| block.header.hash) } #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() - .await? + .await + .context("Failed to get the Geth provider")? .get_block_by_number(number) - .await? - .ok_or(anyhow::Error::msg("Blockchain has no blocks")) + .await + .context("Failed to get the geth block")? + .context("Failed to get the Geth block, perhaps there are no blocks?") .map(|block| block.header.timestamp) } #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn last_block_number(&self) -> anyhow::Result { self.provider() - .await? + .await + .context("Failed to get the Geth provider")? .get_block_number() .await .map_err(Into::into) @@ -576,8 +633,10 @@ impl Node for GethNode { .stdin(Stdio::null()) .stdout(Stdio::piped()) .stderr(Stdio::null()) - .spawn()? - .wait_with_output()? + .spawn() + .context("Failed to spawn geth --version process")? + .wait_with_output() + .context("Failed to wait for geth --version output")? 
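// `Child::wait_with_output` (used in the `--version` probe above) consumes
// the child, waits for it to exit, and collects the piped stdout in a single
// call, so no separate `wait()`/read step is required before taking `.stdout`.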
.stdout; Ok(String::from_utf8_lossy(&output).into()) } diff --git a/crates/node/src/kitchensink.rs b/crates/node/src/kitchensink.rs index e97c060..e8e92ea 100644 --- a/crates/node/src/kitchensink.rs +++ b/crates/node/src/kitchensink.rs @@ -96,8 +96,10 @@ impl KitchensinkNode { let _ = clear_directory(&self.base_directory); let _ = clear_directory(&self.logs_directory); - create_dir_all(&self.base_directory)?; - create_dir_all(&self.logs_directory)?; + create_dir_all(&self.base_directory) + .context("Failed to create base directory for kitchensink node")?; + create_dir_all(&self.logs_directory) + .context("Failed to create logs directory for kitchensink node")?; let template_chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE); @@ -126,8 +128,10 @@ impl KitchensinkNode { ); } - let content = String::from_utf8(output.stdout)?; - let mut chainspec_json: JsonValue = serde_json::from_str(&content)?; + let content = String::from_utf8(output.stdout) + .context("Failed to decode substrate export-chain-spec output as UTF-8")?; + let mut chainspec_json: JsonValue = + serde_json::from_str(&content).context("Failed to parse substrate chain spec JSON")?; let existing_chainspec_balances = chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"] @@ -149,7 +153,8 @@ impl KitchensinkNode { }) .collect(); let mut eth_balances = { - let mut genesis = serde_json::from_str::(genesis)?; + let mut genesis = serde_json::from_str::(genesis) + .context("Failed to deserialize EVM genesis JSON for kitchensink")?; for signer_address in >::signer_addresses(&self.wallet) { @@ -160,7 +165,8 @@ impl KitchensinkNode { .entry(signer_address) .or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE))); } - self.extract_balance_from_genesis_file(&genesis)? + self.extract_balance_from_genesis_file(&genesis) + .context("Failed to extract balances from EVM genesis JSON")? }; merged_balances.append(&mut eth_balances); @@ -168,9 +174,11 @@ impl KitchensinkNode { json!(merged_balances); serde_json::to_writer_pretty( - std::fs::File::create(&template_chainspec_path)?, + std::fs::File::create(&template_chainspec_path) + .context("Failed to create kitchensink template chainspec file")?, &chainspec_json, - )?; + ) + .context("Failed to write kitchensink template chainspec JSON")?; Ok(self) } @@ -196,10 +204,12 @@ impl KitchensinkNode { // Start Substrate node let kitchensink_stdout_logs_file = open_options .clone() - .open(self.kitchensink_stdout_log_file_path())?; + .open(self.kitchensink_stdout_log_file_path()) + .context("Failed to open kitchensink stdout logs file")?; let kitchensink_stderr_logs_file = open_options .clone() - .open(self.kitchensink_stderr_log_file_path())?; + .open(self.kitchensink_stderr_log_file_path()) + .context("Failed to open kitchensink stderr logs file")?; let node_binary_path = if self.use_kitchensink_not_dev_node { self.substrate_binary.as_path() } else { @@ -223,9 +233,18 @@ impl KitchensinkNode { .arg("--rpc-max-connections") .arg(u32::MAX.to_string()) .env("RUST_LOG", Self::SUBSTRATE_LOG_ENV) - .stdout(kitchensink_stdout_logs_file.try_clone()?) - .stderr(kitchensink_stderr_logs_file.try_clone()?) - .spawn()? + .stdout( + kitchensink_stdout_logs_file + .try_clone() + .context("Failed to clone kitchensink stdout log file handle")?, + ) + .stderr( + kitchensink_stderr_logs_file + .try_clone() + .context("Failed to clone kitchensink stderr log file handle")?, + ) + .spawn() + .context("Failed to spawn substrate node process")? 
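// The repeated `try_clone` calls above are necessary because `Stdio` takes
// ownership of the file handle passed to `stdout(..)`/`stderr(..)`; a single
// `File` cannot back more than one stream without being duplicated first.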
.into(); // Give the node a moment to boot @@ -234,14 +253,18 @@ impl KitchensinkNode { Self::SUBSTRATE_READY_MARKER, Duration::from_secs(60), ) { - self.shutdown()?; + self.shutdown() + .context("Failed to gracefully shutdown after substrate start error")?; return Err(error); }; let eth_proxy_stdout_logs_file = open_options .clone() - .open(self.proxy_stdout_log_file_path())?; - let eth_proxy_stderr_logs_file = open_options.open(self.proxy_stderr_log_file_path())?; + .open(self.proxy_stdout_log_file_path()) + .context("Failed to open eth-proxy stdout logs file")?; + let eth_proxy_stderr_logs_file = open_options + .open(self.proxy_stderr_log_file_path()) + .context("Failed to open eth-proxy stderr logs file")?; self.process_proxy = Command::new(&self.eth_proxy_binary) .arg("--dev") .arg("--rpc-port") @@ -251,9 +274,18 @@ impl KitchensinkNode { .arg("--rpc-max-connections") .arg(u32::MAX.to_string()) .env("RUST_LOG", Self::PROXY_LOG_ENV) - .stdout(eth_proxy_stdout_logs_file.try_clone()?) - .stderr(eth_proxy_stderr_logs_file.try_clone()?) - .spawn()? + .stdout( + eth_proxy_stdout_logs_file + .try_clone() + .context("Failed to clone eth-proxy stdout log file handle")?, + ) + .stderr( + eth_proxy_stderr_logs_file + .try_clone() + .context("Failed to clone eth-proxy stderr log file handle")?, + ) + .spawn() + .context("Failed to spawn eth-proxy process")? .into(); if let Err(error) = Self::wait_ready( @@ -261,7 +293,8 @@ impl KitchensinkNode { Self::ETH_PROXY_READY_MARKER, Duration::from_secs(60), ) { - self.shutdown()?; + self.shutdown() + .context("Failed to gracefully shutdown after eth-proxy start error")?; return Err(error); }; @@ -386,11 +419,14 @@ impl EthereumNode for KitchensinkNode { ) -> anyhow::Result { let receipt = self .provider() - .await? + .await + .context("Failed to create provider for transaction submission")? .send_transaction(transaction) - .await? + .await + .context("Failed to submit transaction to kitchensink proxy")? .get_receipt() - .await?; + .await + .context("Failed to fetch transaction receipt from kitchensink proxy")?; Ok(receipt) } @@ -400,11 +436,12 @@ impl EthereumNode for KitchensinkNode { trace_options: GethDebugTracingOptions, ) -> anyhow::Result { let tx_hash = transaction.transaction_hash; - Ok(self - .provider() - .await? + self.provider() + .await + .context("Failed to create provider for debug tracing")? .debug_trace_transaction(tx_hash, trace_options) - .await?) + .await + .context("Failed to obtain debug trace from kitchensink proxy") } async fn state_diff(&self, transaction: &TransactionReceipt) -> anyhow::Result { @@ -425,7 +462,8 @@ impl EthereumNode for KitchensinkNode { async fn balance_of(&self, address: Address) -> anyhow::Result { self.provider() - .await? + .await + .context("Failed to get the Kitchensink provider")? .get_balance(address) .await .map_err(Into::into) @@ -437,7 +475,8 @@ impl EthereumNode for KitchensinkNode { keys: Vec, ) -> anyhow::Result { self.provider() - .await? + .await + .context("Failed to get the Kitchensink provider")? .get_proof(address, keys) .latest() .await @@ -448,7 +487,8 @@ impl EthereumNode for KitchensinkNode { impl ResolverApi for KitchensinkNode { async fn chain_id(&self) -> anyhow::Result { self.provider() - .await? + .await + .context("Failed to get the Kitchensink provider")? .get_chain_id() .await .map_err(Into::into) @@ -456,7 +496,8 @@ impl ResolverApi for KitchensinkNode { async fn transaction_gas_price(&self, tx_hash: &TxHash) -> anyhow::Result { self.provider() - .await? 
+ .await + .context("Failed to get the Kitchensink provider")? .get_transaction_receipt(*tx_hash) .await? .context("Failed to get the transaction receipt") @@ -465,37 +506,45 @@ impl ResolverApi for KitchensinkNode { async fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() - .await? + .await + .context("Failed to get the Kitchensink provider")? .get_block_by_number(number) - .await? - .ok_or(anyhow::Error::msg("Blockchain has no blocks")) + .await + .context("Failed to get the kitchensink block")? + .context("Failed to get the Kitchensink block, perhaps the chain has no blocks?") .map(|block| block.header.gas_limit as _) } async fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result
{ self.provider() - .await? + .await + .context("Failed to get the Kitchensink provider")? .get_block_by_number(number) - .await? - .ok_or(anyhow::Error::msg("Blockchain has no blocks")) + .await + .context("Failed to get the kitchensink block")? + .context("Failed to get the Kitchensink block, perhaps the chain has no blocks?") .map(|block| block.header.beneficiary) } async fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() - .await? + .await + .context("Failed to get the Kitchensink provider")? .get_block_by_number(number) - .await? - .ok_or(anyhow::Error::msg("Blockchain has no blocks")) + .await + .context("Failed to get the kitchensink block")? + .context("Failed to get the Kitchensink block, perhaps the chain has no blocks?") .map(|block| U256::from_be_bytes(block.header.mix_hash.0)) } async fn block_base_fee(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() - .await? + .await + .context("Failed to get the Kitchensink provider")? .get_block_by_number(number) - .await? - .ok_or(anyhow::Error::msg("Blockchain has no blocks")) + .await + .context("Failed to get the kitchensink block")? + .context("Failed to get the Kitchensink block, perhaps the chain has no blocks?") .and_then(|block| { block .header @@ -506,25 +555,30 @@ impl ResolverApi for KitchensinkNode { async fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() - .await? + .await + .context("Failed to get the Kitchensink provider")? .get_block_by_number(number) - .await? - .ok_or(anyhow::Error::msg("Blockchain has no blocks")) + .await + .context("Failed to get the kitchensink block")? + .context("Failed to get the Kitchensink block, perhaps the chain has no blocks?") .map(|block| block.header.hash) } async fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() - .await? + .await + .context("Failed to get the Kitchensink provider")? .get_block_by_number(number) - .await? - .ok_or(anyhow::Error::msg("Blockchain has no blocks")) + .await + .context("Failed to get the kitchensink block")? + .context("Failed to get the Kitchensink block, perhaps the chain has no blocks?") .map(|block| block.header.timestamp) } async fn last_block_number(&self) -> anyhow::Result { self.provider() - .await? + .await + .context("Failed to get the Kitchensink provider")? .get_block_number() .await .map_err(Into::into) @@ -611,8 +665,10 @@ impl Node for KitchensinkNode { .stdin(Stdio::null()) .stdout(Stdio::piped()) .stderr(Stdio::null()) - .spawn()? - .wait_with_output()? + .spawn() + .context("Failed to spawn kitchensink --version")? + .wait_with_output() + .context("Failed to wait for kitchensink --version")? .stdout; Ok(String::from_utf8_lossy(&output).into()) } diff --git a/crates/node/src/pool.rs b/crates/node/src/pool.rs index d195988..6fa4784 100644 --- a/crates/node/src/pool.rs +++ b/crates/node/src/pool.rs @@ -44,8 +44,10 @@ where nodes.push( handle .join() - .map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error))? - .map_err(|error| anyhow::anyhow!("node failed to spawn: {error}"))?, + .map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error)) + .context("Failed to join node spawn thread")? 
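// Why the two-stage mapping above: `JoinHandle::join` fails with the panic
// payload, `Box<dyn Any + Send + 'static>`, which does not implement
// `std::error::Error` and therefore cannot be propagated with `?` into an
// `anyhow::Result` on its own; hence the explicit `map_err` into `anyhow!`
// before `.context`. A standalone sketch of the same shape (names are
// illustrative):
//
//     let node = handle
//         .join()
//         .map_err(|panic| anyhow::anyhow!("spawn thread panicked: {panic:?}"))??;
//
// The doubled `?` first unwraps the join itself, then the `anyhow::Result`
// returned by the spawned thread.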
+ .map_err(|error| anyhow::anyhow!("node failed to spawn: {error}")) + .context("Node failed to spawn")?, ); } @@ -69,7 +71,8 @@ fn spawn_node(args: &Arguments, genesis: String) -> anyhow::Resu connection_string = node.connection_string(), "Spawning node" ); - node.spawn(genesis)?; + node.spawn(genesis) + .context("Failed to spawn node process")?; info!( id = node.id(), connection_string = node.connection_string(), diff --git a/crates/report/src/aggregator.rs b/crates/report/src/aggregator.rs index 1f7af7e..913b0d5 100644 --- a/crates/report/src/aggregator.rs +++ b/crates/report/src/aggregator.rs @@ -9,7 +9,7 @@ use std::{ }; use alloy_primitives::Address; -use anyhow::Result; +use anyhow::{Context as _, Result}; use indexmap::IndexMap; use revive_dt_compiler::{CompilerInput, CompilerOutput, Mode}; use revive_dt_config::{Arguments, TestingPlatform}; @@ -113,7 +113,10 @@ impl ReportAggregator { debug!("Report aggregation completed"); let file_name = { - let current_timestamp = SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs(); + let current_timestamp = SystemTime::now() + .duration_since(UNIX_EPOCH) + .context("System clock is before UNIX_EPOCH; cannot compute report timestamp")? + .as_secs(); let mut file_name = current_timestamp.to_string(); file_name.push_str(".json"); file_name @@ -124,8 +127,16 @@ impl ReportAggregator { .write(true) .truncate(true) .read(false) - .open(file_path)?; - serde_json::to_writer_pretty(file, &self.report)?; + .open(&file_path) + .with_context(|| { + format!( + "Failed to open report file for writing: {}", + file_path.display() + ) + })?; + serde_json::to_writer_pretty(&file, &self.report).with_context(|| { + format!("Failed to serialize report JSON to {}", file_path.display()) + })?; Ok(()) } diff --git a/crates/report/src/runner_event.rs b/crates/report/src/runner_event.rs index ddb67f9..bdd6c0e 100644 --- a/crates/report/src/runner_event.rs +++ b/crates/report/src/runner_event.rs @@ -4,6 +4,7 @@ use std::{collections::BTreeMap, path::PathBuf, sync::Arc}; use alloy_primitives::Address; +use anyhow::Context as _; use indexmap::IndexMap; use revive_dt_compiler::{CompilerInput, CompilerOutput}; use revive_dt_config::TestingPlatform; @@ -630,7 +631,8 @@ define_event! 
{ impl RunnerEventReporter { pub async fn subscribe(&self) -> anyhow::Result> { let (tx, rx) = oneshot::channel::>(); - self.report_subscribe_to_events_event(tx)?; + self.report_subscribe_to_events_event(tx) + .context("Failed to send subscribe request to reporter task")?; rx.await.map_err(Into::into) } } diff --git a/crates/solc-binaries/src/cache.rs b/crates/solc-binaries/src/cache.rs index 57b9696..67d8f9e 100644 --- a/crates/solc-binaries/src/cache.rs +++ b/crates/solc-binaries/src/cache.rs @@ -12,6 +12,7 @@ use std::{ use tokio::sync::Mutex; use crate::download::SolcDownloader; +use anyhow::Context; pub const SOLC_CACHE_DIRECTORY: &str = "solc"; pub(crate) static SOLC_CACHER: LazyLock>> = LazyLock::new(Default::default); @@ -31,8 +32,20 @@ pub(crate) async fn get_or_download( return Ok(target_file); } - create_dir_all(target_directory)?; - download_to_file(&target_file, downloader).await?; + create_dir_all(&target_directory).with_context(|| { + format!( + "Failed to create solc cache directory: {}", + target_directory.display() + ) + })?; + download_to_file(&target_file, downloader) + .await + .with_context(|| { + format!( + "Failed to write downloaded solc to {}", + target_file.display() + ) + })?; cache.insert(target_file.clone()); Ok(target_file) @@ -45,14 +58,26 @@ async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::R #[cfg(unix)] { - let mut permissions = file.metadata()?.permissions(); + let mut permissions = file + .metadata() + .with_context(|| format!("Failed to read metadata for {}", path.display()))? + .permissions(); permissions.set_mode(permissions.mode() | 0o111); - file.set_permissions(permissions)?; + file.set_permissions(permissions).with_context(|| { + format!("Failed to set executable permissions on {}", path.display()) + })?; } let mut file = BufWriter::new(file); - file.write_all(&downloader.download().await?)?; - file.flush()?; + file.write_all( + &downloader + .download() + .await + .context("Failed to download solc binary bytes")?, + ) + .with_context(|| format!("Failed to write solc binary to {}", path.display()))?; + file.flush() + .with_context(|| format!("Failed to flush file {}", path.display()))?; drop(file); #[cfg(target_os = "macos")] @@ -63,8 +88,20 @@ async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::R .stderr(std::process::Stdio::null()) .stdout(std::process::Stdio::null()) .stdout(std::process::Stdio::null()) - .spawn()? - .wait()?; + .spawn() + .with_context(|| { + format!( + "Failed to spawn xattr to remove quarantine attribute on {}", + path.display() + ) + })? + .wait() + .with_context(|| { + format!( + "Failed waiting for xattr operation to complete on {}", + path.display() + ) + })?; Ok(()) } diff --git a/crates/solc-binaries/src/download.rs b/crates/solc-binaries/src/download.rs index 691f639..0e94ffa 100644 --- a/crates/solc-binaries/src/download.rs +++ b/crates/solc-binaries/src/download.rs @@ -11,6 +11,7 @@ use semver::Version; use sha2::{Digest, Sha256}; use crate::list::List; +use anyhow::Context; pub static LIST_CACHE: LazyLock>> = LazyLock::new(Default::default); @@ -30,7 +31,12 @@ impl List { return Ok(list.clone()); } - let body: List = reqwest::get(url).await?.json().await?; + let body: List = reqwest::get(url) + .await + .with_context(|| format!("Failed to GET solc list from {url}"))? 
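// The same download-and-decode shape, reduced to a standalone helper
// (a sketch assuming reqwest with the `json` feature enabled and a
// `serde::de::DeserializeOwned` payload):
//
//     use anyhow::Context as _;
//
//     async fn fetch_json<T: serde::de::DeserializeOwned>(url: &str) -> anyhow::Result<T> {
//         reqwest::get(url)
//             .await
//             .with_context(|| format!("Failed to GET {url}"))?
//             .json::<T>()
//             .await
//             .with_context(|| format!("Failed to decode JSON from {url}"))
//     }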
+ .json() + .await + .with_context(|| format!("Failed to deserialize solc list JSON from {url}"))?; LIST_CACHE.lock().unwrap().insert(url, body.clone()); @@ -68,7 +74,8 @@ impl SolcDownloader { }), VersionOrRequirement::Requirement(requirement) => { let Some(version) = List::download(list) - .await? + .await + .with_context(|| format!("Failed to download solc builds list from {list}"))? .builds .into_iter() .map(|build| build.version) @@ -107,11 +114,20 @@ impl SolcDownloader { /// Errors out if the download fails or the digest of the downloaded file /// mismatches the expected digest from the release [List]. pub async fn download(&self) -> anyhow::Result> { - let builds = List::download(self.list).await?.builds; + let builds = List::download(self.list) + .await + .with_context(|| format!("Failed to download solc builds list from {}", self.list))? + .builds; let build = builds .iter() .find(|build| build.version == self.version) - .ok_or_else(|| anyhow::anyhow!("solc v{} not found builds", self.version))?; + .ok_or_else(|| anyhow::anyhow!("solc v{} not found builds", self.version)) + .with_context(|| { + format!( + "Requested solc version {} was not found in builds list fetched from {}", + self.version, self.list + ) + })?; let path = build.path.clone(); let expected_digest = build @@ -121,7 +137,13 @@ impl SolcDownloader { .to_string(); let url = format!("{}/{}/{}", Self::BASE_URL, self.target, path.display()); - let file = reqwest::get(url).await?.bytes().await?.to_vec(); + let file = reqwest::get(&url) + .await + .with_context(|| format!("Failed to GET solc binary from {url}"))? + .bytes() + .await + .with_context(|| format!("Failed to read solc binary bytes from {url}"))? + .to_vec(); if hex::encode(Sha256::digest(&file)) != expected_digest { anyhow::bail!("sha256 mismatch for solc version {}", self.version); diff --git a/crates/solc-binaries/src/lib.rs b/crates/solc-binaries/src/lib.rs index 05d0de5..8bf4134 100644 --- a/crates/solc-binaries/src/lib.rs +++ b/crates/solc-binaries/src/lib.rs @@ -5,6 +5,7 @@ use std::path::{Path, PathBuf}; +use anyhow::Context; use cache::get_or_download; use download::SolcDownloader; @@ -34,7 +35,8 @@ pub async fn download_solc( SolcDownloader::windows(version).await } else { unimplemented!() - }?; + } + .context("Failed to initialize the Solc Downloader")?; get_or_download(cache_directory, &downloader).await } diff --git a/run_tests.sh b/run_tests.sh new file mode 100755 index 0000000..924d494 --- /dev/null +++ b/run_tests.sh @@ -0,0 +1,102 @@ +#!/bin/bash + +# Revive Differential Tests - Quick Start Script +# This script clones the test repository, sets up the corpus file, and runs the tool + +set -e # Exit on any error + +# Colors for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +# Configuration +TEST_REPO_URL="https://github.com/paritytech/resolc-compiler-tests" +TEST_REPO_DIR="resolc-compiler-tests" +CORPUS_FILE="./corpus.json" +WORKDIR="workdir" + +# Optional positional argument: path to polkadot-sdk directory +POLKADOT_SDK_DIR="${1:-}" + +# Binary paths (default to names in $PATH) +REVIVE_DEV_NODE_BIN="revive-dev-node" +ETH_RPC_BIN="eth-rpc" +SUBSTRATE_NODE_BIN="substrate-node" + +echo -e "${GREEN}=== Revive Differential Tests Quick Start ===${NC}" +echo "" + +# Check if test repo already exists +if [ -d "$TEST_REPO_DIR" ]; then + echo -e "${YELLOW}Test repository already exists. Pulling latest changes...${NC}" + cd "$TEST_REPO_DIR" + git pull + cd .. 
+else + echo -e "${GREEN}Cloning test repository...${NC}" + git clone "$TEST_REPO_URL" +fi + +# If polkadot-sdk path is provided, verify and use binaries from there; build if needed +if [ -n "$POLKADOT_SDK_DIR" ]; then + if [ ! -d "$POLKADOT_SDK_DIR" ]; then + echo -e "${RED}Provided polkadot-sdk directory does not exist: $POLKADOT_SDK_DIR${NC}" + exit 1 + fi + + POLKADOT_SDK_DIR=$(realpath "$POLKADOT_SDK_DIR") + echo -e "${GREEN}Using polkadot-sdk at: $POLKADOT_SDK_DIR${NC}" + + REVIVE_DEV_NODE_BIN="$POLKADOT_SDK_DIR/target/release/revive-dev-node" + ETH_RPC_BIN="$POLKADOT_SDK_DIR/target/release/eth-rpc" + SUBSTRATE_NODE_BIN="$POLKADOT_SDK_DIR/target/release/substrate-node" + + if [ ! -x "$REVIVE_DEV_NODE_BIN" ] || [ ! -x "$ETH_RPC_BIN" ] || [ ! -x "$SUBSTRATE_NODE_BIN" ]; then + echo -e "${YELLOW}Required binaries not found in release target. Building...${NC}" + (cd "$POLKADOT_SDK_DIR" && cargo build --release --package revive-dev-node --package eth-rpc --package substrate-node) + fi + + for bin in "$REVIVE_DEV_NODE_BIN" "$ETH_RPC_BIN" "$SUBSTRATE_NODE_BIN"; do + if [ ! -x "$bin" ]; then + echo -e "${RED}Expected binary not found after build: $bin${NC}" + exit 1 + fi + done +else + echo -e "${YELLOW}No polkadot-sdk path provided. Using binaries from $PATH.${NC}" +fi + +# Create corpus file with absolute path resolved at runtime +echo -e "${GREEN}Creating corpus file...${NC}" +ABSOLUTE_PATH=$(realpath "$TEST_REPO_DIR/fixtures/solidity/") + +cat > "$CORPUS_FILE" << EOF +{ + "name": "MatterLabs Solidity Simple, Complex, and Semantic Tests", + "path": "$ABSOLUTE_PATH" +} +EOF + +echo -e "${GREEN}Corpus file created: $CORPUS_FILE${NC}" + +# Create workdir if it doesn't exist +mkdir -p "$WORKDIR" + +echo -e "${GREEN}Starting differential tests...${NC}" +echo "This may take a while..." +echo "" + +# Run the tool +RUST_LOG="error" cargo run --release -- \ + --corpus "$CORPUS_FILE" \ + --workdir "$WORKDIR" \ + --number-of-nodes 5 \ + --kitchensink "$SUBSTRATE_NODE_BIN" \ + --revive-dev-node "$REVIVE_DEV_NODE_BIN" \ + --eth_proxy "$ETH_RPC_BIN" \ + > logs.log \ + 2> output.log + +echo -e "${GREEN}=== Test run completed! ===${NC}" \ No newline at end of file
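# Usage sketch for the script above (assumptions: the three binaries are either
# on $PATH or buildable inside a polkadot-sdk checkout; flag spellings are
# copied verbatim from the script):
#
#   ./run_tests.sh                   # resolve revive-dev-node, eth-rpc, substrate-node from $PATH
#   ./run_tests.sh ../polkadot-sdk   # build missing binaries with `cargo build --release` there
#
# Per the final redirections, stdout is captured in logs.log and stderr in output.log.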