diff --git a/.gitignore b/.gitignore index cf26eca..505fb8b 100644 --- a/.gitignore +++ b/.gitignore @@ -6,4 +6,6 @@ node_modules # We do not want to commit any log files that we produce from running the code locally so this is # added to the .gitignore file. -*.log \ No newline at end of file +*.log + +profile.json.gz \ No newline at end of file diff --git a/Cargo.lock b/Cargo.lock index 20b1631..a04261a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4482,6 +4482,7 @@ dependencies = [ "alloy", "alloy-primitives", "anyhow", + "dashmap", "foundry-compilers-artifacts", "revive-common", "revive-dt-common", @@ -4532,6 +4533,7 @@ dependencies = [ "tempfile", "tokio", "tracing", + "tracing-appender", "tracing-subscriber", ] @@ -4543,6 +4545,7 @@ dependencies = [ "alloy-primitives", "alloy-sol-types", "anyhow", + "futures", "regex", "revive-common", "revive-dt-common", @@ -4592,7 +4595,6 @@ dependencies = [ "revive-dt-format", "serde", "serde_json", - "tracing", ] [[package]] @@ -6108,6 +6110,18 @@ dependencies = [ "tracing-core", ] +[[package]] +name = "tracing-appender" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" +dependencies = [ + "crossbeam-channel", + "thiserror 1.0.69", + "time", + "tracing-subscriber", +] + [[package]] name = "tracing-attributes" version = "0.1.28" diff --git a/Cargo.toml b/Cargo.toml index 598234d..d8b4213 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -28,6 +28,7 @@ anyhow = "1.0" bson = { version = "2.15.0" } cacache = { version = "13.1.0" } clap = { version = "4", features = ["derive"] } +dashmap = { version = "6.1.0" } foundry-compilers-artifacts = { version = "0.18.0" } futures = { version = "0.3.31" } hex = "0.4.3" @@ -54,7 +55,8 @@ tokio = { version = "1.47.0", default-features = false, features = [ "rt", ] } uuid = { version = "1.8", features = ["v4"] } -tracing = "0.1.41" +tracing = { version = "0.1.41" } +tracing-appender = 
{ version = "0.2.3" } tracing-subscriber = { version = "0.3.19", default-features = false, features = [ "fmt", "json", @@ -89,3 +91,5 @@ features = [ inherits = "release" lto = true codegen-units = 1 + +[workspace.lints.clippy] diff --git a/clippy.toml b/clippy.toml new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/clippy.toml @@ -0,0 +1 @@ + diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml index 128e464..20cf865 100644 --- a/crates/common/Cargo.toml +++ b/crates/common/Cargo.toml @@ -15,3 +15,6 @@ once_cell = { workspace = true } semver = { workspace = true } serde = { workspace = true } tokio = { workspace = true, default-features = false, features = ["time"] } + +[lints] +workspace = true diff --git a/crates/common/src/macros/define_wrapper_type.rs b/crates/common/src/macros/define_wrapper_type.rs index 7eb28bc..2196595 100644 --- a/crates/common/src/macros/define_wrapper_type.rs +++ b/crates/common/src/macros/define_wrapper_type.rs @@ -1,3 +1,14 @@ +#[macro_export] +macro_rules! impl_for_wrapper { + (Display, $ident: ident) => { + impl std::fmt::Display for $ident { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + std::fmt::Display::fmt(&self.0, f) + } + } + }; +} + /// Defines wrappers around types. /// /// For example, the macro invocation seen below: @@ -42,7 +53,13 @@ macro_rules! define_wrapper_type { ( $(#[$meta: meta])* - $vis:vis struct $ident: ident($ty: ty); + $vis:vis struct $ident: ident($ty: ty) + + $( + impl $($trait_ident: ident),* + )? + + ; ) => { $(#[$meta])* $vis struct $ident($ty); @@ -98,9 +115,15 @@ macro_rules! define_wrapper_type { value.0 } } + + $( + $( + $crate::macros::impl_for_wrapper!($trait_ident, $ident); + )* + )? }; } /// Technically not needed but this allows for the macro to be found in the `macros` module of the /// crate in addition to being found in the root of the crate. 
-pub use define_wrapper_type; +pub use {define_wrapper_type, impl_for_wrapper}; diff --git a/crates/compiler/Cargo.toml b/crates/compiler/Cargo.toml index 9e10a10..6797a22 100644 --- a/crates/compiler/Cargo.toml +++ b/crates/compiler/Cargo.toml @@ -18,9 +18,13 @@ revive-common = { workspace = true } alloy = { workspace = true } alloy-primitives = { workspace = true } anyhow = { workspace = true } +dashmap = { workspace = true } foundry-compilers-artifacts = { workspace = true } semver = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } tracing = { workspace = true } tokio = { workspace = true } + +[lints] +workspace = true diff --git a/crates/compiler/src/lib.rs b/crates/compiler/src/lib.rs index 799124e..05d9868 100644 --- a/crates/compiler/src/lib.rs +++ b/crates/compiler/src/lib.rs @@ -47,7 +47,7 @@ pub trait SolidityCompiler { version: impl Into, ) -> impl Future>; - fn version(&self) -> anyhow::Result; + fn version(&self) -> impl Future>; /// Does the compiler support the provided mode and version settings? fn supports_mode( diff --git a/crates/compiler/src/revive_resolc.rs b/crates/compiler/src/revive_resolc.rs index efa0812..5e549a5 100644 --- a/crates/compiler/src/revive_resolc.rs +++ b/crates/compiler/src/revive_resolc.rs @@ -4,8 +4,10 @@ use std::{ path::PathBuf, process::{Command, Stdio}, + sync::LazyLock, }; +use dashmap::DashMap; use revive_dt_common::types::VersionOrRequirement; use revive_dt_config::Arguments; use revive_solc_json_interface::{ @@ -219,26 +221,39 @@ impl SolidityCompiler for Resolc { Ok(PathBuf::from("resolc")) } - fn version(&self) -> anyhow::Result { - // Logic for parsing the resolc version from the following string: - // Solidity frontend for the revive compiler version 0.3.0+commit.b238913.llvm-18.1.8 + async fn version(&self) -> anyhow::Result { + /// This is a cache of the path of the compiler to the version number of the compiler. 
We + /// choose to cache the version in this way rather than through a field on the struct since + /// compiler objects are being created all the time from the path and the compiler object is + /// not reused over time. + static VERSION_CACHE: LazyLock> = LazyLock::new(Default::default); - let output = Command::new(self.resolc_path.as_path()) - .arg("--version") - .stdout(Stdio::piped()) - .spawn()? - .wait_with_output()? - .stdout; - let output = String::from_utf8_lossy(&output); - let version_string = output - .split("version ") - .nth(1) - .context("Version parsing failed")? - .split("+") - .next() - .context("Version parsing failed")?; + match VERSION_CACHE.entry(self.resolc_path.clone()) { + dashmap::Entry::Occupied(occupied_entry) => Ok(occupied_entry.get().clone()), + dashmap::Entry::Vacant(vacant_entry) => { + let output = Command::new(self.resolc_path.as_path()) + .arg("--version") + .stdout(Stdio::piped()) + .spawn()? + .wait_with_output()? + .stdout; - Version::parse(version_string).map_err(Into::into) + let output = String::from_utf8_lossy(&output); + let version_string = output + .split("version ") + .nth(1) + .context("Version parsing failed")? 
+ .split("+") + .next() + .context("Version parsing failed")?; + + let version = Version::parse(version_string)?; + + vacant_entry.insert(version.clone()); + + Ok(version) + } + } } fn supports_mode( @@ -268,7 +283,7 @@ mod test { let compiler = Resolc::new(path); // Act - let version = compiler.version(); + let version = compiler.version().await; // Assert let _ = version.expect("Failed to get version"); diff --git a/crates/compiler/src/solc.rs b/crates/compiler/src/solc.rs index b785b06..a7d8501 100644 --- a/crates/compiler/src/solc.rs +++ b/crates/compiler/src/solc.rs @@ -4,8 +4,10 @@ use std::{ path::PathBuf, process::{Command, Stdio}, + sync::LazyLock, }; +use dashmap::DashMap; use revive_dt_common::types::VersionOrRequirement; use revive_dt_config::Arguments; use revive_dt_solc_binaries::download_solc; @@ -47,7 +49,7 @@ impl SolidityCompiler for Solc { }: CompilerInput, _: Self::Options, ) -> anyhow::Result { - let compiler_supports_via_ir = self.version()? >= SOLC_VERSION_SUPPORTING_VIA_YUL_IR; + let compiler_supports_via_ir = self.version().await? >= SOLC_VERSION_SUPPORTING_VIA_YUL_IR; // Be careful to entirely omit the viaIR field if the compiler does not support it, // as it will error if you provide fields it does not know about. Because @@ -209,30 +211,44 @@ impl SolidityCompiler for Solc { Ok(path) } - fn version(&self) -> anyhow::Result { - // The following is the parsing code for the version from the solc version strings which - // look like the following: - // ``` - // solc, the solidity compiler commandline interface - // Version: 0.8.30+commit.73712a01.Darwin.appleclang - // ``` + async fn version(&self) -> anyhow::Result { + /// This is a cache of the path of the compiler to the version number of the compiler. We + /// choose to cache the version in this way rather than through a field on the struct since + /// compiler objects are being created all the time from the path and the compiler object is + /// not reused over time. 
+ static VERSION_CACHE: LazyLock> = LazyLock::new(Default::default); - let child = Command::new(self.solc_path.as_path()) - .arg("--version") - .stdout(Stdio::piped()) - .spawn()?; - let output = child.wait_with_output()?; - let output = String::from_utf8_lossy(&output.stdout); - let version_line = output - .split("Version: ") - .nth(1) - .context("Version parsing failed")?; - let version_string = version_line - .split("+") - .next() - .context("Version parsing failed")?; + match VERSION_CACHE.entry(self.solc_path.clone()) { + dashmap::Entry::Occupied(occupied_entry) => Ok(occupied_entry.get().clone()), + dashmap::Entry::Vacant(vacant_entry) => { + // The following is the parsing code for the version from the solc version strings + // which look like the following: + // ``` + // solc, the solidity compiler commandline interface + // Version: 0.8.30+commit.73712a01.Darwin.appleclang + // ``` + let child = Command::new(self.solc_path.as_path()) + .arg("--version") + .stdout(Stdio::piped()) + .spawn()?; + let output = child.wait_with_output()?; + let output = String::from_utf8_lossy(&output.stdout); + let version_line = output + .split("Version: ") + .nth(1) + .context("Version parsing failed")?; + let version_string = version_line + .split("+") + .next() + .context("Version parsing failed")?; - Version::parse(version_string).map_err(Into::into) + let version = Version::parse(version_string)?; + + vacant_entry.insert(version.clone()); + + Ok(version) + } + } } fn supports_mode( @@ -256,15 +272,13 @@ mod test { async fn compiler_version_can_be_obtained() { // Arrange let args = Arguments::default(); - println!("Getting compiler path"); let path = Solc::get_compiler_executable(&args, Version::new(0, 7, 6)) .await .unwrap(); - println!("Got compiler path"); let compiler = Solc::new(path); // Act - let version = compiler.version(); + let version = compiler.version().await; // Assert assert_eq!( @@ -277,15 +291,13 @@ mod test { async fn compiler_version_can_be_obtained1() 
{ // Arrange let args = Arguments::default(); - println!("Getting compiler path"); let path = Solc::get_compiler_executable(&args, Version::new(0, 4, 21)) .await .unwrap(); - println!("Got compiler path"); let compiler = Solc::new(path); // Act - let version = compiler.version(); + let version = compiler.version().await; // Assert assert_eq!( diff --git a/crates/compiler/tests/lib.rs b/crates/compiler/tests/lib.rs index 733e2d3..80858f2 100644 --- a/crates/compiler/tests/lib.rs +++ b/crates/compiler/tests/lib.rs @@ -11,7 +11,6 @@ async fn contracts_can_be_compiled_with_solc() { let compiler_path = Solc::get_compiler_executable(&args, Version::new(0, 8, 30)) .await .unwrap(); - println!("About to assert"); // Act let output = Compiler::::new() diff --git a/crates/config/Cargo.toml b/crates/config/Cargo.toml index e58c747..10c5c61 100644 --- a/crates/config/Cargo.toml +++ b/crates/config/Cargo.toml @@ -15,3 +15,5 @@ semver = { workspace = true } temp-dir = { workspace = true } serde = { workspace = true } +[lints] +workspace = true diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index b747bc1..a4b2221 100644 --- a/crates/core/Cargo.toml +++ b/crates/core/Cargo.toml @@ -31,9 +31,13 @@ indexmap = { workspace = true } once_cell = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } +tracing-appender = { workspace = true } tracing-subscriber = { workspace = true } semver = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } temp-dir = { workspace = true } tempfile = { workspace = true } + +[lints] +workspace = true diff --git a/crates/core/src/cached_compiler.rs b/crates/core/src/cached_compiler.rs index ee05428..b94b1f3 100644 --- a/crates/core/src/cached_compiler.rs +++ b/crates/core/src/cached_compiler.rs @@ -62,8 +62,9 @@ impl CachedCompiler { compiler_version_or_requirement, ) .await?; - let compiler_version = - ::new(compiler_path.clone()).version()?; + let compiler_version = 
::new(compiler_path.clone()) + .version() + .await?; let cache_key = CacheKey { platform_key: P::config_id().to_string(), diff --git a/crates/core/src/driver/mod.rs b/crates/core/src/driver/mod.rs index 35ef782..4912b9a 100644 --- a/crates/core/src/driver/mod.rs +++ b/crates/core/src/driver/mod.rs @@ -16,26 +16,24 @@ use alloy::rpc::types::trace::geth::{ }; use alloy::{ primitives::Address, - rpc::types::{ - TransactionRequest, - trace::geth::{AccountState, DiffMode}, - }, + rpc::types::{TransactionRequest, trace::geth::DiffMode}, }; use anyhow::Context; +use futures::TryStreamExt; use indexmap::IndexMap; use revive_dt_format::traits::{ResolutionContext, ResolverApi}; use semver::Version; -use revive_dt_format::case::{Case, CaseIdx}; +use revive_dt_format::case::Case; use revive_dt_format::input::{ - BalanceAssertion, Calldata, EtherValue, Expected, ExpectedOutput, Input, Method, + BalanceAssertion, Calldata, EtherValue, Expected, ExpectedOutput, Input, Method, StepIdx, StorageEmptyAssertion, }; use revive_dt_format::metadata::{ContractIdent, ContractInstance, ContractPathAndIdent}; use revive_dt_format::{input::Step, metadata::Metadata}; -use revive_dt_node::Node; use revive_dt_node_interaction::EthereumNode; -use tracing::Instrument; +use tokio::try_join; +use tracing::{Instrument, info, info_span, instrument}; use crate::Platform; @@ -77,38 +75,38 @@ where pub async fn handle_step( &mut self, metadata: &Metadata, - case_idx: CaseIdx, step: &Step, node: &T::Blockchain, ) -> anyhow::Result { match step { Step::FunctionCall(input) => { let (receipt, geth_trace, diff_mode) = - self.handle_input(metadata, case_idx, input, node).await?; + self.handle_input(metadata, input, node).await?; Ok(StepOutput::FunctionCall(receipt, geth_trace, diff_mode)) } Step::BalanceAssertion(balance_assertion) => { - self.handle_balance_assertion(metadata, case_idx, balance_assertion, node) + self.handle_balance_assertion(metadata, balance_assertion, node) .await?; 
Ok(StepOutput::BalanceAssertion) } Step::StorageEmptyAssertion(storage_empty) => { - self.handle_storage_empty(metadata, case_idx, storage_empty, node) + self.handle_storage_empty(metadata, storage_empty, node) .await?; Ok(StepOutput::StorageEmptyAssertion) } } + .inspect(|_| info!("Step Succeeded")) } + #[instrument(level = "info", name = "Handling Input", skip_all)] pub async fn handle_input( &mut self, metadata: &Metadata, - case_idx: CaseIdx, input: &Input, node: &T::Blockchain, ) -> anyhow::Result<(TransactionReceipt, GethTrace, DiffMode)> { let deployment_receipts = self - .handle_input_contract_deployment(metadata, case_idx, input, node) + .handle_input_contract_deployment(metadata, input, node) .await?; let execution_receipt = self .handle_input_execution(input, deployment_receipts, node) @@ -117,16 +115,17 @@ where .handle_input_call_frame_tracing(&execution_receipt, node) .await?; self.handle_input_variable_assignment(input, &tracing_result)?; - self.handle_input_expectations(input, &execution_receipt, node, &tracing_result) - .await?; - self.handle_input_diff(case_idx, execution_receipt, node) - .await + let (_, (geth_trace, diff_mode)) = try_join!( + self.handle_input_expectations(input, &execution_receipt, node, &tracing_result), + self.handle_input_diff(&execution_receipt, node) + )?; + Ok((execution_receipt, geth_trace, diff_mode)) } + #[instrument(level = "info", name = "Handling Balance Assertion", skip_all)] pub async fn handle_balance_assertion( &mut self, metadata: &Metadata, - _: CaseIdx, balance_assertion: &BalanceAssertion, node: &T::Blockchain, ) -> anyhow::Result<()> { @@ -137,10 +136,10 @@ where Ok(()) } + #[instrument(level = "info", name = "Handling Storage Assertion", skip_all)] pub async fn handle_storage_empty( &mut self, metadata: &Metadata, - _: CaseIdx, storage_empty: &StorageEmptyAssertion, node: &T::Blockchain, ) -> anyhow::Result<()> { @@ -152,10 +151,10 @@ where } /// Handles the contract deployment for a given input performing 
it if it needs to be performed. + #[instrument(level = "info", skip_all)] async fn handle_input_contract_deployment( &mut self, metadata: &Metadata, - _: CaseIdx, input: &Input, node: &T::Blockchain, ) -> anyhow::Result> { @@ -170,11 +169,6 @@ where instances_we_must_deploy.insert(input.instance.clone(), true); } - tracing::debug!( - instances_to_deploy = instances_we_must_deploy.len(), - "Computed the number of required deployments for input" - ); - let mut receipts = HashMap::new(); for (instance, deploy_with_constructor_arguments) in instances_we_must_deploy.into_iter() { let calldata = deploy_with_constructor_arguments.then_some(&input.calldata); @@ -201,6 +195,7 @@ where } /// Handles the execution of the input in terms of the calls that need to be made. + #[instrument(level = "info", skip_all)] async fn handle_input_execution( &mut self, input: &Input, @@ -218,33 +213,21 @@ where .legacy_transaction(node, self.default_resolution_context()) .await { - Ok(tx) => { - tracing::debug!("Legacy transaction data: {tx:#?}"); - tx - } + Ok(tx) => tx, Err(err) => { - tracing::error!("Failed to construct legacy transaction: {err:?}"); return Err(err); } }; - tracing::trace!("Executing transaction for input: {input:?}"); - match node.execute_transaction(tx).await { Ok(receipt) => Ok(receipt), - Err(err) => { - tracing::error!( - "Failed to execute transaction when executing the contract: {}, {:?}", - &*input.instance, - err - ); - Err(err) - } + Err(err) => Err(err), } } } } + #[instrument(level = "info", skip_all)] async fn handle_input_call_frame_tracing( &self, execution_receipt: &TransactionReceipt, @@ -259,7 +242,10 @@ where tracer_config: GethDebugTracerConfig(serde_json::json! 
{{ "onlyTopCall": true, "withLog": false, - "withReturnData": false + "withStorage": false, + "withMemory": false, + "withStack": false, + "withReturnData": true }}), ..Default::default() }, @@ -272,6 +258,7 @@ where }) } + #[instrument(level = "info", skip_all)] fn handle_input_variable_assignment( &mut self, input: &Input, @@ -302,8 +289,9 @@ where Ok(()) } + #[instrument(level = "info", skip_all)] async fn handle_input_expectations( - &mut self, + &self, input: &Input, execution_receipt: &TransactionReceipt, resolver: &impl ResolverApi, @@ -337,24 +325,25 @@ where } } - for expectation in expectations.iter() { - self.handle_input_expectation_item( - execution_receipt, - resolver, - expectation, - tracing_result, - ) - .await?; - } - - Ok(()) + futures::stream::iter(expectations.into_iter().map(Ok)) + .try_for_each_concurrent(None, |expectation| async move { + self.handle_input_expectation_item( + execution_receipt, + resolver, + expectation, + tracing_result, + ) + .await + }) + .await } + #[instrument(level = "info", skip_all)] async fn handle_input_expectation_item( - &mut self, + &self, execution_receipt: &TransactionReceipt, resolver: &impl ResolverApi, - expectation: &ExpectedOutput, + expectation: ExpectedOutput, tracing_result: &CallFrame, ) -> anyhow::Result<()> { if let Some(ref version_requirement) = expectation.compiler_version { @@ -492,12 +481,12 @@ where Ok(()) } + #[instrument(level = "info", skip_all)] async fn handle_input_diff( - &mut self, - _: CaseIdx, - execution_receipt: TransactionReceipt, + &self, + execution_receipt: &TransactionReceipt, node: &T::Blockchain, - ) -> anyhow::Result<(TransactionReceipt, GethTrace, DiffMode)> { + ) -> anyhow::Result<(GethTrace, DiffMode)> { let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig { diff_mode: Some(true), disable_code: None, @@ -505,13 +494,14 @@ where }); let trace = node - .trace_transaction(&execution_receipt, trace_options) + .trace_transaction(execution_receipt, 
trace_options) .await?; - let diff = node.state_diff(&execution_receipt).await?; + let diff = node.state_diff(execution_receipt).await?; - Ok((execution_receipt, trace, diff)) + Ok((trace, diff)) } + #[instrument(level = "info", skip_all)] pub async fn handle_balance_assertion_contract_deployment( &mut self, metadata: &Metadata, @@ -537,6 +527,7 @@ where Ok(()) } + #[instrument(level = "info", skip_all)] pub async fn handle_balance_assertion_execution( &mut self, BalanceAssertion { @@ -572,6 +563,7 @@ where Ok(()) } + #[instrument(level = "info", skip_all)] pub async fn handle_storage_empty_assertion_contract_deployment( &mut self, metadata: &Metadata, @@ -597,6 +589,7 @@ where Ok(()) } + #[instrument(level = "info", skip_all)] pub async fn handle_storage_empty_assertion_execution( &mut self, StorageEmptyAssertion { @@ -658,7 +651,6 @@ where contract_ident, }) = metadata.contract_sources()?.remove(contract_instance) else { - tracing::error!("Contract source not found for instance"); anyhow::bail!( "Contract source not found for instance {:?}", contract_instance @@ -671,11 +663,6 @@ where .and_then(|source_file_contracts| source_file_contracts.get(contract_ident.as_ref())) .cloned() else { - tracing::error!( - contract_source_path = contract_source_path.display().to_string(), - contract_ident = contract_ident.as_ref(), - "Failed to find information for contract" - ); anyhow::bail!( "Failed to find information for contract {:?}", contract_instance @@ -724,7 +711,6 @@ where }; let Some(address) = receipt.contract_address else { - tracing::error!("Contract deployment transaction didn't return an address"); anyhow::bail!("Contract deployment didn't return an address"); }; tracing::info!( @@ -751,7 +737,6 @@ where pub struct CaseDriver<'a, Leader: Platform, Follower: Platform> { metadata: &'a Metadata, case: &'a Case, - case_idx: CaseIdx, leader_node: &'a Leader::Blockchain, follower_node: &'a Follower::Blockchain, leader_state: CaseState, @@ -767,7 +752,6 @@ where pub 
fn new( metadata: &'a Metadata, case: &'a Case, - case_idx: impl Into, leader_node: &'a L::Blockchain, follower_node: &'a F::Blockchain, leader_state: CaseState, @@ -776,7 +760,6 @@ where Self { metadata, case, - case_idx: case_idx.into(), leader_node, follower_node, leader_state, @@ -784,79 +767,44 @@ where } } - pub fn trace_diff_mode(label: &str, diff: &DiffMode) { - tracing::trace!("{label} - PRE STATE:"); - for (addr, state) in &diff.pre { - Self::trace_account_state(" [pre]", addr, state); - } - - tracing::trace!("{label} - POST STATE:"); - for (addr, state) in &diff.post { - Self::trace_account_state(" [post]", addr, state); - } - } - - fn trace_account_state(prefix: &str, addr: &Address, state: &AccountState) { - tracing::trace!("{prefix} 0x{addr:x}"); - - if let Some(balance) = &state.balance { - tracing::trace!("{prefix} balance: {balance}"); - } - if let Some(nonce) = &state.nonce { - tracing::trace!("{prefix} nonce: {nonce}"); - } - if let Some(code) = &state.code { - tracing::trace!("{prefix} code: {code}"); - } - } - + #[instrument(level = "info", name = "Executing Case", skip_all)] pub async fn execute(&mut self) -> anyhow::Result { - if !self - .leader_node - .matches_target(self.metadata.targets.as_deref()) - || !self - .follower_node - .matches_target(self.metadata.targets.as_deref()) - { - tracing::warn!( - targets = ?self.metadata.targets, - "Either the leader or follower node do not support the targets of the file" - ); - return Ok(0); - } - let mut steps_executed = 0; - for (step_idx, step) in self.case.steps_iterator().enumerate() { - let tracing_span = tracing::info_span!("Handling input", step_idx); + for (step_idx, step) in self + .case + .steps_iterator() + .enumerate() + .map(|(idx, v)| (StepIdx::new(idx), v)) + { + let (leader_step_output, follower_step_output) = try_join!( + self.leader_state + .handle_step(self.metadata, &step, self.leader_node) + .instrument(info_span!( + "Handling Step", + %step_idx, + target = "Leader", + )), + 
self.follower_state + .handle_step(self.metadata, &step, self.follower_node) + .instrument(info_span!( + "Handling Step", + %step_idx, + target = "Follower", + )) + )?; - let leader_step_output = self - .leader_state - .handle_step(self.metadata, self.case_idx, &step, self.leader_node) - .instrument(tracing_span.clone()) - .await?; - let follower_step_output = self - .follower_state - .handle_step(self.metadata, self.case_idx, &step, self.follower_node) - .instrument(tracing_span) - .await?; match (leader_step_output, follower_step_output) { - ( - StepOutput::FunctionCall(leader_receipt, _, leader_diff), - StepOutput::FunctionCall(follower_receipt, _, follower_diff), - ) => { - if leader_diff == follower_diff { - tracing::debug!("State diffs match between leader and follower."); - } else { - tracing::debug!("State diffs mismatch between leader and follower."); - Self::trace_diff_mode("Leader", &leader_diff); - Self::trace_diff_mode("Follower", &follower_diff); - } - - if leader_receipt.logs() != follower_receipt.logs() { - tracing::debug!("Log/event mismatch between leader and follower."); - tracing::trace!("Leader logs: {:?}", leader_receipt.logs()); - tracing::trace!("Follower logs: {:?}", follower_receipt.logs()); - } + (StepOutput::FunctionCall(..), StepOutput::FunctionCall(..)) => { + // TODO: We need to actually work out how/if we will compare the diff between + // the leader and the follower. The diffs are almost guaranteed to be different + // from leader and follower and therefore without an actual strategy for this + // we have something that's guaranteed to fail. Even a simple call to some + // contract will produce two non-equal diffs because on the leader the contract + // has address X and on the follower it has address Y. On the leader contract X + // contains address A in the state and on the follower it contains address B. So + // this isn't exactly a straightforward thing to do and I'm not even sure that + // it's possible to do. 
Once we have an actual strategy for doing the diffs we + // will implement it here. Until then, this remains empty. } (StepOutput::BalanceAssertion, StepOutput::BalanceAssertion) => {} (StepOutput::StorageEmptyAssertion, StepOutput::StorageEmptyAssertion) => {} diff --git a/crates/core/src/main.rs b/crates/core/src/main.rs index 2c7b5a5..f74101d 100644 --- a/crates/core/src/main.rs +++ b/crates/core/src/main.rs @@ -1,8 +1,9 @@ mod cached_compiler; use std::{ - collections::HashMap, - path::{Path, PathBuf}, + collections::{BTreeMap, HashMap}, + io::{BufWriter, Write, stderr}, + path::Path, sync::{Arc, LazyLock}, time::Instant, }; @@ -13,16 +14,18 @@ use alloy::{ }; use anyhow::Context; use clap::Parser; -use futures::stream::futures_unordered::FuturesUnordered; +use futures::stream; use futures::{Stream, StreamExt}; +use indexmap::IndexMap; use revive_dt_node_interaction::EthereumNode; use temp_dir::TempDir; -use tokio::sync::mpsc; -use tracing::{Instrument, Level}; +use tokio::{sync::mpsc, try_join}; +use tracing::{debug, info, info_span, instrument}; +use tracing_appender::non_blocking::WorkerGuard; use tracing_subscriber::{EnvFilter, FmtSubscriber}; use revive_dt_common::types::Mode; -use revive_dt_compiler::SolidityCompiler; +use revive_dt_compiler::{CompilerOutput, SolidityCompiler}; use revive_dt_config::*; use revive_dt_core::{ Geth, Kitchensink, Platform, @@ -32,9 +35,10 @@ use revive_dt_format::{ case::{Case, CaseIdx}, corpus::Corpus, input::{Input, Step}, - metadata::{ContractPathAndIdent, Metadata, MetadataFile}, + metadata::{ContractPathAndIdent, MetadataFile}, + mode::ParsedMode, }; -use revive_dt_node::pool::NodePool; +use revive_dt_node::{Node, pool::NodePool}; use revive_dt_report::reporter::{Report, Span}; use crate::cached_compiler::CachedCompiler; @@ -42,20 +46,28 @@ use crate::cached_compiler::CachedCompiler; static TEMP_DIR: LazyLock = LazyLock::new(|| TempDir::new().unwrap()); /// this represents a single "test"; a mode, path and collection of 
cases. -#[derive(Clone)] -struct Test { - metadata: Metadata, - path: PathBuf, +#[derive(Clone, Debug)] +struct Test<'a> { + metadata: &'a MetadataFile, + metadata_file_path: &'a Path, mode: Mode, case_idx: CaseIdx, - case: Case, + case: &'a Case, } /// This represents the results that we gather from running test cases. type CaseResult = Result; fn main() -> anyhow::Result<()> { - let args = init_cli()?; + let (args, _guard) = init_cli()?; + info!( + leader = args.leader.to_string(), + follower = args.follower.to_string(), + working_directory = %args.directory().display(), + number_of_nodes = args.number_of_nodes, + invalidate_compilation_cache = args.invalidate_compilation_cache, + "Differential testing tool has been initialized" + ); let body = async { for (corpus, tests) in collect_corpora(&args)? { @@ -77,15 +89,25 @@ fn main() -> anyhow::Result<()> { .block_on(body) } -fn init_cli() -> anyhow::Result { +fn init_cli() -> anyhow::Result<(Arguments, WorkerGuard)> { + let (writer, guard) = tracing_appender::non_blocking::NonBlockingBuilder::default() + .lossy(false) + // Assuming that each line contains 255 characters and that each character is one byte, then + // this means that our buffer is about 4GBs large. 
+ .buffered_lines_limit(0x1000000) + .thread_name("buffered writer") + .finish(std::io::stdout()); + let subscriber = FmtSubscriber::builder() - .with_thread_ids(true) - .with_thread_names(true) + .with_writer(writer) + .with_thread_ids(false) + .with_thread_names(false) .with_env_filter(EnvFilter::from_default_env()) .with_ansi(false) .pretty() .finish(); tracing::subscriber::set_global_default(subscriber)?; + info!("Differential testing tool is starting"); let mut args = Arguments::parse(); @@ -103,19 +125,25 @@ fn init_cli() -> anyhow::Result { args.temp_dir = Some(&TEMP_DIR); } } - tracing::info!("workdir: {}", args.directory().display()); - Ok(args) + Ok((args, guard)) } +#[instrument(level = "debug", name = "Collecting Corpora", skip_all)] fn collect_corpora(args: &Arguments) -> anyhow::Result>> { let mut corpora = HashMap::new(); for path in &args.corpus { + let span = info_span!("Processing corpus file", path = %path.display()); + let _guard = span.enter(); + let corpus = Corpus::try_from_path(path)?; - tracing::info!("found corpus: {}", path.display()); + info!( + name = corpus.name(), + number_of_contained_paths = corpus.path_count(), + "Deserialized corpus file" + ); let tests = corpus.enumerate_tests(); - tracing::info!("corpus '{}' contains {} tests", &corpus.name(), tests.len()); corpora.insert(corpus, tests); } @@ -133,7 +161,7 @@ where L::Blockchain: revive_dt_node::Node + Send + Sync + 'static, F::Blockchain: revive_dt_node::Node + Send + Sync + 'static, { - let (report_tx, report_rx) = mpsc::unbounded_channel::<(Test, CaseResult)>(); + let (report_tx, report_rx) = mpsc::unbounded_channel::<(Test<'_>, CaseResult)>(); let tests = prepare_tests::(args, metadata_files); let driver_task = start_driver_task::(args, tests, span, report_tx).await?; @@ -144,111 +172,148 @@ where Ok(()) } -fn prepare_tests( +fn prepare_tests<'a, L, F>( args: &Arguments, - metadata_files: &[MetadataFile], -) -> impl Stream + metadata_files: &'a [MetadataFile], +) -> impl 
Stream> where L: Platform, F: Platform, L::Blockchain: revive_dt_node::Node + Send + Sync + 'static, F::Blockchain: revive_dt_node::Node + Send + Sync + 'static, { - metadata_files + let filtered_tests = metadata_files .iter() - .flat_map( - |MetadataFile { - path, - content: metadata, - }| { - metadata - .cases - .iter() - .enumerate() - .flat_map(move |(case_idx, case)| { - metadata - .solc_modes() - .into_iter() - .map(move |solc_mode| (path, metadata, case_idx, case, solc_mode)) - }) + .flat_map(|metadata_file| { + metadata_file + .cases + .iter() + .enumerate() + .map(move |(case_idx, case)| (metadata_file, case_idx, case)) + }) + // Flatten over the modes, prefer the case modes over the metadata file modes. + .flat_map(|(metadata_file, case_idx, case)| { + case.modes + .as_ref() + .or(metadata_file.modes.as_ref()) + .map(|modes| ParsedMode::many_to_modes(modes.iter()).collect::>()) + .unwrap_or(Mode::all().collect()) + .into_iter() + .map(move |mode| (metadata_file, case_idx, case, mode)) + }) + .fold( + IndexMap::<_, BTreeMap<_, Vec<_>>>::new(), + |mut map, (metadata_file, case_idx, case, mode)| { + let test = Test { + metadata: metadata_file, + metadata_file_path: metadata_file.metadata_file_path.as_path(), + mode: mode.clone(), + case_idx: CaseIdx::new(case_idx), + case, + }; + map.entry(mode) + .or_default() + .entry(test.case_idx) + .or_default() + .push(test); + map }, ) - .filter( - |(metadata_file_path, metadata, _, _, _)| match metadata.ignore { - Some(true) => { - tracing::warn!( - metadata_file_path = %metadata_file_path.display(), - "Ignoring metadata file" - ); - false - } - Some(false) | None => true, - }, - ) - .filter( - |(metadata_file_path, _, case_idx, case, _)| match case.ignore { - Some(true) => { - tracing::warn!( - metadata_file_path = %metadata_file_path.display(), - case_idx, - case_name = ?case.name, - "Ignoring case" - ); - false - } - Some(false) | None => true, - }, - ) - .filter(|(metadata_file_path, metadata, ..)| match 
metadata.required_evm_version { - Some(evm_version_requirement) => { - let is_allowed = evm_version_requirement - .matches(&::evm_version()) - && evm_version_requirement - .matches(&::evm_version()); + .into_values() + .flatten() + .flat_map(|(_, value)| value.into_iter()) + // Filter the test out if the leader and follower do not support the target. + .filter(|test| { + let leader_support = + ::matches_target(test.metadata.targets.as_deref()); + let follower_support = + ::matches_target(test.metadata.targets.as_deref()); + let is_allowed = leader_support && follower_support; + + if !is_allowed { + debug!( + file_path = %test.metadata.relative_path().display(), + leader_support, + follower_support, + "Target is not supported, throwing metadata file out" + ) + } + + is_allowed + }) + // Filter the test out if the metadata file is ignored. + .filter(|test| { + if test.metadata.ignore.is_some_and(|ignore| ignore) { + debug!( + file_path = %test.metadata.relative_path().display(), + "Metadata file is ignored, throwing case out" + ); + false + } else { + true + } + }) + // Filter the test case if the case is ignored. 
+ .filter(|test| { + if test.case.ignore.is_some_and(|ignore| ignore) { + debug!( + file_path = %test.metadata.relative_path().display(), + case_idx = %test.case_idx, + "Case is ignored, throwing case out" + ); + false + } else { + true + } + }) + // Filtering based on the EVM version compatibility + .filter(|test| { + if let Some(evm_version_requirement) = test.metadata.required_evm_version { + let leader_compatibility = evm_version_requirement + .matches(&::evm_version()); + let follower_compatibility = evm_version_requirement + .matches(&::evm_version()); + let is_allowed = leader_compatibility && follower_compatibility; if !is_allowed { - tracing::warn!( - metadata_file_path = %metadata_file_path.display(), - leader_evm_version = %::evm_version(), - follower_evm_version = %::evm_version(), - version_requirement = %evm_version_requirement, - "Skipped test since the EVM version requirement was not fulfilled." + debug!( + file_path = %test.metadata.relative_path().display(), + case_idx = %test.case_idx, + leader_compatibility, + follower_compatibility, + "EVM Version is incompatible, throwing case out" ); } is_allowed - } - None => true, - }) - .map(|(metadata_file_path, metadata, case_idx, case, solc_mode)| { - Test { - metadata: metadata.clone(), - path: metadata_file_path.to_path_buf(), - mode: solc_mode, - case_idx: case_idx.into(), - case: case.clone(), - } - }) - .map(async |test| test) - .collect::>() - .filter_map(async move |test| { - // Check that both compilers support this test, else we skip it - let is_supported = does_compiler_support_mode::(args, &test.mode).await.ok().unwrap_or(false) && - does_compiler_support_mode::(args, &test.mode).await.ok().unwrap_or(false); - - // We filter_map to avoid needing to clone `test`, but return it as-is. 
- if is_supported { - Some(test) } else { - tracing::warn!( - metadata_file_path = %test.path.display(), - case_idx = %test.case_idx, - case_name = ?test.case.name, - mode = %test.mode, - "Skipping test as one or both of the compilers don't support it" - ); - None + true } + }); + + stream::iter(filtered_tests) + // Filter based on the compiler compatibility + .filter_map(move |test| async move { + let leader_support = does_compiler_support_mode::(args, &test.mode) + .await + .ok() + .unwrap_or(false); + let follower_support = does_compiler_support_mode::(args, &test.mode) + .await + .ok() + .unwrap_or(false); + let is_allowed = leader_support && follower_support; + + if !is_allowed { + debug!( + file_path = %test.metadata.relative_path().display(), + leader_support, + follower_support, + "Compilers do not support this, throwing case out" + ); + } + + is_allowed.then_some(test) }) } @@ -259,7 +324,7 @@ async fn does_compiler_support_mode( let compiler_version_or_requirement = mode.compiler_version_to_use(args.solc.clone()); let compiler_path = P::Compiler::get_compiler_executable(args, compiler_version_or_requirement).await?; - let compiler_version = P::Compiler::new(compiler_path.clone()).version()?; + let compiler_version = P::Compiler::new(compiler_path.clone()).version().await?; Ok(P::Compiler::supports_mode( &compiler_version, @@ -268,11 +333,11 @@ async fn does_compiler_support_mode( )) } -async fn start_driver_task( +async fn start_driver_task<'a, L, F>( args: &Arguments, - tests: impl Stream, + tests: impl Stream>, span: Span, - report_tx: mpsc::UnboundedSender<(Test, CaseResult)>, + report_tx: mpsc::UnboundedSender<(Test<'a>, CaseResult)>, ) -> anyhow::Result> where L: Platform, @@ -310,19 +375,11 @@ where let leader_node = leader_nodes.round_robbin(); let follower_node = follower_nodes.round_robbin(); - let tracing_span = tracing::span!( - Level::INFO, - "Running driver", - metadata_file_path = %test.path.display(), - case_idx = ?test.case_idx, - 
solc_mode = ?test.mode, - ); - let result = handle_case_driver::( - &test.path, - &test.metadata, + test.metadata_file_path, + test.metadata, test.case_idx, - &test.case, + test.case, test.mode.clone(), args, cached_compiler, @@ -330,7 +387,6 @@ where follower_node, span, ) - .instrument(tracing_span) .await; report_tx @@ -341,7 +397,7 @@ where )) } -async fn start_reporter_task(mut report_rx: mpsc::UnboundedReceiver<(Test, CaseResult)>) { +async fn start_reporter_task(mut report_rx: mpsc::UnboundedReceiver<(Test<'_>, CaseResult)>) { let start = Instant::now(); const GREEN: &str = "\x1B[32m"; @@ -355,22 +411,25 @@ async fn start_reporter_task(mut report_rx: mpsc::UnboundedReceiver<(Test, CaseR let mut failures = vec![]; // Wait for reports to come from our test runner. When the channel closes, this ends. + let mut buf = BufWriter::new(stderr()); while let Some((test, case_result)) = report_rx.recv().await { let case_name = test.case.name.as_deref().unwrap_or("unnamed_case"); let case_idx = test.case_idx; - let test_path = test.path.display(); + let test_path = test.metadata_file_path.display(); let test_mode = test.mode.clone(); match case_result { Ok(_inputs) => { number_of_successes += 1; - eprintln!( + let _ = writeln!( + buf, "{GREEN}Case Succeeded:{COLOUR_RESET} {test_path} -> {case_name}:{case_idx} (mode: {test_mode})" ); } Err(err) => { number_of_failures += 1; - eprintln!( + let _ = writeln!( + buf, "{RED}Case Failed:{COLOUR_RESET} {test_path} -> {case_name}:{case_idx} (mode: {test_mode})" ); failures.push((test, err)); @@ -378,29 +437,31 @@ async fn start_reporter_task(mut report_rx: mpsc::UnboundedReceiver<(Test, CaseR } } - eprintln!(); + let _ = writeln!(buf,); let elapsed = start.elapsed(); // Now, log the failures with more complete errors at the bottom, like `cargo test` does, so // that we don't have to scroll through the entire output to find them. 
 if !failures.is_empty() {
-        eprintln!("{BOLD}Failures:{BOLD_RESET}\n");
+        let _ = writeln!(buf, "{BOLD}Failures:{BOLD_RESET}\n");
 for failure in failures {
 let (test, err) = failure;
 let case_name = test.case.name.as_deref().unwrap_or("unnamed_case");
 let case_idx = test.case_idx;
-            let test_path = test.path.display();
+            let test_path = test.metadata_file_path.display();
 let test_mode = test.mode.clone();
-            eprintln!(
+            let _ = writeln!(
+                buf,
 "---- {RED}Case Failed:{COLOUR_RESET} {test_path} -> {case_name}:{case_idx} (mode: {test_mode}) ----\n\n{err}\n"
 );
 }
 }
 // Summary at the end.
-    eprintln!(
+    let _ = writeln!(
+        buf,
 "{} cases: {GREEN}{number_of_successes}{COLOUR_RESET} cases succeeded, {RED}{number_of_failures}{COLOUR_RESET} cases failed in {} seconds",
 number_of_successes + number_of_failures,
 elapsed.as_secs()
@@ -408,9 +469,22 @@ async fn start_reporter_task(mut report_rx: mpsc::UnboundedReceiver<(Test, CaseR
 }
 #[allow(clippy::too_many_arguments)]
+#[instrument(
+    level = "info",
+    name = "Handling Case",
+    skip_all,
+    fields(
+        metadata_file_path = %metadata.relative_path().display(),
+        mode = %mode,
+        %case_idx,
+        case_name = case.name.as_deref().unwrap_or("Unnamed Case"),
+        leader_node = leader_node.id(),
+        follower_node = follower_node.id(),
+    )
+)]
 async fn handle_case_driver(
 metadata_file_path: &Path,
-    metadata: &Metadata,
+    metadata: &MetadataFile,
 case_idx: CaseIdx,
 case: &Case,
 mode: Mode,
@@ -426,16 +500,23 @@ where
 L::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
 F::Blockchain: revive_dt_node::Node + Send + Sync + 'static,
 {
-    let leader_pre_link_contracts = cached_compiler
-        .compile_contracts::(metadata, metadata_file_path, &mode, config, None)
-        .await?
-        .0
-        .contracts;
-    let follower_pre_link_contracts = cached_compiler
-        .compile_contracts::(metadata, metadata_file_path, &mode, config, None)
-        .await? 
- .0 - .contracts; + let ( + ( + CompilerOutput { + contracts: leader_pre_link_contracts, + }, + _, + ), + ( + CompilerOutput { + contracts: follower_pre_link_contracts, + }, + _, + ), + ) = try_join!( + cached_compiler.compile_contracts::(metadata, metadata_file_path, &mode, config, None), + cached_compiler.compile_contracts::(metadata, metadata_file_path, &mode, config, None) + )?; let mut leader_deployed_libraries = None::>; let mut follower_deployed_libraries = None::>; @@ -446,6 +527,8 @@ where .flatten() .flat_map(|(_, map)| map.values()) { + debug!(%library_instance, "Deploying Library Instance"); + let ContractPathAndIdent { contract_source_path: library_source_path, contract_ident: library_ident, @@ -465,24 +548,12 @@ where let leader_code = match alloy::hex::decode(leader_code) { Ok(code) => code, Err(error) => { - tracing::error!( - ?error, - contract_source_path = library_source_path.display().to_string(), - contract_ident = library_ident.as_ref(), - "Failed to hex-decode byte code - This could possibly mean that the bytecode requires linking" - ); anyhow::bail!("Failed to hex-decode the byte code {}", error) } }; let follower_code = match alloy::hex::decode(follower_code) { Ok(code) => code, Err(error) => { - tracing::error!( - ?error, - contract_source_path = library_source_path.display().to_string(), - contract_ident = library_ident.as_ref(), - "Failed to hex-decode byte code - This could possibly mean that the bytecode requires linking" - ); anyhow::bail!("Failed to hex-decode the byte code {}", error) } }; @@ -509,46 +580,28 @@ where follower_code, ); - let leader_receipt = match leader_node.execute_transaction(leader_tx).await { - Ok(receipt) => receipt, - Err(error) => { - tracing::error!( - node = std::any::type_name::(), - ?error, - "Contract deployment transaction failed." 
- ); - return Err(error); - } - }; - let follower_receipt = match follower_node.execute_transaction(follower_tx).await { - Ok(receipt) => receipt, - Err(error) => { - tracing::error!( - node = std::any::type_name::(), - ?error, - "Contract deployment transaction failed." - ); - return Err(error); - } - }; + let (leader_receipt, follower_receipt) = try_join!( + leader_node.execute_transaction(leader_tx), + follower_node.execute_transaction(follower_tx) + )?; - tracing::info!( + debug!( ?library_instance, library_address = ?leader_receipt.contract_address, "Deployed library to leader" ); - tracing::info!( + debug!( ?library_instance, library_address = ?follower_receipt.contract_address, "Deployed library to follower" ); - let Some(leader_library_address) = leader_receipt.contract_address else { - anyhow::bail!("Contract deployment didn't return an address"); - }; - let Some(follower_library_address) = follower_receipt.contract_address else { - anyhow::bail!("Contract deployment didn't return an address"); - }; + let leader_library_address = leader_receipt + .contract_address + .context("Contract deployment didn't return an address")?; + let follower_library_address = follower_receipt + .contract_address + .context("Contract deployment didn't return an address")?; leader_deployed_libraries.get_or_insert_default().insert( library_instance.clone(), @@ -568,46 +621,59 @@ where ); } - let (leader_post_link_contracts, leader_compiler_version) = cached_compiler - .compile_contracts::( + let ( + ( + CompilerOutput { + contracts: leader_post_link_contracts, + }, + leader_compiler_version, + ), + ( + CompilerOutput { + contracts: follower_post_link_contracts, + }, + follower_compiler_version, + ), + ) = try_join!( + cached_compiler.compile_contracts::( metadata, metadata_file_path, &mode, config, - leader_deployed_libraries.as_ref(), - ) - .await?; - let (follower_post_link_contracts, follower_compiler_version) = cached_compiler - .compile_contracts::( + 
leader_deployed_libraries.as_ref() + ), + cached_compiler.compile_contracts::( metadata, metadata_file_path, &mode, config, - follower_deployed_libraries.as_ref(), + follower_deployed_libraries.as_ref() ) - .await?; + )?; let leader_state = CaseState::::new( leader_compiler_version, - leader_post_link_contracts.contracts, + leader_post_link_contracts, leader_deployed_libraries.unwrap_or_default(), ); let follower_state = CaseState::::new( follower_compiler_version, - follower_post_link_contracts.contracts, + follower_post_link_contracts, follower_deployed_libraries.unwrap_or_default(), ); let mut driver = CaseDriver::::new( metadata, case, - case_idx, leader_node, follower_node, leader_state, follower_state, ); - driver.execute().await + driver + .execute() + .await + .inspect(|steps_executed| info!(steps_executed, "Case succeeded")) } async fn execute_corpus( @@ -657,7 +723,7 @@ async fn compile_corpus( let _ = cached_compiler .compile_contracts::( metadata, - metadata.path.as_path(), + metadata.metadata_file_path.as_path(), &mode, config, None, @@ -668,7 +734,7 @@ async fn compile_corpus( let _ = cached_compiler .compile_contracts::( metadata, - metadata.path.as_path(), + metadata.metadata_file_path.as_path(), &mode, config, None, diff --git a/crates/format/Cargo.toml b/crates/format/Cargo.toml index 0e5745e..e49872f 100644 --- a/crates/format/Cargo.toml +++ b/crates/format/Cargo.toml @@ -17,6 +17,7 @@ alloy = { workspace = true } alloy-primitives = { workspace = true } alloy-sol-types = { workspace = true } anyhow = { workspace = true } +futures = { workspace = true } regex = { workspace = true } tracing = { workspace = true } semver = { workspace = true } @@ -25,3 +26,6 @@ serde_json = { workspace = true } [dev-dependencies] tokio = { workspace = true } + +[lints] +workspace = true diff --git a/crates/format/src/case.rs b/crates/format/src/case.rs index b1bd234..2ef9ead 100644 --- a/crates/format/src/case.rs +++ b/crates/format/src/case.rs @@ -1,6 +1,6 @@ use 
serde::{Deserialize, Serialize}; -use revive_dt_common::macros::define_wrapper_type; +use revive_dt_common::{macros::define_wrapper_type, types::Mode}; use crate::{ input::{Expected, Step}, @@ -60,16 +60,17 @@ impl Case { } }) } + + pub fn solc_modes(&self) -> Vec { + match &self.modes { + Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(), + None => Mode::all().collect(), + } + } } define_wrapper_type!( /// A wrapper type for the index of test cases found in metadata file. #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] - pub struct CaseIdx(usize); + pub struct CaseIdx(usize) impl Display; ); - -impl std::fmt::Display for CaseIdx { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.0) - } -} diff --git a/crates/format/src/corpus.rs b/crates/format/src/corpus.rs index a3151b2..69921f0 100644 --- a/crates/format/src/corpus.rs +++ b/crates/format/src/corpus.rs @@ -3,10 +3,11 @@ use std::{ path::{Path, PathBuf}, }; -use revive_dt_common::cached_fs::read_dir; +use revive_dt_common::iterators::FilesWithExtensionIterator; use serde::{Deserialize, Serialize}; +use tracing::{debug, info}; -use crate::metadata::MetadataFile; +use crate::metadata::{Metadata, MetadataFile}; #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] #[serde(untagged)] @@ -18,7 +19,7 @@ pub enum Corpus { impl Corpus { pub fn try_from_path(file_path: impl AsRef) -> anyhow::Result { let mut corpus = File::open(file_path.as_ref()) - .map_err(Into::::into) + .map_err(anyhow::Error::from) .and_then(|file| serde_json::from_reader::<_, Corpus>(file).map_err(Into::into))?; for path in corpus.paths_iter_mut() { @@ -42,10 +43,52 @@ impl Corpus { } pub fn enumerate_tests(&self) -> Vec { - let mut tests = Vec::new(); - for path in self.paths_iter() { - collect_metadata(path, &mut tests); - } + let mut tests = self + .paths_iter() + .flat_map(|root_path| { + if !root_path.is_dir() { + 
Box::new(std::iter::once(root_path.to_path_buf())) + as Box> + } else { + Box::new( + FilesWithExtensionIterator::new(root_path) + .with_use_cached_fs(true) + .with_allowed_extension("sol") + .with_allowed_extension("json"), + ) + } + .map(move |metadata_file_path| (root_path, metadata_file_path)) + }) + .filter_map(|(root_path, metadata_file_path)| { + Metadata::try_from_file(&metadata_file_path) + .or_else(|| { + debug!( + discovered_from = %root_path.display(), + metadata_file_path = %metadata_file_path.display(), + "Skipping file since it doesn't contain valid metadata" + ); + None + }) + .map(|metadata| MetadataFile { + metadata_file_path, + corpus_file_path: root_path.to_path_buf(), + content: metadata, + }) + .inspect(|metadata_file| { + debug!( + metadata_file_path = %metadata_file.relative_path().display(), + "Loaded metadata file" + ) + }) + }) + .collect::>(); + tests.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path)); + tests.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path); + info!( + len = tests.len(), + corpus_name = self.name(), + "Found tests in Corpus" + ); tests } @@ -76,55 +119,11 @@ impl Corpus { } } } -} -/// Recursively walks `path` and parses any JSON or Solidity file into a test -/// definition [Metadata]. -/// -/// Found tests are inserted into `tests`. -/// -/// `path` is expected to be a directory. 
-pub fn collect_metadata(path: &Path, tests: &mut Vec) { - if path.is_dir() { - let dir_entry = match read_dir(path) { - Ok(dir_entry) => dir_entry, - Err(error) => { - tracing::error!("failed to read dir '{}': {error}", path.display()); - return; - } - }; - - for path in dir_entry { - let path = match path { - Ok(entry) => entry, - Err(error) => { - tracing::error!("error reading dir entry: {error}"); - continue; - } - }; - - if path.is_dir() { - collect_metadata(&path, tests); - continue; - } - - if path.is_file() { - if let Some(metadata) = MetadataFile::try_from_file(&path) { - tests.push(metadata) - } - } - } - } else { - let Some(extension) = path.extension() else { - tracing::error!("Failed to get file extension"); - return; - }; - if extension.eq_ignore_ascii_case("sol") || extension.eq_ignore_ascii_case("json") { - if let Some(metadata) = MetadataFile::try_from_file(path) { - tests.push(metadata) - } - } else { - tracing::error!(?extension, "Unsupported file extension"); + pub fn path_count(&self) -> usize { + match self { + Corpus::SinglePath { .. } => 1, + Corpus::MultiplePaths { paths, .. 
} => paths.len(), } } } diff --git a/crates/format/src/input.rs b/crates/format/src/input.rs index 36899b0..918c2d4 100644 --- a/crates/format/src/input.rs +++ b/crates/format/src/input.rs @@ -2,7 +2,6 @@ use std::collections::HashMap; use alloy::{ eips::BlockNumberOrTag, - hex::ToHexExt, json_abi::Function, network::TransactionBuilder, primitives::{Address, Bytes, U256}, @@ -10,10 +9,12 @@ use alloy::{ }; use alloy_primitives::{FixedBytes, utils::parse_units}; use anyhow::Context; +use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, stream}; use semver::VersionReq; use serde::{Deserialize, Serialize}; use revive_dt_common::macros::define_wrapper_type; +use tracing::{Instrument, info_span, instrument}; use crate::traits::ResolverApi; use crate::{metadata::ContractInstance, traits::ResolutionContext}; @@ -33,6 +34,11 @@ pub enum Step { StorageEmptyAssertion(Box), } +define_wrapper_type!( + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] + pub struct StepIdx(usize) impl Display; +); + #[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq)] pub struct Input { #[serde(default = "Input::default_caller")] @@ -188,7 +194,7 @@ define_wrapper_type! { /// This represents an item in the [`Calldata::Compound`] variant. 
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] #[serde(transparent)] - pub struct CalldataItem(String); + pub struct CalldataItem(String) impl Display; } #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] @@ -233,7 +239,7 @@ pub enum Method { define_wrapper_type!( #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash)] - pub struct EtherValue(U256); + pub struct EtherValue(U256) impl Display; ); #[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq)] @@ -268,15 +274,9 @@ impl Input { } Method::FunctionName(ref function_name) => { let Some(abi) = context.deployed_contract_abi(&self.instance) else { - tracing::error!( - contract_name = self.instance.as_ref(), - "Attempted to lookup ABI of contract but it wasn't found" - ); anyhow::bail!("ABI for instance '{}' not found", self.instance.as_ref()); }; - tracing::trace!("ABI found for instance: {}", &self.instance.as_ref()); - // We follow the same logic that's implemented in the matter-labs-tester where they resolve // the function name into a function selector and they assume that he function doesn't have // any existing overloads. @@ -302,13 +302,6 @@ impl Input { .selector() }; - tracing::trace!("Functions found for instance: {}", self.instance.as_ref()); - - tracing::trace!( - "Starting encoding ABI's parameters for instance: {}", - self.instance.as_ref() - ); - // Allocating a vector that we will be using for the calldata. The vector size will be: // 4 bytes for the function selector. // function.inputs.len() * 32 bytes for the arguments (each argument is a U256). 
@@ -435,17 +428,18 @@ impl Calldata { buffer.extend_from_slice(bytes); } Calldata::Compound(items) => { - for (arg_idx, arg) in items.iter().enumerate() { - match arg.resolve(resolver, context).await { - Ok(resolved) => { - buffer.extend(resolved.to_be_bytes::<32>()); - } - Err(error) => { - tracing::error!(?arg, arg_idx, ?error, "Failed to resolve argument"); - return Err(error); - } - }; - } + let resolved = stream::iter(items.iter().enumerate()) + .map(|(arg_idx, arg)| async move { + arg.resolve(resolver, context) + .instrument(info_span!("Resolving argument", %arg, arg_idx)) + .map_ok(|value| value.to_be_bytes::<32>()) + .await + }) + .buffered(0xFF) + .try_collect::>() + .await?; + + buffer.extend(resolved.into_iter().flatten()); } }; Ok(()) @@ -468,36 +462,37 @@ impl Calldata { match self { Calldata::Single(calldata) => Ok(calldata == other), Calldata::Compound(items) => { - // Chunking the "other" calldata into 32 byte chunks since each - // one of the items in the compound calldata represents 32 bytes - for (this, other) in items.iter().zip(other.chunks(32)) { - // The matterlabs format supports wildcards and therefore we - // also need to support them. - if this.as_ref() == "*" { - continue; - } + stream::iter(items.iter().zip(other.chunks(32))) + .map(|(this, other)| async move { + // The matterlabs format supports wildcards and therefore we + // also need to support them. 
+ if this.as_ref() == "*" { + return Ok::<_, anyhow::Error>(true); + } - let other = if other.len() < 32 { - let mut vec = other.to_vec(); - vec.resize(32, 0); - std::borrow::Cow::Owned(vec) - } else { - std::borrow::Cow::Borrowed(other) - }; + let other = if other.len() < 32 { + let mut vec = other.to_vec(); + vec.resize(32, 0); + std::borrow::Cow::Owned(vec) + } else { + std::borrow::Cow::Borrowed(other) + }; - let this = this.resolve(resolver, context).await?; - let other = U256::from_be_slice(&other); - if this != other { - return Ok(false); - } - } - Ok(true) + let this = this.resolve(resolver, context).await?; + let other = U256::from_be_slice(&other); + Ok(this == other) + }) + .buffered(0xFF) + .all(|v| async move { v.is_ok_and(|v| v) }) + .map(Ok) + .await } } } } impl CalldataItem { + #[instrument(level = "info", skip_all, err)] async fn resolve( &self, resolver: &impl ResolverApi, @@ -548,14 +543,7 @@ impl CalldataItem { match stack.as_slice() { // Empty stack means that we got an empty compound calldata which we resolve to zero. [] => Ok(U256::ZERO), - [CalldataToken::Item(item)] => { - tracing::debug!( - original = self.0, - resolved = item.to_be_bytes::<32>().encode_hex(), - "Resolved a Calldata item" - ); - Ok(*item) - } + [CalldataToken::Item(item)] => Ok(*item), _ => Err(anyhow::anyhow!( "Invalid calldata arithmetic operation - Invalid stack" )), diff --git a/crates/format/src/metadata.rs b/crates/format/src/metadata.rs index 5e6e07d..04411b9 100644 --- a/crates/format/src/metadata.rs +++ b/crates/format/src/metadata.rs @@ -15,6 +15,7 @@ use revive_dt_common::{ cached_fs::read_to_string, iterators::FilesWithExtensionIterator, macros::define_wrapper_type, types::Mode, }; +use tracing::error; use crate::{case::Case, mode::ParsedMode}; @@ -24,16 +25,26 @@ pub const SOLIDITY_CASE_COMMENT_MARKER: &str = "//!"; #[derive(Debug, Default, Deserialize, Clone, Eq, PartialEq)] pub struct MetadataFile { - pub path: PathBuf, + /// The path of the metadata file. 
This will either be a JSON or solidity file. + pub metadata_file_path: PathBuf, + + /// This is the path contained within the corpus file. This could either be the path of some dir + /// or could be the actual metadata file path. + pub corpus_file_path: PathBuf, + + /// The metadata contained within the file. pub content: Metadata, } impl MetadataFile { - pub fn try_from_file(path: &Path) -> Option { - Metadata::try_from_file(path).map(|metadata| Self { - path: path.to_owned(), - content: metadata, - }) + pub fn relative_path(&self) -> &Path { + if self.corpus_file_path.is_file() { + &self.corpus_file_path + } else { + self.metadata_file_path + .strip_prefix(&self.corpus_file_path) + .unwrap() + } } } @@ -145,10 +156,7 @@ impl Metadata { pub fn try_from_file(path: &Path) -> Option { assert!(path.is_file(), "not a file: {}", path.display()); - let Some(file_extension) = path.extension() else { - tracing::debug!("skipping corpus file: {}", path.display()); - return None; - }; + let file_extension = path.extension()?; if file_extension == METADATA_FILE_EXTENSION { return Self::try_from_json(path); @@ -158,18 +166,12 @@ impl Metadata { return Self::try_from_solidity(path); } - tracing::debug!("ignoring invalid corpus file: {}", path.display()); None } fn try_from_json(path: &Path) -> Option { let file = File::open(path) - .inspect_err(|error| { - tracing::error!( - "opening JSON test metadata file '{}' error: {error}", - path.display() - ); - }) + .inspect_err(|err| error!(path = %path.display(), %err, "Failed to open file")) .ok()?; match serde_json::from_reader::<_, Metadata>(file) { @@ -177,11 +179,8 @@ impl Metadata { metadata.file_path = Some(path.to_path_buf()); Some(metadata) } - Err(error) => { - tracing::error!( - "parsing JSON test metadata file '{}' error: {error}", - path.display() - ); + Err(err) => { + error!(path = %path.display(), %err, "Deserialization of metadata failed"); None } } @@ -189,12 +188,7 @@ impl Metadata { fn try_from_solidity(path: &Path) 
-> Option { let spec = read_to_string(path) - .inspect_err(|error| { - tracing::error!( - "opening JSON test metadata file '{}' error: {error}", - path.display() - ); - }) + .inspect_err(|err| error!(path = %path.display(), %err, "Failed to read file content")) .ok()? .lines() .filter_map(|line| line.strip_prefix(SOLIDITY_CASE_COMMENT_MARKER)) @@ -222,11 +216,8 @@ impl Metadata { ); Some(metadata) } - Err(error) => { - tracing::error!( - "parsing Solidity test metadata file '{}' error: '{error}' from data: {spec}", - path.display() - ); + Err(err) => { + error!(path = %path.display(), %err, "Failed to deserialize metadata"); None } } @@ -266,7 +257,7 @@ define_wrapper_type!( Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, )] #[serde(transparent)] - pub struct ContractInstance(String); + pub struct ContractInstance(String) impl Display; ); define_wrapper_type!( @@ -277,7 +268,7 @@ define_wrapper_type!( Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, )] #[serde(transparent)] - pub struct ContractIdent(String); + pub struct ContractIdent(String) impl Display; ); /// Represents an identifier used for contracts. 
diff --git a/crates/format/src/mode.rs b/crates/format/src/mode.rs index 7e6dfc8..0476e4e 100644 --- a/crates/format/src/mode.rs +++ b/crates/format/src/mode.rs @@ -223,7 +223,7 @@ mod tests { for (actual, expected) in strings { let parsed = ParsedMode::from_str(actual) - .expect(format!("Failed to parse mode string '{actual}'").as_str()); + .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'")); assert_eq!( expected, parsed.to_string(), @@ -249,7 +249,7 @@ mod tests { for (actual, expected) in strings { let parsed = ParsedMode::from_str(actual) - .expect(format!("Failed to parse mode string '{actual}'").as_str()); + .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'")); let expected_set: HashSet<_> = expected.into_iter().map(|s| s.to_owned()).collect(); let actual_set: HashSet<_> = parsed.to_modes().map(|m| m.to_string()).collect(); diff --git a/crates/node-interaction/Cargo.toml b/crates/node-interaction/Cargo.toml index 9953c69..c5c002e 100644 --- a/crates/node-interaction/Cargo.toml +++ b/crates/node-interaction/Cargo.toml @@ -11,3 +11,6 @@ rust-version.workspace = true [dependencies] alloy = { workspace = true } anyhow = { workspace = true } + +[lints] +workspace = true diff --git a/crates/node/Cargo.toml b/crates/node/Cargo.toml index 318e1a2..b895165 100644 --- a/crates/node/Cargo.toml +++ b/crates/node/Cargo.toml @@ -29,3 +29,6 @@ sp-runtime = { workspace = true } [dev-dependencies] temp-dir = { workspace = true } tokio = { workspace = true } + +[lints] +workspace = true diff --git a/crates/node/src/geth.rs b/crates/node/src/geth.rs index c034ba5..e72a7ed 100644 --- a/crates/node/src/geth.rs +++ b/crates/node/src/geth.rs @@ -33,9 +33,12 @@ use alloy::{ }; use anyhow::Context; use revive_common::EVMVersion; -use tracing::{Instrument, Level}; +use tracing::{Instrument, instrument}; -use revive_dt_common::{fs::clear_directory, futures::poll}; +use revive_dt_common::{ + fs::clear_directory, + futures::{PollingWaitBehavior, poll}, 
+}; use revive_dt_config::Arguments; use revive_dt_format::traits::ResolverApi; use revive_dt_node_interaction::EthereumNode; @@ -52,6 +55,7 @@ static NODE_COUNT: AtomicU32 = AtomicU32::new(0); /// /// Prunes the child process and the base directory on drop. #[derive(Debug)] +#[allow(clippy::type_complexity)] pub struct GethNode { connection_string: String, base_directory: PathBuf, @@ -61,8 +65,9 @@ pub struct GethNode { id: u32, handle: Option, start_timeout: u64, - wallet: EthereumWallet, + wallet: Arc, nonce_manager: CachedNonceManager, + chain_id_filler: ChainIdFiller, /// This vector stores [`File`] objects that we use for logging which we want to flush when the /// node object is dropped. We do not store them in a structured fashion at the moment (in /// separate fields) as the logic that we need to apply to them is all the same regardless of @@ -91,7 +96,7 @@ impl GethNode { const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60); /// Create the node directory and call `geth init` to configure the genesis. - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] fn init(&mut self, genesis: String) -> anyhow::Result<&mut Self> { let _ = clear_directory(&self.base_directory); let _ = clear_directory(&self.logs_directory); @@ -141,7 +146,7 @@ impl GethNode { /// Spawn the go-ethereum node child process. /// /// [Instance::init] must be called prior. - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] fn spawn_process(&mut self) -> anyhow::Result<&mut Self> { // This is the `OpenOptions` that we wish to use for all of the log files that we will be // opening in this method. We need to construct it in this way to: @@ -197,7 +202,7 @@ impl GethNode { /// Wait for the g-ethereum node child process getting ready. 
/// /// [Instance::spawn_process] must be called priorly. - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] fn wait_ready(&mut self) -> anyhow::Result<&mut Self> { let start_time = Instant::now(); @@ -231,80 +236,75 @@ impl GethNode { } } - #[tracing::instrument(skip_all, fields(geth_node_id = self.id), level = Level::TRACE)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] fn geth_stdout_log_file_path(&self) -> PathBuf { self.logs_directory.join(Self::GETH_STDOUT_LOG_FILE_NAME) } - #[tracing::instrument(skip_all, fields(geth_node_id = self.id), level = Level::TRACE)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] fn geth_stderr_log_file_path(&self) -> PathBuf { self.logs_directory.join(Self::GETH_STDERR_LOG_FILE_NAME) } - fn provider( + async fn provider( &self, - ) -> impl Future< - Output = anyhow::Result< - FillProvider, impl Provider, Ethereum>, - >, - > + 'static { - let connection_string = self.connection_string(); - let wallet = self.wallet.clone(); - - // Note: We would like all providers to make use of the same nonce manager so that we have - // monotonically increasing nonces that are cached. The cached nonce manager uses Arc's in - // its implementation and therefore it means that when we clone it then it still references - // the same state. 
- let nonce_manager = self.nonce_manager.clone(); - - Box::pin(async move { - ProviderBuilder::new() - .disable_recommended_fillers() - .filler(FallbackGasFiller::new( - 25_000_000, - 1_000_000_000, - 1_000_000_000, - )) - .filler(ChainIdFiller::default()) - .filler(NonceFiller::new(nonce_manager)) - .wallet(wallet) - .connect(&connection_string) - .await - .map_err(Into::into) - }) + ) -> anyhow::Result, impl Provider, Ethereum>> + { + ProviderBuilder::new() + .disable_recommended_fillers() + .filler(FallbackGasFiller::new( + 25_000_000, + 1_000_000_000, + 1_000_000_000, + )) + .filler(self.chain_id_filler.clone()) + .filler(NonceFiller::new(self.nonce_manager.clone())) + .wallet(self.wallet.clone()) + .connect(&self.connection_string) + .await + .map_err(Into::into) } } impl EthereumNode for GethNode { - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument( + level = "info", + skip_all, + fields(geth_node_id = self.id, connection_string = self.connection_string), + err, + )] async fn execute_transaction( &self, transaction: TransactionRequest, ) -> anyhow::Result { - let provider = Arc::new(self.provider().await?); - let transaction_hash = *provider.send_transaction(transaction).await?.tx_hash(); + let provider = self.provider().await?; - // The following is a fix for the "transaction indexing is in progress" error that we - // used to get. You can find more information on this in the following GH issue in geth + let pending_transaction = provider.send_transaction(transaction).await.inspect_err( + |err| tracing::error!(%err, "Encountered an error when submitting the transaction"), + )?; + let transaction_hash = *pending_transaction.tx_hash(); + + // The following is a fix for the "transaction indexing is in progress" error that we used + // to get. You can find more information on this in the following GH issue in geth // https://github.com/ethereum/go-ethereum/issues/28877. 
To summarize what's going on, // before we can get the receipt of the transaction it needs to have been indexed by the - // node's indexer. Just because the transaction has been confirmed it doesn't mean that - // it has been indexed. When we call alloy's `get_receipt` it checks if the transaction - // was confirmed. If it has been, then it will call `eth_getTransactionReceipt` method - // which _might_ return the above error if the tx has not yet been indexed yet. So, we - // need to implement a retry mechanism for the receipt to keep retrying to get it until - // it eventually works, but we only do that if the error we get back is the "transaction + // node's indexer. Just because the transaction has been confirmed it doesn't mean that it + // has been indexed. When we call alloy's `get_receipt` it checks if the transaction was + // confirmed. If it has been, then it will call `eth_getTransactionReceipt` method which + // _might_ return the above error if the tx has not yet been indexed yet. So, we need to + // implement a retry mechanism for the receipt to keep retrying to get it until it + // eventually works, but we only do that if the error we get back is the "transaction // indexing is in progress" error or if the receipt is None. // - // Getting the transaction indexed and taking a receipt can take a long time especially - // when a lot of transactions are being submitted to the node. Thus, while initially we - // only allowed for 60 seconds of waiting with a 1 second delay in polling, we need to - // allow for a larger wait time. Therefore, in here we allow for 5 minutes of waiting - // with exponential backoff each time we attempt to get the receipt and find that it's - // not available. + // Getting the transaction indexed and taking a receipt can take a long time especially when + // a lot of transactions are being submitted to the node. 
Thus, while initially we only + // allowed for 60 seconds of waiting with a 1 second delay in polling, we need to allow for + // a larger wait time. Therefore, in here we allow for 5 minutes of waiting with exponential + // backoff each time we attempt to get the receipt and find that it's not available. + let provider = Arc::new(provider); poll( Self::RECEIPT_POLLING_DURATION, - Default::default(), + PollingWaitBehavior::Constant(Duration::from_millis(200)), move || { let provider = provider.clone(); async move { @@ -329,7 +329,7 @@ impl EthereumNode for GethNode { .await } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn trace_transaction( &self, transaction: &TransactionReceipt, @@ -338,7 +338,7 @@ impl EthereumNode for GethNode { let provider = Arc::new(self.provider().await?); poll( Self::TRACE_POLLING_DURATION, - Default::default(), + PollingWaitBehavior::Constant(Duration::from_millis(200)), move || { let provider = provider.clone(); let trace_options = trace_options.clone(); @@ -362,7 +362,7 @@ impl EthereumNode for GethNode { .await } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn state_diff(&self, transaction: &TransactionReceipt) -> anyhow::Result { let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig { diff_mode: Some(true), @@ -379,7 +379,7 @@ impl EthereumNode for GethNode { } } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn balance_of(&self, address: Address) -> anyhow::Result { self.provider() .await? 
@@ -388,7 +388,7 @@ impl EthereumNode for GethNode { .map_err(Into::into) } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn latest_state_proof( &self, address: Address, @@ -404,7 +404,7 @@ impl EthereumNode for GethNode { } impl ResolverApi for GethNode { - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn chain_id(&self) -> anyhow::Result { self.provider() .await? @@ -413,7 +413,7 @@ impl ResolverApi for GethNode { .map_err(Into::into) } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn transaction_gas_price(&self, tx_hash: &TxHash) -> anyhow::Result { self.provider() .await? @@ -423,7 +423,7 @@ impl ResolverApi for GethNode { .map(|receipt| receipt.effective_gas_price) } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() .await? @@ -433,7 +433,7 @@ impl ResolverApi for GethNode { .map(|block| block.header.gas_limit as _) } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result
{ self.provider() .await? @@ -443,7 +443,7 @@ impl ResolverApi for GethNode { .map(|block| block.header.beneficiary) } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() .await? @@ -453,7 +453,7 @@ impl ResolverApi for GethNode { .map(|block| U256::from_be_bytes(block.header.mix_hash.0)) } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn block_base_fee(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() .await? @@ -468,7 +468,7 @@ impl ResolverApi for GethNode { }) } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() .await? @@ -478,7 +478,7 @@ impl ResolverApi for GethNode { .map(|block| block.header.hash) } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() .await? @@ -488,7 +488,7 @@ impl ResolverApi for GethNode { .map(|block| block.header.timestamp) } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn last_block_number(&self) -> anyhow::Result { self.provider() .await? 
@@ -522,20 +522,26 @@ impl Node for GethNode { id, handle: None, start_timeout: config.geth_start_timeout, - wallet, + wallet: Arc::new(wallet), + chain_id_filler: Default::default(), + nonce_manager: Default::default(), // We know that we only need to be storing 2 files so we can specify that when creating // the vector. It's the stdout and stderr of the geth node. logs_file_to_flush: Vec::with_capacity(2), - nonce_manager: Default::default(), } } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn id(&self) -> usize { + self.id as _ + } + + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] fn connection_string(&self) -> String { self.connection_string.clone() } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] fn shutdown(&mut self) -> anyhow::Result<()> { // Terminate the processes in a graceful manner to allow for the output to be flushed. 
if let Some(mut child) = self.handle.take() { @@ -557,13 +563,13 @@ impl Node for GethNode { Ok(()) } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] fn spawn(&mut self, genesis: String) -> anyhow::Result<()> { self.init(genesis)?.spawn_process()?; Ok(()) } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] fn version(&self) -> anyhow::Result { let output = Command::new(&self.geth) .arg("--version") @@ -576,8 +582,7 @@ impl Node for GethNode { Ok(String::from_utf8_lossy(&output).into()) } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - fn matches_target(&self, targets: Option<&[String]>) -> bool { + fn matches_target(targets: Option<&[String]>) -> bool { match targets { None => true, Some(targets) => targets.iter().any(|str| str.as_str() == "evm"), @@ -590,7 +595,7 @@ impl Node for GethNode { } impl Drop for GethNode { - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] fn drop(&mut self) { self.shutdown().expect("Failed to shutdown") } diff --git a/crates/node/src/kitchensink.rs b/crates/node/src/kitchensink.rs index f5a9e0a..832760a 100644 --- a/crates/node/src/kitchensink.rs +++ b/crates/node/src/kitchensink.rs @@ -3,7 +3,10 @@ use std::{ io::{BufRead, Write}, path::{Path, PathBuf}, process::{Child, Command, Stdio}, - sync::atomic::{AtomicU32, Ordering}, + sync::{ + Arc, + atomic::{AtomicU32, Ordering}, + }, time::Duration, }; @@ -39,7 +42,6 @@ use serde::{Deserialize, Serialize}; use serde_json::{Value as JsonValue, json}; use sp_core::crypto::Ss58Codec; use sp_runtime::AccountId32; -use tracing::Level; use revive_dt_config::Arguments; use revive_dt_node_interaction::EthereumNode; @@ -54,12 +56,13 @@ 
pub struct KitchensinkNode { substrate_binary: PathBuf, eth_proxy_binary: PathBuf, rpc_url: String, - wallet: EthereumWallet, base_directory: PathBuf, logs_directory: PathBuf, process_substrate: Option, process_proxy: Option, + wallet: Arc, nonce_manager: CachedNonceManager, + chain_id_filler: ChainIdFiller, /// This vector stores [`File`] objects that we use for logging which we want to flush when the /// node object is dropped. We do not store them in a structured fashion at the moment (in /// separate fields) as the logic that we need to apply to them is all the same regardless of @@ -87,7 +90,6 @@ impl KitchensinkNode { const PROXY_STDOUT_LOG_FILE_NAME: &str = "proxy_stdout.log"; const PROXY_STDERR_LOG_FILE_NAME: &str = "proxy_stderr.log"; - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))] fn init(&mut self, genesis: &str) -> anyhow::Result<&mut Self> { let _ = clear_directory(&self.base_directory); let _ = clear_directory(&self.logs_directory); @@ -160,7 +162,6 @@ impl KitchensinkNode { Ok(self) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] fn spawn_process(&mut self) -> anyhow::Result<()> { let substrate_rpc_port = Self::BASE_SUBSTRATE_RPC_PORT + self.id as u16; let proxy_rpc_port = Self::BASE_PROXY_RPC_PORT + self.id as u16; @@ -214,10 +215,6 @@ impl KitchensinkNode { Self::SUBSTRATE_READY_MARKER, Duration::from_secs(60), ) { - tracing::error!( - ?error, - "Failed to start substrate, shutting down gracefully" - ); self.shutdown()?; return Err(error); }; @@ -243,7 +240,6 @@ impl KitchensinkNode { Self::ETH_PROXY_READY_MARKER, Duration::from_secs(60), ) { - tracing::error!(?error, "Failed to start proxy, shutting down gracefully"); self.shutdown()?; return Err(error); }; @@ -258,7 +254,6 @@ impl KitchensinkNode { Ok(()) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] fn extract_balance_from_genesis_file( &self, genesis: &Genesis, @@ -307,7 +302,6 @@ impl KitchensinkNode 
{ } } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] pub fn eth_rpc_version(&self) -> anyhow::Result { let output = Command::new(&self.eth_proxy_binary) .arg("--version") @@ -320,74 +314,55 @@ impl KitchensinkNode { Ok(String::from_utf8_lossy(&output).trim().to_string()) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), level = Level::TRACE)] fn kitchensink_stdout_log_file_path(&self) -> PathBuf { self.logs_directory .join(Self::KITCHENSINK_STDOUT_LOG_FILE_NAME) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), level = Level::TRACE)] fn kitchensink_stderr_log_file_path(&self) -> PathBuf { self.logs_directory .join(Self::KITCHENSINK_STDERR_LOG_FILE_NAME) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), level = Level::TRACE)] fn proxy_stdout_log_file_path(&self) -> PathBuf { self.logs_directory.join(Self::PROXY_STDOUT_LOG_FILE_NAME) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), level = Level::TRACE)] fn proxy_stderr_log_file_path(&self) -> PathBuf { self.logs_directory.join(Self::PROXY_STDERR_LOG_FILE_NAME) } - fn provider( + async fn provider( &self, - ) -> impl Future< - Output = anyhow::Result< - FillProvider< - impl TxFiller, - impl Provider, - KitchenSinkNetwork, - >, + ) -> anyhow::Result< + FillProvider< + impl TxFiller, + impl Provider, + KitchenSinkNetwork, >, - > + 'static { - let connection_string = self.connection_string(); - let wallet = self.wallet.clone(); - - // Note: We would like all providers to make use of the same nonce manager so that we have - // monotonically increasing nonces that are cached. The cached nonce manager uses Arc's in - // its implementation and therefore it means that when we clone it then it still references - // the same state. 
- let nonce_manager = self.nonce_manager.clone(); - - Box::pin(async move { - ProviderBuilder::new() - .disable_recommended_fillers() - .network::() - .filler(FallbackGasFiller::new( - 25_000_000, - 1_000_000_000, - 1_000_000_000, - )) - .filler(ChainIdFiller::default()) - .filler(NonceFiller::new(nonce_manager)) - .wallet(wallet) - .connect(&connection_string) - .await - .map_err(Into::into) - }) + > { + ProviderBuilder::new() + .disable_recommended_fillers() + .network::() + .filler(FallbackGasFiller::new( + 25_000_000, + 1_000_000_000, + 1_000_000_000, + )) + .filler(self.chain_id_filler.clone()) + .filler(NonceFiller::new(self.nonce_manager.clone())) + .wallet(self.wallet.clone()) + .connect(&self.rpc_url) + .await + .map_err(Into::into) } } impl EthereumNode for KitchensinkNode { - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn execute_transaction( &self, transaction: alloy::rpc::types::TransactionRequest, ) -> anyhow::Result { - tracing::debug!(?transaction, "Submitting transaction"); let receipt = self .provider() .await? @@ -395,11 +370,9 @@ impl EthereumNode for KitchensinkNode { .await? .get_receipt() .await?; - tracing::info!(?receipt, "Submitted tx to kitchensink"); Ok(receipt) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn trace_transaction( &self, transaction: &TransactionReceipt, @@ -413,7 +386,6 @@ impl EthereumNode for KitchensinkNode { .await?) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn state_diff(&self, transaction: &TransactionReceipt) -> anyhow::Result { let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig { diff_mode: Some(true), @@ -430,7 +402,6 @@ impl EthereumNode for KitchensinkNode { } } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn balance_of(&self, address: Address) -> anyhow::Result { self.provider() .await? 
@@ -439,7 +410,6 @@ impl EthereumNode for KitchensinkNode { .map_err(Into::into) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn latest_state_proof( &self, address: Address, @@ -455,7 +425,6 @@ impl EthereumNode for KitchensinkNode { } impl ResolverApi for KitchensinkNode { - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn chain_id(&self) -> anyhow::Result { self.provider() .await? @@ -464,7 +433,6 @@ impl ResolverApi for KitchensinkNode { .map_err(Into::into) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn transaction_gas_price(&self, tx_hash: &TxHash) -> anyhow::Result { self.provider() .await? @@ -474,7 +442,6 @@ impl ResolverApi for KitchensinkNode { .map(|receipt| receipt.effective_gas_price) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() .await? @@ -484,7 +451,6 @@ impl ResolverApi for KitchensinkNode { .map(|block| block.header.gas_limit as _) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result
{ self.provider() .await? @@ -494,7 +460,6 @@ impl ResolverApi for KitchensinkNode { .map(|block| block.header.beneficiary) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() .await? @@ -504,7 +469,6 @@ impl ResolverApi for KitchensinkNode { .map(|block| U256::from_be_bytes(block.header.mix_hash.0)) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn block_base_fee(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() .await? @@ -519,7 +483,6 @@ impl ResolverApi for KitchensinkNode { }) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() .await? @@ -529,7 +492,6 @@ impl ResolverApi for KitchensinkNode { .map(|block| block.header.hash) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() .await? @@ -539,7 +501,6 @@ impl ResolverApi for KitchensinkNode { .map(|block| block.header.timestamp) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn last_block_number(&self) -> anyhow::Result { self.provider() .await? @@ -570,11 +531,12 @@ impl Node for KitchensinkNode { substrate_binary: config.kitchensink.clone(), eth_proxy_binary: config.eth_proxy.clone(), rpc_url: String::new(), - wallet, base_directory, logs_directory, process_substrate: None, process_proxy: None, + wallet: Arc::new(wallet), + chain_id_filler: Default::default(), nonce_manager: Default::default(), // We know that we only need to be storing 4 files so we can specify that when creating // the vector. It's the stdout and stderr of the substrate-node and the eth-rpc. 
@@ -582,12 +544,14 @@ impl Node for KitchensinkNode { } } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))] + fn id(&self) -> usize { + self.id as _ + } + fn connection_string(&self) -> String { self.rpc_url.clone() } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] fn shutdown(&mut self) -> anyhow::Result<()> { // Terminate the processes in a graceful manner to allow for the output to be flushed. if let Some(mut child) = self.process_proxy.take() { @@ -614,12 +578,10 @@ impl Node for KitchensinkNode { Ok(()) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] fn spawn(&mut self, genesis: String) -> anyhow::Result<()> { self.init(&genesis)?.spawn_process() } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] fn version(&self) -> anyhow::Result { let output = Command::new(&self.substrate_binary) .arg("--version") @@ -632,8 +594,7 @@ impl Node for KitchensinkNode { Ok(String::from_utf8_lossy(&output).into()) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))] - fn matches_target(&self, targets: Option<&[String]>) -> bool { + fn matches_target(targets: Option<&[String]>) -> bool { match targets { None => true, Some(targets) => targets.iter().any(|str| str.as_str() == "pvm"), @@ -646,7 +607,6 @@ impl Node for KitchensinkNode { } impl Drop for KitchensinkNode { - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))] fn drop(&mut self) { self.shutdown().expect("Failed to shutdown") } diff --git a/crates/node/src/lib.rs b/crates/node/src/lib.rs index 446b66a..74ea8cd 100644 --- a/crates/node/src/lib.rs +++ b/crates/node/src/lib.rs @@ -18,6 +18,9 @@ pub trait Node: EthereumNode { /// Create a new uninitialized instance. fn new(config: &Arguments) -> Self; + /// Returns the identifier of the node. + fn id(&self) -> usize; + /// Spawns a node configured according to the genesis json. 
/// /// Blocking until it's ready to accept transactions. @@ -36,7 +39,7 @@ pub trait Node: EthereumNode { /// Given a list of targets from the metadata file, this function determines if the metadata /// file can be ran on this node or not. - fn matches_target(&self, targets: Option<&[String]>) -> bool; + fn matches_target(targets: Option<&[String]>) -> bool; /// Returns the EVM version of the node. fn evm_version() -> EVMVersion; diff --git a/crates/node/src/pool.rs b/crates/node/src/pool.rs index dfb71ab..015c004 100644 --- a/crates/node/src/pool.rs +++ b/crates/node/src/pool.rs @@ -63,7 +63,6 @@ where fn spawn_node(args: &Arguments, genesis: String) -> anyhow::Result { let mut node = T::new(args); - tracing::info!("starting node: {}", node.connection_string()); node.spawn(genesis)?; Ok(node) } diff --git a/crates/report/Cargo.toml b/crates/report/Cargo.toml index d18caab..0e6e896 100644 --- a/crates/report/Cargo.toml +++ b/crates/report/Cargo.toml @@ -14,6 +14,8 @@ revive-dt-format = { workspace = true } revive-dt-compiler = { workspace = true } anyhow = { workspace = true } -tracing = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } + +[lints] +workspace = true diff --git a/crates/report/src/reporter.rs b/crates/report/src/reporter.rs index e5d0d1f..5313ac7 100644 --- a/crates/report/src/reporter.rs +++ b/crates/report/src/reporter.rs @@ -185,8 +185,6 @@ impl Report { let file = File::create(&path).context(path.display().to_string())?; serde_json::to_writer_pretty(file, &self)?; - tracing::info!("report written to: {}", path.display()); - Ok(()) } } diff --git a/crates/solc-binaries/Cargo.toml b/crates/solc-binaries/Cargo.toml index be5dcf7..30ff149 100644 --- a/crates/solc-binaries/Cargo.toml +++ b/crates/solc-binaries/Cargo.toml @@ -19,3 +19,6 @@ reqwest = { workspace = true } semver = { workspace = true } serde = { workspace = true } sha2 = { workspace = true } + +[lints] +workspace = true diff --git 
a/crates/solc-binaries/src/cache.rs b/crates/solc-binaries/src/cache.rs index b2d8846..57b9696 100644 --- a/crates/solc-binaries/src/cache.rs +++ b/crates/solc-binaries/src/cache.rs @@ -39,10 +39,7 @@ pub(crate) async fn get_or_download( } async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::Result<()> { - tracing::info!("caching file: {}", path.display()); - let Ok(file) = File::create_new(path) else { - tracing::debug!("cache file already exists: {}", path.display()); return Ok(()); }; diff --git a/crates/solc-binaries/src/download.rs b/crates/solc-binaries/src/download.rs index 119c4dd..691f639 100644 --- a/crates/solc-binaries/src/download.rs +++ b/crates/solc-binaries/src/download.rs @@ -107,7 +107,6 @@ impl SolcDownloader { /// Errors out if the download fails or the digest of the downloaded file /// mismatches the expected digest from the release [List]. pub async fn download(&self) -> anyhow::Result> { - tracing::info!("downloading solc: {self:?}"); let builds = List::download(self.list).await?.builds; let build = builds .iter()