From 609ececea6b6f87ad3eae83dda1c889f78c9977c Mon Sep 17 00:00:00 2001 From: Omar Abdulla Date: Mon, 18 Aug 2025 16:16:33 +0300 Subject: [PATCH] Better logging and fix concurrency issues --- Cargo.lock | 14 ++ Cargo.toml | 5 +- clippy.toml | 1 + crates/common/Cargo.toml | 3 + crates/compiler/Cargo.toml | 3 + crates/config/Cargo.toml | 2 + crates/core/Cargo.toml | 4 + crates/core/src/driver/mod.rs | 170 +++++++------------ crates/core/src/main.rs | 240 +++++++++++++++------------ crates/format/Cargo.toml | 3 + crates/format/src/corpus.rs | 109 ++++++------ crates/format/src/input.rs | 45 ++--- crates/format/src/metadata.rs | 57 +++---- crates/node-interaction/Cargo.toml | 3 + crates/node/Cargo.toml | 4 + crates/node/src/geth.rs | 185 +++++++++------------ crates/node/src/kitchensink.rs | 104 ++++-------- crates/node/src/lib.rs | 2 +- crates/node/src/pool.rs | 1 - crates/report/Cargo.toml | 3 + crates/report/src/reporter.rs | 2 - crates/solc-binaries/Cargo.toml | 3 + crates/solc-binaries/src/cache.rs | 3 - crates/solc-binaries/src/download.rs | 1 - 24 files changed, 433 insertions(+), 534 deletions(-) create mode 100644 clippy.toml diff --git a/Cargo.lock b/Cargo.lock index 57dca8f..aa32c80 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -4533,6 +4533,7 @@ dependencies = [ "tempfile", "tokio", "tracing", + "tracing-appender", "tracing-subscriber", ] @@ -4560,6 +4561,7 @@ version = "0.1.0" dependencies = [ "alloy", "anyhow", + "dashmap", "revive-common", "revive-dt-common", "revive-dt-config", @@ -6109,6 +6111,18 @@ dependencies = [ "tracing-core", ] +[[package]] +name = "tracing-appender" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3566e8ce28cc0a3fe42519fc80e6b4c943cc4c8cef275620eb8dac2d3d4e06cf" +dependencies = [ + "crossbeam-channel", + "thiserror 1.0.69", + "time", + "tracing-subscriber", +] + [[package]] name = "tracing-attributes" version = "0.1.28" diff --git a/Cargo.toml b/Cargo.toml index c378619..d8b4213 
100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -55,7 +55,8 @@ tokio = { version = "1.47.0", default-features = false, features = [ "rt", ] } uuid = { version = "1.8", features = ["v4"] } -tracing = "0.1.41" +tracing = { version = "0.1.41" } +tracing-appender = { version = "0.2.3" } tracing-subscriber = { version = "0.3.19", default-features = false, features = [ "fmt", "json", @@ -90,3 +91,5 @@ features = [ inherits = "release" lto = true codegen-units = 1 + +[workspace.lints.clippy] diff --git a/clippy.toml b/clippy.toml new file mode 100644 index 0000000..8b13789 --- /dev/null +++ b/clippy.toml @@ -0,0 +1 @@ + diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml index 128e464..20cf865 100644 --- a/crates/common/Cargo.toml +++ b/crates/common/Cargo.toml @@ -15,3 +15,6 @@ once_cell = { workspace = true } semver = { workspace = true } serde = { workspace = true } tokio = { workspace = true, default-features = false, features = ["time"] } + +[lints] +workspace = true diff --git a/crates/compiler/Cargo.toml b/crates/compiler/Cargo.toml index c1c839f..6797a22 100644 --- a/crates/compiler/Cargo.toml +++ b/crates/compiler/Cargo.toml @@ -25,3 +25,6 @@ serde = { workspace = true } serde_json = { workspace = true } tracing = { workspace = true } tokio = { workspace = true } + +[lints] +workspace = true diff --git a/crates/config/Cargo.toml b/crates/config/Cargo.toml index e58c747..10c5c61 100644 --- a/crates/config/Cargo.toml +++ b/crates/config/Cargo.toml @@ -15,3 +15,5 @@ semver = { workspace = true } temp-dir = { workspace = true } serde = { workspace = true } +[lints] +workspace = true diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index b747bc1..a4b2221 100644 --- a/crates/core/Cargo.toml +++ b/crates/core/Cargo.toml @@ -31,9 +31,13 @@ indexmap = { workspace = true } once_cell = { workspace = true } tokio = { workspace = true } tracing = { workspace = true } +tracing-appender = { workspace = true } tracing-subscriber = { workspace = true } semver 
= { workspace = true } serde = { workspace = true } serde_json = { workspace = true } temp-dir = { workspace = true } tempfile = { workspace = true } + +[lints] +workspace = true diff --git a/crates/core/src/driver/mod.rs b/crates/core/src/driver/mod.rs index 22708fd..bea8c6b 100644 --- a/crates/core/src/driver/mod.rs +++ b/crates/core/src/driver/mod.rs @@ -16,26 +16,22 @@ use alloy::rpc::types::trace::geth::{ }; use alloy::{ primitives::Address, - rpc::types::{ - TransactionRequest, - trace::geth::{AccountState, DiffMode}, - }, + rpc::types::{TransactionRequest, trace::geth::DiffMode}, }; use anyhow::Context; use indexmap::IndexMap; use revive_dt_format::traits::{ResolutionContext, ResolverApi}; use semver::Version; -use revive_dt_format::case::{Case, CaseIdx}; +use revive_dt_format::case::Case; use revive_dt_format::input::{ - BalanceAssertion, Calldata, EtherValue, Expected, ExpectedOutput, Input, Method, + BalanceAssertion, Calldata, EtherValue, Expected, ExpectedOutput, Input, Method, StepIdx, StorageEmptyAssertion, }; use revive_dt_format::metadata::{ContractIdent, ContractInstance, ContractPathAndIdent}; use revive_dt_format::{input::Step, metadata::Metadata}; -use revive_dt_node::Node; use revive_dt_node_interaction::EthereumNode; -use tracing::Instrument; +use tracing::{Instrument, info, info_span, instrument}; use crate::Platform; @@ -77,38 +73,38 @@ where pub async fn handle_step( &mut self, metadata: &Metadata, - case_idx: CaseIdx, step: &Step, node: &T::Blockchain, ) -> anyhow::Result { match step { Step::FunctionCall(input) => { let (receipt, geth_trace, diff_mode) = - self.handle_input(metadata, case_idx, input, node).await?; + self.handle_input(metadata, input, node).await?; Ok(StepOutput::FunctionCall(receipt, geth_trace, diff_mode)) } Step::BalanceAssertion(balance_assertion) => { - self.handle_balance_assertion(metadata, case_idx, balance_assertion, node) + self.handle_balance_assertion(metadata, balance_assertion, node) .await?; 
Ok(StepOutput::BalanceAssertion) } Step::StorageEmptyAssertion(storage_empty) => { - self.handle_storage_empty(metadata, case_idx, storage_empty, node) + self.handle_storage_empty(metadata, storage_empty, node) .await?; Ok(StepOutput::StorageEmptyAssertion) } } + .inspect(|_| info!("Step Succeeded")) } + #[instrument(level = "info", name = "Handling Input", skip_all)] pub async fn handle_input( &mut self, metadata: &Metadata, - case_idx: CaseIdx, input: &Input, node: &T::Blockchain, ) -> anyhow::Result<(TransactionReceipt, GethTrace, DiffMode)> { let deployment_receipts = self - .handle_input_contract_deployment(metadata, case_idx, input, node) + .handle_input_contract_deployment(metadata, input, node) .await?; let execution_receipt = self .handle_input_execution(input, deployment_receipts, node) @@ -119,14 +115,13 @@ where self.handle_input_variable_assignment(input, &tracing_result)?; self.handle_input_expectations(input, &execution_receipt, node, &tracing_result) .await?; - self.handle_input_diff(case_idx, execution_receipt, node) - .await + self.handle_input_diff(execution_receipt, node).await } + #[instrument(level = "info", name = "Handling Balance Assertion", skip_all)] pub async fn handle_balance_assertion( &mut self, metadata: &Metadata, - _: CaseIdx, balance_assertion: &BalanceAssertion, node: &T::Blockchain, ) -> anyhow::Result<()> { @@ -137,10 +132,10 @@ where Ok(()) } + #[instrument(level = "info", name = "Handling Storage Assertion", skip_all)] pub async fn handle_storage_empty( &mut self, metadata: &Metadata, - _: CaseIdx, storage_empty: &StorageEmptyAssertion, node: &T::Blockchain, ) -> anyhow::Result<()> { @@ -152,10 +147,10 @@ where } /// Handles the contract deployment for a given input performing it if it needs to be performed. 
+ #[instrument(level = "info", skip_all)] async fn handle_input_contract_deployment( &mut self, metadata: &Metadata, - _: CaseIdx, input: &Input, node: &T::Blockchain, ) -> anyhow::Result> { @@ -170,11 +165,6 @@ where instances_we_must_deploy.insert(input.instance.clone(), true); } - tracing::debug!( - instances_to_deploy = instances_we_must_deploy.len(), - "Computed the number of required deployments for input" - ); - let mut receipts = HashMap::new(); for (instance, deploy_with_constructor_arguments) in instances_we_must_deploy.into_iter() { let calldata = deploy_with_constructor_arguments.then_some(&input.calldata); @@ -201,6 +191,7 @@ where } /// Handles the execution of the input in terms of the calls that need to be made. + #[instrument(level = "info", skip_all)] async fn handle_input_execution( &mut self, input: &Input, @@ -218,33 +209,21 @@ where .legacy_transaction(node, self.default_resolution_context()) .await { - Ok(tx) => { - tracing::debug!("Legacy transaction data: {tx:#?}"); - tx - } + Ok(tx) => tx, Err(err) => { - tracing::error!("Failed to construct legacy transaction: {err:?}"); return Err(err); } }; - tracing::trace!("Executing transaction for input: {input:?}"); - match node.execute_transaction(tx).await { Ok(receipt) => Ok(receipt), - Err(err) => { - tracing::error!( - "Failed to execute transaction when executing the contract: {}, {:?}", - &*input.instance, - err - ); - Err(err) - } + Err(err) => Err(err), } } } } + #[instrument(level = "info", skip_all)] async fn handle_input_call_frame_tracing( &self, execution_receipt: &TransactionReceipt, @@ -275,6 +254,7 @@ where }) } + #[instrument(level = "info", skip_all)] fn handle_input_variable_assignment( &mut self, input: &Input, @@ -305,6 +285,7 @@ where Ok(()) } + #[instrument(level = "info", skip_all)] async fn handle_input_expectations( &mut self, input: &Input, @@ -353,6 +334,7 @@ where Ok(()) } + #[instrument(level = "info", skip_all)] async fn handle_input_expectation_item( &mut self, 
execution_receipt: &TransactionReceipt, @@ -495,9 +477,9 @@ where Ok(()) } + #[instrument(level = "info", skip_all)] async fn handle_input_diff( &mut self, - _: CaseIdx, execution_receipt: TransactionReceipt, node: &T::Blockchain, ) -> anyhow::Result<(TransactionReceipt, GethTrace, DiffMode)> { @@ -515,6 +497,7 @@ where Ok((execution_receipt, trace, diff)) } + #[instrument(level = "info", skip_all)] pub async fn handle_balance_assertion_contract_deployment( &mut self, metadata: &Metadata, @@ -540,6 +523,7 @@ where Ok(()) } + #[instrument(level = "info", skip_all)] pub async fn handle_balance_assertion_execution( &mut self, BalanceAssertion { @@ -575,6 +559,7 @@ where Ok(()) } + #[instrument(level = "info", skip_all)] pub async fn handle_storage_empty_assertion_contract_deployment( &mut self, metadata: &Metadata, @@ -600,6 +585,7 @@ where Ok(()) } + #[instrument(level = "info", skip_all)] pub async fn handle_storage_empty_assertion_execution( &mut self, StorageEmptyAssertion { @@ -661,7 +647,6 @@ where contract_ident, }) = metadata.contract_sources()?.remove(contract_instance) else { - tracing::error!("Contract source not found for instance"); anyhow::bail!( "Contract source not found for instance {:?}", contract_instance @@ -674,11 +659,6 @@ where .and_then(|source_file_contracts| source_file_contracts.get(contract_ident.as_ref())) .cloned() else { - tracing::error!( - contract_source_path = contract_source_path.display().to_string(), - contract_ident = contract_ident.as_ref(), - "Failed to find information for contract" - ); anyhow::bail!( "Failed to find information for contract {:?}", contract_instance @@ -727,7 +707,6 @@ where }; let Some(address) = receipt.contract_address else { - tracing::error!("Contract deployment transaction didn't return an address"); anyhow::bail!("Contract deployment didn't return an address"); }; tracing::info!( @@ -754,7 +733,6 @@ where pub struct CaseDriver<'a, Leader: Platform, Follower: Platform> { metadata: &'a Metadata, case: 
&'a Case, - case_idx: CaseIdx, leader_node: &'a Leader::Blockchain, follower_node: &'a Follower::Blockchain, leader_state: CaseState, @@ -770,7 +748,6 @@ where pub fn new( metadata: &'a Metadata, case: &'a Case, - case_idx: impl Into, leader_node: &'a L::Blockchain, follower_node: &'a F::Blockchain, leader_state: CaseState, @@ -779,7 +756,6 @@ where Self { metadata, case, - case_idx: case_idx.into(), leader_node, follower_node, leader_state, @@ -787,79 +763,45 @@ where } } - pub fn trace_diff_mode(label: &str, diff: &DiffMode) { - tracing::trace!("{label} - PRE STATE:"); - for (addr, state) in &diff.pre { - Self::trace_account_state(" [pre]", addr, state); - } - - tracing::trace!("{label} - POST STATE:"); - for (addr, state) in &diff.post { - Self::trace_account_state(" [post]", addr, state); - } - } - - fn trace_account_state(prefix: &str, addr: &Address, state: &AccountState) { - tracing::trace!("{prefix} 0x{addr:x}"); - - if let Some(balance) = &state.balance { - tracing::trace!("{prefix} balance: {balance}"); - } - if let Some(nonce) = &state.nonce { - tracing::trace!("{prefix} nonce: {nonce}"); - } - if let Some(code) = &state.code { - tracing::trace!("{prefix} code: {code}"); - } - } - + #[instrument(level = "info", name = "Executing Case", skip_all)] pub async fn execute(&mut self) -> anyhow::Result { - if !self - .leader_node - .matches_target(self.metadata.targets.as_deref()) - || !self - .follower_node - .matches_target(self.metadata.targets.as_deref()) - { - tracing::warn!( - targets = ?self.metadata.targets, - "Either the leader or follower node do not support the targets of the file" - ); - return Ok(0); - } - let mut steps_executed = 0; - for (step_idx, step) in self.case.steps_iterator().enumerate() { - let tracing_span = tracing::info_span!("Handling input", step_idx); - + for (step_idx, step) in self + .case + .steps_iterator() + .enumerate() + .map(|(idx, v)| (StepIdx::new(idx), v)) + { let leader_step_output = self .leader_state - 
.handle_step(self.metadata, self.case_idx, &step, self.leader_node) - .instrument(tracing_span.clone()) + .handle_step(self.metadata, &step, self.leader_node) + .instrument(info_span!( + "Handling Step", + %step_idx, + target = "Leader", + )) .await?; let follower_step_output = self .follower_state - .handle_step(self.metadata, self.case_idx, &step, self.follower_node) - .instrument(tracing_span) + .handle_step(self.metadata, &step, self.follower_node) + .instrument(info_span!( + "Handling Step", + %step_idx, + target = "Follower", + )) .await?; match (leader_step_output, follower_step_output) { - ( - StepOutput::FunctionCall(leader_receipt, _, leader_diff), - StepOutput::FunctionCall(follower_receipt, _, follower_diff), - ) => { - if leader_diff == follower_diff { - tracing::debug!("State diffs match between leader and follower."); - } else { - tracing::debug!("State diffs mismatch between leader and follower."); - Self::trace_diff_mode("Leader", &leader_diff); - Self::trace_diff_mode("Follower", &follower_diff); - } - - if leader_receipt.logs() != follower_receipt.logs() { - tracing::debug!("Log/event mismatch between leader and follower."); - tracing::trace!("Leader logs: {:?}", leader_receipt.logs()); - tracing::trace!("Follower logs: {:?}", follower_receipt.logs()); - } + (StepOutput::FunctionCall(..), StepOutput::FunctionCall(..)) => { + // TODO: We need to actually work out how/if we will compare the diff between + // the leader and the follower. The diffs are almost guaranteed to be different + // from leader and follower and therefore without an actual strategy for this + // we have something that's guaranteed to fail. Even a simple call to some + // contract will produce two non-equal diffs because on the leader the contract + // has address X and on the follower it has address Y. On the leader contract X + // contains address A in the state and on the follower it contains address B. 
So + // this isn't exactly a straightforward thing to do and I'm not even sure that + // it's possible to do. Once we have an actual strategy for doing the diffs we + // will implement it here. Until then, this remains empty. } (StepOutput::BalanceAssertion, StepOutput::BalanceAssertion) => {} (StepOutput::StorageEmptyAssertion, StepOutput::StorageEmptyAssertion) => {} diff --git a/crates/core/src/main.rs b/crates/core/src/main.rs index c069ffe..f70ee6d 100644 --- a/crates/core/src/main.rs +++ b/crates/core/src/main.rs @@ -20,7 +20,8 @@ use indexmap::IndexMap; use revive_dt_node_interaction::EthereumNode; use temp_dir::TempDir; use tokio::sync::mpsc; -use tracing::{Instrument, Level}; +use tracing::{debug, info, info_span, instrument}; +use tracing_appender::non_blocking::WorkerGuard; use tracing_subscriber::{EnvFilter, FmtSubscriber}; use revive_dt_common::types::Mode; @@ -34,10 +35,10 @@ use revive_dt_format::{ case::{Case, CaseIdx}, corpus::Corpus, input::{Input, Step}, - metadata::{ContractPathAndIdent, Metadata, MetadataFile}, + metadata::{ContractPathAndIdent, MetadataFile}, mode::ParsedMode, }; -use revive_dt_node::pool::NodePool; +use revive_dt_node::{Node, pool::NodePool}; use revive_dt_report::reporter::{Report, Span}; use crate::cached_compiler::CachedCompiler; @@ -45,8 +46,9 @@ use crate::cached_compiler::CachedCompiler; static TEMP_DIR: LazyLock = LazyLock::new(|| TempDir::new().unwrap()); /// this represents a single "test"; a mode, path and collection of cases. 
+#[derive(Clone, Debug)] struct Test<'a> { - metadata: &'a Metadata, + metadata: &'a MetadataFile, metadata_file_path: &'a Path, mode: Mode, case_idx: CaseIdx, @@ -57,7 +59,15 @@ struct Test<'a> { type CaseResult = Result; fn main() -> anyhow::Result<()> { - let args = init_cli()?; + let (args, _guard) = init_cli()?; + info!( + leader = args.leader.to_string(), + follower = args.follower.to_string(), + working_directory = %args.directory().display(), + number_of_nodes = args.number_of_nodes, + invalidate_compilation_cache = args.invalidate_compilation_cache, + "Differential testing tool has been initialized" + ); let body = async { for (corpus, tests) in collect_corpora(&args)? { @@ -79,15 +89,25 @@ fn main() -> anyhow::Result<()> { .block_on(body) } -fn init_cli() -> anyhow::Result { +fn init_cli() -> anyhow::Result<(Arguments, WorkerGuard)> { + let (writer, guard) = tracing_appender::non_blocking::NonBlockingBuilder::default() + .lossy(false) + // Assuming that each line contains 255 characters and that each character is one byte, then + // this means that our buffer is about 4GBs large. 
+ .buffered_lines_limit(0x1000000) + .thread_name("buffered writer") + .finish(std::io::stdout()); + let subscriber = FmtSubscriber::builder() - .with_thread_ids(true) - .with_thread_names(true) + .with_writer(writer) + .with_thread_ids(false) + .with_thread_names(false) .with_env_filter(EnvFilter::from_default_env()) .with_ansi(false) .pretty() .finish(); tracing::subscriber::set_global_default(subscriber)?; + info!("Differential testing tool is starting"); let mut args = Arguments::parse(); @@ -105,19 +125,25 @@ fn init_cli() -> anyhow::Result { args.temp_dir = Some(&TEMP_DIR); } } - tracing::info!("workdir: {}", args.directory().display()); - Ok(args) + Ok((args, guard)) } +#[instrument(level = "debug", name = "Collecting Corpora", skip_all)] fn collect_corpora(args: &Arguments) -> anyhow::Result>> { let mut corpora = HashMap::new(); for path in &args.corpus { + let span = info_span!("Processing corpus file", path = %path.display()); + let _guard = span.enter(); + let corpus = Corpus::try_from_path(path)?; - tracing::info!("found corpus: {}", path.display()); + info!( + name = corpus.name(), + number_of_contained_paths = corpus.path_count(), + "Deserialized corpus file" + ); let tests = corpus.enumerate_tests(); - tracing::info!("corpus '{}' contains {} tests", &corpus.name(), tests.len()); corpora.insert(corpus, tests); } @@ -156,7 +182,7 @@ where L::Blockchain: revive_dt_node::Node + Send + Sync + 'static, F::Blockchain: revive_dt_node::Node + Send + Sync + 'static, { - let flattened_tests = metadata_files + let filtered_tests = metadata_files .iter() .flat_map(|metadata_file| { metadata_file @@ -165,14 +191,13 @@ where .enumerate() .map(move |(case_idx, case)| (metadata_file, case_idx, case)) }) + // Flatten over the modes, prefer the case modes over the metadata file modes. 
.flat_map(|(metadata_file, case_idx, case)| { - let modes = match (metadata_file.modes.as_ref(), case.modes.as_ref()) { - (Some(_), Some(modes)) | (None, Some(modes)) | (Some(modes), None) => { - ParsedMode::many_to_modes(modes.iter()).collect::>() - } - (None, None) => Mode::all().collect(), - }; - modes + case.modes + .as_ref() + .or(metadata_file.modes.as_ref()) + .map(|modes| ParsedMode::many_to_modes(modes.iter()).collect::>()) + .unwrap_or(Mode::all().collect()) .into_iter() .map(move |mode| (metadata_file, case_idx, case, mode)) }) @@ -180,8 +205,8 @@ where IndexMap::<_, BTreeMap<_, Vec<_>>>::new(), |mut map, (metadata_file, case_idx, case, mode)| { let test = Test { - metadata: &metadata_file.content, - metadata_file_path: metadata_file.path.as_path(), + metadata: metadata_file, + metadata_file_path: metadata_file.metadata_file_path.as_path(), mode: mode.clone(), case_idx: CaseIdx::new(case_idx), case, @@ -193,18 +218,35 @@ where .push(test); map }, - ); - - let filtered_tests = flattened_tests + ) .into_values() .flatten() .flat_map(|(_, value)| value.into_iter()) + // Filter the test out if the leader and follower do not support the target. + .filter(|test| { + let leader_support = + ::matches_target(test.metadata.targets.as_deref()); + let follower_support = + ::matches_target(test.metadata.targets.as_deref()); + let is_allowed = leader_support && follower_support; + + if !is_allowed { + debug!( + file_path = %test.metadata.relative_path().display(), + leader_support, + follower_support, + "Target is not supported, throwing metadata file out" + ) + } + + is_allowed + }) // Filter the test out if the metadata file is ignored. 
.filter(|test| { if test.metadata.ignore.is_some_and(|ignore| ignore) { - tracing::warn!( - metadata_file_path = %test.metadata_file_path.display(), - "Ignoring test case since the metadata file is ignored" + debug!( + file_path = %test.metadata.relative_path().display(), + "Metadata file is ignored, throwing case out" ); false } else { @@ -214,9 +256,10 @@ where // Filter the test case if the case is ignored. .filter(|test| { if test.case.ignore.is_some_and(|ignore| ignore) { - tracing::warn!( - metadata_file_path = %test.metadata_file_path.display(), - "Ignoring test case since the case file is ignored" + debug!( + file_path = %test.metadata.relative_path().display(), + case_idx = %test.case_idx, + "Case is ignored, throwing case out" ); false } else { @@ -226,18 +269,19 @@ where // Filtering based on the EVM version compatibility .filter(|test| { if let Some(evm_version_requirement) = test.metadata.required_evm_version { - let is_allowed = evm_version_requirement - .matches(&::evm_version()) - && evm_version_requirement + let leader_compatibility = evm_version_requirement + .matches(&::evm_version()); + let follower_compatibility = evm_version_requirement .matches(&::evm_version()); + let is_allowed = leader_compatibility && follower_compatibility; if !is_allowed { - tracing::warn!( - metadata_file_path = %test.metadata_file_path.display(), - leader_evm_version = %::evm_version(), - follower_evm_version = %::evm_version(), - version_requirement = %evm_version_requirement, - "Skipped test since the EVM version requirement was not fulfilled." 
+ debug!( + file_path = %test.metadata.relative_path().display(), + case_idx = %test.case_idx, + leader_compatibility, + follower_compatibility, + "EVM Version is incompatible, throwing case out" ); } @@ -249,31 +293,27 @@ where stream::iter(filtered_tests) // Filter based on the compiler compatibility - .filter_map(|test| { - let args = args.clone(); + .filter_map(move |test| async move { + let leader_support = does_compiler_support_mode::(args, &test.mode) + .await + .ok() + .unwrap_or(false); + let follower_support = does_compiler_support_mode::(args, &test.mode) + .await + .ok() + .unwrap_or(false); + let is_allowed = leader_support && follower_support; - async move { - let is_supported = does_compiler_support_mode::(&args, &test.mode) - .await - .ok() - .unwrap_or(false) - && does_compiler_support_mode::(&args, &test.mode) - .await - .ok() - .unwrap_or(false); - - if !is_supported { - tracing::warn!( - metadata_file_path = %test.metadata_file_path.display(), - case_idx = %test.case_idx, - case_name = ?test.case.name, - mode = %test.mode, - "Skipping test as one or both of the compilers don't support it" - ); - } - - is_supported.then_some(test) + if !is_allowed { + debug!( + file_path = %test.metadata.relative_path().display(), + leader_support, + follower_support, + "Compilers do not support this, throwing case out" + ); } + + is_allowed.then_some(test) }) } @@ -335,14 +375,6 @@ where let leader_node = leader_nodes.round_robbin(); let follower_node = follower_nodes.round_robbin(); - let tracing_span = tracing::span!( - Level::INFO, - "Running driver", - metadata_file_path = %test.metadata_file_path.display(), - case_idx = ?test.case_idx, - solc_mode = ?test.mode, - ); - let result = handle_case_driver::( test.metadata_file_path, test.metadata, @@ -355,7 +387,6 @@ where follower_node, span, ) - .instrument(tracing_span) .await; report_tx @@ -438,9 +469,22 @@ async fn start_reporter_task(mut report_rx: mpsc::UnboundedReceiver<(Test<'_>, C } 
#[allow(clippy::too_many_arguments)] +#[instrument( + level = "info", + name = "Handling Case", + skip_all, + fields( + metadata_file_path = %metadata.relative_path().display(), + mode = %mode, + %case_idx, + case_name = case.name.as_deref().unwrap_or("Unnamed Case"), + leader_node = leader_node.id(), + follower_node = follower_node.id(), + ) +)] async fn handle_case_driver( metadata_file_path: &Path, - metadata: &Metadata, + metadata: &MetadataFile, case_idx: CaseIdx, case: &Case, mode: Mode, @@ -476,6 +520,8 @@ where .flatten() .flat_map(|(_, map)| map.values()) { + debug!(%library_instance, "Deploying Library Instance"); + let ContractPathAndIdent { contract_source_path: library_source_path, contract_ident: library_ident, @@ -495,24 +541,12 @@ where let leader_code = match alloy::hex::decode(leader_code) { Ok(code) => code, Err(error) => { - tracing::error!( - ?error, - contract_source_path = library_source_path.display().to_string(), - contract_ident = library_ident.as_ref(), - "Failed to hex-decode byte code - This could possibly mean that the bytecode requires linking" - ); anyhow::bail!("Failed to hex-decode the byte code {}", error) } }; let follower_code = match alloy::hex::decode(follower_code) { Ok(code) => code, Err(error) => { - tracing::error!( - ?error, - contract_source_path = library_source_path.display().to_string(), - contract_ident = library_ident.as_ref(), - "Failed to hex-decode byte code - This could possibly mean that the bytecode requires linking" - ); anyhow::bail!("Failed to hex-decode the byte code {}", error) } }; @@ -542,43 +576,33 @@ where let leader_receipt = match leader_node.execute_transaction(leader_tx).await { Ok(receipt) => receipt, Err(error) => { - tracing::error!( - node = std::any::type_name::(), - ?error, - "Contract deployment transaction failed."
- ); return Err(error); } }; let follower_receipt = match follower_node.execute_transaction(follower_tx).await { Ok(receipt) => receipt, Err(error) => { - tracing::error!( - node = std::any::type_name::(), - ?error, - "Contract deployment transaction failed." - ); return Err(error); } }; - tracing::info!( + debug!( ?library_instance, library_address = ?leader_receipt.contract_address, "Deployed library to leader" ); - tracing::info!( + debug!( ?library_instance, library_address = ?follower_receipt.contract_address, "Deployed library to follower" ); - let Some(leader_library_address) = leader_receipt.contract_address else { - anyhow::bail!("Contract deployment didn't return an address"); - }; - let Some(follower_library_address) = follower_receipt.contract_address else { - anyhow::bail!("Contract deployment didn't return an address"); - }; + let leader_library_address = leader_receipt + .contract_address + .context("Contract deployment didn't return an address")?; + let follower_library_address = follower_receipt + .contract_address + .context("Contract deployment didn't return an address")?; leader_deployed_libraries.get_or_insert_default().insert( library_instance.clone(), @@ -631,13 +655,15 @@ where let mut driver = CaseDriver::::new( metadata, case, - case_idx, leader_node, follower_node, leader_state, follower_state, ); - driver.execute().await + driver + .execute() + .await + .inspect(|steps_executed| info!(steps_executed, "Case succeeded")) } async fn execute_corpus( @@ -687,7 +713,7 @@ async fn compile_corpus( let _ = cached_compiler .compile_contracts::( metadata, - metadata.path.as_path(), + metadata.metadata_file_path.as_path(), &mode, config, None, @@ -698,7 +724,7 @@ async fn compile_corpus( let _ = cached_compiler .compile_contracts::( metadata, - metadata.path.as_path(), + metadata.metadata_file_path.as_path(), &mode, config, None, diff --git a/crates/format/Cargo.toml b/crates/format/Cargo.toml index 0e5745e..2aa03a9 100644 --- 
a/crates/format/Cargo.toml +++ b/crates/format/Cargo.toml @@ -25,3 +25,6 @@ serde_json = { workspace = true } [dev-dependencies] tokio = { workspace = true } + +[lints] +workspace = true diff --git a/crates/format/src/corpus.rs b/crates/format/src/corpus.rs index a3151b2..69921f0 100644 --- a/crates/format/src/corpus.rs +++ b/crates/format/src/corpus.rs @@ -3,10 +3,11 @@ use std::{ path::{Path, PathBuf}, }; -use revive_dt_common::cached_fs::read_dir; +use revive_dt_common::iterators::FilesWithExtensionIterator; use serde::{Deserialize, Serialize}; +use tracing::{debug, info}; -use crate::metadata::MetadataFile; +use crate::metadata::{Metadata, MetadataFile}; #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] #[serde(untagged)] @@ -18,7 +19,7 @@ pub enum Corpus { impl Corpus { pub fn try_from_path(file_path: impl AsRef) -> anyhow::Result { let mut corpus = File::open(file_path.as_ref()) - .map_err(Into::::into) + .map_err(anyhow::Error::from) .and_then(|file| serde_json::from_reader::<_, Corpus>(file).map_err(Into::into))?; for path in corpus.paths_iter_mut() { @@ -42,10 +43,52 @@ impl Corpus { } pub fn enumerate_tests(&self) -> Vec { - let mut tests = Vec::new(); - for path in self.paths_iter() { - collect_metadata(path, &mut tests); - } + let mut tests = self + .paths_iter() + .flat_map(|root_path| { + if !root_path.is_dir() { + Box::new(std::iter::once(root_path.to_path_buf())) + as Box> + } else { + Box::new( + FilesWithExtensionIterator::new(root_path) + .with_use_cached_fs(true) + .with_allowed_extension("sol") + .with_allowed_extension("json"), + ) + } + .map(move |metadata_file_path| (root_path, metadata_file_path)) + }) + .filter_map(|(root_path, metadata_file_path)| { + Metadata::try_from_file(&metadata_file_path) + .or_else(|| { + debug!( + discovered_from = %root_path.display(), + metadata_file_path = %metadata_file_path.display(), + "Skipping file since it doesn't contain valid metadata" + ); + None + }) + 
.map(|metadata| MetadataFile { + metadata_file_path, + corpus_file_path: root_path.to_path_buf(), + content: metadata, + }) + .inspect(|metadata_file| { + debug!( + metadata_file_path = %metadata_file.relative_path().display(), + "Loaded metadata file" + ) + }) + }) + .collect::>(); + tests.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path)); + tests.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path); + info!( + len = tests.len(), + corpus_name = self.name(), + "Found tests in Corpus" + ); tests } @@ -76,55 +119,11 @@ impl Corpus { } } } -} -/// Recursively walks `path` and parses any JSON or Solidity file into a test -/// definition [Metadata]. -/// -/// Found tests are inserted into `tests`. -/// -/// `path` is expected to be a directory. -pub fn collect_metadata(path: &Path, tests: &mut Vec) { - if path.is_dir() { - let dir_entry = match read_dir(path) { - Ok(dir_entry) => dir_entry, - Err(error) => { - tracing::error!("failed to read dir '{}': {error}", path.display()); - return; - } - }; - - for path in dir_entry { - let path = match path { - Ok(entry) => entry, - Err(error) => { - tracing::error!("error reading dir entry: {error}"); - continue; - } - }; - - if path.is_dir() { - collect_metadata(&path, tests); - continue; - } - - if path.is_file() { - if let Some(metadata) = MetadataFile::try_from_file(&path) { - tests.push(metadata) - } - } - } - } else { - let Some(extension) = path.extension() else { - tracing::error!("Failed to get file extension"); - return; - }; - if extension.eq_ignore_ascii_case("sol") || extension.eq_ignore_ascii_case("json") { - if let Some(metadata) = MetadataFile::try_from_file(path) { - tests.push(metadata) - } - } else { - tracing::error!(?extension, "Unsupported file extension"); + pub fn path_count(&self) -> usize { + match self { + Corpus::SinglePath { .. } => 1, + Corpus::MultiplePaths { paths, .. 
} => paths.len(), } } } diff --git a/crates/format/src/input.rs b/crates/format/src/input.rs index afc6845..897d650 100644 --- a/crates/format/src/input.rs +++ b/crates/format/src/input.rs @@ -2,7 +2,6 @@ use std::collections::HashMap; use alloy::{ eips::BlockNumberOrTag, - hex::ToHexExt, json_abi::Function, network::TransactionBuilder, primitives::{Address, Bytes, U256}, @@ -14,6 +13,7 @@ use semver::VersionReq; use serde::{Deserialize, Serialize}; use revive_dt_common::macros::define_wrapper_type; +use tracing::{Instrument, info_span, instrument}; use crate::traits::ResolverApi; use crate::{metadata::ContractInstance, traits::ResolutionContext}; @@ -33,6 +33,11 @@ pub enum Step { StorageEmptyAssertion(Box), } +define_wrapper_type!( + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] + pub struct StepIdx(usize) impl Display; +); + #[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq)] pub struct Input { #[serde(default = "Input::default_caller")] @@ -268,15 +273,9 @@ impl Input { } Method::FunctionName(ref function_name) => { let Some(abi) = context.deployed_contract_abi(&self.instance) else { - tracing::error!( - contract_name = self.instance.as_ref(), - "Attempted to lookup ABI of contract but it wasn't found" - ); anyhow::bail!("ABI for instance '{}' not found", self.instance.as_ref()); }; - tracing::trace!("ABI found for instance: {}", &self.instance.as_ref()); - // We follow the same logic that's implemented in the matter-labs-tester where they resolve // the function name into a function selector and they assume that he function doesn't have // any existing overloads. @@ -302,13 +301,6 @@ impl Input { .selector() }; - tracing::trace!("Functions found for instance: {}", self.instance.as_ref()); - - tracing::trace!( - "Starting encoding ABI's parameters for instance: {}", - self.instance.as_ref() - ); - // Allocating a vector that we will be using for the calldata. 
The vector size will be: // 4 bytes for the function selector. // function.inputs.len() * 32 bytes for the arguments (each argument is a U256). @@ -436,15 +428,12 @@ impl Calldata { } Calldata::Compound(items) => { for (arg_idx, arg) in items.iter().enumerate() { - match arg.resolve(resolver, context).await { - Ok(resolved) => { - buffer.extend(resolved.to_be_bytes::<32>()); - } - Err(error) => { - tracing::error!(?arg, arg_idx, ?error, "Failed to resolve argument"); - return Err(error); - } - }; + buffer.extend( + arg.resolve(resolver, context) + .instrument(info_span!("Resolving argument", %arg, arg_idx)) + .await? + .to_be_bytes::<32>(), + ); } } }; @@ -498,6 +487,7 @@ impl Calldata { } impl CalldataItem { + #[instrument(level = "info", skip_all, err)] async fn resolve( &self, resolver: &impl ResolverApi, @@ -548,14 +538,7 @@ impl CalldataItem { match stack.as_slice() { // Empty stack means that we got an empty compound calldata which we resolve to zero. [] => Ok(U256::ZERO), - [CalldataToken::Item(item)] => { - tracing::debug!( - original = self.0, - resolved = item.to_be_bytes::<32>().encode_hex(), - "Resolved a Calldata item" - ); - Ok(*item) - } + [CalldataToken::Item(item)] => Ok(*item), _ => Err(anyhow::anyhow!( "Invalid calldata arithmetic operation - Invalid stack" )), diff --git a/crates/format/src/metadata.rs b/crates/format/src/metadata.rs index c09993c..04411b9 100644 --- a/crates/format/src/metadata.rs +++ b/crates/format/src/metadata.rs @@ -15,6 +15,7 @@ use revive_dt_common::{ cached_fs::read_to_string, iterators::FilesWithExtensionIterator, macros::define_wrapper_type, types::Mode, }; +use tracing::error; use crate::{case::Case, mode::ParsedMode}; @@ -24,16 +25,26 @@ pub const SOLIDITY_CASE_COMMENT_MARKER: &str = "//!"; #[derive(Debug, Default, Deserialize, Clone, Eq, PartialEq)] pub struct MetadataFile { - pub path: PathBuf, + /// The path of the metadata file. This will either be a JSON or solidity file. 
+ pub metadata_file_path: PathBuf, + + /// This is the path contained within the corpus file. This could either be the path of some dir + /// or could be the actual metadata file path. + pub corpus_file_path: PathBuf, + + /// The metadata contained within the file. pub content: Metadata, } impl MetadataFile { - pub fn try_from_file(path: &Path) -> Option { - Metadata::try_from_file(path).map(|metadata| Self { - path: path.to_owned(), - content: metadata, - }) + pub fn relative_path(&self) -> &Path { + if self.corpus_file_path.is_file() { + &self.corpus_file_path + } else { + self.metadata_file_path + .strip_prefix(&self.corpus_file_path) + .unwrap() + } } } @@ -145,10 +156,7 @@ impl Metadata { pub fn try_from_file(path: &Path) -> Option { assert!(path.is_file(), "not a file: {}", path.display()); - let Some(file_extension) = path.extension() else { - tracing::debug!("skipping corpus file: {}", path.display()); - return None; - }; + let file_extension = path.extension()?; if file_extension == METADATA_FILE_EXTENSION { return Self::try_from_json(path); @@ -158,18 +166,12 @@ impl Metadata { return Self::try_from_solidity(path); } - tracing::debug!("ignoring invalid corpus file: {}", path.display()); None } fn try_from_json(path: &Path) -> Option { let file = File::open(path) - .inspect_err(|error| { - tracing::error!( - "opening JSON test metadata file '{}' error: {error}", - path.display() - ); - }) + .inspect_err(|err| error!(path = %path.display(), %err, "Failed to open file")) .ok()?; match serde_json::from_reader::<_, Metadata>(file) { @@ -177,11 +179,8 @@ impl Metadata { metadata.file_path = Some(path.to_path_buf()); Some(metadata) } - Err(error) => { - tracing::error!( - "parsing JSON test metadata file '{}' error: {error}", - path.display() - ); + Err(err) => { + error!(path = %path.display(), %err, "Deserialization of metadata failed"); None } } @@ -189,12 +188,7 @@ impl Metadata { fn try_from_solidity(path: &Path) -> Option { let spec = read_to_string(path) 
- .inspect_err(|error| { - tracing::error!( - "opening JSON test metadata file '{}' error: {error}", - path.display() - ); - }) + .inspect_err(|err| error!(path = %path.display(), %err, "Failed to read file content")) .ok()? .lines() .filter_map(|line| line.strip_prefix(SOLIDITY_CASE_COMMENT_MARKER)) @@ -222,11 +216,8 @@ impl Metadata { ); Some(metadata) } - Err(error) => { - tracing::error!( - "parsing Solidity test metadata file '{}' error: '{error}' from data: {spec}", - path.display() - ); + Err(err) => { + error!(path = %path.display(), %err, "Failed to deserialize metadata"); None } } diff --git a/crates/node-interaction/Cargo.toml b/crates/node-interaction/Cargo.toml index 9953c69..c5c002e 100644 --- a/crates/node-interaction/Cargo.toml +++ b/crates/node-interaction/Cargo.toml @@ -11,3 +11,6 @@ rust-version.workspace = true [dependencies] alloy = { workspace = true } anyhow = { workspace = true } + +[lints] +workspace = true diff --git a/crates/node/Cargo.toml b/crates/node/Cargo.toml index 318e1a2..aa09bd1 100644 --- a/crates/node/Cargo.toml +++ b/crates/node/Cargo.toml @@ -11,6 +11,7 @@ rust-version.workspace = true [dependencies] anyhow = { workspace = true } alloy = { workspace = true } +dashmap = { workspace = true } tracing = { workspace = true } tokio = { workspace = true } @@ -29,3 +30,6 @@ sp-runtime = { workspace = true } [dev-dependencies] temp-dir = { workspace = true } tokio = { workspace = true } + +[lints] +workspace = true diff --git a/crates/node/src/geth.rs b/crates/node/src/geth.rs index 32d9513..46a05ba 100644 --- a/crates/node/src/geth.rs +++ b/crates/node/src/geth.rs @@ -18,15 +18,13 @@ use alloy::{ genesis::{Genesis, GenesisAccount}, network::{Ethereum, EthereumWallet, NetworkWallet}, primitives::{ - Address, BlockHash, BlockNumber, BlockTimestamp, FixedBytes, StorageKey, TxHash, U256, + Address, BlockHash, BlockNumber, BlockTimestamp, FixedBytes, StorageKey, TxHash, + U256, }, providers::{ - Identity, Provider, ProviderBuilder, 
RootProvider, + Provider, ProviderBuilder, ext::DebugApi, - fillers::{ - CachedNonceManager, ChainIdFiller, FillProvider, JoinFill, NonceFiller, TxFiller, - WalletFiller, - }, + fillers::{CachedNonceManager, ChainIdFiller, FillProvider, NonceFiller, TxFiller}, }, rpc::types::{ EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest, @@ -36,8 +34,7 @@ use alloy::{ }; use anyhow::Context; use revive_common::EVMVersion; -use tokio::sync::RwLock; -use tracing::{Instrument, Level}; +use tracing::{Instrument, instrument}; use revive_dt_common::{fs::clear_directory, futures::poll}; use revive_dt_config::Arguments; @@ -66,25 +63,9 @@ pub struct GethNode { id: u32, handle: Option, start_timeout: u64, - wallet: EthereumWallet, - provider: Arc< - RwLock< - Option< - Arc< - FillProvider< - JoinFill< - JoinFill< - JoinFill, ChainIdFiller>, - NonceFiller, - >, - WalletFiller, - >, - RootProvider, - >, - >, - >, - >, - >, + wallet: Arc, + nonce_manager: CachedNonceManager, + chain_id_filler: ChainIdFiller, /// This vector stores [`File`] objects that we use for logging which we want to flush when the /// node object is dropped. We do not store them in a structured fashion at the moment (in /// separate fields) as the logic that we need to apply to them is all the same regardless of @@ -113,7 +94,7 @@ impl GethNode { const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60); /// Create the node directory and call `geth init` to configure the genesis. - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] fn init(&mut self, genesis: String) -> anyhow::Result<&mut Self> { let _ = clear_directory(&self.base_directory); let _ = clear_directory(&self.logs_directory); @@ -163,7 +144,7 @@ impl GethNode { /// Spawn the go-ethereum node child process. /// /// [Instance::init] must be called prior. 
- #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] fn spawn_process(&mut self) -> anyhow::Result<&mut Self> { // This is the `OpenOptions` that we wish to use for all of the log files that we will be // opening in this method. We need to construct it in this way to: @@ -219,7 +200,7 @@ impl GethNode { /// Wait for the g-ethereum node child process getting ready. /// /// [Instance::spawn_process] must be called priorly. - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] fn wait_ready(&mut self) -> anyhow::Result<&mut Self> { let start_time = Instant::now(); @@ -253,91 +234,72 @@ impl GethNode { } } - #[tracing::instrument(skip_all, fields(geth_node_id = self.id), level = Level::TRACE)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] fn geth_stdout_log_file_path(&self) -> PathBuf { self.logs_directory.join(Self::GETH_STDOUT_LOG_FILE_NAME) } - #[tracing::instrument(skip_all, fields(geth_node_id = self.id), level = Level::TRACE)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] fn geth_stderr_log_file_path(&self) -> PathBuf { self.logs_directory.join(Self::GETH_STDERR_LOG_FILE_NAME) } async fn provider( &self, - ) -> anyhow::Result, impl Provider, Ethereum>>> + ) -> anyhow::Result, impl Provider, Ethereum>> { - let read_guard = self.provider.read().await; - - match read_guard.as_ref() { - Some(provider) => Ok(provider.clone()), - None => { - drop(read_guard); - let mut write_guard = self.provider.write().await; - - let provider = ProviderBuilder::new() - .disable_recommended_fillers() - .filler(FallbackGasFiller::new( - 25_000_000, - 1_000_000_000, - 1_000_000_000, - )) - .filler(ChainIdFiller::default()) - .filler(NonceFiller::new(CachedNonceManager::default())) - .wallet(self.wallet.clone()) - 
.connect(&self.connection_string) - .await - .map(Arc::new)?; - - *write_guard = Some(provider.clone()); - Ok(provider) - } - } + ProviderBuilder::new() + .disable_recommended_fillers() + .filler(FallbackGasFiller::new( + 25_000_000, + 1_000_000_000, + 1_000_000_000, + )) + .filler(self.chain_id_filler.clone()) + .filler(NonceFiller::new(self.nonce_manager.clone())) + .wallet(self.wallet.clone()) + .connect(&self.connection_string) + .await + .map_err(Into::into) } } impl EthereumNode for GethNode { - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument( + level = "info", + skip_all, + fields(geth_node_id = self.id, connection_string = self.connection_string), + err, + )] async fn execute_transaction( &self, transaction: TransactionRequest, ) -> anyhow::Result { let provider = self.provider().await?; - let transaction = provider.fill(transaction).await?; - let transaction = transaction - .as_envelope() - .context("Filled transaction is not an envelope")?; - let transaction_hash = *transaction.tx_hash(); + let pending_transaction = provider.send_transaction(transaction).await.inspect_err( + |err| tracing::error!(%err, "Encountered an error when submitting the transaction"), + )?; + let transaction_hash = *pending_transaction.tx_hash(); - let rtn = provider.send_tx_envelope(transaction.clone()).await; - match rtn { - Ok(_) => {} - Err(error) => { - if !error.to_string().contains("already known") { - return Err(error.into()); - } - } - } - - // The following is a fix for the "transaction indexing is in progress" error that we - // used to get. You can find more information on this in the following GH issue in geth + // The following is a fix for the "transaction indexing is in progress" error that we used + // to get. You can find more information on this in the following GH issue in geth // https://github.com/ethereum/go-ethereum/issues/28877. 
To summarize what's going on, // before we can get the receipt of the transaction it needs to have been indexed by the - // node's indexer. Just because the transaction has been confirmed it doesn't mean that - // it has been indexed. When we call alloy's `get_receipt` it checks if the transaction - // was confirmed. If it has been, then it will call `eth_getTransactionReceipt` method - // which _might_ return the above error if the tx has not yet been indexed yet. So, we - // need to implement a retry mechanism for the receipt to keep retrying to get it until - // it eventually works, but we only do that if the error we get back is the "transaction + // node's indexer. Just because the transaction has been confirmed it doesn't mean that it + // has been indexed. When we call alloy's `get_receipt` it checks if the transaction was + // confirmed. If it has been, then it will call `eth_getTransactionReceipt` method which + // _might_ return the above error if the tx has not yet been indexed yet. So, we need to + // implement a retry mechanism for the receipt to keep retrying to get it until it + // eventually works, but we only do that if the error we get back is the "transaction // indexing is in progress" error or if the receipt is None. // - // Getting the transaction indexed and taking a receipt can take a long time especially - // when a lot of transactions are being submitted to the node. Thus, while initially we - // only allowed for 60 seconds of waiting with a 1 second delay in polling, we need to - // allow for a larger wait time. Therefore, in here we allow for 5 minutes of waiting - // with exponential backoff each time we attempt to get the receipt and find that it's - // not available. + // Getting the transaction indexed and taking a receipt can take a long time especially when + // a lot of transactions are being submitted to the node. 
Thus, while initially we only + // allowed for 60 seconds of waiting with a 1 second delay in polling, we need to allow for + // a larger wait time. Therefore, in here we allow for 5 minutes of waiting with exponential + // backoff each time we attempt to get the receipt and find that it's not available. + let provider = Arc::new(provider); poll( Self::RECEIPT_POLLING_DURATION, Default::default(), @@ -365,7 +327,7 @@ impl EthereumNode for GethNode { .await } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn trace_transaction( &self, transaction: &TransactionReceipt, @@ -398,7 +360,7 @@ impl EthereumNode for GethNode { .await } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn state_diff(&self, transaction: &TransactionReceipt) -> anyhow::Result { let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig { diff_mode: Some(true), @@ -415,7 +377,7 @@ impl EthereumNode for GethNode { } } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn balance_of(&self, address: Address) -> anyhow::Result { self.provider() .await? 
@@ -424,7 +386,7 @@ impl EthereumNode for GethNode { .map_err(Into::into) } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn latest_state_proof( &self, address: Address, @@ -440,7 +402,7 @@ impl EthereumNode for GethNode { } impl ResolverApi for GethNode { - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn chain_id(&self) -> anyhow::Result { self.provider() .await? @@ -449,7 +411,7 @@ impl ResolverApi for GethNode { .map_err(Into::into) } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn transaction_gas_price(&self, tx_hash: &TxHash) -> anyhow::Result { self.provider() .await? @@ -459,7 +421,7 @@ impl ResolverApi for GethNode { .map(|receipt| receipt.effective_gas_price) } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() .await? @@ -469,7 +431,7 @@ impl ResolverApi for GethNode { .map(|block| block.header.gas_limit as _) } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result
{ self.provider() .await? @@ -479,7 +441,7 @@ impl ResolverApi for GethNode { .map(|block| block.header.beneficiary) } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() .await? @@ -489,7 +451,7 @@ impl ResolverApi for GethNode { .map(|block| U256::from_be_bytes(block.header.mix_hash.0)) } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn block_base_fee(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() .await? @@ -504,7 +466,7 @@ impl ResolverApi for GethNode { }) } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() .await? @@ -514,7 +476,7 @@ impl ResolverApi for GethNode { .map(|block| block.header.hash) } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() .await? @@ -524,7 +486,7 @@ impl ResolverApi for GethNode { .map(|block| block.header.timestamp) } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] async fn last_block_number(&self) -> anyhow::Result { self.provider() .await? 
@@ -558,24 +520,26 @@ impl Node for GethNode { id, handle: None, start_timeout: config.geth_start_timeout, - wallet, - provider: Default::default(), + wallet: Arc::new(wallet), + chain_id_filler: Default::default(), + nonce_manager: Default::default(), // We know that we only need to be storing 2 files so we can specify that when creating // the vector. It's the stdout and stderr of the geth node. logs_file_to_flush: Vec::with_capacity(2), } } + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] fn id(&self) -> usize { self.id as _ } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] fn connection_string(&self) -> String { self.connection_string.clone() } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] fn shutdown(&mut self) -> anyhow::Result<()> { // Terminate the processes in a graceful manner to allow for the output to be flushed. 
if let Some(mut child) = self.handle.take() { @@ -597,13 +561,13 @@ impl Node for GethNode { Ok(()) } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] fn spawn(&mut self, genesis: String) -> anyhow::Result<()> { self.init(genesis)?.spawn_process()?; Ok(()) } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id), err)] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] fn version(&self) -> anyhow::Result { let output = Command::new(&self.geth) .arg("--version") @@ -616,8 +580,7 @@ impl Node for GethNode { Ok(String::from_utf8_lossy(&output).into()) } - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - fn matches_target(&self, targets: Option<&[String]>) -> bool { + fn matches_target(targets: Option<&[String]>) -> bool { match targets { None => true, Some(targets) => targets.iter().any(|str| str.as_str() == "evm"), @@ -630,7 +593,7 @@ impl Node for GethNode { } impl Drop for GethNode { - #[tracing::instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] fn drop(&mut self) { self.shutdown().expect("Failed to shutdown") } diff --git a/crates/node/src/kitchensink.rs b/crates/node/src/kitchensink.rs index a3f3149..832760a 100644 --- a/crates/node/src/kitchensink.rs +++ b/crates/node/src/kitchensink.rs @@ -3,7 +3,10 @@ use std::{ io::{BufRead, Write}, path::{Path, PathBuf}, process::{Child, Command, Stdio}, - sync::atomic::{AtomicU32, Ordering}, + sync::{ + Arc, + atomic::{AtomicU32, Ordering}, + }, time::Duration, }; @@ -39,7 +42,6 @@ use serde::{Deserialize, Serialize}; use serde_json::{Value as JsonValue, json}; use sp_core::crypto::Ss58Codec; use sp_runtime::AccountId32; -use tracing::Level; use revive_dt_config::Arguments; use revive_dt_node_interaction::EthereumNode; @@ -54,12 +56,13 @@ 
pub struct KitchensinkNode { substrate_binary: PathBuf, eth_proxy_binary: PathBuf, rpc_url: String, - wallet: EthereumWallet, base_directory: PathBuf, logs_directory: PathBuf, process_substrate: Option, process_proxy: Option, + wallet: Arc, nonce_manager: CachedNonceManager, + chain_id_filler: ChainIdFiller, /// This vector stores [`File`] objects that we use for logging which we want to flush when the /// node object is dropped. We do not store them in a structured fashion at the moment (in /// separate fields) as the logic that we need to apply to them is all the same regardless of @@ -87,7 +90,6 @@ impl KitchensinkNode { const PROXY_STDOUT_LOG_FILE_NAME: &str = "proxy_stdout.log"; const PROXY_STDERR_LOG_FILE_NAME: &str = "proxy_stderr.log"; - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))] fn init(&mut self, genesis: &str) -> anyhow::Result<&mut Self> { let _ = clear_directory(&self.base_directory); let _ = clear_directory(&self.logs_directory); @@ -160,7 +162,6 @@ impl KitchensinkNode { Ok(self) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] fn spawn_process(&mut self) -> anyhow::Result<()> { let substrate_rpc_port = Self::BASE_SUBSTRATE_RPC_PORT + self.id as u16; let proxy_rpc_port = Self::BASE_PROXY_RPC_PORT + self.id as u16; @@ -214,10 +215,6 @@ impl KitchensinkNode { Self::SUBSTRATE_READY_MARKER, Duration::from_secs(60), ) { - tracing::error!( - ?error, - "Failed to start substrate, shutting down gracefully" - ); self.shutdown()?; return Err(error); }; @@ -243,7 +240,6 @@ impl KitchensinkNode { Self::ETH_PROXY_READY_MARKER, Duration::from_secs(60), ) { - tracing::error!(?error, "Failed to start proxy, shutting down gracefully"); self.shutdown()?; return Err(error); }; @@ -258,7 +254,6 @@ impl KitchensinkNode { Ok(()) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] fn extract_balance_from_genesis_file( &self, genesis: &Genesis, @@ -307,7 +302,6 @@ impl KitchensinkNode 
{ } } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] pub fn eth_rpc_version(&self) -> anyhow::Result { let output = Command::new(&self.eth_proxy_binary) .arg("--version") @@ -320,74 +314,55 @@ impl KitchensinkNode { Ok(String::from_utf8_lossy(&output).trim().to_string()) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), level = Level::TRACE)] fn kitchensink_stdout_log_file_path(&self) -> PathBuf { self.logs_directory .join(Self::KITCHENSINK_STDOUT_LOG_FILE_NAME) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), level = Level::TRACE)] fn kitchensink_stderr_log_file_path(&self) -> PathBuf { self.logs_directory .join(Self::KITCHENSINK_STDERR_LOG_FILE_NAME) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), level = Level::TRACE)] fn proxy_stdout_log_file_path(&self) -> PathBuf { self.logs_directory.join(Self::PROXY_STDOUT_LOG_FILE_NAME) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), level = Level::TRACE)] fn proxy_stderr_log_file_path(&self) -> PathBuf { self.logs_directory.join(Self::PROXY_STDERR_LOG_FILE_NAME) } - fn provider( + async fn provider( &self, - ) -> impl Future< - Output = anyhow::Result< - FillProvider< - impl TxFiller, - impl Provider, - KitchenSinkNetwork, - >, + ) -> anyhow::Result< + FillProvider< + impl TxFiller, + impl Provider, + KitchenSinkNetwork, >, - > + 'static { - let connection_string = self.connection_string(); - let wallet = self.wallet.clone(); - - // Note: We would like all providers to make use of the same nonce manager so that we have - // monotonically increasing nonces that are cached. The cached nonce manager uses Arc's in - // its implementation and therefore it means that when we clone it then it still references - // the same state. 
- let nonce_manager = self.nonce_manager.clone(); - - Box::pin(async move { - ProviderBuilder::new() - .disable_recommended_fillers() - .network::() - .filler(FallbackGasFiller::new( - 25_000_000, - 1_000_000_000, - 1_000_000_000, - )) - .filler(ChainIdFiller::default()) - .filler(NonceFiller::new(nonce_manager)) - .wallet(wallet) - .connect(&connection_string) - .await - .map_err(Into::into) - }) + > { + ProviderBuilder::new() + .disable_recommended_fillers() + .network::() + .filler(FallbackGasFiller::new( + 25_000_000, + 1_000_000_000, + 1_000_000_000, + )) + .filler(self.chain_id_filler.clone()) + .filler(NonceFiller::new(self.nonce_manager.clone())) + .wallet(self.wallet.clone()) + .connect(&self.rpc_url) + .await + .map_err(Into::into) } } impl EthereumNode for KitchensinkNode { - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn execute_transaction( &self, transaction: alloy::rpc::types::TransactionRequest, ) -> anyhow::Result { - tracing::debug!(?transaction, "Submitting transaction"); let receipt = self .provider() .await? @@ -395,11 +370,9 @@ impl EthereumNode for KitchensinkNode { .await? .get_receipt() .await?; - tracing::info!(?receipt, "Submitted tx to kitchensink"); Ok(receipt) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn trace_transaction( &self, transaction: &TransactionReceipt, @@ -413,7 +386,6 @@ impl EthereumNode for KitchensinkNode { .await?) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn state_diff(&self, transaction: &TransactionReceipt) -> anyhow::Result { let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig { diff_mode: Some(true), @@ -430,7 +402,6 @@ impl EthereumNode for KitchensinkNode { } } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn balance_of(&self, address: Address) -> anyhow::Result { self.provider() .await? 
@@ -439,7 +410,6 @@ impl EthereumNode for KitchensinkNode { .map_err(Into::into) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn latest_state_proof( &self, address: Address, @@ -455,7 +425,6 @@ impl EthereumNode for KitchensinkNode { } impl ResolverApi for KitchensinkNode { - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn chain_id(&self) -> anyhow::Result { self.provider() .await? @@ -464,7 +433,6 @@ impl ResolverApi for KitchensinkNode { .map_err(Into::into) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn transaction_gas_price(&self, tx_hash: &TxHash) -> anyhow::Result { self.provider() .await? @@ -474,7 +442,6 @@ impl ResolverApi for KitchensinkNode { .map(|receipt| receipt.effective_gas_price) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() .await? @@ -484,7 +451,6 @@ impl ResolverApi for KitchensinkNode { .map(|block| block.header.gas_limit as _) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result
{ self.provider() .await? @@ -494,7 +460,6 @@ impl ResolverApi for KitchensinkNode { .map(|block| block.header.beneficiary) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() .await? @@ -504,7 +469,6 @@ impl ResolverApi for KitchensinkNode { .map(|block| U256::from_be_bytes(block.header.mix_hash.0)) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn block_base_fee(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() .await? @@ -519,7 +483,6 @@ impl ResolverApi for KitchensinkNode { }) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() .await? @@ -529,7 +492,6 @@ impl ResolverApi for KitchensinkNode { .map(|block| block.header.hash) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result { self.provider() .await? @@ -539,7 +501,6 @@ impl ResolverApi for KitchensinkNode { .map(|block| block.header.timestamp) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] async fn last_block_number(&self) -> anyhow::Result { self.provider() .await? @@ -570,11 +531,12 @@ impl Node for KitchensinkNode { substrate_binary: config.kitchensink.clone(), eth_proxy_binary: config.eth_proxy.clone(), rpc_url: String::new(), - wallet, base_directory, logs_directory, process_substrate: None, process_proxy: None, + wallet: Arc::new(wallet), + chain_id_filler: Default::default(), nonce_manager: Default::default(), // We know that we only need to be storing 4 files so we can specify that when creating // the vector. It's the stdout and stderr of the substrate-node and the eth-rpc. 
@@ -586,12 +548,10 @@ impl Node for KitchensinkNode { self.id as _ } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))] fn connection_string(&self) -> String { self.rpc_url.clone() } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] fn shutdown(&mut self) -> anyhow::Result<()> { // Terminate the processes in a graceful manner to allow for the output to be flushed. if let Some(mut child) = self.process_proxy.take() { @@ -618,12 +578,10 @@ impl Node for KitchensinkNode { Ok(()) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] fn spawn(&mut self, genesis: String) -> anyhow::Result<()> { self.init(&genesis)?.spawn_process() } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id), err)] fn version(&self) -> anyhow::Result<String> { let output = Command::new(&self.substrate_binary) .arg("--version") @@ -636,8 +594,7 @@ impl Node for KitchensinkNode { Ok(String::from_utf8_lossy(&output).into()) } - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))] - fn matches_target(&self, targets: Option<&[String]>) -> bool { + fn matches_target(targets: Option<&[String]>) -> bool { match targets { None => true, Some(targets) => targets.iter().any(|str| str.as_str() == "pvm"), @@ -650,7 +607,6 @@ impl Node for KitchensinkNode { } impl Drop for KitchensinkNode { - #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))] fn drop(&mut self) { self.shutdown().expect("Failed to shutdown") } diff --git a/crates/node/src/lib.rs b/crates/node/src/lib.rs index d40fc18..74ea8cd 100644 --- a/crates/node/src/lib.rs +++ b/crates/node/src/lib.rs @@ -39,7 +39,7 @@ pub trait Node: EthereumNode { /// Given a list of targets from the metadata file, this function determines if the metadata /// file can be ran on this node or not.
- fn matches_target(&self, targets: Option<&[String]>) -> bool; + fn matches_target(targets: Option<&[String]>) -> bool; /// Returns the EVM version of the node. fn evm_version() -> EVMVersion; diff --git a/crates/node/src/pool.rs b/crates/node/src/pool.rs index dfb71ab..015c004 100644 --- a/crates/node/src/pool.rs +++ b/crates/node/src/pool.rs @@ -63,7 +63,6 @@ where fn spawn_node(args: &Arguments, genesis: String) -> anyhow::Result<T> { let mut node = T::new(args); - tracing::info!("starting node: {}", node.connection_string()); node.spawn(genesis)?; Ok(node) } diff --git a/crates/report/Cargo.toml b/crates/report/Cargo.toml index d18caab..23f6bfa 100644 --- a/crates/report/Cargo.toml +++ b/crates/report/Cargo.toml @@ -17,3 +17,6 @@ anyhow = { workspace = true } tracing = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } + +[lints] +workspace = true diff --git a/crates/report/src/reporter.rs b/crates/report/src/reporter.rs index e5d0d1f..5313ac7 100644 --- a/crates/report/src/reporter.rs +++ b/crates/report/src/reporter.rs @@ -185,8 +185,6 @@ impl Report { let file = File::create(&path).context(path.display().to_string())?; serde_json::to_writer_pretty(file, &self)?; - tracing::info!("report written to: {}", path.display()); - Ok(()) } } diff --git a/crates/solc-binaries/Cargo.toml b/crates/solc-binaries/Cargo.toml index be5dcf7..30ff149 100644 --- a/crates/solc-binaries/Cargo.toml +++ b/crates/solc-binaries/Cargo.toml @@ -19,3 +19,6 @@ reqwest = { workspace = true } semver = { workspace = true } serde = { workspace = true } sha2 = { workspace = true } + +[lints] +workspace = true diff --git a/crates/solc-binaries/src/cache.rs b/crates/solc-binaries/src/cache.rs index b2d8846..57b9696 100644 --- a/crates/solc-binaries/src/cache.rs +++ b/crates/solc-binaries/src/cache.rs @@ -39,10 +39,7 @@ pub(crate) async fn get_or_download( } async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::Result<()> { -
tracing::info!("caching file: {}", path.display()); - let Ok(file) = File::create_new(path) else { - tracing::debug!("cache file already exists: {}", path.display()); return Ok(()); }; diff --git a/crates/solc-binaries/src/download.rs b/crates/solc-binaries/src/download.rs index 119c4dd..691f639 100644 --- a/crates/solc-binaries/src/download.rs +++ b/crates/solc-binaries/src/download.rs @@ -107,7 +107,6 @@ impl SolcDownloader { /// Errors out if the download fails or the digest of the downloaded file /// mismatches the expected digest from the release [List]. pub async fn download(&self) -> anyhow::Result<Vec<u8>> { - tracing::info!("downloading solc: {self:?}"); let builds = List::download(self.list).await?.builds; let build = builds .iter()