diff --git a/Cargo.lock b/Cargo.lock index 160dc91..230d945 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -2713,7 +2713,7 @@ dependencies = [ "libc", "percent-encoding", "pin-project-lite", - "socket2", + "socket2 0.5.10", "system-configuration", "tokio", "tower-service", @@ -2953,6 +2953,17 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "io-uring" +version = "0.7.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" +dependencies = [ + "bitflags 2.9.1", + "cfg-if", + "libc", +] + [[package]] name = "ipnet" version = "2.11.0" @@ -3969,9 +3980,7 @@ dependencies = [ "base64", "bytes", "encoding_rs", - "futures-channel", "futures-core", - "futures-util", "h2", "http", "http-body", @@ -4020,11 +4029,7 @@ name = "revive-dt-common" version = "0.1.0" dependencies = [ "anyhow", - "futures", - "once_cell", "semver 1.0.26", - "tokio", - "tracing", ] [[package]] @@ -4043,6 +4048,7 @@ dependencies = [ "semver 1.0.26", "serde", "serde_json", + "tokio", "tracing", ] @@ -4064,8 +4070,8 @@ dependencies = [ "alloy", "anyhow", "clap", + "futures", "indexmap 2.10.0", - "rayon", "revive-dt-common", "revive-dt-compiler", "revive-dt-config", @@ -4075,6 +4081,7 @@ dependencies = [ "revive-dt-report", "semver 1.0.26", "temp-dir", + "tokio", "tracing", "tracing-subscriber", ] @@ -4091,6 +4098,7 @@ dependencies = [ "semver 1.0.26", "serde", "serde_json", + "tokio", "tracing", ] @@ -4145,6 +4153,7 @@ dependencies = [ "semver 1.0.26", "serde", "sha2 0.10.9", + "tokio", "tracing", ] @@ -4701,6 +4710,15 @@ version = "1.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" +[[package]] +name = "signal-hook-registry" +version = "1.4.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "9203b8055f63a2a00e2f593bb0510367fe707d7ff1e5c872de2f537b339e5410" +dependencies = [ 
+ "libc", +] + [[package]] name = "signature" version = "2.2.0" @@ -4745,6 +4763,16 @@ dependencies = [ "windows-sys 0.52.0", ] +[[package]] +name = "socket2" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + [[package]] name = "sp-application-crypto" version = "40.1.0" @@ -5404,18 +5432,21 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.45.1" +version = "1.47.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779" +checksum = "43864ed400b6043a4757a25c7a64a8efde741aed79a056a2fb348a406701bb35" dependencies = [ "backtrace", "bytes", + "io-uring", "libc", "mio", "pin-project-lite", - "socket2", + "signal-hook-registry", + "slab", + "socket2 0.6.0", "tokio-macros", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] diff --git a/Cargo.toml b/Cargo.toml index 9855d12..92bb064 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -29,9 +29,8 @@ clap = { version = "4", features = ["derive"] } foundry-compilers-artifacts = { version = "0.18.0" } futures = { version = "0.3.31" } hex = "0.4.3" -reqwest = { version = "0.12.15", features = ["blocking", "json"] } +reqwest = { version = "0.12.15", features = ["json"] } once_cell = "1.21" -rayon = { version = "1.10" } semver = { version = "1.0", features = ["serde"] } serde = { version = "1.0", default-features = false, features = ["derive"] } serde_json = { version = "1.0", default-features = false, features = [ @@ -43,8 +42,10 @@ sp-core = "36.1.0" sp-runtime = "41.1.0" temp-dir = { version = "0.1.16" } tempfile = "3.3" -tokio = { version = "1", default-features = false, features = [ +tokio = { version = "1.47.0", default-features = false, features = [ "rt-multi-thread", + "process", + "rt", ] } 
uuid = { version = "1.8", features = ["v4"] } tracing = "0.1.41" diff --git a/crates/common/Cargo.toml b/crates/common/Cargo.toml index 7dc81c4..c804254 100644 --- a/crates/common/Cargo.toml +++ b/crates/common/Cargo.toml @@ -10,8 +10,4 @@ rust-version.workspace = true [dependencies] anyhow = { workspace = true } -futures = { workspace = true } semver = { workspace = true } -tracing = { workspace = true } -once_cell = { workspace = true } -tokio = { workspace = true } diff --git a/crates/common/src/concepts/blocking_executor.rs b/crates/common/src/concepts/blocking_executor.rs deleted file mode 100644 index b07b7fd..0000000 --- a/crates/common/src/concepts/blocking_executor.rs +++ /dev/null @@ -1,225 +0,0 @@ -//! The alloy crate __requires__ a tokio runtime. -//! We contain any async rust right here. - -use std::{any::Any, panic::AssertUnwindSafe, pin::Pin, thread}; - -use futures::FutureExt; -use once_cell::sync::Lazy; -use tokio::{ - runtime::Builder, - sync::{mpsc::UnboundedSender, oneshot}, -}; -use tracing::Instrument; - -/// A blocking async executor. -/// -/// This struct exposes the abstraction of a blocking async executor. It is a global and static -/// executor which means that it doesn't require for new instances of it to be created, it's a -/// singleton and can be accessed by any thread that wants to perform some async computation on the -/// blocking executor thread. -/// -/// The API of the blocking executor is created in a way so that it's very natural, simple to use, -/// and unbounded to specific tasks or return types. 
The following is an example of using this -/// executor to drive an async computation: -/// -/// ```rust -/// use revive_dt_common::concepts::*; -/// -/// fn blocking_function() { -/// let result = BlockingExecutor::execute(async move { -/// tokio::time::sleep(std::time::Duration::from_secs(1)).await; -/// 0xFFu8 -/// }) -/// .expect("Computation failed"); -/// -/// assert_eq!(result, 0xFF); -/// } -/// ``` -/// -/// Users get to pass in their async tasks without needing to worry about putting them in a [`Box`], -/// [`Pin`], needing to perform down-casting, or the internal channel mechanism used by the runtime. -/// To the user, it just looks like a function that converts some async code into sync code. -/// -/// This struct also handled panics that occur in the passed futures and converts them into errors -/// that can be handled by the user. This is done to allow the executor to be robust. -/// -/// Internally, the executor communicates with the tokio runtime thread through channels which carry -/// the [`TaskMessage`] and the results of the execution. -pub struct BlockingExecutor; - -impl BlockingExecutor { - pub fn execute(future: impl Future + Send + 'static) -> Result - where - R: Send + 'static, - { - // Note: The blocking executor is a singleton and therefore we store its state in a static - // so that it's assigned only once. Additionally, when we set the state of the executor we - // spawn the thread where the async runtime runs. - static STATE: Lazy = Lazy::new(|| { - tracing::trace!("Initializing the BlockingExecutor state"); - - // All communication with the tokio runtime thread happens over mspc channels where the - // producers here are the threads that want to run async tasks and the consumer here is - // the tokio runtime thread. 
- let (tx, mut rx) = tokio::sync::mpsc::unbounded_channel::(); - - thread::spawn(move || { - tracing::info!( - thread_id = ?std::thread::current().id(), - "Starting async runtime thread" - ); - - let runtime = Builder::new_current_thread() - .enable_all() - .build() - .expect("Failed to create the async runtime"); - - runtime.block_on(async move { - while let Some(TaskMessage { - future: task, - response_tx: response_channel, - }) = rx.recv().await - { - tracing::trace!("Received a new future to execute"); - tokio::spawn(async move { - // One of the things that the blocking executor does is that it allows - // us to catch panics if they occur. By wrapping the given future in an - // AssertUnwindSafe::catch_unwind we are able to catch all panic unwinds - // in the given future and convert them into errors. - let task = AssertUnwindSafe(task).catch_unwind(); - - let result = task.await; - let _ = response_channel.send(result); - }); - } - }) - }); - - ExecutorState { tx } - }); - - // We need to perform blocking synchronous communication between the current thread and the - // tokio runtime thread with the result of the async computation and the oneshot channels - // from tokio allows us to do that. The sender side of the channel will be given to the - // tokio runtime thread to send the result when the computation is completed and the receive - // side of the channel will be kept with this thread to await for the response of the async - // task to come back. - let (response_tx, response_rx) = - oneshot::channel::, Box>>(); - - // The tokio runtime thread expects a Future> + Send to be - // sent to it to execute. However, this function has a typed Future + Send and - // therefore we need to change the type of the future to fit what the runtime thread expects - // in the task message. In doing this conversion, we lose some of the type information since - // we're converting R => dyn Any. 
However, we will perform down-casting on the result to - // convert it back into R. - let future = Box::pin( - async move { Box::new(future.await) as Box }.in_current_span(), - ); - - let task = TaskMessage::new(future, response_tx); - if let Err(error) = STATE.tx.send(task) { - tracing::error!(?error, "Failed to send the task to the blocking executor"); - anyhow::bail!("Failed to send the task to the blocking executor: {error:?}") - } - - let result = match response_rx.blocking_recv() { - Ok(result) => result, - Err(error) => { - tracing::error!( - ?error, - "Failed to get the response from the blocking executor" - ); - anyhow::bail!("Failed to get the response from the blocking executor: {error:?}") - } - }; - - let result = match result { - Ok(result) => result, - Err(error) => { - tracing::error!(?error, "An error occurred when running the async task"); - anyhow::bail!("An error occurred when running the async task: {error:?}") - } - }; - - Ok(*result - .downcast::() - .expect("An error occurred when downcasting into R. This is a bug")) - } -} -/// Represents the state of the async runtime. This runtime is designed to be a singleton runtime -/// which means that in the current running program there's just a single thread that has an async -/// runtime. -struct ExecutorState { - /// The sending side of the task messages channel. This is used by all of the other threads to - /// communicate with the async runtime thread. - tx: UnboundedSender, -} - -/// Represents a message that contains an asynchronous task that's to be executed by the runtime -/// as well as a way for the runtime to report back on the result of the execution. -struct TaskMessage { - /// The task that's being requested to run. This is a future that returns an object that does - /// implement [`Any`] and [`Send`] to allow it to be sent between the requesting thread and the - /// async thread. 
- future: Pin> + Send>>, - - /// A one shot sender channel where the sender of the task is expecting to hear back on the - /// result of the task. - response_tx: oneshot::Sender, Box>>, -} - -impl TaskMessage { - pub fn new( - future: Pin> + Send>>, - response_tx: oneshot::Sender, Box>>, - ) -> Self { - Self { - future, - response_tx, - } - } -} - -#[cfg(test)] -mod test { - use super::*; - - #[test] - fn simple_future_works() { - // Act - let result = BlockingExecutor::execute(async move { - tokio::time::sleep(std::time::Duration::from_secs(1)).await; - 0xFFu8 - }) - .unwrap(); - - // Assert - assert_eq!(result, 0xFFu8); - } - - #[test] - #[allow(unreachable_code, clippy::unreachable)] - fn panics_in_futures_are_caught() { - // Act - let result = BlockingExecutor::execute(async move { - panic!( - "If this panic causes, well, a panic, then this is an issue. If it's caught then all good!" - ); - 0xFFu8 - }); - - // Assert - assert!(result.is_err()); - - // Act - let result = BlockingExecutor::execute(async move { - tokio::time::sleep(std::time::Duration::from_secs(1)).await; - 0xFFu8 - }) - .unwrap(); - - // Assert - assert_eq!(result, 0xFFu8) - } -} diff --git a/crates/common/src/concepts/mod.rs b/crates/common/src/concepts/mod.rs deleted file mode 100644 index cf633e9..0000000 --- a/crates/common/src/concepts/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -mod blocking_executor; - -pub use blocking_executor::*; diff --git a/crates/common/src/fs/clear_dir.rs b/crates/common/src/fs/clear_dir.rs new file mode 100644 index 0000000..1e6c83d --- /dev/null +++ b/crates/common/src/fs/clear_dir.rs @@ -0,0 +1,22 @@ +use std::{ + fs::{read_dir, remove_dir_all, remove_file}, + path::Path, +}; + +use anyhow::Result; + +/// This method clears the passed directory of all of the files and directories contained within +/// without deleting the directory. +pub fn clear_directory(path: impl AsRef) -> Result<()> { + for entry in read_dir(path.as_ref())? 
{ + let entry = entry?; + let entry_path = entry.path(); + + if entry_path.is_file() { + remove_file(entry_path)? + } else { + remove_dir_all(entry_path)? + } + } + Ok(()) +} diff --git a/crates/common/src/fs/mod.rs b/crates/common/src/fs/mod.rs new file mode 100644 index 0000000..f41c82e --- /dev/null +++ b/crates/common/src/fs/mod.rs @@ -0,0 +1,3 @@ +mod clear_dir; + +pub use clear_dir::*; diff --git a/crates/common/src/lib.rs b/crates/common/src/lib.rs index 8b0ae35..e280af5 100644 --- a/crates/common/src/lib.rs +++ b/crates/common/src/lib.rs @@ -1,7 +1,7 @@ //! This crate provides common concepts, functionality, types, macros, and more that other crates in //! the workspace can benefit from. -pub mod concepts; +pub mod fs; pub mod iterators; pub mod macros; pub mod types; diff --git a/crates/compiler/Cargo.toml b/crates/compiler/Cargo.toml index 295a147..9e10a10 100644 --- a/crates/compiler/Cargo.toml +++ b/crates/compiler/Cargo.toml @@ -23,3 +23,4 @@ semver = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } tracing = { workspace = true } +tokio = { workspace = true } diff --git a/crates/compiler/src/lib.rs b/crates/compiler/src/lib.rs index 364359c..551831e 100644 --- a/crates/compiler/src/lib.rs +++ b/crates/compiler/src/lib.rs @@ -33,14 +33,14 @@ pub trait SolidityCompiler { &self, input: CompilerInput, additional_options: Self::Options, - ) -> anyhow::Result; + ) -> impl Future>; fn new(solc_executable: PathBuf) -> Self; fn get_compiler_executable( config: &Arguments, version: impl Into, - ) -> anyhow::Result; + ) -> impl Future>; fn version(&self) -> anyhow::Result; } @@ -147,8 +147,13 @@ where self } - pub fn try_build(self, compiler_path: impl AsRef) -> anyhow::Result { - T::new(compiler_path.as_ref().to_path_buf()).build(self.input, self.additional_options) + pub async fn try_build( + self, + compiler_path: impl AsRef, + ) -> anyhow::Result { + T::new(compiler_path.as_ref().to_path_buf()) + .build(self.input, 
self.additional_options) + .await } pub fn input(&self) -> CompilerInput { diff --git a/crates/compiler/src/revive_resolc.rs b/crates/compiler/src/revive_resolc.rs index 507539e..0771211 100644 --- a/crates/compiler/src/revive_resolc.rs +++ b/crates/compiler/src/revive_resolc.rs @@ -6,7 +6,6 @@ use std::{ process::{Command, Stdio}, }; -use alloy::json_abi::JsonAbi; use revive_dt_common::types::VersionOrRequirement; use revive_dt_config::Arguments; use revive_solc_json_interface::{ @@ -17,8 +16,10 @@ use revive_solc_json_interface::{ use crate::{CompilerInput, CompilerOutput, SolidityCompiler}; +use alloy::json_abi::JsonAbi; use anyhow::Context; use semver::Version; +use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand}; // TODO: I believe that we need to also pass the solc compiler to resolc so that resolc uses the // specified solc compiler. I believe that currently we completely ignore the specified solc binary @@ -35,7 +36,7 @@ impl SolidityCompiler for Resolc { type Options = Vec; #[tracing::instrument(level = "debug", ret)] - fn build( + async fn build( &self, CompilerInput { enable_optimization, @@ -87,7 +88,7 @@ impl SolidityCompiler for Resolc { }, }; - let mut command = Command::new(&self.resolc_path); + let mut command = AsyncCommand::new(&self.resolc_path); command .stdin(Stdio::piped()) .stdout(Stdio::piped()) @@ -109,9 +110,10 @@ impl SolidityCompiler for Resolc { let mut child = command.spawn()?; let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped"); - serde_json::to_writer(stdin_pipe, &input)?; + let serialized_input = serde_json::to_vec(&input)?; + stdin_pipe.write_all(&serialized_input).await?; - let output = child.wait_with_output()?; + let output = child.wait_with_output().await?; let stdout = output.stdout; let stderr = output.stderr; @@ -195,7 +197,7 @@ impl SolidityCompiler for Resolc { Resolc { resolc_path } } - fn get_compiler_executable( + async fn get_compiler_executable( config: &Arguments, _version: impl Into, ) 
-> anyhow::Result { @@ -233,11 +235,13 @@ impl SolidityCompiler for Resolc { mod test { use super::*; - #[test] - fn compiler_version_can_be_obtained() { + #[tokio::test] + async fn compiler_version_can_be_obtained() { // Arrange let args = Arguments::default(); - let path = Resolc::get_compiler_executable(&args, Version::new(0, 7, 6)).unwrap(); + let path = Resolc::get_compiler_executable(&args, Version::new(0, 7, 6)) + .await + .unwrap(); let compiler = Resolc::new(path); // Act diff --git a/crates/compiler/src/solc.rs b/crates/compiler/src/solc.rs index 20f5e6d..f296e3e 100644 --- a/crates/compiler/src/solc.rs +++ b/crates/compiler/src/solc.rs @@ -21,6 +21,7 @@ use foundry_compilers_artifacts::{ solc::*, }; use semver::Version; +use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand}; #[derive(Debug)] pub struct Solc { @@ -31,7 +32,7 @@ impl SolidityCompiler for Solc { type Options = (); #[tracing::instrument(level = "debug", ret)] - fn build( + async fn build( &self, CompilerInput { enable_optimization, @@ -90,7 +91,7 @@ impl SolidityCompiler for Solc { }, }; - let mut command = Command::new(&self.solc_path); + let mut command = AsyncCommand::new(&self.solc_path); command .stdin(Stdio::piped()) .stdout(Stdio::piped()) @@ -112,8 +113,9 @@ impl SolidityCompiler for Solc { let mut child = command.spawn()?; let stdin = child.stdin.as_mut().expect("should be piped"); - serde_json::to_writer(stdin, &input)?; - let output = child.wait_with_output()?; + let serialized_input = serde_json::to_vec(&input)?; + stdin.write_all(&serialized_input).await?; + let output = child.wait_with_output().await?; if !output.status.success() { let json_in = serde_json::to_string_pretty(&input)?; @@ -177,11 +179,11 @@ impl SolidityCompiler for Solc { Self { solc_path } } - fn get_compiler_executable( + async fn get_compiler_executable( config: &Arguments, version: impl Into, ) -> anyhow::Result { - let path = download_solc(config.directory(), version, config.wasm)?; + let path = 
download_solc(config.directory(), version, config.wasm).await?; Ok(path) } @@ -216,11 +218,15 @@ impl SolidityCompiler for Solc { mod test { use super::*; - #[test] - fn compiler_version_can_be_obtained() { + #[tokio::test] + async fn compiler_version_can_be_obtained() { // Arrange let args = Arguments::default(); - let path = Solc::get_compiler_executable(&args, Version::new(0, 7, 6)).unwrap(); + println!("Getting compiler path"); + let path = Solc::get_compiler_executable(&args, Version::new(0, 7, 6)) + .await + .unwrap(); + println!("Got compiler path"); let compiler = Solc::new(path); // Act diff --git a/crates/compiler/tests/lib.rs b/crates/compiler/tests/lib.rs index 63e07e7..733e2d3 100644 --- a/crates/compiler/tests/lib.rs +++ b/crates/compiler/tests/lib.rs @@ -4,11 +4,14 @@ use revive_dt_compiler::{Compiler, SolidityCompiler, revive_resolc::Resolc, solc use revive_dt_config::Arguments; use semver::Version; -#[test] -fn contracts_can_be_compiled_with_solc() { +#[tokio::test] +async fn contracts_can_be_compiled_with_solc() { // Arrange let args = Arguments::default(); - let compiler_path = Solc::get_compiler_executable(&args, Version::new(0, 8, 30)).unwrap(); + let compiler_path = Solc::get_compiler_executable(&args, Version::new(0, 8, 30)) + .await + .unwrap(); + println!("About to assert"); // Act let output = Compiler::::new() @@ -16,7 +19,8 @@ fn contracts_can_be_compiled_with_solc() { .unwrap() .with_source("./tests/assets/array_one_element/main.sol") .unwrap() - .try_build(compiler_path); + .try_build(compiler_path) + .await; // Assert let output = output.expect("Failed to compile"); @@ -42,11 +46,13 @@ fn contracts_can_be_compiled_with_solc() { assert!(callable_file_contracts.contains_key("Callable")); } -#[test] -fn contracts_can_be_compiled_with_resolc() { +#[tokio::test] +async fn contracts_can_be_compiled_with_resolc() { // Arrange let args = Arguments::default(); - let compiler_path = Resolc::get_compiler_executable(&args, Version::new(0, 8, 
30)).unwrap(); + let compiler_path = Resolc::get_compiler_executable(&args, Version::new(0, 8, 30)) + .await + .unwrap(); // Act let output = Compiler::::new() @@ -54,7 +60,8 @@ fn contracts_can_be_compiled_with_resolc() { .unwrap() .with_source("./tests/assets/array_one_element/main.sol") .unwrap() - .try_build(compiler_path); + .try_build(compiler_path) + .await; // Assert let output = output.expect("Failed to compile"); diff --git a/crates/config/src/lib.rs b/crates/config/src/lib.rs index e275904..bc8b312 100644 --- a/crates/config/src/lib.rs +++ b/crates/config/src/lib.rs @@ -77,7 +77,7 @@ pub struct Arguments { /// This argument controls which private keys the nodes should have access to and be added to /// its wallet signers. With a value of N, private keys (0, N] will be added to the signer set /// of the node. - #[arg(long = "private-keys-count", default_value_t = 30)] + #[arg(long = "private-keys-count", default_value_t = 15_000)] pub private_keys_to_add: usize, /// The differential testing leader node implementation. @@ -92,9 +92,13 @@ pub struct Arguments { #[arg(long = "compile-only")] pub compile_only: Option, - /// Determines the amount of tests that are executed in parallel. - #[arg(long = "workers", default_value = "12")] - pub workers: usize, + /// Determines the amount of nodes that will be spawned for each chain. + #[arg(long, default_value = "1")] + pub number_of_nodes: usize, + + /// Determines the amount of threads that will will be used. + #[arg(long, default_value = "12")] + pub number_of_threads: usize, /// Extract problems back to the test corpus. #[arg(short, long = "extract-problems")] @@ -159,7 +163,9 @@ impl Default for Arguments { /// The Solidity compatible node implementation. /// /// This describes the solutions to be tested against on a high level. 
-#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq, ValueEnum, Serialize, Deserialize)] +#[derive( + Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, ValueEnum, Serialize, Deserialize, +)] #[clap(rename_all = "lower")] pub enum TestingPlatform { /// The go-ethereum reference full node EVM implementation. diff --git a/crates/core/Cargo.toml b/crates/core/Cargo.toml index a1da05d..3966618 100644 --- a/crates/core/Cargo.toml +++ b/crates/core/Cargo.toml @@ -24,9 +24,10 @@ revive-dt-report = { workspace = true } alloy = { workspace = true } anyhow = { workspace = true } clap = { workspace = true } +futures = { workspace = true } indexmap = { workspace = true } +tokio = { workspace = true } tracing = { workspace = true } tracing-subscriber = { workspace = true } -rayon = { workspace = true } semver = { workspace = true } temp-dir = { workspace = true } diff --git a/crates/core/src/driver/mod.rs b/crates/core/src/driver/mod.rs index 721003e..fc06ca1 100644 --- a/crates/core/src/driver/mod.rs +++ b/crates/core/src/driver/mod.rs @@ -1,7 +1,6 @@ //! The test driver handles the compilation and execution of the test cases. 
use std::collections::HashMap; -use std::fmt::Debug; use std::marker::PhantomData; use std::path::PathBuf; @@ -24,218 +23,75 @@ use anyhow::Context; use indexmap::IndexMap; use semver::Version; -use revive_dt_common::iterators::FilesWithExtensionIterator; -use revive_dt_compiler::{Compiler, SolidityCompiler}; -use revive_dt_config::Arguments; -use revive_dt_format::case::CaseIdx; +use revive_dt_format::case::{Case, CaseIdx}; use revive_dt_format::input::{Calldata, EtherValue, Expected, ExpectedOutput, Method}; use revive_dt_format::metadata::{ContractInstance, ContractPathAndIdent}; -use revive_dt_format::{input::Input, metadata::Metadata, mode::SolcMode}; +use revive_dt_format::{input::Input, metadata::Metadata}; use revive_dt_node::Node; use revive_dt_node_interaction::EthereumNode; -use revive_dt_report::reporter::{CompilationTask, Report, Span}; use crate::Platform; -pub struct State<'a, T: Platform> { - /// The configuration that the framework was started with. - /// - /// This is currently used to get certain information from it such as the solc mode and other - /// information used at runtime. - config: &'a Arguments, - - /// The [`Span`] used in reporting. - span: Span, - +pub struct CaseState { /// A map of all of the compiled contracts for the given metadata file. compiled_contracts: HashMap>, - /// This map stores the contracts deployments that have been made for each case within a - /// metadata file. Note, this means that the state can't be reused between different metadata - /// files. - deployed_contracts: HashMap>, + /// This map stores the contracts deployments for this case. + deployed_contracts: HashMap, /// This map stores the variables used for each one of the cases contained in the metadata /// file. - variables: HashMap>, + variables: HashMap, - /// This is a map of the deployed libraries. - /// - /// This map is not per case, but rather, per metadata file. This means that we do not redeploy - /// the libraries with each case. 
- deployed_libraries: HashMap, - - /// Stores the version of the compiler used for the given Solc mode. - compiler_version: HashMap<&'a SolcMode, Version>, + /// Stores the version used for the current case. + compiler_version: Version, phantom: PhantomData, } -impl<'a, T> State<'a, T> +impl CaseState where T: Platform, { - pub fn new(config: &'a Arguments, span: Span) -> Self { + pub fn new( + compiler_version: Version, + compiled_contracts: HashMap>, + deployed_contracts: HashMap, + ) -> Self { Self { - config, - span, - compiled_contracts: Default::default(), - deployed_contracts: Default::default(), + compiled_contracts, + deployed_contracts, variables: Default::default(), - deployed_libraries: Default::default(), - compiler_version: Default::default(), - phantom: Default::default(), + compiler_version, + phantom: PhantomData, } } - /// Returns a copy of the current span. - fn span(&self) -> Span { - self.span - } - - pub fn build_contracts( - &mut self, - mode: &'a SolcMode, - metadata: &Metadata, - ) -> anyhow::Result<()> { - let mut span = self.span(); - span.next_metadata( - metadata - .file_path - .as_ref() - .expect("metadata should have been read from a file") - .clone(), - ); - - let compiler_version_or_requirement = - mode.compiler_version_to_use(self.config.solc.clone()); - let compiler_path = - T::Compiler::get_compiler_executable(self.config, compiler_version_or_requirement)?; - let compiler_version = T::Compiler::new(compiler_path.clone()).version()?; - self.compiler_version.insert(mode, compiler_version.clone()); - - tracing::info!(%compiler_version, "Resolved the compiler version to use"); - - let compiler = Compiler::::new() - .with_allow_path(metadata.directory()?) - .with_optimization(mode.solc_optimize()); - let mut compiler = metadata - .files_to_compile()? 
- .try_fold(compiler, |compiler, path| compiler.with_source(&path))?; - for (library_instance, (library_address, _)) in self.deployed_libraries.iter() { - let library_ident = &metadata - .contracts - .as_ref() - .and_then(|contracts| contracts.get(library_instance)) - .expect("Impossible for library to not be found in contracts") - .contract_ident; - - // Note the following: we need to tell solc which files require the libraries to be - // linked into them. We do not have access to this information and therefore we choose - // an easier, yet more compute intensive route, of telling solc that all of the files - // need to link the library and it will only perform the linking for the files that do - // actually need the library. - compiler = FilesWithExtensionIterator::new(metadata.directory()?) - .with_allowed_extension("sol") - .fold(compiler, |compiler, path| { - compiler.with_library(&path, library_ident.as_str(), *library_address) - }); - } - - let mut task = CompilationTask { - json_input: compiler.input(), - json_output: None, - mode: mode.clone(), - compiler_version: format!("{}", &compiler_version), - error: None, - }; - - match compiler.try_build(compiler_path) { - Ok(output) => { - task.json_output = Some(output.clone()); - - for (contract_path, contracts) in output.contracts.into_iter() { - let map = self - .compiled_contracts - .entry(contract_path.clone()) - .or_default(); - for (contract_name, contract_info) in contracts.into_iter() { - tracing::debug!( - contract_path = %contract_path.display(), - contract_name = contract_name, - "Compiled contract" - ); - - map.insert(contract_name, contract_info); - } - } - - Report::compilation(span, T::config_id(), task); - Ok(()) - } - Err(error) => { - tracing::error!("Failed to compile contract: {:?}", error.to_string()); - task.error = Some(error.to_string()); - Err(error) - } - } - } - - pub fn build_and_publish_libraries( - &mut self, - metadata: &Metadata, - mode: &'a SolcMode, - node: &T::Blockchain, - ) 
-> anyhow::Result<()> { - self.build_contracts(mode, metadata)?; - - for library_instance in metadata - .libraries - .iter() - .flatten() - .flat_map(|(_, map)| map.values()) - { - self.get_or_deploy_contract_instance( - library_instance, - metadata, - None, - Input::default_caller(), - None, - None, - node, - )?; - } - - Ok(()) - } - - pub fn handle_input( + pub async fn handle_input( &mut self, metadata: &Metadata, case_idx: CaseIdx, input: &Input, node: &T::Blockchain, - mode: &SolcMode, ) -> anyhow::Result<(TransactionReceipt, GethTrace, DiffMode)> { - let deployment_receipts = - self.handle_contract_deployment(metadata, case_idx, input, node)?; - let execution_receipt = - self.handle_input_execution(case_idx, input, deployment_receipts, node)?; - let tracing_result = self.handle_input_call_frame_tracing(&execution_receipt, node)?; - self.handle_input_variable_assignment(case_idx, input, &tracing_result)?; - self.handle_input_expectations( - case_idx, - input, - &execution_receipt, - node, - mode, - &tracing_result, - )?; + let deployment_receipts = self + .handle_contract_deployment(metadata, case_idx, input, node) + .await?; + let execution_receipt = self + .handle_input_execution(input, deployment_receipts, node) + .await?; + let tracing_result = self + .handle_input_call_frame_tracing(&execution_receipt, node) + .await?; + self.handle_input_variable_assignment(input, &tracing_result)?; + self.handle_input_expectations(input, &execution_receipt, node, &tracing_result) + .await?; self.handle_input_diff(case_idx, execution_receipt, node) + .await } /// Handles the contract deployment for a given input performing it if it needs to be performed. 
- fn handle_contract_deployment( + async fn handle_contract_deployment( &mut self, metadata: &Metadata, case_idx: CaseIdx, @@ -251,12 +107,7 @@ where let mut instances_we_must_deploy = IndexMap::::new(); for instance in input.find_all_contract_instances().into_iter() { - if !self - .deployed_contracts - .entry(case_idx) - .or_insert_with(|| self.deployed_libraries.clone()) - .contains_key(&instance) - { + if !self.deployed_contracts.contains_key(&instance) { instances_we_must_deploy.entry(instance).or_insert(false); } } @@ -277,15 +128,17 @@ where .then_some(input.value) .flatten(); - if let (_, _, Some(receipt)) = self.get_or_deploy_contract_instance( - &instance, - metadata, - case_idx, - input.caller, - calldata, - value, - node, - )? { + if let (_, _, Some(receipt)) = self + .get_or_deploy_contract_instance( + &instance, + metadata, + input.caller, + calldata, + value, + node, + ) + .await? + { receipts.insert(instance.clone(), receipt); } } @@ -294,9 +147,8 @@ where } /// Handles the execution of the input in terms of the calls that need to be made. 
- fn handle_input_execution( + async fn handle_input_execution( &mut self, - case_idx: CaseIdx, input: &Input, mut deployment_receipts: HashMap, node: &T::Blockchain, @@ -308,13 +160,10 @@ where .remove(&input.instance) .context("Failed to find deployment receipt"), Method::Fallback | Method::FunctionName(_) => { - let tx = match input.legacy_transaction( - self.deployed_contracts - .entry(case_idx) - .or_insert_with(|| self.deployed_libraries.clone()), - &*self.variables.entry(case_idx).or_default(), - node, - ) { + let tx = match input + .legacy_transaction(&self.deployed_contracts, &self.variables, node) + .await + { Ok(tx) => { tracing::debug!("Legacy transaction data: {tx:#?}"); tx @@ -327,7 +176,7 @@ where tracing::trace!("Executing transaction for input: {input:?}"); - match node.execute_transaction(tx) { + match node.execute_transaction(tx).await { Ok(receipt) => Ok(receipt), Err(err) => { tracing::error!( @@ -342,7 +191,7 @@ where } } - fn handle_input_call_frame_tracing( + async fn handle_input_call_frame_tracing( &self, execution_receipt: &TransactionReceipt, node: &T::Blockchain, @@ -356,6 +205,7 @@ where ..Default::default() }, ) + .await .map(|trace| { trace .try_into_call_frame() @@ -365,7 +215,6 @@ where fn handle_input_variable_assignment( &mut self, - case_idx: CaseIdx, input: &Input, tracing_result: &CallFrame, ) -> anyhow::Result<()> { @@ -383,22 +232,17 @@ where .chunks(32), ) { let value = U256::from_be_slice(output_word); - self.variables - .entry(case_idx) - .or_default() - .insert(variable_name.clone(), value); + self.variables.insert(variable_name.clone(), value); } Ok(()) } - fn handle_input_expectations( + async fn handle_input_expectations( &mut self, - case_idx: CaseIdx, input: &Input, execution_receipt: &TransactionReceipt, node: &T::Blockchain, - mode: &SolcMode, tracing_result: &CallFrame, ) -> anyhow::Result<()> { let span = tracing::info_span!("Handling input expectations"); @@ -434,42 +278,32 @@ where for expectation in 
expectations.iter() { self.handle_input_expectation_item( - case_idx, execution_receipt, node, expectation, tracing_result, - mode, - )?; + ) + .await?; } Ok(()) } - fn handle_input_expectation_item( + async fn handle_input_expectation_item( &mut self, - case_idx: CaseIdx, execution_receipt: &TransactionReceipt, node: &T::Blockchain, expectation: &ExpectedOutput, tracing_result: &CallFrame, - mode: &SolcMode, ) -> anyhow::Result<()> { if let Some(ref version_requirement) = expectation.compiler_version { - let compiler_version = self - .compiler_version - .get(mode) - .context("Failed to find the compiler version fo the solc mode")?; - if !version_requirement.matches(compiler_version) { + if !version_requirement.matches(&self.compiler_version) { return Ok(()); } } - let deployed_contracts = self - .deployed_contracts - .entry(case_idx) - .or_insert_with(|| self.deployed_libraries.clone()); - let variables = self.variables.entry(case_idx).or_default(); + let deployed_contracts = &mut self.deployed_contracts; + let variables = &mut self.variables; let chain_state_provider = node; // Handling the receipt state assertion. @@ -492,12 +326,15 @@ where if let Some(ref expected_calldata) = expectation.return_data { let expected = expected_calldata; let actual = &tracing_result.output.as_ref().unwrap_or_default(); - if !expected.is_equivalent( - actual, - deployed_contracts, - &*variables, - chain_state_provider, - )? { + if !expected + .is_equivalent( + actual, + deployed_contracts, + &*variables, + chain_state_provider, + ) + .await? + { tracing::error!( ?execution_receipt, ?expected, @@ -528,7 +365,8 @@ where if let Some(ref expected_address) = expected_event.address { let expected = Address::from_slice( Calldata::new_compound([expected_address]) - .calldata(deployed_contracts, &*variables, node)? + .calldata(deployed_contracts, &*variables, node) + .await? 
.get(12..32) .expect("Can't fail"), ); @@ -553,12 +391,15 @@ where .zip(actual_event.topics()) { let expected = Calldata::new_compound([expected]); - if !expected.is_equivalent( - &actual.0, - deployed_contracts, - &*variables, - chain_state_provider, - )? { + if !expected + .is_equivalent( + &actual.0, + deployed_contracts, + &*variables, + chain_state_provider, + ) + .await? + { tracing::error!( ?execution_receipt, ?expected, @@ -574,12 +415,15 @@ where // Handling the values assertion. let expected = &expected_event.values; let actual = &actual_event.data().data; - if !expected.is_equivalent( - &actual.0, - deployed_contracts, - &*variables, - chain_state_provider, - )? { + if !expected + .is_equivalent( + &actual.0, + deployed_contracts, + &*variables, + chain_state_provider, + ) + .await? + { tracing::error!( ?execution_receipt, ?expected, @@ -596,7 +440,7 @@ where Ok(()) } - fn handle_input_diff( + async fn handle_input_diff( &mut self, _: CaseIdx, execution_receipt: TransactionReceipt, @@ -611,8 +455,10 @@ where disable_storage: None, }); - let trace = node.trace_transaction(&execution_receipt, trace_options)?; - let diff = node.state_diff(&execution_receipt)?; + let trace = node + .trace_transaction(&execution_receipt, trace_options) + .await?; + let diff = node.state_diff(&execution_receipt).await?; Ok((execution_receipt, trace, diff)) } @@ -623,27 +469,16 @@ where /// If a [`CaseIdx`] is not specified then this contact instance address will be stored in the /// cross-case deployed contracts address mapping. 
#[allow(clippy::too_many_arguments)] - pub fn get_or_deploy_contract_instance( + pub async fn get_or_deploy_contract_instance( &mut self, contract_instance: &ContractInstance, metadata: &Metadata, - case_idx: impl Into>, deployer: Address, calldata: Option<&Calldata>, value: Option, node: &T::Blockchain, ) -> anyhow::Result<(Address, JsonAbi, Option)> { - let case_idx = case_idx.into(); - - let deployed_contracts = match case_idx { - Some(case_idx) => self - .deployed_contracts - .entry(case_idx) - .or_insert_with(|| self.deployed_libraries.clone()), - None => &mut self.deployed_libraries, - }; - - if let Some((address, abi)) = deployed_contracts.get(contract_instance) { + if let Some((address, abi)) = self.deployed_contracts.get(contract_instance) { return Ok((*address, abi.clone(), None)); } @@ -690,7 +525,9 @@ where }; if let Some(calldata) = calldata { - let calldata = calldata.calldata(deployed_contracts, None, node)?; + let calldata = calldata + .calldata(&self.deployed_contracts, None, node) + .await?; code.extend(calldata); } @@ -703,7 +540,7 @@ where TransactionBuilder::::with_deploy_code(tx, code) }; - let receipt = match node.execute_transaction(tx) { + let receipt = match node.execute_transaction(tx).await { Ok(receipt) => receipt, Err(error) => { tracing::error!( @@ -725,35 +562,46 @@ where "Deployed contract" ); - deployed_contracts.insert(contract_instance.clone(), (address, abi.clone())); + self.deployed_contracts + .insert(contract_instance.clone(), (address, abi.clone())); Ok((address, abi, Some(receipt))) } } -pub struct Driver<'a, Leader: Platform, Follower: Platform> { +pub struct CaseDriver<'a, Leader: Platform, Follower: Platform> { metadata: &'a Metadata, - config: &'a Arguments, + case: &'a Case, + case_idx: CaseIdx, leader_node: &'a Leader::Blockchain, follower_node: &'a Follower::Blockchain, + leader_state: CaseState, + follower_state: CaseState, } -impl<'a, L, F> Driver<'a, L, F> +impl<'a, L, F> CaseDriver<'a, L, F> where L: Platform, F: 
Platform, { + #[allow(clippy::too_many_arguments)] pub fn new( metadata: &'a Metadata, - config: &'a Arguments, + case: &'a Case, + case_idx: impl Into, leader_node: &'a L::Blockchain, follower_node: &'a F::Blockchain, - ) -> Driver<'a, L, F> { + leader_state: CaseState, + follower_state: CaseState, + ) -> CaseDriver<'a, L, F> { Self { metadata, - config, + case, + case_idx: case_idx.into(), leader_node, follower_node, + leader_state, + follower_state, } } @@ -783,37 +631,7 @@ where } } - // A note on this function and the choice of how we handle errors that happen here. This is not - // a doc comment since it's a comment for the maintainers of this code and not for the users of - // this code. - // - // This function does a few things: it builds the contracts for the various SOLC modes needed. - // It deploys the contracts to the chain, and it executes the various inputs that are specified - // for the test cases. - // - // In most functions in the codebase, it's fine to just say "If we encounter an error just - // bubble it up to the caller", but this isn't a good idea to do here and we need an elaborate - // way to report errors all while being graceful and continuing execution where we can. For - // example, if one of the inputs of one of the cases fail to execute, then we should not just - // bubble that error up immediately. Instead, we should note it down and continue to the next - // case as the next case might succeed. - // - // Therefore, this method returns an `ExecutionResult` object, and not just a normal `Result`. - // This object is fully typed to contain information about what exactly in the execution was a - // success and what failed. - // - // The above then allows us to have better logging and better information in the caller of this - // function as we have a more detailed view of what worked and what didn't. 
- pub fn execute(&mut self, span: Span) -> ExecutionResult { - // This is the execution result object that all of the execution information will be - // collected into and returned at the end of the execution. - let mut execution_result = ExecutionResult::default(); - - let tracing_span = tracing::info_span!("Handling metadata file"); - let _guard = tracing_span.enter(); - - // We only execute this input if it's valid for the leader and the follower. Otherwise, we - // skip it with a warning. + pub async fn execute(&mut self) -> anyhow::Result { if !self .leader_node .matches_target(self.metadata.targets.as_deref()) @@ -825,422 +643,40 @@ where targets = ?self.metadata.targets, "Either the leader or follower node do not support the targets of the file" ); - return execution_result; + return Ok(0); } - for mode in self.metadata.solc_modes() { - let tracing_span = tracing::info_span!("With solc mode", solc_mode = ?mode); + let mut inputs_executed = 0; + for (input_idx, input) in self.case.inputs_iterator().enumerate() { + let tracing_span = tracing::info_span!("Handling input", input_idx); let _guard = tracing_span.enter(); - let mut leader_state = State::::new(self.config, span); - let mut follower_state = State::::new(self.config, span); + let (leader_receipt, _, leader_diff) = self + .leader_state + .handle_input(self.metadata, self.case_idx, &input, self.leader_node) + .await?; + let (follower_receipt, _, follower_diff) = self + .follower_state + .handle_input(self.metadata, self.case_idx, &input, self.follower_node) + .await?; - // Note: we are currently forced to do two compilation passes due to linking. In the - // first compilation pass we compile the libraries and publish them to the chain. In the - // second compilation pass we compile the contracts with the library addresses so that - // they're linked at compile-time. 
- let build_result = tracing::info_span!("Building and publishing libraries") - .in_scope(|| { - match leader_state.build_and_publish_libraries(self.metadata, &mode, self.leader_node) { - Ok(_) => { - tracing::debug!(target = ?Target::Leader, "Library building succeeded"); - execution_result.add_successful_build(Target::Leader, mode.clone()); - }, - Err(error) => { - tracing::error!(target = ?Target::Leader, ?error, "Library building failed"); - execution_result.add_failed_build(Target::Leader, mode.clone(), error); - return Err(()); - } - } - match follower_state.build_and_publish_libraries(self.metadata, &mode, self.follower_node) { - Ok(_) => { - tracing::debug!(target = ?Target::Follower, "Library building succeeded"); - execution_result.add_successful_build(Target::Follower, mode.clone()); - }, - Err(error) => { - tracing::error!(target = ?Target::Follower, ?error, "Library building failed"); - execution_result.add_failed_build(Target::Follower, mode.clone(), error); - return Err(()); - } - } - Ok(()) - }); - if build_result.is_err() { - // Note: We skip to the next solc mode as there's nothing that we can do at this - // point, the building has failed. We do NOT bail out of the execution as a whole. - continue; + if leader_diff == follower_diff { + tracing::debug!("State diffs match between leader and follower."); + } else { + tracing::debug!("State diffs mismatch between leader and follower."); + Self::trace_diff_mode("Leader", &leader_diff); + Self::trace_diff_mode("Follower", &follower_diff); } - // We build the contracts. If building the contracts for the metadata file fails then we - // have no other option but to keep note of this error and move on to the next solc mode - // and NOT just bail out of the execution as a whole. 
- let build_result = tracing::info_span!("Building contracts").in_scope(|| { - match leader_state.build_contracts(&mode, self.metadata) { - Ok(_) => { - tracing::debug!(target = ?Target::Leader, "Contract building succeeded"); - execution_result.add_successful_build(Target::Leader, mode.clone()); - }, - Err(error) => { - tracing::error!(target = ?Target::Leader, ?error, "Contract building failed"); - execution_result.add_failed_build(Target::Leader, mode.clone(), error); - return Err(()); - } - } - match follower_state.build_contracts(&mode, self.metadata) { - Ok(_) => { - tracing::debug!(target = ?Target::Follower, "Contract building succeeded"); - execution_result.add_successful_build(Target::Follower, mode.clone()); - }, - Err(error) => { - tracing::error!(target = ?Target::Follower, ?error, "Contract building failed"); - execution_result.add_failed_build(Target::Follower, mode.clone(), error); - return Err(()); - } - } - Ok(()) - }); - if build_result.is_err() { - // Note: We skip to the next solc mode as there's nothing that we can do at this - // point, the building has failed. We do NOT bail out of the execution as a whole. - continue; + if leader_receipt.logs() != follower_receipt.logs() { + tracing::debug!("Log/event mismatch between leader and follower."); + tracing::trace!("Leader logs: {:?}", leader_receipt.logs()); + tracing::trace!("Follower logs: {:?}", follower_receipt.logs()); } - // For cases if one of the inputs fail then we move on to the next case and we do NOT - // bail out of the whole thing. - 'case_loop: for (case_idx, case) in self.metadata.cases.iter().enumerate() { - let tracing_span = tracing::info_span!( - "Handling case", - case_name = case.name, - case_idx = case_idx - ); - let _guard = tracing_span.enter(); - - let case_idx = CaseIdx::new(case_idx); - - // For inputs if one of the inputs fail we move on to the next case (we do not move - // on to the next input as it doesn't make sense. It depends on the previous one). 
- for (input_idx, input) in case.inputs_iterator().enumerate() { - let tracing_span = tracing::info_span!("Handling input", input_idx); - let _guard = tracing_span.enter(); - - let execution_result = - tracing::info_span!("Executing input", contract_name = ?input.instance) - .in_scope(|| { - let (leader_receipt, _, leader_diff) = match leader_state - .handle_input( - self.metadata, - case_idx, - &input, - self.leader_node, - &mode, - ) { - Ok(result) => result, - Err(error) => { - tracing::error!( - target = ?Target::Leader, - ?error, - "Contract execution failed" - ); - execution_result.add_failed_case( - Target::Leader, - mode.clone(), - case.name - .as_deref() - .unwrap_or("no case name") - .to_owned(), - case_idx, - input_idx, - anyhow::Error::msg(format!("{error}")), - ); - return Err(error); - } - }; - - let (follower_receipt, _, follower_diff) = match follower_state - .handle_input( - self.metadata, - case_idx, - &input, - self.follower_node, - &mode, - ) { - Ok(result) => result, - Err(error) => { - tracing::error!( - target = ?Target::Follower, - ?error, - "Contract execution failed" - ); - execution_result.add_failed_case( - Target::Follower, - mode.clone(), - case.name - .as_deref() - .unwrap_or("no case name") - .to_owned(), - case_idx, - input_idx, - anyhow::Error::msg(format!("{error}")), - ); - return Err(error); - } - }; - - Ok((leader_receipt, leader_diff, follower_receipt, follower_diff)) - }); - let Ok((leader_receipt, leader_diff, follower_receipt, follower_diff)) = - execution_result - else { - // Noting it again here: if something in the input fails we do not move on - // to the next input, we move to the next case completely. 
- continue 'case_loop; - }; - - if leader_diff == follower_diff { - tracing::debug!("State diffs match between leader and follower."); - } else { - tracing::debug!("State diffs mismatch between leader and follower."); - Self::trace_diff_mode("Leader", &leader_diff); - Self::trace_diff_mode("Follower", &follower_diff); - } - - if leader_receipt.logs() != follower_receipt.logs() { - tracing::debug!("Log/event mismatch between leader and follower."); - tracing::trace!("Leader logs: {:?}", leader_receipt.logs()); - tracing::trace!("Follower logs: {:?}", follower_receipt.logs()); - } - } - - // Note: Only consider the case as having been successful after we have processed - // all of the inputs and completed the entire loop over the input. - execution_result.add_successful_case( - Target::Leader, - mode.clone(), - case.name.clone().unwrap_or("no case name".to_owned()), - case_idx, - ); - execution_result.add_successful_case( - Target::Follower, - mode.clone(), - case.name.clone().unwrap_or("no case name".to_owned()), - case_idx, - ); - } + inputs_executed += 1; } - execution_result - } -} - -#[derive(Debug, Default)] -pub struct ExecutionResult { - pub results: Vec>, - pub successful_cases_count: usize, - pub failed_cases_count: usize, -} - -impl ExecutionResult { - pub fn new() -> Self { - Self { - results: Default::default(), - successful_cases_count: Default::default(), - failed_cases_count: Default::default(), - } - } - - pub fn add_successful_build(&mut self, target: Target, solc_mode: SolcMode) { - self.results - .push(Box::new(BuildResult::Success { target, solc_mode })); - } - - pub fn add_failed_build(&mut self, target: Target, solc_mode: SolcMode, error: anyhow::Error) { - self.results.push(Box::new(BuildResult::Failure { - target, - solc_mode, - error, - })); - } - - pub fn add_successful_case( - &mut self, - target: Target, - solc_mode: SolcMode, - case_name: String, - case_idx: CaseIdx, - ) { - self.successful_cases_count += 1; - 
self.results.push(Box::new(CaseResult::Success { - target, - solc_mode, - case_name, - case_idx, - })); - } - - pub fn add_failed_case( - &mut self, - target: Target, - solc_mode: SolcMode, - case_name: String, - case_idx: CaseIdx, - input_idx: usize, - error: anyhow::Error, - ) { - self.failed_cases_count += 1; - self.results.push(Box::new(CaseResult::Failure { - target, - solc_mode, - case_name, - case_idx, - error, - input_idx, - })); - } -} - -pub trait ExecutionResultItem: Debug { - /// Converts this result item into an [`anyhow::Result`]. - fn as_result(&self) -> Result<(), &anyhow::Error>; - - /// Provides information on whether the provided result item is of a success or failure. - fn is_success(&self) -> bool; - - /// Provides information of the target that this result is for. - fn target(&self) -> &Target; - - /// Provides information on the [`SolcMode`] mode that we being used for this result item. - fn solc_mode(&self) -> &SolcMode; - - /// Provides information on the case name and number that this result item pertains to. This is - /// [`None`] if the error doesn't belong to any case (e.g., if it's a build error outside of any - /// of the cases.). - fn case_name_and_index(&self) -> Option<(&str, &CaseIdx)>; - - /// Provides information on the input number that this result item pertains to. This is [`None`] - /// if the error doesn't belong to any input (e.g., if it's a build error outside of any of the - /// inputs.). - fn input_index(&self) -> Option; -} - -#[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] -pub enum Target { - Leader, - Follower, -} - -#[derive(Debug)] -pub enum BuildResult { - Success { - target: Target, - solc_mode: SolcMode, - }, - Failure { - target: Target, - solc_mode: SolcMode, - error: anyhow::Error, - }, -} - -impl ExecutionResultItem for BuildResult { - fn as_result(&self) -> Result<(), &anyhow::Error> { - match self { - Self::Success { .. } => Ok(()), - Self::Failure { error, .. 
} => Err(error)?, - } - } - - fn is_success(&self) -> bool { - match self { - Self::Success { .. } => true, - Self::Failure { .. } => false, - } - } - - fn target(&self) -> &Target { - match self { - Self::Success { target, .. } | Self::Failure { target, .. } => target, - } - } - - fn solc_mode(&self) -> &SolcMode { - match self { - Self::Success { solc_mode, .. } | Self::Failure { solc_mode, .. } => solc_mode, - } - } - - fn case_name_and_index(&self) -> Option<(&str, &CaseIdx)> { - None - } - - fn input_index(&self) -> Option { - None - } -} - -#[derive(Debug)] -pub enum CaseResult { - Success { - target: Target, - solc_mode: SolcMode, - case_name: String, - case_idx: CaseIdx, - }, - Failure { - target: Target, - solc_mode: SolcMode, - case_name: String, - case_idx: CaseIdx, - input_idx: usize, - error: anyhow::Error, - }, -} - -impl ExecutionResultItem for CaseResult { - fn as_result(&self) -> Result<(), &anyhow::Error> { - match self { - Self::Success { .. } => Ok(()), - Self::Failure { error, .. } => Err(error)?, - } - } - - fn is_success(&self) -> bool { - match self { - Self::Success { .. } => true, - Self::Failure { .. } => false, - } - } - - fn target(&self) -> &Target { - match self { - Self::Success { target, .. } | Self::Failure { target, .. } => target, - } - } - - fn solc_mode(&self) -> &SolcMode { - match self { - Self::Success { solc_mode, .. } | Self::Failure { solc_mode, .. } => solc_mode, - } - } - - fn case_name_and_index(&self) -> Option<(&str, &CaseIdx)> { - match self { - Self::Success { - case_name, - case_idx, - .. - } - | Self::Failure { - case_name, - case_idx, - .. - } => Some((case_name, case_idx)), - } - } - - fn input_index(&self) -> Option { - match self { - CaseResult::Success { .. } => None, - CaseResult::Failure { input_idx, .. 
} => Some(*input_idx), - } + Ok(inputs_executed) } } diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs index c332803..1e5e5c9 100644 --- a/crates/core/src/lib.rs +++ b/crates/core/src/lib.rs @@ -26,7 +26,7 @@ pub trait Platform { pub struct Geth; impl Platform for Geth { - type Blockchain = geth::Instance; + type Blockchain = geth::GethNode; type Compiler = solc::Solc; fn config_id() -> TestingPlatform { diff --git a/crates/core/src/main.rs b/crates/core/src/main.rs index c8403af..660c6d7 100644 --- a/crates/core/src/main.rs +++ b/crates/core/src/main.rs @@ -1,37 +1,75 @@ -use std::{collections::HashMap, sync::LazyLock}; +use std::{ + collections::HashMap, + path::Path, + sync::{Arc, LazyLock}, +}; +use alloy::{ + json_abi::JsonAbi, + network::{Ethereum, TransactionBuilder}, + primitives::Address, + rpc::types::TransactionRequest, +}; +use anyhow::Context; use clap::Parser; -use rayon::{ThreadPoolBuilder, prelude::*}; +use futures::StreamExt; +use revive_dt_common::iterators::FilesWithExtensionIterator; +use revive_dt_node_interaction::EthereumNode; +use semver::Version; +use temp_dir::TempDir; +use tokio::sync::{Mutex, RwLock}; +use tracing::{Instrument, Level}; +use tracing_subscriber::{EnvFilter, FmtSubscriber}; +use revive_dt_compiler::SolidityCompiler; +use revive_dt_compiler::{Compiler, CompilerOutput}; use revive_dt_config::*; use revive_dt_core::{ Geth, Kitchensink, Platform, - driver::{Driver, State}, + driver::{CaseDriver, CaseState}, +}; +use revive_dt_format::{ + case::{Case, CaseIdx}, + corpus::Corpus, + input::Input, + metadata::{ContractInstance, ContractPathAndIdent, Metadata, MetadataFile}, + mode::SolcMode, }; -use revive_dt_format::{corpus::Corpus, metadata::MetadataFile}; use revive_dt_node::pool::NodePool; use revive_dt_report::reporter::{Report, Span}; -use temp_dir::TempDir; -use tracing::Level; -use tracing_subscriber::{EnvFilter, FmtSubscriber}; static TEMP_DIR: LazyLock = LazyLock::new(|| TempDir::new().unwrap()); +type 
CompilationCache<'a> = Arc< + RwLock< + HashMap< + (&'a Path, SolcMode, TestingPlatform), + Arc>>>, + >, + >, +>; + fn main() -> anyhow::Result<()> { let args = init_cli()?; - for (corpus, tests) in collect_corpora(&args)? { - let span = Span::new(corpus, args.clone())?; - - match &args.compile_only { - Some(platform) => compile_corpus(&args, &tests, platform, span), - None => execute_corpus(&args, &tests, span)?, + let body = async { + for (corpus, tests) in collect_corpora(&args)? { + let span = Span::new(corpus, args.clone())?; + match &args.compile_only { + Some(platform) => compile_corpus(&args, &tests, platform, span).await, + None => execute_corpus(&args, &tests, span).await?, + } + Report::save()?; } + Ok(()) + }; - Report::save()?; - } - - Ok(()) + tokio::runtime::Builder::new_multi_thread() + .worker_threads(args.number_of_threads) + .enable_all() + .build() + .expect("Failed building the Runtime") + .block_on(body) } fn init_cli() -> anyhow::Result { @@ -62,10 +100,6 @@ fn init_cli() -> anyhow::Result { } tracing::info!("workdir: {}", args.directory().display()); - ThreadPoolBuilder::new() - .num_threads(args.workers) - .build_global()?; - Ok(args) } @@ -83,7 +117,11 @@ fn collect_corpora(args: &Arguments) -> anyhow::Result(args: &Arguments, tests: &[MetadataFile], span: Span) -> anyhow::Result<()> +async fn run_driver( + args: &Arguments, + tests: &[MetadataFile], + span: Span, +) -> anyhow::Result<()> where L: Platform, F: Platform, @@ -93,61 +131,389 @@ where let leader_nodes = NodePool::::new(args)?; let follower_nodes = NodePool::::new(args)?; - tests.par_iter().for_each( - |MetadataFile { - content: metadata, - path: metadata_file_path, - }| { - // Starting a new tracing span for this metadata file. This allows our logs to be clear - // about which metadata file the logs belong to. We can add other information into this - // as well to be able to associate the logs with the correct metadata file and case - // that's being executed. 
- let tracing_span = tracing::span!( - Level::INFO, - "Running driver", - metadata_file_path = metadata_file_path.display().to_string(), - ); - let _guard = tracing_span.enter(); + let test_cases = tests + .iter() + .flat_map( + |MetadataFile { + path, + content: metadata, + }| { + metadata + .cases + .iter() + .enumerate() + .flat_map(move |(case_idx, case)| { + metadata + .solc_modes() + .into_iter() + .map(move |solc_mode| (path, metadata, case_idx, case, solc_mode)) + }) + }, + ) + .collect::>(); - let mut driver = Driver::::new( - metadata, - args, - leader_nodes.round_robbin(), - follower_nodes.round_robbin(), - ); - - let execution_result = driver.execute(span); - tracing::info!( - case_success_count = execution_result.successful_cases_count, - case_failure_count = execution_result.failed_cases_count, - "Execution completed" - ); - - let mut error_count = 0; - for result in execution_result.results.iter() { - if !result.is_success() { - tracing::error!(execution_error = ?result, "Encountered an error"); - error_count += 1; + let compilation_cache = Arc::new(RwLock::new(HashMap::new())); + futures::stream::iter(test_cases) + .for_each_concurrent( + None, + |(metadata_file_path, metadata, case_idx, case, solc_mode)| { + let compilation_cache = compilation_cache.clone(); + let leader_node = leader_nodes.round_robbin(); + let follower_node = follower_nodes.round_robbin(); + let tracing_span = tracing::span!( + Level::INFO, + "Running driver", + metadata_file_path = %metadata_file_path.display(), + case_idx = case_idx, + solc_mode = ?solc_mode, + ); + async move { + let result = handle_case_driver::( + metadata_file_path.as_path(), + metadata, + case_idx.into(), + case, + solc_mode, + args, + compilation_cache.clone(), + leader_node, + follower_node, + span, + ) + .await; + match result { + Ok(inputs_executed) => { + tracing::info!(inputs_executed, "Execution succeeded") + } + Err(error) => tracing::info!(%error, "Execution failed"), + } + 
tracing::info!("Execution completed"); } - } - if error_count == 0 { - tracing::info!("Execution succeeded"); - } else { - tracing::info!("Execution failed"); - } - }, - ); + .instrument(tracing_span) + }, + ) + .await; Ok(()) } -fn execute_corpus(args: &Arguments, tests: &[MetadataFile], span: Span) -> anyhow::Result<()> { +#[allow(clippy::too_many_arguments)] +async fn handle_case_driver<'a, L, F>( + metadata_file_path: &'a Path, + metadata: &'a Metadata, + case_idx: CaseIdx, + case: &Case, + mode: SolcMode, + config: &Arguments, + compilation_cache: CompilationCache<'a>, + leader_node: &L::Blockchain, + follower_node: &F::Blockchain, + _: Span, +) -> anyhow::Result +where + L: Platform, + F: Platform, + L::Blockchain: revive_dt_node::Node + Send + Sync + 'static, + F::Blockchain: revive_dt_node::Node + Send + Sync + 'static, +{ + let leader_pre_link_contracts = get_or_build_contracts::( + metadata, + metadata_file_path, + mode.clone(), + config, + compilation_cache.clone(), + &HashMap::new(), + ) + .await?; + let follower_pre_link_contracts = get_or_build_contracts::( + metadata, + metadata_file_path, + mode.clone(), + config, + compilation_cache.clone(), + &HashMap::new(), + ) + .await?; + + let mut leader_deployed_libraries = HashMap::new(); + let mut follower_deployed_libraries = HashMap::new(); + let mut contract_sources = metadata.contract_sources()?; + for library_instance in metadata + .libraries + .iter() + .flatten() + .flat_map(|(_, map)| map.values()) + { + let ContractPathAndIdent { + contract_source_path: library_source_path, + contract_ident: library_ident, + } = contract_sources + .remove(library_instance) + .context("Failed to find the contract source")?; + + let (leader_code, leader_abi) = leader_pre_link_contracts + .1 + .contracts + .get(&library_source_path) + .and_then(|contracts| contracts.get(library_ident.as_str())) + .context("Declared library was not compiled")?; + let (follower_code, follower_abi) = follower_pre_link_contracts + .1 + 
.contracts + .get(&library_source_path) + .and_then(|contracts| contracts.get(library_ident.as_str())) + .context("Declared library was not compiled")?; + + let leader_code = match alloy::hex::decode(leader_code) { + Ok(code) => code, + Err(error) => { + tracing::error!( + ?error, + contract_source_path = library_source_path.display().to_string(), + contract_ident = library_ident.as_ref(), + "Failed to hex-decode byte code - This could possibly mean that the bytecode requires linking" + ); + anyhow::bail!("Failed to hex-decode the byte code {}", error) + } + }; + let follower_code = match alloy::hex::decode(follower_code) { + Ok(code) => code, + Err(error) => { + tracing::error!( + ?error, + contract_source_path = library_source_path.display().to_string(), + contract_ident = library_ident.as_ref(), + "Failed to hex-decode byte code - This could possibly mean that the bytecode requires linking" + ); + anyhow::bail!("Failed to hex-decode the byte code {}", error) + } + }; + + // Getting the deployer address from the cases themselves. This is to ensure that we're + // doing the deployments from different accounts and therefore we're not slowed down by + // the nonce. + let deployer_address = case + .inputs + .iter() + .map(|input| input.caller) + .next() + .unwrap_or(Input::default_caller()); + let leader_tx = TransactionBuilder::::with_deploy_code( + TransactionRequest::default().from(deployer_address), + leader_code, + ); + let follower_tx = TransactionBuilder::::with_deploy_code( + TransactionRequest::default().from(deployer_address), + follower_code, + ); + + let leader_receipt = match leader_node.execute_transaction(leader_tx).await { + Ok(receipt) => receipt, + Err(error) => { + tracing::error!( + node = std::any::type_name::(), + ?error, + "Contract deployment transaction failed." 
+ );
+ return Err(error);
+ }
+ };
+ let follower_receipt = match follower_node.execute_transaction(follower_tx).await {
+ Ok(receipt) => receipt,
+ Err(error) => {
+ tracing::error!(
+ node = std::any::type_name::(),
+ ?error,
+ "Contract deployment transaction failed."
+ );
+ return Err(error);
+ }
+ };
+
+ let Some(leader_library_address) = leader_receipt.contract_address else {
+ tracing::error!("Contract deployment transaction didn't return an address");
+ anyhow::bail!("Contract deployment didn't return an address");
+ };
+ let Some(follower_library_address) = follower_receipt.contract_address else {
+ tracing::error!("Contract deployment transaction didn't return an address");
+ anyhow::bail!("Contract deployment didn't return an address");
+ };
+
+ leader_deployed_libraries.insert(
+ library_instance.clone(),
+ (leader_library_address, leader_abi.clone()),
+ );
+ follower_deployed_libraries.insert(
+ library_instance.clone(),
+ (follower_library_address, follower_abi.clone()),
+ );
+ }
+
+ let metadata_file_contains_libraries = metadata
+ .libraries
+ .iter()
+ .flat_map(|map| map.iter())
+ .flat_map(|(_, value)| value.iter())
+ .next()
+ .is_some();
+ let compiled_contracts_require_linking = leader_pre_link_contracts
+ .1
+ .contracts
+ .values()
+ .chain(follower_pre_link_contracts.1.contracts.values())
+ .flat_map(|value| value.values())
+ .any(|(code, _)| !code.chars().all(|char| char.is_ascii_hexdigit()));
+ let (leader_compiled_contracts, follower_compiled_contracts) =
+ if metadata_file_contains_libraries && compiled_contracts_require_linking {
+ let leader_key = (metadata_file_path, mode.clone(), L::config_id());
+ let follower_key = (metadata_file_path, mode.clone(), F::config_id());
+ {
+ let mut cache = compilation_cache.write().await;
+ cache.remove(&leader_key);
+ cache.remove(&follower_key);
+ }
+
+ let leader_post_link_contracts = get_or_build_contracts::<L>(
+ metadata,
+ metadata_file_path,
+ mode.clone(),
+ config,
+
compilation_cache.clone(), + &leader_deployed_libraries, + ) + .await?; + let follower_post_link_contracts = get_or_build_contracts::( + metadata, + metadata_file_path, + mode.clone(), + config, + compilation_cache, + &follower_deployed_libraries, + ) + .await?; + + (leader_post_link_contracts, follower_post_link_contracts) + } else { + (leader_pre_link_contracts, follower_pre_link_contracts) + }; + + let leader_state = CaseState::::new( + leader_compiled_contracts.0.clone(), + leader_compiled_contracts.1.contracts.clone(), + leader_deployed_libraries, + ); + let follower_state = CaseState::::new( + follower_compiled_contracts.0.clone(), + follower_compiled_contracts.1.contracts.clone(), + follower_deployed_libraries, + ); + + let mut driver = CaseDriver::::new( + metadata, + case, + case_idx, + leader_node, + follower_node, + leader_state, + follower_state, + ); + driver.execute().await +} + +async fn get_or_build_contracts<'a, P: Platform>( + metadata: &'a Metadata, + metadata_file_path: &'a Path, + mode: SolcMode, + config: &Arguments, + compilation_cache: CompilationCache<'a>, + deployed_libraries: &HashMap, +) -> anyhow::Result> { + let key = (metadata_file_path, mode.clone(), P::config_id()); + if let Some(compilation_artifact) = compilation_cache.read().await.get(&key).cloned() { + let mut compilation_artifact = compilation_artifact.lock().await; + match *compilation_artifact { + Some(ref compiled_contracts) => { + tracing::debug!(?key, "Compiled contracts cache hit"); + return Ok(compiled_contracts.clone()); + } + None => { + tracing::debug!(?key, "Compiled contracts cache miss"); + let compiled_contracts = Arc::new( + compile_contracts::
<P>
(metadata, &mode, config, deployed_libraries).await?, + ); + *compilation_artifact = Some(compiled_contracts.clone()); + return Ok(compiled_contracts.clone()); + } + } + }; + + tracing::debug!(?key, "Compiled contracts cache miss"); + let mutex = { + let mut compilation_cache = compilation_cache.write().await; + let mutex = Arc::new(Mutex::new(None)); + compilation_cache.insert(key, mutex.clone()); + mutex + }; + let mut compilation_artifact = mutex.lock().await; + let compiled_contracts = + Arc::new(compile_contracts::
<P>
(metadata, &mode, config, deployed_libraries).await?); + *compilation_artifact = Some(compiled_contracts.clone()); + Ok(compiled_contracts.clone()) +} + +async fn compile_contracts( + metadata: &Metadata, + mode: &SolcMode, + config: &Arguments, + deployed_libraries: &HashMap, +) -> anyhow::Result<(Version, CompilerOutput)> { + let compiler_version_or_requirement = mode.compiler_version_to_use(config.solc.clone()); + let compiler_path = + P::Compiler::get_compiler_executable(config, compiler_version_or_requirement).await?; + let compiler_version = P::Compiler::new(compiler_path.clone()).version()?; + + let compiler = Compiler::::new() + .with_allow_path(metadata.directory()?) + .with_optimization(mode.solc_optimize()); + let mut compiler = metadata + .files_to_compile()? + .try_fold(compiler, |compiler, path| compiler.with_source(&path))?; + for (library_instance, (library_address, _)) in deployed_libraries.iter() { + let library_ident = &metadata + .contracts + .as_ref() + .and_then(|contracts| contracts.get(library_instance)) + .expect("Impossible for library to not be found in contracts") + .contract_ident; + + // Note the following: we need to tell solc which files require the libraries to be + // linked into them. We do not have access to this information and therefore we choose + // an easier, yet more compute intensive route, of telling solc that all of the files + // need to link the library and it will only perform the linking for the files that do + // actually need the library. + compiler = FilesWithExtensionIterator::new(metadata.directory()?) 
+ .with_allowed_extension("sol") + .fold(compiler, |compiler, path| { + compiler.with_library(&path, library_ident.as_str(), *library_address) + }); + } + + let compiler_output = compiler.try_build(compiler_path).await?; + + Ok((compiler_version, compiler_output)) +} + +async fn execute_corpus( + args: &Arguments, + tests: &[MetadataFile], + span: Span, +) -> anyhow::Result<()> { match (&args.leader, &args.follower) { (TestingPlatform::Geth, TestingPlatform::Kitchensink) => { - run_driver::(args, tests, span)? + run_driver::(args, tests, span).await? } (TestingPlatform::Geth, TestingPlatform::Geth) => { - run_driver::(args, tests, span)? + run_driver::(args, tests, span).await? } _ => unimplemented!(), } @@ -155,24 +521,41 @@ fn execute_corpus(args: &Arguments, tests: &[MetadataFile], span: Span) -> anyho Ok(()) } -fn compile_corpus( +async fn compile_corpus( config: &Arguments, tests: &[MetadataFile], platform: &TestingPlatform, - span: Span, + _: Span, ) { - tests.par_iter().for_each(|metadata| { - for mode in &metadata.solc_modes() { + let tests = tests.iter().flat_map(|metadata| { + metadata + .solc_modes() + .into_iter() + .map(move |solc_mode| (metadata, solc_mode)) + }); + + futures::stream::iter(tests) + .for_each_concurrent(None, |(metadata, mode)| async move { match platform { TestingPlatform::Geth => { - let mut state = State::::new(config, span); - let _ = state.build_contracts(mode, metadata); + let _ = compile_contracts::( + &metadata.content, + &mode, + config, + &Default::default(), + ) + .await; } TestingPlatform::Kitchensink => { - let mut state = State::::new(config, span); - let _ = state.build_contracts(mode, metadata); + let _ = compile_contracts::( + &metadata.content, + &mode, + config, + &Default::default(), + ) + .await; } - }; - } - }); + } + }) + .await; } diff --git a/crates/format/Cargo.toml b/crates/format/Cargo.toml index f5150ac..48d754b 100644 --- a/crates/format/Cargo.toml +++ b/crates/format/Cargo.toml @@ -19,3 +19,6 @@ tracing = 
{ workspace = true } semver = { workspace = true } serde = { workspace = true, features = ["derive"] } serde_json = { workspace = true } + +[dev-dependencies] +tokio = { workspace = true } diff --git a/crates/format/src/input.rs b/crates/format/src/input.rs index 8031553..7eebf7f 100644 --- a/crates/format/src/input.rs +++ b/crates/format/src/input.rs @@ -198,7 +198,7 @@ impl Input { .ok_or_else(|| anyhow::anyhow!("instance {instance:?} not deployed")) } - pub fn encoded_input<'a>( + pub async fn encoded_input<'a>( &'a self, deployed_contracts: &HashMap, variables: impl Into>> + Clone, @@ -206,9 +206,10 @@ impl Input { ) -> anyhow::Result { match self.method { Method::Deployer | Method::Fallback => { - let calldata = - self.calldata - .calldata(deployed_contracts, variables, chain_state_provider)?; + let calldata = self + .calldata + .calldata(deployed_contracts, variables, chain_state_provider) + .await?; Ok(calldata.into()) } @@ -254,12 +255,14 @@ impl Input { // a new buffer for each one of the resolved arguments. let mut calldata = Vec::::with_capacity(4 + self.calldata.size_requirement()); calldata.extend(function.selector().0); - self.calldata.calldata_into_slice( - &mut calldata, - deployed_contracts, - variables, - chain_state_provider, - )?; + self.calldata + .calldata_into_slice( + &mut calldata, + deployed_contracts, + variables, + chain_state_provider, + ) + .await?; Ok(calldata.into()) } @@ -267,13 +270,15 @@ impl Input { } /// Parse this input into a legacy transaction. 
- pub fn legacy_transaction<'a>( + pub async fn legacy_transaction<'a>( &'a self, deployed_contracts: &HashMap, variables: impl Into>> + Clone, chain_state_provider: &impl ResolverApi, ) -> anyhow::Result { - let input_data = self.encoded_input(deployed_contracts, variables, chain_state_provider)?; + let input_data = self + .encoded_input(deployed_contracts, variables, chain_state_provider) + .await?; let transaction_request = TransactionRequest::default().from(self.caller).value( self.value .map(|value| value.into_inner()) @@ -351,7 +356,7 @@ impl Calldata { } } - pub fn calldata<'a>( + pub async fn calldata<'a>( &'a self, deployed_contracts: &HashMap, variables: impl Into>> + Clone, @@ -363,11 +368,12 @@ impl Calldata { deployed_contracts, variables, chain_state_provider, - )?; + ) + .await?; Ok(buffer) } - pub fn calldata_into_slice<'a>( + pub async fn calldata_into_slice<'a>( &'a self, buffer: &mut Vec, deployed_contracts: &HashMap, @@ -380,7 +386,10 @@ impl Calldata { } Calldata::Compound(items) => { for (arg_idx, arg) in items.iter().enumerate() { - match arg.resolve(deployed_contracts, variables.clone(), chain_state_provider) { + match arg + .resolve(deployed_contracts, variables.clone(), chain_state_provider) + .await + { Ok(resolved) => { buffer.extend(resolved.to_be_bytes::<32>()); } @@ -403,7 +412,7 @@ impl Calldata { } /// Checks if this [`Calldata`] is equivalent to the passed calldata bytes. 
- pub fn is_equivalent<'a>( + pub async fn is_equivalent<'a>( &'a self, other: &[u8], deployed_contracts: &HashMap, @@ -430,8 +439,9 @@ impl Calldata { std::borrow::Cow::Borrowed(other) }; - let this = - this.resolve(deployed_contracts, variables.clone(), chain_state_provider)?; + let this = this + .resolve(deployed_contracts, variables.clone(), chain_state_provider) + .await?; let other = U256::from_be_slice(&other); if this != other { return Ok(false); @@ -444,7 +454,7 @@ impl Calldata { } impl CalldataItem { - fn resolve<'a>( + async fn resolve<'a>( &'a self, deployed_contracts: &HashMap, variables: impl Into>> + Clone, @@ -456,7 +466,7 @@ impl CalldataItem { .calldata_tokens() .map(|token| token.resolve(deployed_contracts, variables.clone(), chain_state_provider)) { - let token = token?; + let token = token.await?; let new_token = match token { CalldataToken::Item(_) => token, CalldataToken::Operation(operation) => { @@ -555,7 +565,7 @@ impl> CalldataToken { /// This piece of code is taken from the matter-labs-tester repository which is licensed under /// MIT or Apache. The original source code can be found here: /// https://github.com/matter-labs/era-compiler-tester/blob/0ed598a27f6eceee7008deab3ff2311075a2ec69/compiler_tester/src/test/case/input/value.rs#L43-L146 - fn resolve<'a>( + async fn resolve<'a>( self, deployed_contracts: &HashMap, variables: impl Into>> + Clone, @@ -589,18 +599,22 @@ impl> CalldataToken { anyhow::anyhow!("Invalid hexadecimal literal: {}", error) })?) 
} else if item == Self::CHAIN_VARIABLE { - let chain_id = chain_state_provider.chain_id()?; + let chain_id = chain_state_provider.chain_id().await?; Ok(U256::from(chain_id)) } else if item == Self::GAS_LIMIT_VARIABLE { - let gas_limit = - chain_state_provider.block_gas_limit(BlockNumberOrTag::Latest)?; + let gas_limit = chain_state_provider + .block_gas_limit(BlockNumberOrTag::Latest) + .await?; Ok(U256::from(gas_limit)) } else if item == Self::COINBASE_VARIABLE { - let coinbase = chain_state_provider.block_coinbase(BlockNumberOrTag::Latest)?; + let coinbase = chain_state_provider + .block_coinbase(BlockNumberOrTag::Latest) + .await?; Ok(U256::from_be_slice(coinbase.as_ref())) } else if item == Self::DIFFICULTY_VARIABLE { - let block_difficulty = - chain_state_provider.block_difficulty(BlockNumberOrTag::Latest)?; + let block_difficulty = chain_state_provider + .block_difficulty(BlockNumberOrTag::Latest) + .await?; Ok(block_difficulty) } else if item.starts_with(Self::BLOCK_HASH_VARIABLE_PREFIX) { let offset: u64 = item @@ -609,19 +623,21 @@ impl> CalldataToken { .and_then(|value| value.parse().ok()) .unwrap_or_default(); - let current_block_number = chain_state_provider.last_block_number()?; + let current_block_number = chain_state_provider.last_block_number().await?; let desired_block_number = current_block_number - offset; - let block_hash = - chain_state_provider.block_hash(desired_block_number.into())?; + let block_hash = chain_state_provider + .block_hash(desired_block_number.into()) + .await?; Ok(U256::from_be_bytes(block_hash.0)) } else if item == Self::BLOCK_NUMBER_VARIABLE { - let current_block_number = chain_state_provider.last_block_number()?; + let current_block_number = chain_state_provider.last_block_number().await?; Ok(U256::from(current_block_number)) } else if item == Self::BLOCK_TIMESTAMP_VARIABLE { - let timestamp = - chain_state_provider.block_timestamp(BlockNumberOrTag::Latest)?; + let timestamp = chain_state_provider + 
.block_timestamp(BlockNumberOrTag::Latest) + .await?; Ok(U256::from(timestamp)) } else if let Some(variable_name) = item.strip_prefix(Self::VARIABLE_PREFIX) { let Some(variables) = variables.into() else { @@ -682,43 +698,46 @@ mod tests { struct MockResolver; impl ResolverApi for MockResolver { - fn chain_id(&self) -> anyhow::Result { + async fn chain_id(&self) -> anyhow::Result { Ok(0x123) } - fn block_gas_limit(&self, _: alloy::eips::BlockNumberOrTag) -> anyhow::Result { + async fn block_gas_limit(&self, _: alloy::eips::BlockNumberOrTag) -> anyhow::Result { Ok(0x1234) } - fn block_coinbase(&self, _: alloy::eips::BlockNumberOrTag) -> anyhow::Result
<Address>
{ + async fn block_coinbase( + &self, + _: alloy::eips::BlockNumberOrTag, + ) -> anyhow::Result
{ Ok(Address::ZERO) } - fn block_difficulty(&self, _: alloy::eips::BlockNumberOrTag) -> anyhow::Result { + async fn block_difficulty(&self, _: alloy::eips::BlockNumberOrTag) -> anyhow::Result { Ok(U256::from(0x12345u128)) } - fn block_hash( + async fn block_hash( &self, _: alloy::eips::BlockNumberOrTag, ) -> anyhow::Result { Ok([0xEE; 32].into()) } - fn block_timestamp( + async fn block_timestamp( &self, _: alloy::eips::BlockNumberOrTag, ) -> anyhow::Result { Ok(0x123456) } - fn last_block_number(&self) -> anyhow::Result { + async fn last_block_number(&self) -> anyhow::Result { Ok(0x1234567) } } - #[test] - fn test_encoded_input_uint256() { + #[tokio::test] + async fn test_encoded_input_uint256() { let raw_metadata = r#" [ { @@ -755,6 +774,7 @@ mod tests { let encoded = input .encoded_input(&contracts, None, &MockResolver) + .await .unwrap(); assert!(encoded.0.starts_with(&selector)); @@ -763,8 +783,8 @@ mod tests { assert_eq!(decoded.0, 42); } - #[test] - fn test_encoded_input_address_with_signature() { + #[tokio::test] + async fn test_encoded_input_address_with_signature() { let raw_abi = r#"[ { "inputs": [{"name": "recipient", "type": "address"}], @@ -799,6 +819,7 @@ mod tests { let encoded = input .encoded_input(&contracts, None, &MockResolver) + .await .unwrap(); assert!(encoded.0.starts_with(&selector)); @@ -810,8 +831,8 @@ mod tests { ); } - #[test] - fn test_encoded_input_address() { + #[tokio::test] + async fn test_encoded_input_address() { let raw_abi = r#"[ { "inputs": [{"name": "recipient", "type": "address"}], @@ -846,6 +867,7 @@ mod tests { let encoded = input .encoded_input(&contracts, None, &MockResolver) + .await .unwrap(); assert!(encoded.0.starts_with(&selector)); @@ -857,50 +879,57 @@ mod tests { ); } - fn resolve_calldata_item( + async fn resolve_calldata_item( input: &str, deployed_contracts: &HashMap, chain_state_provider: &impl ResolverApi, ) -> anyhow::Result { - CalldataItem::new(input).resolve(deployed_contracts, None, 
chain_state_provider) + CalldataItem::new(input) + .resolve(deployed_contracts, None, chain_state_provider) + .await } - #[test] - fn resolver_can_resolve_chain_id_variable() { + #[tokio::test] + async fn resolver_can_resolve_chain_id_variable() { // Arrange let input = "$CHAIN_ID"; // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver); + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; // Assert let resolved = resolved.expect("Failed to resolve argument"); - assert_eq!(resolved, U256::from(MockResolver.chain_id().unwrap())) + assert_eq!(resolved, U256::from(MockResolver.chain_id().await.unwrap())) } - #[test] - fn resolver_can_resolve_gas_limit_variable() { + #[tokio::test] + async fn resolver_can_resolve_gas_limit_variable() { // Arrange let input = "$GAS_LIMIT"; // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver); + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; // Assert let resolved = resolved.expect("Failed to resolve argument"); assert_eq!( resolved, - U256::from(MockResolver.block_gas_limit(Default::default()).unwrap()) + U256::from( + MockResolver + .block_gas_limit(Default::default()) + .await + .unwrap() + ) ) } - #[test] - fn resolver_can_resolve_coinbase_variable() { + #[tokio::test] + async fn resolver_can_resolve_coinbase_variable() { // Arrange let input = "$COINBASE"; // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver); + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; // Assert let resolved = resolved.expect("Failed to resolve argument"); @@ -909,163 +938,172 @@ mod tests { U256::from_be_slice( MockResolver .block_coinbase(Default::default()) + .await .unwrap() .as_ref() ) ) } - #[test] - fn resolver_can_resolve_block_difficulty_variable() { + #[tokio::test] + async fn resolver_can_resolve_block_difficulty_variable() { // 
Arrange let input = "$DIFFICULTY"; // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver); + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; // Assert let resolved = resolved.expect("Failed to resolve argument"); assert_eq!( resolved, - MockResolver.block_difficulty(Default::default()).unwrap() + MockResolver + .block_difficulty(Default::default()) + .await + .unwrap() ) } - #[test] - fn resolver_can_resolve_block_hash_variable() { + #[tokio::test] + async fn resolver_can_resolve_block_hash_variable() { // Arrange let input = "$BLOCK_HASH"; // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver); + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; // Assert let resolved = resolved.expect("Failed to resolve argument"); assert_eq!( resolved, - U256::from_be_bytes(MockResolver.block_hash(Default::default()).unwrap().0) + U256::from_be_bytes(MockResolver.block_hash(Default::default()).await.unwrap().0) ) } - #[test] - fn resolver_can_resolve_block_number_variable() { + #[tokio::test] + async fn resolver_can_resolve_block_number_variable() { // Arrange let input = "$BLOCK_NUMBER"; // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver); + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; // Assert let resolved = resolved.expect("Failed to resolve argument"); assert_eq!( resolved, - U256::from(MockResolver.last_block_number().unwrap()) + U256::from(MockResolver.last_block_number().await.unwrap()) ) } - #[test] - fn resolver_can_resolve_block_timestamp_variable() { + #[tokio::test] + async fn resolver_can_resolve_block_timestamp_variable() { // Arrange let input = "$BLOCK_TIMESTAMP"; // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver); + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; // 
Assert let resolved = resolved.expect("Failed to resolve argument"); assert_eq!( resolved, - U256::from(MockResolver.block_timestamp(Default::default()).unwrap()) + U256::from( + MockResolver + .block_timestamp(Default::default()) + .await + .unwrap() + ) ) } - #[test] - fn simple_addition_can_be_resolved() { + #[tokio::test] + async fn simple_addition_can_be_resolved() { // Arrange let input = "2 4 +"; // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver); + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; // Assert let resolved = resolved.expect("Failed to resolve argument"); assert_eq!(resolved, U256::from(6)); } - #[test] - fn simple_subtraction_can_be_resolved() { + #[tokio::test] + async fn simple_subtraction_can_be_resolved() { // Arrange let input = "4 2 -"; // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver); + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; // Assert let resolved = resolved.expect("Failed to resolve argument"); assert_eq!(resolved, U256::from(2)); } - #[test] - fn simple_multiplication_can_be_resolved() { + #[tokio::test] + async fn simple_multiplication_can_be_resolved() { // Arrange let input = "4 2 *"; // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver); + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; // Assert let resolved = resolved.expect("Failed to resolve argument"); assert_eq!(resolved, U256::from(8)); } - #[test] - fn simple_division_can_be_resolved() { + #[tokio::test] + async fn simple_division_can_be_resolved() { // Arrange let input = "4 2 /"; // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver); + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; // Assert let resolved = resolved.expect("Failed to resolve argument"); 
assert_eq!(resolved, U256::from(2)); } - #[test] - fn arithmetic_errors_are_not_panics() { + #[tokio::test] + async fn arithmetic_errors_are_not_panics() { // Arrange let input = "4 0 /"; // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver); + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; // Assert assert!(resolved.is_err()) } - #[test] - fn arithmetic_with_resolution_works() { + #[tokio::test] + async fn arithmetic_with_resolution_works() { // Arrange let input = "$BLOCK_NUMBER 10 +"; // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver); + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; // Assert let resolved = resolved.expect("Failed to resolve argument"); assert_eq!( resolved, - U256::from(MockResolver.last_block_number().unwrap() + 10) + U256::from(MockResolver.last_block_number().await.unwrap() + 10) ); } - #[test] - fn incorrect_number_of_arguments_errors() { + #[tokio::test] + async fn incorrect_number_of_arguments_errors() { // Arrange let input = "$BLOCK_NUMBER 10 + +"; // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver); + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; // Assert assert!(resolved.is_err()) diff --git a/crates/format/src/metadata.rs b/crates/format/src/metadata.rs index b72376b..aad74fc 100644 --- a/crates/format/src/metadata.rs +++ b/crates/format/src/metadata.rs @@ -310,16 +310,22 @@ impl FromStr for ContractPathAndIdent { identifier = Some(next_item.to_owned()) } } - let Some(path) = path else { - anyhow::bail!("Path is not defined"); - }; - let Some(identifier) = identifier else { - anyhow::bail!("Contract identifier is not defined") - }; - Ok(Self { - contract_source_path: PathBuf::from(path), - contract_ident: ContractIdent::new(identifier), - }) + match (path, identifier) { + (Some(path), Some(identifier)) => 
Ok(Self { + contract_source_path: PathBuf::from(path), + contract_ident: ContractIdent::new(identifier), + }), + (None, Some(path)) | (Some(path), None) => { + let Some(identifier) = path.split(".").next().map(ToOwned::to_owned) else { + anyhow::bail!("Failed to find identifier"); + }; + Ok(Self { + contract_source_path: PathBuf::from(path), + contract_ident: ContractIdent::new(identifier), + }) + } + (None, None) => anyhow::bail!("Failed to find the path and identifier"), + } } } diff --git a/crates/format/src/traits.rs b/crates/format/src/traits.rs index c22f24a..3d302f9 100644 --- a/crates/format/src/traits.rs +++ b/crates/format/src/traits.rs @@ -6,25 +6,28 @@ use anyhow::Result; /// crate implements to go from string calldata and into the bytes calldata. pub trait ResolverApi { /// Returns the ID of the chain that the node is on. - fn chain_id(&self) -> Result; + fn chain_id(&self) -> impl Future>; // TODO: This is currently a u128 due to Kitchensink needing more than 64 bits for its gas limit // when we implement the changes to the gas we need to adjust this to be a u64. /// Returns the gas limit of the specified block. - fn block_gas_limit(&self, number: BlockNumberOrTag) -> Result; + fn block_gas_limit(&self, number: BlockNumberOrTag) -> impl Future>; /// Returns the coinbase of the specified block. - fn block_coinbase(&self, number: BlockNumberOrTag) -> Result
; + fn block_coinbase(&self, number: BlockNumberOrTag) -> impl Future>; /// Returns the difficulty of the specified block. - fn block_difficulty(&self, number: BlockNumberOrTag) -> Result; + fn block_difficulty(&self, number: BlockNumberOrTag) -> impl Future>; /// Returns the hash of the specified block. - fn block_hash(&self, number: BlockNumberOrTag) -> Result; + fn block_hash(&self, number: BlockNumberOrTag) -> impl Future>; /// Returns the timestamp of the specified block, - fn block_timestamp(&self, number: BlockNumberOrTag) -> Result; + fn block_timestamp( + &self, + number: BlockNumberOrTag, + ) -> impl Future>; /// Returns the number of the last block. - fn last_block_number(&self) -> Result; + fn last_block_number(&self) -> impl Future>; } diff --git a/crates/node-interaction/src/lib.rs b/crates/node-interaction/src/lib.rs index 130d804..791ba4b 100644 --- a/crates/node-interaction/src/lib.rs +++ b/crates/node-interaction/src/lib.rs @@ -7,15 +7,18 @@ use anyhow::Result; /// An interface for all interactions with Ethereum compatible nodes. pub trait EthereumNode { /// Execute the [TransactionRequest] and return a [TransactionReceipt]. - fn execute_transaction(&self, transaction: TransactionRequest) -> Result; + fn execute_transaction( + &self, + transaction: TransactionRequest, + ) -> impl Future>; /// Trace the transaction in the [TransactionReceipt] and return a [GethTrace]. fn trace_transaction( &self, receipt: &TransactionReceipt, trace_options: GethDebugTracingOptions, - ) -> Result; + ) -> impl Future>; /// Returns the state diff of the transaction hash in the [TransactionReceipt]. 
- fn state_diff(&self, receipt: &TransactionReceipt) -> Result; + fn state_diff(&self, receipt: &TransactionReceipt) -> impl Future>; } diff --git a/crates/node/src/geth.rs b/crates/node/src/geth.rs index 9795893..2760d04 100644 --- a/crates/node/src/geth.rs +++ b/crates/node/src/geth.rs @@ -25,7 +25,7 @@ use alloy::{ }, signers::local::PrivateKeySigner, }; -use revive_dt_common::concepts::BlockingExecutor; +use revive_dt_common::fs::clear_directory; use revive_dt_config::Arguments; use revive_dt_format::traits::ResolverApi; use revive_dt_node_interaction::EthereumNode; @@ -43,7 +43,7 @@ static NODE_COUNT: AtomicU32 = AtomicU32::new(0); /// /// Prunes the child process and the base directory on drop. #[derive(Debug)] -pub struct Instance { +pub struct GethNode { connection_string: String, base_directory: PathBuf, data_directory: PathBuf, @@ -62,7 +62,7 @@ pub struct Instance { logs_file_to_flush: Vec, } -impl Instance { +impl GethNode { const BASE_DIRECTORY: &str = "geth"; const DATA_DIRECTORY: &str = "data"; const LOGS_DIRECTORY: &str = "logs"; @@ -81,6 +81,9 @@ impl Instance { /// Create the node directory and call `geth init` to configure the genesis. 
#[tracing::instrument(skip_all, fields(geth_node_id = self.id))] fn init(&mut self, genesis: String) -> anyhow::Result<&mut Self> { + let _ = clear_directory(&self.base_directory); + let _ = clear_directory(&self.logs_directory); + create_dir_all(&self.base_directory)?; create_dir_all(&self.logs_directory)?; @@ -244,108 +247,104 @@ impl Instance { } } -impl EthereumNode for Instance { +impl EthereumNode for GethNode { #[tracing::instrument(skip_all, fields(geth_node_id = self.id))] - fn execute_transaction( + async fn execute_transaction( &self, transaction: TransactionRequest, ) -> anyhow::Result { - let provider = self.provider(); - BlockingExecutor::execute(async move { - let outer_span = tracing::debug_span!("Submitting transaction", ?transaction); - let _outer_guard = outer_span.enter(); + let outer_span = tracing::debug_span!("Submitting transaction", ?transaction); + let _outer_guard = outer_span.enter(); - let provider = provider.await?; + let provider = self.provider().await?; - let pending_transaction = provider.send_transaction(transaction).await?; - let transaction_hash = pending_transaction.tx_hash(); + let pending_transaction = provider.send_transaction(transaction).await?; + let transaction_hash = pending_transaction.tx_hash(); - let span = tracing::info_span!("Awaiting transaction receipt", ?transaction_hash); - let _guard = span.enter(); + let span = tracing::info_span!("Awaiting transaction receipt", ?transaction_hash); + let _guard = span.enter(); - // The following is a fix for the "transaction indexing is in progress" error that we - // used to get. You can find more information on this in the following GH issue in geth - // https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on, - // before we can get the receipt of the transaction it needs to have been indexed by the - // node's indexer. Just because the transaction has been confirmed it doesn't mean that - // it has been indexed. 
When we call alloy's `get_receipt` it checks if the transaction - // was confirmed. If it has been, then it will call `eth_getTransactionReceipt` method - // which _might_ return the above error if the tx has not yet been indexed yet. So, we - // need to implement a retry mechanism for the receipt to keep retrying to get it until - // it eventually works, but we only do that if the error we get back is the "transaction - // indexing is in progress" error or if the receipt is None. - // - // Getting the transaction indexed and taking a receipt can take a long time especially - // when a lot of transactions are being submitted to the node. Thus, while initially we - // only allowed for 60 seconds of waiting with a 1 second delay in polling, we need to - // allow for a larger wait time. Therefore, in here we allow for 5 minutes of waiting - // with exponential backoff each time we attempt to get the receipt and find that it's - // not available. - let mut retries = 0; - let mut total_wait_duration = Duration::from_secs(0); - let max_allowed_wait_duration = Duration::from_secs(5 * 60); - loop { - if total_wait_duration >= max_allowed_wait_duration { - tracing::error!( - ?total_wait_duration, - ?max_allowed_wait_duration, - retry_count = retries, - "Failed to get receipt after polling for it" - ); - anyhow::bail!( - "Polled for receipt for {total_wait_duration:?} but failed to get it" - ); - } - - match provider.get_transaction_receipt(*transaction_hash).await { - Ok(Some(receipt)) => { - tracing::info!(?total_wait_duration, "Found receipt"); - break Ok(receipt); - } - Ok(None) => {} - Err(error) => { - let error_string = error.to_string(); - if !error_string.contains(Self::TRANSACTION_INDEXING_ERROR) { - break Err(error.into()); - } - } - }; - - let next_wait_duration = Duration::from_secs(2u64.pow(retries)) - .min(max_allowed_wait_duration - total_wait_duration); - total_wait_duration += next_wait_duration; - retries += 1; - - 
tokio::time::sleep(next_wait_duration).await; + // The following is a fix for the "transaction indexing is in progress" error that we + // used to get. You can find more information on this in the following GH issue in geth + // https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on, + // before we can get the receipt of the transaction it needs to have been indexed by the + // node's indexer. Just because the transaction has been confirmed it doesn't mean that + // it has been indexed. When we call alloy's `get_receipt` it checks if the transaction + // was confirmed. If it has been, then it will call `eth_getTransactionReceipt` method + // which _might_ return the above error if the tx has not yet been indexed yet. So, we + // need to implement a retry mechanism for the receipt to keep retrying to get it until + // it eventually works, but we only do that if the error we get back is the "transaction + // indexing is in progress" error or if the receipt is None. + // + // Getting the transaction indexed and taking a receipt can take a long time especially + // when a lot of transactions are being submitted to the node. Thus, while initially we + // only allowed for 60 seconds of waiting with a 1 second delay in polling, we need to + // allow for a larger wait time. Therefore, in here we allow for 5 minutes of waiting + // with exponential backoff each time we attempt to get the receipt and find that it's + // not available. + let mut retries = 0; + let mut total_wait_duration = Duration::from_secs(0); + let max_allowed_wait_duration = Duration::from_secs(5 * 60); + loop { + if total_wait_duration >= max_allowed_wait_duration { + tracing::error!( + ?total_wait_duration, + ?max_allowed_wait_duration, + retry_count = retries, + "Failed to get receipt after polling for it" + ); + anyhow::bail!( + "Polled for receipt for {total_wait_duration:?} but failed to get it" + ); } - })? 
+ + match provider.get_transaction_receipt(*transaction_hash).await { + Ok(Some(receipt)) => { + tracing::info!(?total_wait_duration, "Found receipt"); + break Ok(receipt); + } + Ok(None) => {} + Err(error) => { + let error_string = error.to_string(); + if !error_string.contains(Self::TRANSACTION_INDEXING_ERROR) { + break Err(error.into()); + } + } + }; + + let next_wait_duration = Duration::from_secs(2u64.pow(retries)) + .min(max_allowed_wait_duration - total_wait_duration); + total_wait_duration += next_wait_duration; + retries += 1; + + tokio::time::sleep(next_wait_duration).await; + } } #[tracing::instrument(skip_all, fields(geth_node_id = self.id))] - fn trace_transaction( + async fn trace_transaction( &self, transaction: &TransactionReceipt, trace_options: GethDebugTracingOptions, ) -> anyhow::Result { let tx_hash = transaction.transaction_hash; - let provider = self.provider(); - BlockingExecutor::execute(async move { - Ok(provider - .await? - .debug_trace_transaction(tx_hash, trace_options) - .await?) - })? + Ok(self + .provider() + .await? + .debug_trace_transaction(tx_hash, trace_options) + .await?) } #[tracing::instrument(skip_all, fields(geth_node_id = self.id))] - fn state_diff(&self, transaction: &TransactionReceipt) -> anyhow::Result { + async fn state_diff(&self, transaction: &TransactionReceipt) -> anyhow::Result { let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig { diff_mode: Some(true), disable_code: None, disable_storage: None, }); match self - .trace_transaction(transaction, trace_options)? + .trace_transaction(transaction, trace_options) + .await? .try_into_pre_state_frame()? 
{ PreStateFrame::Diff(diff) => Ok(diff), @@ -354,90 +353,77 @@ impl EthereumNode for Instance { } } -impl ResolverApi for Instance { +impl ResolverApi for GethNode { #[tracing::instrument(skip_all, fields(geth_node_id = self.id))] - fn chain_id(&self) -> anyhow::Result { - let provider = self.provider(); - BlockingExecutor::execute(async move { - provider.await?.get_chain_id().await.map_err(Into::into) - })? + async fn chain_id(&self) -> anyhow::Result { + self.provider() + .await? + .get_chain_id() + .await + .map_err(Into::into) } #[tracing::instrument(skip_all, fields(geth_node_id = self.id))] - fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result { - let provider = self.provider(); - BlockingExecutor::execute(async move { - provider - .await? - .get_block_by_number(number) - .await? - .ok_or(anyhow::Error::msg("Blockchain has no blocks")) - .map(|block| block.header.gas_limit as _) - })? + async fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result { + self.provider() + .await? + .get_block_by_number(number) + .await? + .ok_or(anyhow::Error::msg("Blockchain has no blocks")) + .map(|block| block.header.gas_limit as _) } #[tracing::instrument(skip_all, fields(geth_node_id = self.id))] - fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result
{ - let provider = self.provider(); - BlockingExecutor::execute(async move { - provider - .await? - .get_block_by_number(number) - .await? - .ok_or(anyhow::Error::msg("Blockchain has no blocks")) - .map(|block| block.header.beneficiary) - })? + async fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result
{ + self.provider() + .await? + .get_block_by_number(number) + .await? + .ok_or(anyhow::Error::msg("Blockchain has no blocks")) + .map(|block| block.header.beneficiary) } #[tracing::instrument(skip_all, fields(geth_node_id = self.id))] - fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result { - let provider = self.provider(); - BlockingExecutor::execute(async move { - provider - .await? - .get_block_by_number(number) - .await? - .ok_or(anyhow::Error::msg("Blockchain has no blocks")) - .map(|block| block.header.difficulty) - })? + async fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result { + self.provider() + .await? + .get_block_by_number(number) + .await? + .ok_or(anyhow::Error::msg("Blockchain has no blocks")) + .map(|block| block.header.difficulty) } #[tracing::instrument(skip_all, fields(geth_node_id = self.id))] - fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result { - let provider = self.provider(); - BlockingExecutor::execute(async move { - provider - .await? - .get_block_by_number(number) - .await? - .ok_or(anyhow::Error::msg("Blockchain has no blocks")) - .map(|block| block.header.hash) - })? + async fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result { + self.provider() + .await? + .get_block_by_number(number) + .await? + .ok_or(anyhow::Error::msg("Blockchain has no blocks")) + .map(|block| block.header.hash) } #[tracing::instrument(skip_all, fields(geth_node_id = self.id))] - fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result { - let provider = self.provider(); - BlockingExecutor::execute(async move { - provider - .await? - .get_block_by_number(number) - .await? - .ok_or(anyhow::Error::msg("Blockchain has no blocks")) - .map(|block| block.header.timestamp) - })? + async fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result { + self.provider() + .await? + .get_block_by_number(number) + .await? 
+ .ok_or(anyhow::Error::msg("Blockchain has no blocks")) + .map(|block| block.header.timestamp) } #[tracing::instrument(skip_all, fields(geth_node_id = self.id))] - fn last_block_number(&self) -> anyhow::Result { - let provider = self.provider(); - BlockingExecutor::execute(async move { - provider.await?.get_block_number().await.map_err(Into::into) - })? + async fn last_block_number(&self) -> anyhow::Result { + self.provider() + .await? + .get_block_number() + .await + .map_err(Into::into) } } -impl Node for Instance { +impl Node for GethNode { fn new(config: &Arguments) -> Self { let geth_directory = config.directory().join(Self::BASE_DIRECTORY); let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst); @@ -525,7 +511,7 @@ impl Node for Instance { } } -impl Drop for Instance { +impl Drop for GethNode { #[tracing::instrument(skip_all, fields(geth_node_id = self.id))] fn drop(&mut self) { self.shutdown().expect("Failed to shutdown") @@ -550,9 +536,9 @@ mod tests { (config, temp_dir) } - fn new_node() -> (Instance, TempDir) { + fn new_node() -> (GethNode, TempDir) { let (args, temp_dir) = test_config(); - let mut node = Instance::new(&args); + let mut node = GethNode::new(&args); node.init(GENESIS_JSON.to_owned()) .expect("Failed to initialize the node") .spawn_process() @@ -562,110 +548,110 @@ mod tests { #[test] fn init_works() { - Instance::new(&test_config().0) + GethNode::new(&test_config().0) .init(GENESIS_JSON.to_string()) .unwrap(); } #[test] fn spawn_works() { - Instance::new(&test_config().0) + GethNode::new(&test_config().0) .spawn(GENESIS_JSON.to_string()) .unwrap(); } #[test] fn version_works() { - let version = Instance::new(&test_config().0).version().unwrap(); + let version = GethNode::new(&test_config().0).version().unwrap(); assert!( version.starts_with("geth version"), "expected version string, got: '{version}'" ); } - #[test] - fn can_get_chain_id_from_node() { + #[tokio::test] + async fn can_get_chain_id_from_node() { // Arrange let (node, _temp_dir) = 
new_node(); // Act - let chain_id = node.chain_id(); + let chain_id = node.chain_id().await; // Assert let chain_id = chain_id.expect("Failed to get the chain id"); assert_eq!(chain_id, 420_420_420); } - #[test] - fn can_get_gas_limit_from_node() { + #[tokio::test] + async fn can_get_gas_limit_from_node() { // Arrange let (node, _temp_dir) = new_node(); // Act - let gas_limit = node.block_gas_limit(BlockNumberOrTag::Latest); + let gas_limit = node.block_gas_limit(BlockNumberOrTag::Latest).await; // Assert let gas_limit = gas_limit.expect("Failed to get the gas limit"); assert_eq!(gas_limit, u32::MAX as u128) } - #[test] - fn can_get_coinbase_from_node() { + #[tokio::test] + async fn can_get_coinbase_from_node() { // Arrange let (node, _temp_dir) = new_node(); // Act - let coinbase = node.block_coinbase(BlockNumberOrTag::Latest); + let coinbase = node.block_coinbase(BlockNumberOrTag::Latest).await; // Assert let coinbase = coinbase.expect("Failed to get the coinbase"); assert_eq!(coinbase, Address::new([0xFF; 20])) } - #[test] - fn can_get_block_difficulty_from_node() { + #[tokio::test] + async fn can_get_block_difficulty_from_node() { // Arrange let (node, _temp_dir) = new_node(); // Act - let block_difficulty = node.block_difficulty(BlockNumberOrTag::Latest); + let block_difficulty = node.block_difficulty(BlockNumberOrTag::Latest).await; // Assert let block_difficulty = block_difficulty.expect("Failed to get the block difficulty"); assert_eq!(block_difficulty, U256::ZERO) } - #[test] - fn can_get_block_hash_from_node() { + #[tokio::test] + async fn can_get_block_hash_from_node() { // Arrange let (node, _temp_dir) = new_node(); // Act - let block_hash = node.block_hash(BlockNumberOrTag::Latest); + let block_hash = node.block_hash(BlockNumberOrTag::Latest).await; // Assert let _ = block_hash.expect("Failed to get the block hash"); } - #[test] - fn can_get_block_timestamp_from_node() { + #[tokio::test] + async fn can_get_block_timestamp_from_node() { // Arrange let 
(node, _temp_dir) = new_node(); // Act - let block_timestamp = node.block_timestamp(BlockNumberOrTag::Latest); + let block_timestamp = node.block_timestamp(BlockNumberOrTag::Latest).await; // Assert let _ = block_timestamp.expect("Failed to get the block timestamp"); } - #[test] - fn can_get_block_number_from_node() { + #[tokio::test] + async fn can_get_block_number_from_node() { // Arrange let (node, _temp_dir) = new_node(); // Act - let block_number = node.last_block_number(); + let block_number = node.last_block_number().await; // Assert let block_number = block_number.expect("Failed to get the block number"); diff --git a/crates/node/src/kitchensink.rs b/crates/node/src/kitchensink.rs index ff9d652..a0a6f54 100644 --- a/crates/node/src/kitchensink.rs +++ b/crates/node/src/kitchensink.rs @@ -30,6 +30,7 @@ use alloy::{ }, signers::local::PrivateKeySigner, }; +use revive_dt_common::fs::clear_directory; use revive_dt_format::traits::ResolverApi; use serde::{Deserialize, Serialize}; use serde_json::{Value as JsonValue, json}; @@ -37,7 +38,6 @@ use sp_core::crypto::Ss58Codec; use sp_runtime::AccountId32; use tracing::Level; -use revive_dt_common::concepts::BlockingExecutor; use revive_dt_config::Arguments; use revive_dt_node_interaction::EthereumNode; @@ -86,6 +86,9 @@ impl KitchensinkNode { #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))] fn init(&mut self, genesis: &str) -> anyhow::Result<&mut Self> { + let _ = clear_directory(&self.base_directory); + let _ = clear_directory(&self.logs_directory); + create_dir_all(&self.base_directory)?; create_dir_all(&self.logs_directory)?; @@ -377,49 +380,46 @@ impl KitchensinkNode { impl EthereumNode for KitchensinkNode { #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))] - fn execute_transaction( + async fn execute_transaction( &self, transaction: alloy::rpc::types::TransactionRequest, ) -> anyhow::Result { tracing::debug!(?transaction, "Submitting transaction"); - let provider = 
self.provider(); - let receipt = BlockingExecutor::execute(async move { - Ok(provider - .await? - .send_transaction(transaction) - .await? - .get_receipt() - .await?) - })?; + let receipt = self + .provider() + .await? + .send_transaction(transaction) + .await? + .get_receipt() + .await?; tracing::info!(?receipt, "Submitted tx to kitchensink"); - receipt + Ok(receipt) } #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))] - fn trace_transaction( + async fn trace_transaction( &self, transaction: &TransactionReceipt, trace_options: GethDebugTracingOptions, ) -> anyhow::Result { let tx_hash = transaction.transaction_hash; - let provider = self.provider(); - BlockingExecutor::execute(async move { - Ok(provider - .await? - .debug_trace_transaction(tx_hash, trace_options) - .await?) - })? + Ok(self + .provider() + .await? + .debug_trace_transaction(tx_hash, trace_options) + .await?) } #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))] - fn state_diff(&self, transaction: &TransactionReceipt) -> anyhow::Result { + async fn state_diff(&self, transaction: &TransactionReceipt) -> anyhow::Result { let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig { diff_mode: Some(true), disable_code: None, disable_storage: None, }); match self - .trace_transaction(transaction, trace_options)? + .trace_transaction(transaction, trace_options) + .await? .try_into_pre_state_frame()? { PreStateFrame::Diff(diff) => Ok(diff), @@ -429,85 +429,72 @@ impl EthereumNode for KitchensinkNode { } impl ResolverApi for KitchensinkNode { - #[tracing::instrument(skip_all, fields(geth_node_id = self.id))] - fn chain_id(&self) -> anyhow::Result { - let provider = self.provider(); - BlockingExecutor::execute(async move { - provider.await?.get_chain_id().await.map_err(Into::into) - })? + #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))] + async fn chain_id(&self) -> anyhow::Result { + self.provider() + .await? 
+ .get_chain_id() + .await + .map_err(Into::into) } - #[tracing::instrument(skip_all, fields(geth_node_id = self.id))] - fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result { - let provider = self.provider(); - BlockingExecutor::execute(async move { - provider - .await? - .get_block_by_number(number) - .await? - .ok_or(anyhow::Error::msg("Blockchain has no blocks")) - .map(|block| block.header.gas_limit) - })? + #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))] + async fn block_gas_limit(&self, number: BlockNumberOrTag) -> anyhow::Result { + self.provider() + .await? + .get_block_by_number(number) + .await? + .ok_or(anyhow::Error::msg("Blockchain has no blocks")) + .map(|block| block.header.gas_limit as _) } - #[tracing::instrument(skip_all, fields(geth_node_id = self.id))] - fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result
{ - let provider = self.provider(); - BlockingExecutor::execute(async move { - provider - .await? - .get_block_by_number(number) - .await? - .ok_or(anyhow::Error::msg("Blockchain has no blocks")) - .map(|block| block.header.beneficiary) - })? + #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))] + async fn block_coinbase(&self, number: BlockNumberOrTag) -> anyhow::Result
{ + self.provider() + .await? + .get_block_by_number(number) + .await? + .ok_or(anyhow::Error::msg("Blockchain has no blocks")) + .map(|block| block.header.beneficiary) } - #[tracing::instrument(skip_all, fields(geth_node_id = self.id))] - fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result { - let provider = self.provider(); - BlockingExecutor::execute(async move { - provider - .await? - .get_block_by_number(number) - .await? - .ok_or(anyhow::Error::msg("Blockchain has no blocks")) - .map(|block| block.header.difficulty) - })? + #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))] + async fn block_difficulty(&self, number: BlockNumberOrTag) -> anyhow::Result { + self.provider() + .await? + .get_block_by_number(number) + .await? + .ok_or(anyhow::Error::msg("Blockchain has no blocks")) + .map(|block| block.header.difficulty) } - #[tracing::instrument(skip_all, fields(geth_node_id = self.id))] - fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result { - let provider = self.provider(); - BlockingExecutor::execute(async move { - provider - .await? - .get_block_by_number(number) - .await? - .ok_or(anyhow::Error::msg("Blockchain has no blocks")) - .map(|block| block.header.hash) - })? + #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))] + async fn block_hash(&self, number: BlockNumberOrTag) -> anyhow::Result { + self.provider() + .await? + .get_block_by_number(number) + .await? + .ok_or(anyhow::Error::msg("Blockchain has no blocks")) + .map(|block| block.header.hash) } - #[tracing::instrument(skip_all, fields(geth_node_id = self.id))] - fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result { - let provider = self.provider(); - BlockingExecutor::execute(async move { - provider - .await? - .get_block_by_number(number) - .await? - .ok_or(anyhow::Error::msg("Blockchain has no blocks")) - .map(|block| block.header.timestamp) - })? 
+ #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))] + async fn block_timestamp(&self, number: BlockNumberOrTag) -> anyhow::Result { + self.provider() + .await? + .get_block_by_number(number) + .await? + .ok_or(anyhow::Error::msg("Blockchain has no blocks")) + .map(|block| block.header.timestamp) } - #[tracing::instrument(skip_all, fields(geth_node_id = self.id))] - fn last_block_number(&self) -> anyhow::Result { - let provider = self.provider(); - BlockingExecutor::execute(async move { - provider.await?.get_block_number().await.map_err(Into::into) - })? + #[tracing::instrument(skip_all, fields(kitchensink_node_id = self.id))] + async fn last_block_number(&self) -> anyhow::Result { + self.provider() + .await? + .get_block_number() + .await + .map_err(Into::into) } } @@ -1043,26 +1030,21 @@ mod tests { use revive_dt_config::Arguments; use std::path::PathBuf; use std::sync::{LazyLock, Mutex}; - use temp_dir::TempDir; use std::fs; use super::*; use crate::{GENESIS_JSON, Node}; - fn test_config() -> (Arguments, TempDir) { - let mut config = Arguments::default(); - let temp_dir = TempDir::new().unwrap(); - - config.working_directory = temp_dir.path().to_path_buf().into(); - - config.kitchensink = PathBuf::from("substrate-node"); - config.eth_proxy = PathBuf::from("eth-rpc"); - - (config, temp_dir) + fn test_config() -> Arguments { + Arguments { + kitchensink: PathBuf::from("substrate-node"), + eth_proxy: PathBuf::from("eth-rpc"), + ..Default::default() + } } - fn new_node() -> (KitchensinkNode, Arguments, TempDir) { + fn new_node() -> (KitchensinkNode, Arguments) { // Note: When we run the tests in the CI we found that if they're all // run in parallel then the CI is unable to start all of the nodes in // time and their start up times-out. 
Therefore, we want all of the @@ -1081,20 +1063,20 @@ mod tests { static NODE_START_MUTEX: Mutex<()> = Mutex::new(()); let _guard = NODE_START_MUTEX.lock().unwrap(); - let (args, temp_dir) = test_config(); + let args = test_config(); let mut node = KitchensinkNode::new(&args); node.init(GENESIS_JSON) .expect("Failed to initialize the node") .spawn_process() .expect("Failed to spawn the node process"); - (node, args, temp_dir) + (node, args) } /// A shared node that multiple tests can use. It starts up once. fn shared_node() -> &'static KitchensinkNode { - static NODE: LazyLock<(KitchensinkNode, TempDir)> = LazyLock::new(|| { - let (node, _, temp_dir) = new_node(); - (node, temp_dir) + static NODE: LazyLock<(KitchensinkNode, Arguments)> = LazyLock::new(|| { + let (node, args) = new_node(); + (node, args) }); &NODE.0 } @@ -1102,7 +1084,7 @@ mod tests { #[tokio::test] async fn node_mines_simple_transfer_transaction_and_returns_receipt() { // Arrange - let (node, args, _temp_dir) = new_node(); + let (node, args) = new_node(); let provider = node.provider().await.expect("Failed to create provider"); @@ -1137,7 +1119,7 @@ mod tests { } "#; - let mut dummy_node = KitchensinkNode::new(&test_config().0); + let mut dummy_node = KitchensinkNode::new(&test_config()); // Call `init()` dummy_node.init(genesis_content).expect("init failed"); @@ -1181,7 +1163,7 @@ mod tests { } "#; - let node = KitchensinkNode::new(&test_config().0); + let node = KitchensinkNode::new(&test_config()); let result = node .extract_balance_from_genesis_file(&serde_json::from_str(genesis_json).unwrap()) @@ -1252,15 +1234,16 @@ mod tests { #[test] fn spawn_works() { - let (config, _temp_dir) = test_config(); + let config = test_config(); let mut node = KitchensinkNode::new(&config); + node.spawn(GENESIS_JSON.to_string()).unwrap(); } #[test] fn version_works() { - let (config, _temp_dir) = test_config(); + let config = test_config(); let node = KitchensinkNode::new(&config); let version = 
node.version().unwrap(); @@ -1273,7 +1256,7 @@ mod tests { #[test] fn eth_rpc_version_works() { - let (config, _temp_dir) = test_config(); + let config = test_config(); let node = KitchensinkNode::new(&config); let version = node.eth_rpc_version().unwrap(); @@ -1284,86 +1267,86 @@ mod tests { ); } - #[test] - fn can_get_chain_id_from_node() { + #[tokio::test] + async fn can_get_chain_id_from_node() { // Arrange let node = shared_node(); // Act - let chain_id = node.chain_id(); + let chain_id = node.chain_id().await; // Assert let chain_id = chain_id.expect("Failed to get the chain id"); assert_eq!(chain_id, 420_420_420); } - #[test] - fn can_get_gas_limit_from_node() { + #[tokio::test] + async fn can_get_gas_limit_from_node() { // Arrange let node = shared_node(); // Act - let gas_limit = node.block_gas_limit(BlockNumberOrTag::Latest); + let gas_limit = node.block_gas_limit(BlockNumberOrTag::Latest).await; // Assert let _ = gas_limit.expect("Failed to get the gas limit"); } - #[test] - fn can_get_coinbase_from_node() { + #[tokio::test] + async fn can_get_coinbase_from_node() { // Arrange let node = shared_node(); // Act - let coinbase = node.block_coinbase(BlockNumberOrTag::Latest); + let coinbase = node.block_coinbase(BlockNumberOrTag::Latest).await; // Assert let _ = coinbase.expect("Failed to get the coinbase"); } - #[test] - fn can_get_block_difficulty_from_node() { + #[tokio::test] + async fn can_get_block_difficulty_from_node() { // Arrange let node = shared_node(); // Act - let block_difficulty = node.block_difficulty(BlockNumberOrTag::Latest); + let block_difficulty = node.block_difficulty(BlockNumberOrTag::Latest).await; // Assert let _ = block_difficulty.expect("Failed to get the block difficulty"); } - #[test] - fn can_get_block_hash_from_node() { + #[tokio::test] + async fn can_get_block_hash_from_node() { // Arrange let node = shared_node(); // Act - let block_hash = node.block_hash(BlockNumberOrTag::Latest); + let block_hash = 
node.block_hash(BlockNumberOrTag::Latest).await; // Assert let _ = block_hash.expect("Failed to get the block hash"); } - #[test] - fn can_get_block_timestamp_from_node() { + #[tokio::test] + async fn can_get_block_timestamp_from_node() { // Arrange let node = shared_node(); // Act - let block_timestamp = node.block_timestamp(BlockNumberOrTag::Latest); + let block_timestamp = node.block_timestamp(BlockNumberOrTag::Latest).await; // Assert let _ = block_timestamp.expect("Failed to get the block timestamp"); } - #[test] - fn can_get_block_number_from_node() { + #[tokio::test] + async fn can_get_block_number_from_node() { // Arrange let node = shared_node(); // Act - let block_number = node.last_block_number(); + let block_number = node.last_block_number().await; // Assert let _ = block_number.expect("Failed to get the block number"); diff --git a/crates/node/src/pool.rs b/crates/node/src/pool.rs index 10d4d59..8cb83cc 100644 --- a/crates/node/src/pool.rs +++ b/crates/node/src/pool.rs @@ -24,7 +24,7 @@ where { /// Create a new Pool. This will start as many nodes as there are workers in `config`. 
pub fn new(config: &Arguments) -> anyhow::Result { - let nodes = config.workers; + let nodes = config.number_of_nodes; let genesis = read_to_string(&config.genesis_file).context(format!( "can not read genesis file: {}", config.genesis_file.display() diff --git a/crates/solc-binaries/Cargo.toml b/crates/solc-binaries/Cargo.toml index 9bb6090..be5dcf7 100644 --- a/crates/solc-binaries/Cargo.toml +++ b/crates/solc-binaries/Cargo.toml @@ -14,6 +14,7 @@ revive-dt-common = { workspace = true } anyhow = { workspace = true } hex = { workspace = true } tracing = { workspace = true } +tokio = { workspace = true } reqwest = { workspace = true } semver = { workspace = true } serde = { workspace = true } diff --git a/crates/solc-binaries/src/cache.rs b/crates/solc-binaries/src/cache.rs index 364ea01..0b7daaf 100644 --- a/crates/solc-binaries/src/cache.rs +++ b/crates/solc-binaries/src/cache.rs @@ -6,15 +6,17 @@ use std::{ io::{BufWriter, Write}, os::unix::fs::PermissionsExt, path::{Path, PathBuf}, - sync::{LazyLock, Mutex}, + sync::LazyLock, }; +use tokio::sync::Mutex; + use crate::download::GHDownloader; pub const SOLC_CACHE_DIRECTORY: &str = "solc"; pub(crate) static SOLC_CACHER: LazyLock>> = LazyLock::new(Default::default); -pub(crate) fn get_or_download( +pub(crate) async fn get_or_download( working_directory: &Path, downloader: &GHDownloader, ) -> anyhow::Result { @@ -23,20 +25,20 @@ pub(crate) fn get_or_download( .join(downloader.version.to_string()); let target_file = target_directory.join(downloader.target); - let mut cache = SOLC_CACHER.lock().unwrap(); + let mut cache = SOLC_CACHER.lock().await; if cache.contains(&target_file) { tracing::debug!("using cached solc: {}", target_file.display()); return Ok(target_file); } create_dir_all(target_directory)?; - download_to_file(&target_file, downloader)?; + download_to_file(&target_file, downloader).await?; cache.insert(target_file.clone()); Ok(target_file) } -fn download_to_file(path: &Path, downloader: &GHDownloader) -> 
anyhow::Result<()> { +async fn download_to_file(path: &Path, downloader: &GHDownloader) -> anyhow::Result<()> { tracing::info!("caching file: {}", path.display()); let Ok(file) = File::create_new(path) else { @@ -52,7 +54,7 @@ fn download_to_file(path: &Path, downloader: &GHDownloader) -> anyhow::Result<() } let mut file = BufWriter::new(file); - file.write_all(&downloader.download()?)?; + file.write_all(&downloader.download().await?)?; file.flush()?; drop(file); diff --git a/crates/solc-binaries/src/download.rs b/crates/solc-binaries/src/download.rs index 067102c..93ee451 100644 --- a/crates/solc-binaries/src/download.rs +++ b/crates/solc-binaries/src/download.rs @@ -25,12 +25,12 @@ impl List { /// /// Caches the list retrieved from the `url` into [LIST_CACHE], /// subsequent calls with the same `url` will return the cached list. - pub fn download(url: &'static str) -> anyhow::Result { + pub async fn download(url: &'static str) -> anyhow::Result { if let Some(list) = LIST_CACHE.lock().unwrap().get(url) { return Ok(list.clone()); } - let body: List = reqwest::blocking::get(url)?.json()?; + let body: List = reqwest::get(url).await?.json().await?; LIST_CACHE.lock().unwrap().insert(url, body.clone()); @@ -54,7 +54,7 @@ impl GHDownloader { pub const WINDOWS_NAME: &str = "solc-windows.exe"; pub const WASM_NAME: &str = "soljson.js"; - fn new( + async fn new( version: impl Into, target: &'static str, list: &'static str, @@ -67,7 +67,8 @@ impl GHDownloader { list, }), VersionOrRequirement::Requirement(requirement) => { - let Some(version) = List::download(list)? + let Some(version) = List::download(list) + .await? 
.builds .into_iter() .map(|build| build.version) @@ -85,20 +86,20 @@ impl GHDownloader { } } - pub fn linux(version: impl Into) -> anyhow::Result { - Self::new(version, Self::LINUX_NAME, List::LINUX_URL) + pub async fn linux(version: impl Into) -> anyhow::Result { + Self::new(version, Self::LINUX_NAME, List::LINUX_URL).await } - pub fn macosx(version: impl Into) -> anyhow::Result { - Self::new(version, Self::MACOSX_NAME, List::MACOSX_URL) + pub async fn macosx(version: impl Into) -> anyhow::Result { + Self::new(version, Self::MACOSX_NAME, List::MACOSX_URL).await } - pub fn windows(version: impl Into) -> anyhow::Result { - Self::new(version, Self::WINDOWS_NAME, List::WINDOWS_URL) + pub async fn windows(version: impl Into) -> anyhow::Result { + Self::new(version, Self::WINDOWS_NAME, List::WINDOWS_URL).await } - pub fn wasm(version: impl Into) -> anyhow::Result { - Self::new(version, Self::WASM_NAME, List::WASM_URL) + pub async fn wasm(version: impl Into) -> anyhow::Result { + Self::new(version, Self::WASM_NAME, List::WASM_URL).await } /// Returns the download link. @@ -110,16 +111,17 @@ impl GHDownloader { /// /// Errors out if the download fails or the digest of the downloaded file /// mismatches the expected digest from the release [List]. - pub fn download(&self) -> anyhow::Result> { + pub async fn download(&self) -> anyhow::Result> { tracing::info!("downloading solc: {self:?}"); - let expected_digest = List::download(self.list)? + let expected_digest = List::download(self.list) + .await? 
.builds .iter() .find(|build| build.version == self.version) .ok_or_else(|| anyhow::anyhow!("solc v{} not found builds", self.version)) .map(|b| b.sha256.strip_prefix("0x").unwrap_or(&b.sha256).to_string())?; - let file = reqwest::blocking::get(self.url())?.bytes()?.to_vec(); + let file = reqwest::get(self.url()).await?.bytes().await?.to_vec(); if hex::encode(Sha256::digest(&file)) != expected_digest { anyhow::bail!("sha256 mismatch for solc version {}", self.version); @@ -133,27 +135,56 @@ impl GHDownloader { mod tests { use crate::{download::GHDownloader, list::List}; - #[test] - fn try_get_windows() { - let version = List::download(List::WINDOWS_URL).unwrap().latest_release; - GHDownloader::windows(version).unwrap().download().unwrap(); + #[tokio::test] + async fn try_get_windows() { + let version = List::download(List::WINDOWS_URL) + .await + .unwrap() + .latest_release; + GHDownloader::windows(version) + .await + .unwrap() + .download() + .await + .unwrap(); } - #[test] - fn try_get_macosx() { - let version = List::download(List::MACOSX_URL).unwrap().latest_release; - GHDownloader::macosx(version).unwrap().download().unwrap(); + #[tokio::test] + async fn try_get_macosx() { + let version = List::download(List::MACOSX_URL) + .await + .unwrap() + .latest_release; + GHDownloader::macosx(version) + .await + .unwrap() + .download() + .await + .unwrap(); } - #[test] - fn try_get_linux() { - let version = List::download(List::LINUX_URL).unwrap().latest_release; - GHDownloader::linux(version).unwrap().download().unwrap(); + #[tokio::test] + async fn try_get_linux() { + let version = List::download(List::LINUX_URL) + .await + .unwrap() + .latest_release; + GHDownloader::linux(version) + .await + .unwrap() + .download() + .await + .unwrap(); } - #[test] - fn try_get_wasm() { - let version = List::download(List::WASM_URL).unwrap().latest_release; - GHDownloader::wasm(version).unwrap().download().unwrap(); + #[tokio::test] + async fn try_get_wasm() { + let version = 
List::download(List::WASM_URL).await.unwrap().latest_release; + GHDownloader::wasm(version) + .await + .unwrap() + .download() + .await + .unwrap(); } } diff --git a/crates/solc-binaries/src/lib.rs b/crates/solc-binaries/src/lib.rs index 5fefbd8..7251c7c 100644 --- a/crates/solc-binaries/src/lib.rs +++ b/crates/solc-binaries/src/lib.rs @@ -19,22 +19,22 @@ pub mod list; /// /// Subsequent calls for the same version will use a cached artifact /// and not download it again. -pub fn download_solc( +pub async fn download_solc( cache_directory: &Path, version: impl Into, wasm: bool, ) -> anyhow::Result { let downloader = if wasm { - GHDownloader::wasm(version) + GHDownloader::wasm(version).await } else if cfg!(target_os = "linux") { - GHDownloader::linux(version) + GHDownloader::linux(version).await } else if cfg!(target_os = "macos") { - GHDownloader::macosx(version) + GHDownloader::macosx(version).await } else if cfg!(target_os = "windows") { - GHDownloader::windows(version) + GHDownloader::windows(version).await } else { unimplemented!() }?; - get_or_download(cache_directory, &downloader) + get_or_download(cache_directory, &downloader).await }