diff --git a/.rustfmt.toml b/.rustfmt.toml index a6e138d..c017c9e 100644 --- a/.rustfmt.toml +++ b/.rustfmt.toml @@ -1,34 +1,25 @@ -# Rustfmt configuration - configured to minimize changes -# Only stable features are used to ensure compatibility - -# Edition +# Basic edition = "2024" - -# Line width +hard_tabs = true max_width = 100 - -# Use small heuristics -use_small_heuristics = "Default" - -# Formatting -newline_style = "Unix" -use_field_init_shorthand = true -use_try_shorthand = true - -# Chains -chain_width = 60 - -# Function calls -fn_call_width = 60 - -# Structs -struct_lit_width = 18 - -# Arrays -array_width = 60 - -# Misc +use_small_heuristics = "Max" +# Imports +imports_granularity = "Crate" reorder_imports = true -reorder_modules = true -remove_nested_parens = true -merge_derives = true +# Consistency +newline_style = "Unix" +# Misc +chain_width = 80 +spaces_around_ranges = false +binop_separator = "Back" +reorder_impl_items = false +match_arm_leading_pipes = "Preserve" +match_arm_blocks = false +match_block_trailing_comma = true +trailing_comma = "Vertical" +trailing_semicolon = false +use_field_init_shorthand = true +# Format comments +comment_width = 100 +wrap_comments = true + diff --git a/crates/common/src/cached_fs/mod.rs b/crates/common/src/cached_fs/mod.rs index 196a6c8..148352a 100644 --- a/crates/common/src/cached_fs/mod.rs +++ b/crates/common/src/cached_fs/mod.rs @@ -2,50 +2,47 @@ //! rather being queried from the file system again. 
use std::{ - fs, - io::{Error, Result}, - path::{Path, PathBuf}, + fs, + io::{Error, Result}, + path::{Path, PathBuf}, }; use moka::sync::Cache; use once_cell::sync::Lazy; pub fn read(path: impl AsRef) -> Result> { - static READ_CACHE: Lazy>> = Lazy::new(|| Cache::new(10_000)); + static READ_CACHE: Lazy>> = Lazy::new(|| Cache::new(10_000)); - let path = path.as_ref().canonicalize()?; - match READ_CACHE.get(path.as_path()) { - Some(content) => Ok(content), - None => { - let content = fs::read(path.as_path())?; - READ_CACHE.insert(path, content.clone()); - Ok(content) - } - } + let path = path.as_ref().canonicalize()?; + match READ_CACHE.get(path.as_path()) { + Some(content) => Ok(content), + None => { + let content = fs::read(path.as_path())?; + READ_CACHE.insert(path, content.clone()); + Ok(content) + }, + } } pub fn read_to_string(path: impl AsRef) -> Result { - let content = read(path)?; - String::from_utf8(content).map_err(|_| { - Error::new( - std::io::ErrorKind::InvalidData, - "The contents of the file are not valid UTF8", - ) - }) + let content = read(path)?; + String::from_utf8(content).map_err(|_| { + Error::new(std::io::ErrorKind::InvalidData, "The contents of the file are not valid UTF8") + }) } pub fn read_dir(path: impl AsRef) -> Result>>> { - static READ_DIR_CACHE: Lazy>> = Lazy::new(|| Cache::new(10_000)); + static READ_DIR_CACHE: Lazy>> = Lazy::new(|| Cache::new(10_000)); - let path = path.as_ref().canonicalize()?; - match READ_DIR_CACHE.get(path.as_path()) { - Some(entries) => Ok(Box::new(entries.into_iter().map(Ok)) as Box<_>), - None => { - let entries = fs::read_dir(path.as_path())? 
- .flat_map(|maybe_entry| maybe_entry.map(|entry| entry.path())) - .collect(); - READ_DIR_CACHE.insert(path.clone(), entries); - Ok(read_dir(path).unwrap()) - } - } + let path = path.as_ref().canonicalize()?; + match READ_DIR_CACHE.get(path.as_path()) { + Some(entries) => Ok(Box::new(entries.into_iter().map(Ok)) as Box<_>), + None => { + let entries = fs::read_dir(path.as_path())? + .flat_map(|maybe_entry| maybe_entry.map(|entry| entry.path())) + .collect(); + READ_DIR_CACHE.insert(path.clone(), entries); + Ok(read_dir(path).unwrap()) + }, + } } diff --git a/crates/common/src/fs/clear_dir.rs b/crates/common/src/fs/clear_dir.rs index 387c134..f7073d5 100644 --- a/crates/common/src/fs/clear_dir.rs +++ b/crates/common/src/fs/clear_dir.rs @@ -1,6 +1,6 @@ use std::{ - fs::{read_dir, remove_dir_all, remove_file}, - path::Path, + fs::{read_dir, remove_dir_all, remove_file}, + path::Path, }; use anyhow::{Context, Result}; @@ -8,24 +8,21 @@ use anyhow::{Context, Result}; /// This method clears the passed directory of all of the files and directories contained within /// without deleting the directory. pub fn clear_directory(path: impl AsRef) -> Result<()> { - for entry in read_dir(path.as_ref()) - .with_context(|| format!("Failed to read directory: {}", path.as_ref().display()))? - { - let entry = entry.with_context(|| { - format!( - "Failed to read an entry in directory: {}", - path.as_ref().display() - ) - })?; - let entry_path = entry.path(); + for entry in read_dir(path.as_ref()) + .with_context(|| format!("Failed to read directory: {}", path.as_ref().display()))? + { + let entry = entry.with_context(|| { + format!("Failed to read an entry in directory: {}", path.as_ref().display()) + })?; + let entry_path = entry.path(); - if entry_path.is_file() { - remove_file(&entry_path) - .with_context(|| format!("Failed to remove file: {}", entry_path.display()))? 
- } else { - remove_dir_all(&entry_path) - .with_context(|| format!("Failed to remove directory: {}", entry_path.display()))? - } - } - Ok(()) + if entry_path.is_file() { + remove_file(&entry_path) + .with_context(|| format!("Failed to remove file: {}", entry_path.display()))? + } else { + remove_dir_all(&entry_path) + .with_context(|| format!("Failed to remove directory: {}", entry_path.display()))? + } + } + Ok(()) } diff --git a/crates/common/src/futures/poll.rs b/crates/common/src/futures/poll.rs index 76cb2f4..d0cbbf1 100644 --- a/crates/common/src/futures/poll.rs +++ b/crates/common/src/futures/poll.rs @@ -17,55 +17,51 @@ const EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION: Duration = Duration::from_secs(60); /// [`Break`]: ControlFlow::Break /// [`Continue`]: ControlFlow::Continue pub async fn poll( - polling_duration: Duration, - polling_wait_behavior: PollingWaitBehavior, - mut future: impl FnMut() -> F, + polling_duration: Duration, + polling_wait_behavior: PollingWaitBehavior, + mut future: impl FnMut() -> F, ) -> Result where - F: Future>>, + F: Future>>, { - let mut retries = 0; - let mut total_wait_duration = Duration::ZERO; - let max_allowed_wait_duration = polling_duration; + let mut retries = 0; + let mut total_wait_duration = Duration::ZERO; + let max_allowed_wait_duration = polling_duration; - loop { - if total_wait_duration >= max_allowed_wait_duration { - break Err(anyhow!( - "Polling failed after {} retries and a total of {:?} of wait time", - retries, - total_wait_duration - )); - } + loop { + if total_wait_duration >= max_allowed_wait_duration { + break Err(anyhow!( + "Polling failed after {} retries and a total of {:?} of wait time", + retries, + total_wait_duration + )); + } - match future() - .await - .context("Polled future returned an error during polling loop")? 
- { - ControlFlow::Continue(()) => { - let next_wait_duration = match polling_wait_behavior { - PollingWaitBehavior::Constant(duration) => duration, - PollingWaitBehavior::ExponentialBackoff => { - Duration::from_secs(2u64.pow(retries)) - .min(EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION) - } - }; - let next_wait_duration = - next_wait_duration.min(max_allowed_wait_duration - total_wait_duration); - total_wait_duration += next_wait_duration; - retries += 1; + match future().await.context("Polled future returned an error during polling loop")? { + ControlFlow::Continue(()) => { + let next_wait_duration = match polling_wait_behavior { + PollingWaitBehavior::Constant(duration) => duration, + PollingWaitBehavior::ExponentialBackoff => + Duration::from_secs(2u64.pow(retries)) + .min(EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION), + }; + let next_wait_duration = + next_wait_duration.min(max_allowed_wait_duration - total_wait_duration); + total_wait_duration += next_wait_duration; + retries += 1; - tokio::time::sleep(next_wait_duration).await; - } - ControlFlow::Break(output) => { - break Ok(output); - } - } - } + tokio::time::sleep(next_wait_duration).await; + }, + ControlFlow::Break(output) => { + break Ok(output); + }, + } + } } #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default)] pub enum PollingWaitBehavior { - Constant(Duration), - #[default] - ExponentialBackoff, + Constant(Duration), + #[default] + ExponentialBackoff, } diff --git a/crates/common/src/iterators/either_iter.rs b/crates/common/src/iterators/either_iter.rs index d327c6f..90d79e0 100644 --- a/crates/common/src/iterators/either_iter.rs +++ b/crates/common/src/iterators/either_iter.rs @@ -1,21 +1,21 @@ /// An iterator that could be either of two iterators. 
#[derive(Clone, Debug)] pub enum EitherIter { - A(A), - B(B), + A(A), + B(B), } impl Iterator for EitherIter where - A: Iterator, - B: Iterator, + A: Iterator, + B: Iterator, { - type Item = T; + type Item = T; - fn next(&mut self) -> Option { - match self { - EitherIter::A(iter) => iter.next(), - EitherIter::B(iter) => iter.next(), - } - } + fn next(&mut self) -> Option { + match self { + EitherIter::A(iter) => iter.next(), + EitherIter::B(iter) => iter.next(), + } + } } diff --git a/crates/common/src/iterators/files_with_extension_iterator.rs b/crates/common/src/iterators/files_with_extension_iterator.rs index 93bd709..0854e1a 100644 --- a/crates/common/src/iterators/files_with_extension_iterator.rs +++ b/crates/common/src/iterators/files_with_extension_iterator.rs @@ -1,91 +1,90 @@ use std::{ - borrow::Cow, - collections::HashSet, - path::{Path, PathBuf}, + borrow::Cow, + collections::HashSet, + path::{Path, PathBuf}, }; /// An iterator that finds files of a certain extension in the provided directory. You can think of /// this a glob pattern similar to: `${path}/**/*.md` pub struct FilesWithExtensionIterator { - /// The set of allowed extensions that that match the requirement and that should be returned - /// when found. - allowed_extensions: HashSet>, + /// The set of allowed extensions that that match the requirement and that should be returned + /// when found. + allowed_extensions: HashSet>, - /// The set of directories to visit next. This iterator does BFS and so these directories will - /// only be visited if we can't find any files in our state. - directories_to_search: Vec, + /// The set of directories to visit next. This iterator does BFS and so these directories will + /// only be visited if we can't find any files in our state. + directories_to_search: Vec, - /// The set of files matching the allowed extensions that were found. If there are entries in - /// this vector then they will be returned when the [`Iterator::next`] method is called. 
If not - /// then we visit one of the next directories to visit. - files_matching_allowed_extensions: Vec, + /// The set of files matching the allowed extensions that were found. If there are entries in + /// this vector then they will be returned when the [`Iterator::next`] method is called. If not + /// then we visit one of the next directories to visit. + files_matching_allowed_extensions: Vec, - /// This option controls if the the cached file system should be used or not. This could be - /// better for certain cases where the entries in the directories do not change and therefore - /// caching can be used. - use_cached_fs: bool, + /// This option controls if the the cached file system should be used or not. This could be + /// better for certain cases where the entries in the directories do not change and therefore + /// caching can be used. + use_cached_fs: bool, } impl FilesWithExtensionIterator { - pub fn new(root_directory: impl AsRef) -> Self { - Self { - allowed_extensions: Default::default(), - directories_to_search: vec![root_directory.as_ref().to_path_buf()], - files_matching_allowed_extensions: Default::default(), - use_cached_fs: Default::default(), - } - } + pub fn new(root_directory: impl AsRef) -> Self { + Self { + allowed_extensions: Default::default(), + directories_to_search: vec![root_directory.as_ref().to_path_buf()], + files_matching_allowed_extensions: Default::default(), + use_cached_fs: Default::default(), + } + } - pub fn with_allowed_extension( - mut self, - allowed_extension: impl Into>, - ) -> Self { - self.allowed_extensions.insert(allowed_extension.into()); - self - } + pub fn with_allowed_extension( + mut self, + allowed_extension: impl Into>, + ) -> Self { + self.allowed_extensions.insert(allowed_extension.into()); + self + } - pub fn with_use_cached_fs(mut self, use_cached_fs: bool) -> Self { - self.use_cached_fs = use_cached_fs; - self - } + pub fn with_use_cached_fs(mut self, use_cached_fs: bool) -> Self { + self.use_cached_fs 
= use_cached_fs; + self + } } impl Iterator for FilesWithExtensionIterator { - type Item = PathBuf; + type Item = PathBuf; - fn next(&mut self) -> Option { - if let Some(file_path) = self.files_matching_allowed_extensions.pop() { - return Some(file_path); - }; + fn next(&mut self) -> Option { + if let Some(file_path) = self.files_matching_allowed_extensions.pop() { + return Some(file_path); + }; - let directory_to_search = self.directories_to_search.pop()?; + let directory_to_search = self.directories_to_search.pop()?; - let iterator = if self.use_cached_fs { - let Ok(dir_entries) = crate::cached_fs::read_dir(directory_to_search.as_path()) else { - return self.next(); - }; - Box::new(dir_entries) as Box>> - } else { - let Ok(dir_entries) = std::fs::read_dir(directory_to_search) else { - return self.next(); - }; - Box::new(dir_entries.map(|maybe_entry| maybe_entry.map(|entry| entry.path()))) as Box<_> - }; + let iterator = if self.use_cached_fs { + let Ok(dir_entries) = crate::cached_fs::read_dir(directory_to_search.as_path()) else { + return self.next(); + }; + Box::new(dir_entries) as Box>> + } else { + let Ok(dir_entries) = std::fs::read_dir(directory_to_search) else { + return self.next(); + }; + Box::new(dir_entries.map(|maybe_entry| maybe_entry.map(|entry| entry.path()))) as Box<_> + }; - for entry_path in iterator.flatten() { - if entry_path.is_dir() { - self.directories_to_search.push(entry_path) - } else if entry_path.is_file() - && entry_path.extension().is_some_and(|ext| { - self.allowed_extensions - .iter() - .any(|allowed| ext.eq_ignore_ascii_case(allowed.as_ref())) - }) - { - self.files_matching_allowed_extensions.push(entry_path) - } - } + for entry_path in iterator.flatten() { + if entry_path.is_dir() { + self.directories_to_search.push(entry_path) + } else if entry_path.is_file() && + entry_path.extension().is_some_and(|ext| { + self.allowed_extensions + .iter() + .any(|allowed| ext.eq_ignore_ascii_case(allowed.as_ref())) + }) { + 
self.files_matching_allowed_extensions.push(entry_path) + } + } - self.next() - } + self.next() + } } diff --git a/crates/common/src/macros/define_wrapper_type.rs b/crates/common/src/macros/define_wrapper_type.rs index a5f1098..3b2575c 100644 --- a/crates/common/src/macros/define_wrapper_type.rs +++ b/crates/common/src/macros/define_wrapper_type.rs @@ -1,23 +1,23 @@ #[macro_export] macro_rules! impl_for_wrapper { - (Display, $ident: ident) => { - #[automatically_derived] - impl std::fmt::Display for $ident { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - std::fmt::Display::fmt(&self.0, f) - } - } - }; - (FromStr, $ident: ident) => { - #[automatically_derived] - impl std::str::FromStr for $ident { - type Err = anyhow::Error; + (Display, $ident: ident) => { + #[automatically_derived] + impl std::fmt::Display for $ident { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + std::fmt::Display::fmt(&self.0, f) + } + } + }; + (FromStr, $ident: ident) => { + #[automatically_derived] + impl std::str::FromStr for $ident { + type Err = anyhow::Error; - fn from_str(s: &str) -> anyhow::Result { - s.parse().map(Self).map_err(Into::into) - } - } - }; + fn from_str(s: &str) -> anyhow::Result { + s.parse().map(Self).map_err(Into::into) + } + } + }; } /// Defines wrappers around types. @@ -135,6 +135,6 @@ macro_rules! define_wrapper_type { }; } -/// Technically not needed but this allows for the macro to be found in the `macros` module of the -/// crate in addition to being found in the root of the crate. +/// Technically not needed but this allows for the macro to be found in the `macros` module of +/// the crate in addition to being found in the root of the crate. 
pub use {define_wrapper_type, impl_for_wrapper}; diff --git a/crates/common/src/types/identifiers.rs b/crates/common/src/types/identifiers.rs index 2dd8563..dd4f8cb 100644 --- a/crates/common/src/types/identifiers.rs +++ b/crates/common/src/types/identifiers.rs @@ -7,128 +7,128 @@ use strum::{AsRefStr, Display, EnumString, IntoStaticStr}; /// could be thought of like the target triple from Rust and LLVM where it specifies the platform /// completely starting with the node, the vm, and finally the compiler used for this combination. #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Serialize, - Deserialize, - ValueEnum, - EnumString, - Display, - AsRefStr, - IntoStaticStr, - JsonSchema, + Clone, + Copy, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, + ValueEnum, + EnumString, + Display, + AsRefStr, + IntoStaticStr, + JsonSchema, )] #[serde(rename_all = "kebab-case")] #[strum(serialize_all = "kebab-case")] pub enum PlatformIdentifier { - /// The Go-ethereum reference full node EVM implementation with the solc compiler. - GethEvmSolc, - /// The Lighthouse Go-ethereum reference full node EVM implementation with the solc compiler. - LighthouseGethEvmSolc, - /// The kitchensink node with the PolkaVM backend with the resolc compiler. - KitchensinkPolkavmResolc, - /// The kitchensink node with the REVM backend with the solc compiler. - KitchensinkRevmSolc, - /// The revive dev node with the PolkaVM backend with the resolc compiler. - ReviveDevNodePolkavmResolc, - /// The revive dev node with the REVM backend with the solc compiler. - ReviveDevNodeRevmSolc, - /// A zombienet based Substrate/Polkadot node with the PolkaVM backend with the resolc compiler. - ZombienetPolkavmResolc, - /// A zombienet based Substrate/Polkadot node with the REVM backend with the solc compiler. - ZombienetRevmSolc, + /// The Go-ethereum reference full node EVM implementation with the solc compiler. 
+ GethEvmSolc, + /// The Lighthouse Go-ethereum reference full node EVM implementation with the solc compiler. + LighthouseGethEvmSolc, + /// The kitchensink node with the PolkaVM backend with the resolc compiler. + KitchensinkPolkavmResolc, + /// The kitchensink node with the REVM backend with the solc compiler. + KitchensinkRevmSolc, + /// The revive dev node with the PolkaVM backend with the resolc compiler. + ReviveDevNodePolkavmResolc, + /// The revive dev node with the REVM backend with the solc compiler. + ReviveDevNodeRevmSolc, + /// A zombienet based Substrate/Polkadot node with the PolkaVM backend with the resolc compiler. + ZombienetPolkavmResolc, + /// A zombienet based Substrate/Polkadot node with the REVM backend with the solc compiler. + ZombienetRevmSolc, } /// An enum of the platform identifiers of all of the platforms supported by this framework. #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Serialize, - Deserialize, - ValueEnum, - EnumString, - Display, - AsRefStr, - IntoStaticStr, - JsonSchema, + Clone, + Copy, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, + ValueEnum, + EnumString, + Display, + AsRefStr, + IntoStaticStr, + JsonSchema, )] pub enum CompilerIdentifier { - /// The solc compiler. - Solc, - /// The resolc compiler. - Resolc, + /// The solc compiler. + Solc, + /// The resolc compiler. + Resolc, } /// An enum representing the identifiers of the supported nodes. #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Serialize, - Deserialize, - ValueEnum, - EnumString, - Display, - AsRefStr, - IntoStaticStr, - JsonSchema, + Clone, + Copy, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, + ValueEnum, + EnumString, + Display, + AsRefStr, + IntoStaticStr, + JsonSchema, )] pub enum NodeIdentifier { - /// The go-ethereum node implementation. - Geth, - /// The go-ethereum node implementation. 
- LighthouseGeth, - /// The Kitchensink node implementation. - Kitchensink, - /// The revive dev node implementation. - ReviveDevNode, - /// A zombienet spawned nodes - Zombienet, + /// The go-ethereum node implementation. + Geth, + /// The go-ethereum node implementation. + LighthouseGeth, + /// The Kitchensink node implementation. + Kitchensink, + /// The revive dev node implementation. + ReviveDevNode, + /// A zombienet spawned nodes + Zombienet, } /// An enum representing the identifiers of the supported VMs. #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Serialize, - Deserialize, - ValueEnum, - EnumString, - Display, - AsRefStr, - IntoStaticStr, - JsonSchema, + Clone, + Copy, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + Deserialize, + ValueEnum, + EnumString, + Display, + AsRefStr, + IntoStaticStr, + JsonSchema, )] #[serde(rename_all = "lowercase")] #[strum(serialize_all = "lowercase")] pub enum VmIdentifier { - /// The ethereum virtual machine. - Evm, - /// The EraVM virtual machine. - EraVM, - /// Polkadot's PolaVM Risc-v based virtual machine. - PolkaVM, + /// The ethereum virtual machine. + Evm, + /// The EraVM virtual machine. + EraVM, + /// Polkadot's PolaVM Risc-v based virtual machine. + PolkaVM, } diff --git a/crates/common/src/types/mode.rs b/crates/common/src/types/mode.rs index 90f4153..0e11a3b 100644 --- a/crates/common/src/types/mode.rs +++ b/crates/common/src/types/mode.rs @@ -11,161 +11,159 @@ use std::{fmt::Display, str::FromStr, sync::LazyLock}; /// Use [`ParsedMode::to_test_modes()`] to do this. 
#[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] pub struct Mode { - pub pipeline: ModePipeline, - pub optimize_setting: ModeOptimizerSetting, - pub version: Option, + pub pipeline: ModePipeline, + pub optimize_setting: ModeOptimizerSetting, + pub version: Option, } impl Display for Mode { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - self.pipeline.fmt(f)?; - f.write_str(" ")?; - self.optimize_setting.fmt(f)?; + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.pipeline.fmt(f)?; + f.write_str(" ")?; + self.optimize_setting.fmt(f)?; - if let Some(version) = &self.version { - f.write_str(" ")?; - version.fmt(f)?; - } + if let Some(version) = &self.version { + f.write_str(" ")?; + version.fmt(f)?; + } - Ok(()) - } + Ok(()) + } } impl Mode { - /// Return all of the available mode combinations. - pub fn all() -> impl Iterator { - static ALL_MODES: LazyLock> = LazyLock::new(|| { - ModePipeline::test_cases() - .flat_map(|pipeline| { - ModeOptimizerSetting::test_cases().map(move |optimize_setting| Mode { - pipeline, - optimize_setting, - version: None, - }) - }) - .collect::>() - }); - ALL_MODES.iter() - } + /// Return all of the available mode combinations. + pub fn all() -> impl Iterator { + static ALL_MODES: LazyLock> = LazyLock::new(|| { + ModePipeline::test_cases() + .flat_map(|pipeline| { + ModeOptimizerSetting::test_cases().map(move |optimize_setting| Mode { + pipeline, + optimize_setting, + version: None, + }) + }) + .collect::>() + }); + ALL_MODES.iter() + } - /// Resolves the [`Mode`]'s solidity version requirement into a [`VersionOrRequirement`] if - /// the requirement is present on the object. Otherwise, the passed default version is used. 
- pub fn compiler_version_to_use(&self, default: Version) -> VersionOrRequirement { - match self.version { - Some(ref requirement) => requirement.clone().into(), - None => default.into(), - } - } + /// Resolves the [`Mode`]'s solidity version requirement into a [`VersionOrRequirement`] if + /// the requirement is present on the object. Otherwise, the passed default version is used. + pub fn compiler_version_to_use(&self, default: Version) -> VersionOrRequirement { + match self.version { + Some(ref requirement) => requirement.clone().into(), + None => default.into(), + } + } } /// What do we want the compiler to do? #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum ModePipeline { - /// Compile Solidity code via Yul IR - ViaYulIR, - /// Compile Solidity direct to assembly - ViaEVMAssembly, + /// Compile Solidity code via Yul IR + ViaYulIR, + /// Compile Solidity direct to assembly + ViaEVMAssembly, } impl FromStr for ModePipeline { - type Err = anyhow::Error; - fn from_str(s: &str) -> Result { - match s { - // via Yul IR - "Y" => Ok(ModePipeline::ViaYulIR), - // Don't go via Yul IR - "E" => Ok(ModePipeline::ViaEVMAssembly), - // Anything else that we see isn't a mode at all - _ => Err(anyhow::anyhow!( - "Unsupported pipeline '{s}': expected 'Y' or 'E'" - )), - } - } + type Err = anyhow::Error; + fn from_str(s: &str) -> Result { + match s { + // via Yul IR + "Y" => Ok(ModePipeline::ViaYulIR), + // Don't go via Yul IR + "E" => Ok(ModePipeline::ViaEVMAssembly), + // Anything else that we see isn't a mode at all + _ => Err(anyhow::anyhow!("Unsupported pipeline '{s}': expected 'Y' or 'E'")), + } + } } impl Display for ModePipeline { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - ModePipeline::ViaYulIR => f.write_str("Y"), - ModePipeline::ViaEVMAssembly => f.write_str("E"), - } - } + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + 
ModePipeline::ViaYulIR => f.write_str("Y"), + ModePipeline::ViaEVMAssembly => f.write_str("E"), + } + } } impl ModePipeline { - /// Should we go via Yul IR? - pub fn via_yul_ir(&self) -> bool { - matches!(self, ModePipeline::ViaYulIR) - } + /// Should we go via Yul IR? + pub fn via_yul_ir(&self) -> bool { + matches!(self, ModePipeline::ViaYulIR) + } - /// An iterator over the available pipelines that we'd like to test, - /// when an explicit pipeline was not specified. - pub fn test_cases() -> impl Iterator + Clone { - [ModePipeline::ViaYulIR, ModePipeline::ViaEVMAssembly].into_iter() - } + /// An iterator over the available pipelines that we'd like to test, + /// when an explicit pipeline was not specified. + pub fn test_cases() -> impl Iterator + Clone { + [ModePipeline::ViaYulIR, ModePipeline::ViaEVMAssembly].into_iter() + } } #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum ModeOptimizerSetting { - /// 0 / -: Don't apply any optimizations - M0, - /// 1: Apply less than default optimizations - M1, - /// 2: Apply the default optimizations - M2, - /// 3 / +: Apply aggressive optimizations - M3, - /// s: Optimize for size - Ms, - /// z: Aggressively optimize for size - Mz, + /// 0 / -: Don't apply any optimizations + M0, + /// 1: Apply less than default optimizations + M1, + /// 2: Apply the default optimizations + M2, + /// 3 / +: Apply aggressive optimizations + M3, + /// s: Optimize for size + Ms, + /// z: Aggressively optimize for size + Mz, } impl FromStr for ModeOptimizerSetting { - type Err = anyhow::Error; - fn from_str(s: &str) -> Result { - match s { - "M0" => Ok(ModeOptimizerSetting::M0), - "M1" => Ok(ModeOptimizerSetting::M1), - "M2" => Ok(ModeOptimizerSetting::M2), - "M3" => Ok(ModeOptimizerSetting::M3), - "Ms" => Ok(ModeOptimizerSetting::Ms), - "Mz" => Ok(ModeOptimizerSetting::Mz), - _ => Err(anyhow::anyhow!( - "Unsupported optimizer setting '{s}': expected 'M0', 'M1', 'M2', 'M3', 'Ms' or 'Mz'" - )), 
- } - } + type Err = anyhow::Error; + fn from_str(s: &str) -> Result { + match s { + "M0" => Ok(ModeOptimizerSetting::M0), + "M1" => Ok(ModeOptimizerSetting::M1), + "M2" => Ok(ModeOptimizerSetting::M2), + "M3" => Ok(ModeOptimizerSetting::M3), + "Ms" => Ok(ModeOptimizerSetting::Ms), + "Mz" => Ok(ModeOptimizerSetting::Mz), + _ => Err(anyhow::anyhow!( + "Unsupported optimizer setting '{s}': expected 'M0', 'M1', 'M2', 'M3', 'Ms' or 'Mz'" + )), + } + } } impl Display for ModeOptimizerSetting { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - ModeOptimizerSetting::M0 => f.write_str("M0"), - ModeOptimizerSetting::M1 => f.write_str("M1"), - ModeOptimizerSetting::M2 => f.write_str("M2"), - ModeOptimizerSetting::M3 => f.write_str("M3"), - ModeOptimizerSetting::Ms => f.write_str("Ms"), - ModeOptimizerSetting::Mz => f.write_str("Mz"), - } - } + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + ModeOptimizerSetting::M0 => f.write_str("M0"), + ModeOptimizerSetting::M1 => f.write_str("M1"), + ModeOptimizerSetting::M2 => f.write_str("M2"), + ModeOptimizerSetting::M3 => f.write_str("M3"), + ModeOptimizerSetting::Ms => f.write_str("Ms"), + ModeOptimizerSetting::Mz => f.write_str("Mz"), + } + } } impl ModeOptimizerSetting { - /// An iterator over the available optimizer settings that we'd like to test, - /// when an explicit optimizer setting was not specified. - pub fn test_cases() -> impl Iterator + Clone { - [ - // No optimizations: - ModeOptimizerSetting::M0, - // Aggressive optimizations: - ModeOptimizerSetting::M3, - ] - .into_iter() - } + /// An iterator over the available optimizer settings that we'd like to test, + /// when an explicit optimizer setting was not specified. + pub fn test_cases() -> impl Iterator + Clone { + [ + // No optimizations: + ModeOptimizerSetting::M0, + // Aggressive optimizations: + ModeOptimizerSetting::M3, + ] + .into_iter() + } - /// Are any optimizations enabled? 
- pub fn optimizations_enabled(&self) -> bool { - !matches!(self, ModeOptimizerSetting::M0) - } + /// Are any optimizations enabled? + pub fn optimizations_enabled(&self) -> bool { + !matches!(self, ModeOptimizerSetting::M0) + } } diff --git a/crates/common/src/types/private_key_allocator.rs b/crates/common/src/types/private_key_allocator.rs index c2495ab..67242a5 100644 --- a/crates/common/src/types/private_key_allocator.rs +++ b/crates/common/src/types/private_key_allocator.rs @@ -5,31 +5,28 @@ use anyhow::{Context, Result, bail}; /// sequentially and in order until the maximum private key specified is reached. #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct PrivateKeyAllocator { - /// The next private key to be returned by the allocator when requested. - next_private_key: U256, + /// The next private key to be returned by the allocator when requested. + next_private_key: U256, - /// The highest private key (exclusive) that can be returned by this allocator. - highest_private_key_inclusive: U256, + /// The highest private key (exclusive) that can be returned by this allocator. + highest_private_key_inclusive: U256, } impl PrivateKeyAllocator { - /// Creates a new instance of the private key allocator. - pub fn new(highest_private_key_inclusive: U256) -> Self { - Self { - next_private_key: U256::ONE, - highest_private_key_inclusive, - } - } + /// Creates a new instance of the private key allocator. + pub fn new(highest_private_key_inclusive: U256) -> Self { + Self { next_private_key: U256::ONE, highest_private_key_inclusive } + } - /// Allocates a new private key and errors out if the maximum private key has been reached. 
- pub fn allocate(&mut self) -> Result { - if self.next_private_key > self.highest_private_key_inclusive { - bail!("Attempted to allocate a private key but failed since all have been allocated"); - }; - let private_key = - PrivateKeySigner::from_slice(self.next_private_key.to_be_bytes::<32>().as_slice()) - .context("Failed to convert the private key digits into a private key")?; - self.next_private_key += U256::ONE; - Ok(private_key) - } + /// Allocates a new private key and errors out if the maximum private key has been reached. + pub fn allocate(&mut self) -> Result { + if self.next_private_key > self.highest_private_key_inclusive { + bail!("Attempted to allocate a private key but failed since all have been allocated"); + }; + let private_key = + PrivateKeySigner::from_slice(self.next_private_key.to_be_bytes::<32>().as_slice()) + .context("Failed to convert the private key digits into a private key")?; + self.next_private_key += U256::ONE; + Ok(private_key) + } } diff --git a/crates/common/src/types/round_robin_pool.rs b/crates/common/src/types/round_robin_pool.rs index 81882c1..1ea24d4 100644 --- a/crates/common/src/types/round_robin_pool.rs +++ b/crates/common/src/types/round_robin_pool.rs @@ -1,24 +1,21 @@ use std::sync::atomic::{AtomicUsize, Ordering}; pub struct RoundRobinPool { - next_index: AtomicUsize, - items: Vec, + next_index: AtomicUsize, + items: Vec, } impl RoundRobinPool { - pub fn new(items: Vec) -> Self { - Self { - next_index: Default::default(), - items, - } - } + pub fn new(items: Vec) -> Self { + Self { next_index: Default::default(), items } + } - pub fn round_robin(&self) -> &T { - let current = self.next_index.fetch_add(1, Ordering::SeqCst) % self.items.len(); - self.items.get(current).unwrap() - } + pub fn round_robin(&self) -> &T { + let current = self.next_index.fetch_add(1, Ordering::SeqCst) % self.items.len(); + self.items.get(current).unwrap() + } - pub fn iter(&self) -> impl Iterator { - self.items.iter() - } + pub fn iter(&self) -> 
impl Iterator { + self.items.iter() + } } diff --git a/crates/common/src/types/version_or_requirement.rs b/crates/common/src/types/version_or_requirement.rs index 787a37d..27277d2 100644 --- a/crates/common/src/types/version_or_requirement.rs +++ b/crates/common/src/types/version_or_requirement.rs @@ -2,40 +2,40 @@ use semver::{Version, VersionReq}; #[derive(Clone, Debug)] pub enum VersionOrRequirement { - Version(Version), - Requirement(VersionReq), + Version(Version), + Requirement(VersionReq), } impl From for VersionOrRequirement { - fn from(value: Version) -> Self { - Self::Version(value) - } + fn from(value: Version) -> Self { + Self::Version(value) + } } impl From for VersionOrRequirement { - fn from(value: VersionReq) -> Self { - Self::Requirement(value) - } + fn from(value: VersionReq) -> Self { + Self::Requirement(value) + } } impl TryFrom for Version { - type Error = anyhow::Error; + type Error = anyhow::Error; - fn try_from(value: VersionOrRequirement) -> Result { - let VersionOrRequirement::Version(version) = value else { - anyhow::bail!("Version or requirement was not a version"); - }; - Ok(version) - } + fn try_from(value: VersionOrRequirement) -> Result { + let VersionOrRequirement::Version(version) = value else { + anyhow::bail!("Version or requirement was not a version"); + }; + Ok(version) + } } impl TryFrom for VersionReq { - type Error = anyhow::Error; + type Error = anyhow::Error; - fn try_from(value: VersionOrRequirement) -> Result { - let VersionOrRequirement::Requirement(requirement) = value else { - anyhow::bail!("Version or requirement was not a requirement"); - }; - Ok(requirement) - } + fn try_from(value: VersionOrRequirement) -> Result { + let VersionOrRequirement::Requirement(requirement) = value else { + anyhow::bail!("Version or requirement was not a requirement"); + }; + Ok(requirement) + } } diff --git a/crates/compiler/src/lib.rs b/crates/compiler/src/lib.rs index e696182..445cf20 100644 --- a/crates/compiler/src/lib.rs +++ 
b/crates/compiler/src/lib.rs @@ -4,10 +4,10 @@ //! - Polkadot revive Wasm compiler use std::{ - collections::HashMap, - hash::Hash, - path::{Path, PathBuf}, - pin::Pin, + collections::HashMap, + hash::Hash, + path::{Path, PathBuf}, + pin::Pin, }; use alloy::{json_abi::JsonAbi, primitives::Address}; @@ -27,149 +27,149 @@ pub mod solc; /// A common interface for all supported Solidity compilers. pub trait SolidityCompiler { - /// Returns the version of the compiler. - fn version(&self) -> &Version; + /// Returns the version of the compiler. + fn version(&self) -> &Version; - /// Returns the path of the compiler executable. - fn path(&self) -> &Path; + /// Returns the path of the compiler executable. + fn path(&self) -> &Path; - /// The low-level compiler interface. - fn build( - &self, - input: CompilerInput, - ) -> Pin> + '_>>; + /// The low-level compiler interface. + fn build( + &self, + input: CompilerInput, + ) -> Pin> + '_>>; - /// Does the compiler support the provided mode and version settings. - fn supports_mode( - &self, - optimizer_setting: ModeOptimizerSetting, - pipeline: ModePipeline, - ) -> bool; + /// Does the compiler support the provided mode and version settings. + fn supports_mode( + &self, + optimizer_setting: ModeOptimizerSetting, + pipeline: ModePipeline, + ) -> bool; } /// The generic compilation input configuration. #[derive(Clone, Debug, Default, Serialize, Deserialize)] pub struct CompilerInput { - pub pipeline: Option, - pub optimization: Option, - pub evm_version: Option, - pub allow_paths: Vec, - pub base_path: Option, - pub sources: HashMap, - pub libraries: HashMap>, - pub revert_string_handling: Option, + pub pipeline: Option, + pub optimization: Option, + pub evm_version: Option, + pub allow_paths: Vec, + pub base_path: Option, + pub sources: HashMap, + pub libraries: HashMap>, + pub revert_string_handling: Option, } /// The generic compilation output configuration. 
#[derive(Debug, Clone, Default, Serialize, Deserialize)] pub struct CompilerOutput { - /// The compiled contracts. The bytecode of the contract is kept as a string in case linking is - /// required and the compiled source has placeholders. - pub contracts: HashMap>, + /// The compiled contracts. The bytecode of the contract is kept as a string in case linking is + /// required and the compiled source has placeholders. + pub contracts: HashMap>, } /// A generic builder style interface for configuring the supported compiler options. #[derive(Default)] pub struct Compiler { - input: CompilerInput, + input: CompilerInput, } impl Compiler { - pub fn new() -> Self { - Self { - input: CompilerInput { - pipeline: Default::default(), - optimization: Default::default(), - evm_version: Default::default(), - allow_paths: Default::default(), - base_path: Default::default(), - sources: Default::default(), - libraries: Default::default(), - revert_string_handling: Default::default(), - }, - } - } + pub fn new() -> Self { + Self { + input: CompilerInput { + pipeline: Default::default(), + optimization: Default::default(), + evm_version: Default::default(), + allow_paths: Default::default(), + base_path: Default::default(), + sources: Default::default(), + libraries: Default::default(), + revert_string_handling: Default::default(), + }, + } + } - pub fn with_optimization(mut self, value: impl Into>) -> Self { - self.input.optimization = value.into(); - self - } + pub fn with_optimization(mut self, value: impl Into>) -> Self { + self.input.optimization = value.into(); + self + } - pub fn with_pipeline(mut self, value: impl Into>) -> Self { - self.input.pipeline = value.into(); - self - } + pub fn with_pipeline(mut self, value: impl Into>) -> Self { + self.input.pipeline = value.into(); + self + } - pub fn with_evm_version(mut self, version: impl Into>) -> Self { - self.input.evm_version = version.into(); - self - } + pub fn with_evm_version(mut self, version: impl Into>) -> Self { + 
self.input.evm_version = version.into(); + self + } - pub fn with_allow_path(mut self, path: impl AsRef) -> Self { - self.input.allow_paths.push(path.as_ref().into()); - self - } + pub fn with_allow_path(mut self, path: impl AsRef) -> Self { + self.input.allow_paths.push(path.as_ref().into()); + self + } - pub fn with_base_path(mut self, path: impl Into>) -> Self { - self.input.base_path = path.into(); - self - } + pub fn with_base_path(mut self, path: impl Into>) -> Self { + self.input.base_path = path.into(); + self + } - pub fn with_source(mut self, path: impl AsRef) -> Result { - self.input.sources.insert( - path.as_ref().to_path_buf(), - read_to_string(path.as_ref()).context("Failed to read the contract source")?, - ); - Ok(self) - } + pub fn with_source(mut self, path: impl AsRef) -> Result { + self.input.sources.insert( + path.as_ref().to_path_buf(), + read_to_string(path.as_ref()).context("Failed to read the contract source")?, + ); + Ok(self) + } - pub fn with_library( - mut self, - path: impl AsRef, - name: impl AsRef, - address: Address, - ) -> Self { - self.input - .libraries - .entry(path.as_ref().to_path_buf()) - .or_default() - .insert(name.as_ref().into(), address); - self - } + pub fn with_library( + mut self, + path: impl AsRef, + name: impl AsRef, + address: Address, + ) -> Self { + self.input + .libraries + .entry(path.as_ref().to_path_buf()) + .or_default() + .insert(name.as_ref().into(), address); + self + } - pub fn with_revert_string_handling( - mut self, - revert_string_handling: impl Into>, - ) -> Self { - self.input.revert_string_handling = revert_string_handling.into(); - self - } + pub fn with_revert_string_handling( + mut self, + revert_string_handling: impl Into>, + ) -> Self { + self.input.revert_string_handling = revert_string_handling.into(); + self + } - pub fn then(self, callback: impl FnOnce(Self) -> Self) -> Self { - callback(self) - } + pub fn then(self, callback: impl FnOnce(Self) -> Self) -> Self { + callback(self) + } - pub 
fn try_then(self, callback: impl FnOnce(Self) -> Result) -> Result { - callback(self) - } + pub fn try_then(self, callback: impl FnOnce(Self) -> Result) -> Result { + callback(self) + } - pub async fn try_build(self, compiler: &dyn SolidityCompiler) -> Result { - compiler.build(self.input).await - } + pub async fn try_build(self, compiler: &dyn SolidityCompiler) -> Result { + compiler.build(self.input).await + } - pub fn input(&self) -> &CompilerInput { - &self.input - } + pub fn input(&self) -> &CompilerInput { + &self.input + } } /// Defines how the compiler should handle revert strings. #[derive( - Clone, Debug, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Serialize, Deserialize, + Clone, Debug, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Default, Serialize, Deserialize, )] pub enum RevertString { - #[default] - Default, - Debug, - Strip, - VerboseDebug, + #[default] + Default, + Debug, + Strip, + VerboseDebug, } diff --git a/crates/compiler/src/revive_resolc.rs b/crates/compiler/src/revive_resolc.rs index f0025ea..30a0b8d 100644 --- a/crates/compiler/src/revive_resolc.rs +++ b/crates/compiler/src/revive_resolc.rs @@ -2,24 +2,24 @@ //! compiling contracts to PolkaVM (PVM) bytecode. 
use std::{ - path::PathBuf, - pin::Pin, - process::Stdio, - sync::{Arc, LazyLock}, + path::PathBuf, + pin::Pin, + process::Stdio, + sync::{Arc, LazyLock}, }; use dashmap::DashMap; use revive_dt_common::types::VersionOrRequirement; use revive_dt_config::{ResolcConfiguration, SolcConfiguration, WorkingDirectoryConfiguration}; use revive_solc_json_interface::{ - SolcStandardJsonInput, SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings, - SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsSelection, - SolcStandardJsonOutput, + SolcStandardJsonInput, SolcStandardJsonInputLanguage, SolcStandardJsonInputSettings, + SolcStandardJsonInputSettingsOptimizer, SolcStandardJsonInputSettingsSelection, + SolcStandardJsonOutput, }; use tracing::{Span, field::display}; use crate::{ - CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler, solc::Solc, + CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, SolidityCompiler, solc::Solc, }; use alloy::json_abi::JsonAbi; @@ -33,55 +33,52 @@ pub struct Resolc(Arc); #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] struct ResolcInner { - /// The internal solc compiler that the resolc compiler uses as a compiler frontend. - solc: Solc, - /// Path to the `resolc` executable - resolc_path: PathBuf, + /// The internal solc compiler that the resolc compiler uses as a compiler frontend. + solc: Solc, + /// Path to the `resolc` executable + resolc_path: PathBuf, } impl Resolc { - pub async fn new( - context: impl AsRef - + AsRef - + AsRef, - version: impl Into>, - ) -> Result { - /// This is a cache of all of the resolc compiler objects. Since we do not currently support - /// multiple resolc compiler versions, so our cache is just keyed by the solc compiler and - /// its version to the resolc compiler. 
- static COMPILERS_CACHE: LazyLock> = LazyLock::new(Default::default); + pub async fn new( + context: impl AsRef + + AsRef + + AsRef, + version: impl Into>, + ) -> Result { + /// This is a cache of all of the resolc compiler objects. Since we do not currently support + /// multiple resolc compiler versions, so our cache is just keyed by the solc compiler and + /// its version to the resolc compiler. + static COMPILERS_CACHE: LazyLock> = LazyLock::new(Default::default); - let resolc_configuration = AsRef::::as_ref(&context); + let resolc_configuration = AsRef::::as_ref(&context); - let solc = Solc::new(&context, version) - .await - .context("Failed to create the solc compiler frontend for resolc")?; + let solc = Solc::new(&context, version) + .await + .context("Failed to create the solc compiler frontend for resolc")?; - Ok(COMPILERS_CACHE - .entry(solc.clone()) - .or_insert_with(|| { - Self(Arc::new(ResolcInner { - solc, - resolc_path: resolc_configuration.path.clone(), - })) - }) - .clone()) - } + Ok(COMPILERS_CACHE + .entry(solc.clone()) + .or_insert_with(|| { + Self(Arc::new(ResolcInner { solc, resolc_path: resolc_configuration.path.clone() })) + }) + .clone()) + } } impl SolidityCompiler for Resolc { - fn version(&self) -> &Version { - // We currently return the solc compiler version since we do not support multiple resolc - // compiler versions. - SolidityCompiler::version(&self.0.solc) - } + fn version(&self) -> &Version { + // We currently return the solc compiler version since we do not support multiple resolc + // compiler versions. 
+ SolidityCompiler::version(&self.0.solc) + } - fn path(&self) -> &std::path::Path { - &self.0.resolc_path - } + fn path(&self) -> &std::path::Path { + &self.0.resolc_path + } - #[tracing::instrument(level = "debug", ret)] - #[tracing::instrument( + #[tracing::instrument(level = "debug", ret)] + #[tracing::instrument( level = "error", skip_all, fields( @@ -91,221 +88,216 @@ impl SolidityCompiler for Resolc { ), err(Debug) )] - fn build( - &self, - CompilerInput { - pipeline, - optimization, - evm_version, - allow_paths, - base_path, - sources, - libraries, - // TODO: this is currently not being handled since there is no way to pass it into - // resolc. So, we need to go back to this later once it's supported. - revert_string_handling: _, - }: CompilerInput, - ) -> Pin> + '_>> { - Box::pin(async move { - if !matches!(pipeline, None | Some(ModePipeline::ViaYulIR)) { - anyhow::bail!( - "Resolc only supports the Y (via Yul IR) pipeline, but the provided pipeline is {pipeline:?}" - ); - } + fn build( + &self, + CompilerInput { + pipeline, + optimization, + evm_version, + allow_paths, + base_path, + sources, + libraries, + // TODO: this is currently not being handled since there is no way to pass it into + // resolc. So, we need to go back to this later once it's supported. 
+ revert_string_handling: _, + }: CompilerInput, + ) -> Pin> + '_>> { + Box::pin(async move { + if !matches!(pipeline, None | Some(ModePipeline::ViaYulIR)) { + anyhow::bail!( + "Resolc only supports the Y (via Yul IR) pipeline, but the provided pipeline is {pipeline:?}" + ); + } - let input = SolcStandardJsonInput { - language: SolcStandardJsonInputLanguage::Solidity, - sources: sources - .into_iter() - .map(|(path, source)| (path.display().to_string(), source.into())) - .collect(), - settings: SolcStandardJsonInputSettings { - evm_version, - libraries: Some( - libraries - .into_iter() - .map(|(source_code, libraries_map)| { - ( - source_code.display().to_string(), - libraries_map - .into_iter() - .map(|(library_ident, library_address)| { - (library_ident, library_address.to_string()) - }) - .collect(), - ) - }) - .collect(), - ), - remappings: None, - output_selection: Some(SolcStandardJsonInputSettingsSelection::new_required()), - via_ir: Some(true), - optimizer: SolcStandardJsonInputSettingsOptimizer::new( - optimization - .unwrap_or(ModeOptimizerSetting::M0) - .optimizations_enabled(), - None, - &Version::new(0, 0, 0), - false, - ), - metadata: None, - polkavm: None, - }, - }; - Span::current().record("json_in", display(serde_json::to_string(&input).unwrap())); + let input = SolcStandardJsonInput { + language: SolcStandardJsonInputLanguage::Solidity, + sources: sources + .into_iter() + .map(|(path, source)| (path.display().to_string(), source.into())) + .collect(), + settings: SolcStandardJsonInputSettings { + evm_version, + libraries: Some( + libraries + .into_iter() + .map(|(source_code, libraries_map)| { + ( + source_code.display().to_string(), + libraries_map + .into_iter() + .map(|(library_ident, library_address)| { + (library_ident, library_address.to_string()) + }) + .collect(), + ) + }) + .collect(), + ), + remappings: None, + output_selection: Some(SolcStandardJsonInputSettingsSelection::new_required()), + via_ir: Some(true), + optimizer: 
SolcStandardJsonInputSettingsOptimizer::new( + optimization.unwrap_or(ModeOptimizerSetting::M0).optimizations_enabled(), + None, + &Version::new(0, 0, 0), + false, + ), + metadata: None, + polkavm: None, + }, + }; + Span::current().record("json_in", display(serde_json::to_string(&input).unwrap())); - let path = &self.0.resolc_path; - let mut command = AsyncCommand::new(path); - command - .stdin(Stdio::piped()) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .arg("--solc") - .arg(self.0.solc.path()) - .arg("--standard-json"); + let path = &self.0.resolc_path; + let mut command = AsyncCommand::new(path); + command + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .arg("--solc") + .arg(self.0.solc.path()) + .arg("--standard-json"); - if let Some(ref base_path) = base_path { - command.arg("--base-path").arg(base_path); - } - if !allow_paths.is_empty() { - command.arg("--allow-paths").arg( - allow_paths - .iter() - .map(|path| path.display().to_string()) - .collect::>() - .join(","), - ); - } - let mut child = command - .spawn() - .with_context(|| format!("Failed to spawn resolc at {}", path.display()))?; + if let Some(ref base_path) = base_path { + command.arg("--base-path").arg(base_path); + } + if !allow_paths.is_empty() { + command.arg("--allow-paths").arg( + allow_paths + .iter() + .map(|path| path.display().to_string()) + .collect::>() + .join(","), + ); + } + let mut child = command + .spawn() + .with_context(|| format!("Failed to spawn resolc at {}", path.display()))?; - let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped"); - let serialized_input = serde_json::to_vec(&input) - .context("Failed to serialize Standard JSON input for resolc")?; - stdin_pipe - .write_all(&serialized_input) - .await - .context("Failed to write Standard JSON to resolc stdin")?; + let stdin_pipe = child.stdin.as_mut().expect("stdin must be piped"); + let serialized_input = serde_json::to_vec(&input) + .context("Failed to serialize 
Standard JSON input for resolc")?; + stdin_pipe + .write_all(&serialized_input) + .await + .context("Failed to write Standard JSON to resolc stdin")?; - let output = child - .wait_with_output() - .await - .context("Failed while waiting for resolc process to finish")?; - let stdout = output.stdout; - let stderr = output.stderr; + let output = child + .wait_with_output() + .await + .context("Failed while waiting for resolc process to finish")?; + let stdout = output.stdout; + let stderr = output.stderr; - if !output.status.success() { - let json_in = serde_json::to_string_pretty(&input) - .context("Failed to pretty-print Standard JSON input for logging")?; - let message = String::from_utf8_lossy(&stderr); - tracing::error!( - status = %output.status, - message = %message, - json_input = json_in, - "Compilation using resolc failed" - ); - anyhow::bail!("Compilation failed with an error: {message}"); - } + if !output.status.success() { + let json_in = serde_json::to_string_pretty(&input) + .context("Failed to pretty-print Standard JSON input for logging")?; + let message = String::from_utf8_lossy(&stderr); + tracing::error!( + status = %output.status, + message = %message, + json_input = json_in, + "Compilation using resolc failed" + ); + anyhow::bail!("Compilation failed with an error: {message}"); + } - let parsed = serde_json::from_slice::(&stdout) - .map_err(|e| { - anyhow::anyhow!( - "failed to parse resolc JSON output: {e}\nstderr: {}", - String::from_utf8_lossy(&stderr) - ) - }) - .context("Failed to parse resolc standard JSON output")?; + let parsed = serde_json::from_slice::(&stdout) + .map_err(|e| { + anyhow::anyhow!( + "failed to parse resolc JSON output: {e}\nstderr: {}", + String::from_utf8_lossy(&stderr) + ) + }) + .context("Failed to parse resolc standard JSON output")?; - tracing::debug!( - output = %serde_json::to_string(&parsed).unwrap(), - "Compiled successfully" - ); + tracing::debug!( + output = %serde_json::to_string(&parsed).unwrap(), + "Compiled 
successfully" + ); - // Detecting if the compiler output contained errors and reporting them through logs and - // errors instead of returning the compiler output that might contain errors. - for error in parsed.errors.iter().flatten() { - if error.severity == "error" { - tracing::error!( - ?error, - ?input, - output = %serde_json::to_string(&parsed).unwrap(), - "Encountered an error in the compilation" - ); - anyhow::bail!("Encountered an error in the compilation: {error}") - } - } + // Detecting if the compiler output contained errors and reporting them through logs and + // errors instead of returning the compiler output that might contain errors. + for error in parsed.errors.iter().flatten() { + if error.severity == "error" { + tracing::error!( + ?error, + ?input, + output = %serde_json::to_string(&parsed).unwrap(), + "Encountered an error in the compilation" + ); + anyhow::bail!("Encountered an error in the compilation: {error}") + } + } - let Some(contracts) = parsed.contracts else { - anyhow::bail!("Unexpected error - resolc output doesn't have a contracts section"); - }; + let Some(contracts) = parsed.contracts else { + anyhow::bail!("Unexpected error - resolc output doesn't have a contracts section"); + }; - let mut compiler_output = CompilerOutput::default(); - for (source_path, contracts) in contracts.into_iter() { - let src_for_msg = source_path.clone(); - let source_path = PathBuf::from(source_path) - .canonicalize() - .with_context(|| format!("Failed to canonicalize path {src_for_msg}"))?; + let mut compiler_output = CompilerOutput::default(); + for (source_path, contracts) in contracts.into_iter() { + let src_for_msg = source_path.clone(); + let source_path = PathBuf::from(source_path) + .canonicalize() + .with_context(|| format!("Failed to canonicalize path {src_for_msg}"))?; - let map = compiler_output.contracts.entry(source_path).or_default(); - for (contract_name, contract_information) in contracts.into_iter() { - let bytecode = 
contract_information - .evm - .and_then(|evm| evm.bytecode.clone()) - .context("Unexpected - Contract compiled with resolc has no bytecode")?; - let abi = { - let metadata = contract_information - .metadata - .as_ref() - .context("No metadata found for the contract")?; - let solc_metadata_str = match metadata { - serde_json::Value::String(solc_metadata_str) => { - solc_metadata_str.as_str() - } - serde_json::Value::Object(metadata_object) => { - let solc_metadata_value = metadata_object - .get("solc_metadata") - .context("Contract doesn't have a 'solc_metadata' field")?; - solc_metadata_value - .as_str() - .context("The 'solc_metadata' field is not a string")? - } - serde_json::Value::Null - | serde_json::Value::Bool(_) - | serde_json::Value::Number(_) - | serde_json::Value::Array(_) => { - anyhow::bail!("Unsupported type of metadata {metadata:?}") - } - }; - let solc_metadata = serde_json::from_str::( - solc_metadata_str, - ) - .context( - "Failed to deserialize the solc_metadata as a serde_json generic value", - )?; - let output_value = solc_metadata - .get("output") - .context("solc_metadata doesn't have an output field")?; - let abi_value = output_value - .get("abi") - .context("solc_metadata output doesn't contain an abi field")?; - serde_json::from_value::(abi_value.clone()) - .context("ABI found in solc_metadata output is not valid ABI")? 
- }; - map.insert(contract_name, (bytecode.object, abi)); - } - } + let map = compiler_output.contracts.entry(source_path).or_default(); + for (contract_name, contract_information) in contracts.into_iter() { + let bytecode = contract_information + .evm + .and_then(|evm| evm.bytecode.clone()) + .context("Unexpected - Contract compiled with resolc has no bytecode")?; + let abi = { + let metadata = contract_information + .metadata + .as_ref() + .context("No metadata found for the contract")?; + let solc_metadata_str = match metadata { + serde_json::Value::String(solc_metadata_str) => + solc_metadata_str.as_str(), + serde_json::Value::Object(metadata_object) => { + let solc_metadata_value = metadata_object + .get("solc_metadata") + .context("Contract doesn't have a 'solc_metadata' field")?; + solc_metadata_value + .as_str() + .context("The 'solc_metadata' field is not a string")? + }, + serde_json::Value::Null | + serde_json::Value::Bool(_) | + serde_json::Value::Number(_) | + serde_json::Value::Array(_) => { + anyhow::bail!("Unsupported type of metadata {metadata:?}") + }, + }; + let solc_metadata = + serde_json::from_str::(solc_metadata_str).context( + "Failed to deserialize the solc_metadata as a serde_json generic value", + )?; + let output_value = solc_metadata + .get("output") + .context("solc_metadata doesn't have an output field")?; + let abi_value = output_value + .get("abi") + .context("solc_metadata output doesn't contain an abi field")?; + serde_json::from_value::(abi_value.clone()) + .context("ABI found in solc_metadata output is not valid ABI")? 
+ }; + map.insert(contract_name, (bytecode.object, abi)); + } + } - Ok(compiler_output) - }) - } + Ok(compiler_output) + }) + } - fn supports_mode( - &self, - optimize_setting: ModeOptimizerSetting, - pipeline: ModePipeline, - ) -> bool { - pipeline == ModePipeline::ViaYulIR - && SolidityCompiler::supports_mode(&self.0.solc, optimize_setting, pipeline) - } + fn supports_mode( + &self, + optimize_setting: ModeOptimizerSetting, + pipeline: ModePipeline, + ) -> bool { + pipeline == ModePipeline::ViaYulIR && + SolidityCompiler::supports_mode(&self.0.solc, optimize_setting, pipeline) + } } diff --git a/crates/compiler/src/solc.rs b/crates/compiler/src/solc.rs index 624b0da..23a9fcb 100644 --- a/crates/compiler/src/solc.rs +++ b/crates/compiler/src/solc.rs @@ -2,10 +2,10 @@ //! compiling contracts to EVM bytecode. use std::{ - path::PathBuf, - pin::Pin, - process::Stdio, - sync::{Arc, LazyLock}, + path::PathBuf, + pin::Pin, + process::Stdio, + sync::{Arc, LazyLock}, }; use dashmap::DashMap; @@ -18,10 +18,10 @@ use crate::{CompilerInput, CompilerOutput, ModeOptimizerSetting, ModePipeline, S use anyhow::{Context as _, Result}; use foundry_compilers_artifacts::{ - output_selection::{ - BytecodeOutputSelection, ContractOutputSelection, EvmOutputSelection, OutputSelection, - }, - solc::{CompilerOutput as SolcOutput, *}, + output_selection::{ + BytecodeOutputSelection, ContractOutputSelection, EvmOutputSelection, OutputSelection, + }, + solc::{CompilerOutput as SolcOutput, *}, }; use semver::Version; use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand}; @@ -31,268 +31,261 @@ pub struct Solc(Arc); #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] struct SolcInner { - /// The path of the solidity compiler executable that this object uses. - solc_path: PathBuf, - /// The version of the solidity compiler executable that this object uses. - solc_version: Version, + /// The path of the solidity compiler executable that this object uses. 
+ solc_path: PathBuf, + /// The version of the solidity compiler executable that this object uses. + solc_version: Version, } impl Solc { - pub async fn new( - context: impl AsRef + AsRef, - version: impl Into>, - ) -> Result { - // This is a cache for the compiler objects so that whenever the same compiler version is - // requested the same object is returned. We do this as we do not want to keep cloning the - // compiler around. - static COMPILERS_CACHE: LazyLock> = - LazyLock::new(Default::default); + pub async fn new( + context: impl AsRef + AsRef, + version: impl Into>, + ) -> Result { + // This is a cache for the compiler objects so that whenever the same compiler version is + // requested the same object is returned. We do this as we do not want to keep cloning the + // compiler around. + static COMPILERS_CACHE: LazyLock> = + LazyLock::new(Default::default); - let working_directory_configuration = - AsRef::::as_ref(&context); - let solc_configuration = AsRef::::as_ref(&context); + let working_directory_configuration = + AsRef::::as_ref(&context); + let solc_configuration = AsRef::::as_ref(&context); - // We attempt to download the solc binary. Note the following: this call does the version - // resolution for us. Therefore, even if the download didn't proceed, this function will - // resolve the version requirement into a canonical version of the compiler. It's then up - // to us to either use the provided path or not. - let version = version - .into() - .unwrap_or_else(|| solc_configuration.version.clone().into()); - let (version, path) = - download_solc(working_directory_configuration.as_path(), version, false) - .await - .context("Failed to download/get path to solc binary")?; + // We attempt to download the solc binary. Note the following: this call does the version + // resolution for us. Therefore, even if the download didn't proceed, this function will + // resolve the version requirement into a canonical version of the compiler. 
It's then up + // to us to either use the provided path or not. + let version = version.into().unwrap_or_else(|| solc_configuration.version.clone().into()); + let (version, path) = + download_solc(working_directory_configuration.as_path(), version, false) + .await + .context("Failed to download/get path to solc binary")?; - Ok(COMPILERS_CACHE - .entry((path.clone(), version.clone())) - .or_insert_with(|| { - info!( - solc_path = %path.display(), - solc_version = %version, - "Created a new solc compiler object" - ); - Self(Arc::new(SolcInner { - solc_path: path, - solc_version: version, - })) - }) - .clone()) - } + Ok(COMPILERS_CACHE + .entry((path.clone(), version.clone())) + .or_insert_with(|| { + info!( + solc_path = %path.display(), + solc_version = %version, + "Created a new solc compiler object" + ); + Self(Arc::new(SolcInner { solc_path: path, solc_version: version })) + }) + .clone()) + } } impl SolidityCompiler for Solc { - fn version(&self) -> &Version { - &self.0.solc_version - } + fn version(&self) -> &Version { + &self.0.solc_version + } - fn path(&self) -> &std::path::Path { - &self.0.solc_path - } + fn path(&self) -> &std::path::Path { + &self.0.solc_path + } - #[tracing::instrument(level = "debug", ret)] - #[tracing::instrument( + #[tracing::instrument(level = "debug", ret)] + #[tracing::instrument( level = "error", skip_all, fields(json_in = tracing::field::Empty), err(Debug) )] - fn build( - &self, - CompilerInput { - pipeline, - optimization, - evm_version, - allow_paths, - base_path, - sources, - libraries, - revert_string_handling, - }: CompilerInput, - ) -> Pin> + '_>> { - Box::pin(async move { - // Be careful to entirely omit the viaIR field if the compiler does not support it, - // as it will error if you provide fields it does not know about. Because - // `supports_mode` is called prior to instantiating a compiler, we should never - // ask for something which is invalid. 
- let via_ir = match (pipeline, self.compiler_supports_yul()) { - (pipeline, true) => pipeline.map(|p| p.via_yul_ir()), - (_pipeline, false) => None, - }; + fn build( + &self, + CompilerInput { + pipeline, + optimization, + evm_version, + allow_paths, + base_path, + sources, + libraries, + revert_string_handling, + }: CompilerInput, + ) -> Pin> + '_>> { + Box::pin(async move { + // Be careful to entirely omit the viaIR field if the compiler does not support it, + // as it will error if you provide fields it does not know about. Because + // `supports_mode` is called prior to instantiating a compiler, we should never + // ask for something which is invalid. + let via_ir = match (pipeline, self.compiler_supports_yul()) { + (pipeline, true) => pipeline.map(|p| p.via_yul_ir()), + (_pipeline, false) => None, + }; - let input = SolcInput { - language: SolcLanguage::Solidity, - sources: Sources( - sources - .into_iter() - .map(|(source_path, source_code)| (source_path, Source::new(source_code))) - .collect(), - ), - settings: Settings { - optimizer: Optimizer { - enabled: optimization.map(|o| o.optimizations_enabled()), - details: Some(Default::default()), - ..Default::default() - }, - output_selection: OutputSelection::common_output_selection( - [ - ContractOutputSelection::Abi, - ContractOutputSelection::Evm(EvmOutputSelection::ByteCode( - BytecodeOutputSelection::Object, - )), - ] - .into_iter() - .map(|item| item.to_string()), - ), - evm_version: evm_version.map(|version| version.to_string().parse().unwrap()), - via_ir, - libraries: Libraries { - libs: libraries - .into_iter() - .map(|(file_path, libraries)| { - ( - file_path, - libraries - .into_iter() - .map(|(library_name, library_address)| { - (library_name, library_address.to_string()) - }) - .collect(), - ) - }) - .collect(), - }, - debug: revert_string_handling.map(|revert_string_handling| DebuggingSettings { - revert_strings: match revert_string_handling { - crate::RevertString::Default => 
Some(RevertStrings::Default), - crate::RevertString::Debug => Some(RevertStrings::Debug), - crate::RevertString::Strip => Some(RevertStrings::Strip), - crate::RevertString::VerboseDebug => Some(RevertStrings::VerboseDebug), - }, - debug_info: Default::default(), - }), - ..Default::default() - }, - }; + let input = SolcInput { + language: SolcLanguage::Solidity, + sources: Sources( + sources + .into_iter() + .map(|(source_path, source_code)| (source_path, Source::new(source_code))) + .collect(), + ), + settings: Settings { + optimizer: Optimizer { + enabled: optimization.map(|o| o.optimizations_enabled()), + details: Some(Default::default()), + ..Default::default() + }, + output_selection: OutputSelection::common_output_selection( + [ + ContractOutputSelection::Abi, + ContractOutputSelection::Evm(EvmOutputSelection::ByteCode( + BytecodeOutputSelection::Object, + )), + ] + .into_iter() + .map(|item| item.to_string()), + ), + evm_version: evm_version.map(|version| version.to_string().parse().unwrap()), + via_ir, + libraries: Libraries { + libs: libraries + .into_iter() + .map(|(file_path, libraries)| { + ( + file_path, + libraries + .into_iter() + .map(|(library_name, library_address)| { + (library_name, library_address.to_string()) + }) + .collect(), + ) + }) + .collect(), + }, + debug: revert_string_handling.map(|revert_string_handling| DebuggingSettings { + revert_strings: match revert_string_handling { + crate::RevertString::Default => Some(RevertStrings::Default), + crate::RevertString::Debug => Some(RevertStrings::Debug), + crate::RevertString::Strip => Some(RevertStrings::Strip), + crate::RevertString::VerboseDebug => Some(RevertStrings::VerboseDebug), + }, + debug_info: Default::default(), + }), + ..Default::default() + }, + }; - Span::current().record("json_in", display(serde_json::to_string(&input).unwrap())); + Span::current().record("json_in", display(serde_json::to_string(&input).unwrap())); - let path = &self.0.solc_path; - let mut command = 
AsyncCommand::new(path); - command - .stdin(Stdio::piped()) - .stdout(Stdio::piped()) - .stderr(Stdio::null()) - .arg("--standard-json"); + let path = &self.0.solc_path; + let mut command = AsyncCommand::new(path); + command + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::null()) + .arg("--standard-json"); - if let Some(ref base_path) = base_path { - command.arg("--base-path").arg(base_path); - } - if !allow_paths.is_empty() { - command.arg("--allow-paths").arg( - allow_paths - .iter() - .map(|path| path.display().to_string()) - .collect::>() - .join(","), - ); - } - let mut child = command - .spawn() - .with_context(|| format!("Failed to spawn solc at {}", path.display()))?; + if let Some(ref base_path) = base_path { + command.arg("--base-path").arg(base_path); + } + if !allow_paths.is_empty() { + command.arg("--allow-paths").arg( + allow_paths + .iter() + .map(|path| path.display().to_string()) + .collect::>() + .join(","), + ); + } + let mut child = command + .spawn() + .with_context(|| format!("Failed to spawn solc at {}", path.display()))?; - let stdin = child.stdin.as_mut().expect("should be piped"); - let serialized_input = serde_json::to_vec(&input) - .context("Failed to serialize Standard JSON input for solc")?; - stdin - .write_all(&serialized_input) - .await - .context("Failed to write Standard JSON to solc stdin")?; - let output = child - .wait_with_output() - .await - .context("Failed while waiting for solc process to finish")?; + let stdin = child.stdin.as_mut().expect("should be piped"); + let serialized_input = serde_json::to_vec(&input) + .context("Failed to serialize Standard JSON input for solc")?; + stdin + .write_all(&serialized_input) + .await + .context("Failed to write Standard JSON to solc stdin")?; + let output = child + .wait_with_output() + .await + .context("Failed while waiting for solc process to finish")?; - if !output.status.success() { - let json_in = serde_json::to_string_pretty(&input) - .context("Failed to 
pretty-print Standard JSON input for logging")?; - tracing::error!( - status = %output.status, - json_input = json_in, - "Compilation using solc failed" - ); - anyhow::bail!("Compilation failed"); - } + if !output.status.success() { + let json_in = serde_json::to_string_pretty(&input) + .context("Failed to pretty-print Standard JSON input for logging")?; + tracing::error!( + status = %output.status, + json_input = json_in, + "Compilation using solc failed" + ); + anyhow::bail!("Compilation failed"); + } - let parsed = serde_json::from_slice::(&output.stdout) - .map_err(|e| { - anyhow::anyhow!( - "failed to parse resolc JSON output: {e}\nstdout: {}", - String::from_utf8_lossy(&output.stdout) - ) - }) - .context("Failed to parse solc standard JSON output")?; + let parsed = serde_json::from_slice::(&output.stdout) + .map_err(|e| { + anyhow::anyhow!( + "failed to parse resolc JSON output: {e}\nstdout: {}", + String::from_utf8_lossy(&output.stdout) + ) + }) + .context("Failed to parse solc standard JSON output")?; - // Detecting if the compiler output contained errors and reporting them through logs and - // errors instead of returning the compiler output that might contain errors. - for error in parsed.errors.iter() { - if error.severity == Severity::Error { - tracing::error!(?error, ?input, "Encountered an error in the compilation"); - anyhow::bail!("Encountered an error in the compilation: {error}") - } - } + // Detecting if the compiler output contained errors and reporting them through logs and + // errors instead of returning the compiler output that might contain errors. 
+ for error in parsed.errors.iter() { + if error.severity == Severity::Error { + tracing::error!(?error, ?input, "Encountered an error in the compilation"); + anyhow::bail!("Encountered an error in the compilation: {error}") + } + } - tracing::debug!( - output = %String::from_utf8_lossy(&output.stdout).to_string(), - "Compiled successfully" - ); + tracing::debug!( + output = %String::from_utf8_lossy(&output.stdout).to_string(), + "Compiled successfully" + ); - let mut compiler_output = CompilerOutput::default(); - for (contract_path, contracts) in parsed.contracts { - let map = compiler_output - .contracts - .entry(contract_path.canonicalize().with_context(|| { - format!( - "Failed to canonicalize contract path {}", - contract_path.display() - ) - })?) - .or_default(); - for (contract_name, contract_info) in contracts.into_iter() { - let source_code = contract_info - .evm - .and_then(|evm| evm.bytecode) - .map(|bytecode| match bytecode.object { - BytecodeObject::Bytecode(bytecode) => bytecode.to_string(), - BytecodeObject::Unlinked(unlinked) => unlinked, - }) - .context("Unexpected - contract compiled with solc has no source code")?; - let abi = contract_info - .abi - .context("Unexpected - contract compiled with solc as no ABI")?; - map.insert(contract_name, (source_code, abi)); - } - } + let mut compiler_output = CompilerOutput::default(); + for (contract_path, contracts) in parsed.contracts { + let map = compiler_output + .contracts + .entry(contract_path.canonicalize().with_context(|| { + format!("Failed to canonicalize contract path {}", contract_path.display()) + })?) 
+ .or_default(); + for (contract_name, contract_info) in contracts.into_iter() { + let source_code = contract_info + .evm + .and_then(|evm| evm.bytecode) + .map(|bytecode| match bytecode.object { + BytecodeObject::Bytecode(bytecode) => bytecode.to_string(), + BytecodeObject::Unlinked(unlinked) => unlinked, + }) + .context("Unexpected - contract compiled with solc has no source code")?; + let abi = contract_info + .abi + .context("Unexpected - contract compiled with solc as no ABI")?; + map.insert(contract_name, (source_code, abi)); + } + } - Ok(compiler_output) - }) - } + Ok(compiler_output) + }) + } - fn supports_mode( - &self, - _optimize_setting: ModeOptimizerSetting, - pipeline: ModePipeline, - ) -> bool { - // solc 0.8.13 and above supports --via-ir, and less than that does not. Thus, we support mode E - // (ie no Yul IR) in either case, but only support Y (via Yul IR) if the compiler is new enough. - pipeline == ModePipeline::ViaEVMAssembly - || (pipeline == ModePipeline::ViaYulIR && self.compiler_supports_yul()) - } + fn supports_mode( + &self, + _optimize_setting: ModeOptimizerSetting, + pipeline: ModePipeline, + ) -> bool { + // solc 0.8.13 and above supports --via-ir, and less than that does not. Thus, we support + // mode E (ie no Yul IR) in either case, but only support Y (via Yul IR) if the compiler + // is new enough. 
+ pipeline == ModePipeline::ViaEVMAssembly || + (pipeline == ModePipeline::ViaYulIR && self.compiler_supports_yul()) + } } impl Solc { - fn compiler_supports_yul(&self) -> bool { - const SOLC_VERSION_SUPPORTING_VIA_YUL_IR: Version = Version::new(0, 8, 13); - SolidityCompiler::version(self) >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR - } + fn compiler_supports_yul(&self) -> bool { + const SOLC_VERSION_SUPPORTING_VIA_YUL_IR: Version = Version::new(0, 8, 13); + SolidityCompiler::version(self) >= &SOLC_VERSION_SUPPORTING_VIA_YUL_IR + } } diff --git a/crates/compiler/tests/lib.rs b/crates/compiler/tests/lib.rs index 5de4b90..d11fd88 100644 --- a/crates/compiler/tests/lib.rs +++ b/crates/compiler/tests/lib.rs @@ -7,82 +7,82 @@ use semver::Version; #[tokio::test] async fn contracts_can_be_compiled_with_solc() { - // Arrange - let args = TestExecutionContext::default(); - let solc = Solc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30))) - .await - .unwrap(); + // Arrange + let args = TestExecutionContext::default(); + let solc = Solc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30))) + .await + .unwrap(); - // Act - let output = Compiler::new() - .with_source("./tests/assets/array_one_element/callable.sol") - .unwrap() - .with_source("./tests/assets/array_one_element/main.sol") - .unwrap() - .try_build(&solc) - .await; + // Act + let output = Compiler::new() + .with_source("./tests/assets/array_one_element/callable.sol") + .unwrap() + .with_source("./tests/assets/array_one_element/main.sol") + .unwrap() + .try_build(&solc) + .await; - // Assert - let output = output.expect("Failed to compile"); - assert_eq!(output.contracts.len(), 2); + // Assert + let output = output.expect("Failed to compile"); + assert_eq!(output.contracts.len(), 2); - let main_file_contracts = output - .contracts - .get( - &PathBuf::from("./tests/assets/array_one_element/main.sol") - .canonicalize() - .unwrap(), - ) - .unwrap(); - let callable_file_contracts = output - 
.contracts - .get( - &PathBuf::from("./tests/assets/array_one_element/callable.sol") - .canonicalize() - .unwrap(), - ) - .unwrap(); - assert!(main_file_contracts.contains_key("Main")); - assert!(callable_file_contracts.contains_key("Callable")); + let main_file_contracts = output + .contracts + .get( + &PathBuf::from("./tests/assets/array_one_element/main.sol") + .canonicalize() + .unwrap(), + ) + .unwrap(); + let callable_file_contracts = output + .contracts + .get( + &PathBuf::from("./tests/assets/array_one_element/callable.sol") + .canonicalize() + .unwrap(), + ) + .unwrap(); + assert!(main_file_contracts.contains_key("Main")); + assert!(callable_file_contracts.contains_key("Callable")); } #[tokio::test] async fn contracts_can_be_compiled_with_resolc() { - // Arrange - let args = TestExecutionContext::default(); - let resolc = Resolc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30))) - .await - .unwrap(); + // Arrange + let args = TestExecutionContext::default(); + let resolc = Resolc::new(&args, VersionOrRequirement::Version(Version::new(0, 8, 30))) + .await + .unwrap(); - // Act - let output = Compiler::new() - .with_source("./tests/assets/array_one_element/callable.sol") - .unwrap() - .with_source("./tests/assets/array_one_element/main.sol") - .unwrap() - .try_build(&resolc) - .await; + // Act + let output = Compiler::new() + .with_source("./tests/assets/array_one_element/callable.sol") + .unwrap() + .with_source("./tests/assets/array_one_element/main.sol") + .unwrap() + .try_build(&resolc) + .await; - // Assert - let output = output.expect("Failed to compile"); - assert_eq!(output.contracts.len(), 2); + // Assert + let output = output.expect("Failed to compile"); + assert_eq!(output.contracts.len(), 2); - let main_file_contracts = output - .contracts - .get( - &PathBuf::from("./tests/assets/array_one_element/main.sol") - .canonicalize() - .unwrap(), - ) - .unwrap(); - let callable_file_contracts = output - .contracts - .get( - 
&PathBuf::from("./tests/assets/array_one_element/callable.sol") - .canonicalize() - .unwrap(), - ) - .unwrap(); - assert!(main_file_contracts.contains_key("Main")); - assert!(callable_file_contracts.contains_key("Callable")); + let main_file_contracts = output + .contracts + .get( + &PathBuf::from("./tests/assets/array_one_element/main.sol") + .canonicalize() + .unwrap(), + ) + .unwrap(); + let callable_file_contracts = output + .contracts + .get( + &PathBuf::from("./tests/assets/array_one_element/callable.sol") + .canonicalize() + .unwrap(), + ) + .unwrap(); + assert!(main_file_contracts.contains_key("Main")); + assert!(callable_file_contracts.contains_key("Callable")); } diff --git a/crates/config/src/lib.rs b/crates/config/src/lib.rs index c75b9e5..87ce15a 100644 --- a/crates/config/src/lib.rs +++ b/crates/config/src/lib.rs @@ -1,21 +1,21 @@ //! The global configuration used across all revive differential testing crates. use std::{ - fmt::Display, - fs::read_to_string, - ops::Deref, - path::{Path, PathBuf}, - str::FromStr, - sync::{Arc, LazyLock, OnceLock}, - time::Duration, + fmt::Display, + fs::read_to_string, + ops::Deref, + path::{Path, PathBuf}, + str::FromStr, + sync::{Arc, LazyLock, OnceLock}, + time::Duration, }; use alloy::{ - genesis::Genesis, - hex::ToHexExt, - network::EthereumWallet, - primitives::{FixedBytes, U256}, - signers::local::PrivateKeySigner, + genesis::Genesis, + hex::ToHexExt, + network::EthereumWallet, + primitives::{FixedBytes, U256}, + signers::local::PrivateKeySigner, }; use clap::{Parser, ValueEnum, ValueHint}; use revive_dt_common::types::PlatformIdentifier; @@ -27,940 +27,927 @@ use temp_dir::TempDir; #[derive(Clone, Debug, Parser, Serialize)] #[command(name = "retester")] pub enum Context { - /// Executes tests in the MatterLabs format differentially on multiple targets concurrently. - Test(Box), + /// Executes tests in the MatterLabs format differentially on multiple targets concurrently. 
+ Test(Box), - /// Executes differential benchmarks on various platforms. - Benchmark(Box), + /// Executes differential benchmarks on various platforms. + Benchmark(Box), - /// Exports the JSON schema of the MatterLabs test format used by the tool. - ExportJsonSchema, + /// Exports the JSON schema of the MatterLabs test format used by the tool. + ExportJsonSchema, } impl Context { - pub fn working_directory_configuration(&self) -> &WorkingDirectoryConfiguration { - self.as_ref() - } + pub fn working_directory_configuration(&self) -> &WorkingDirectoryConfiguration { + self.as_ref() + } - pub fn report_configuration(&self) -> &ReportConfiguration { - self.as_ref() - } + pub fn report_configuration(&self) -> &ReportConfiguration { + self.as_ref() + } } impl AsRef for Context { - fn as_ref(&self) -> &WorkingDirectoryConfiguration { - match self { - Self::Test(context) => context.as_ref().as_ref(), - Self::Benchmark(context) => context.as_ref().as_ref(), - Self::ExportJsonSchema => unreachable!(), - } - } + fn as_ref(&self) -> &WorkingDirectoryConfiguration { + match self { + Self::Test(context) => context.as_ref().as_ref(), + Self::Benchmark(context) => context.as_ref().as_ref(), + Self::ExportJsonSchema => unreachable!(), + } + } } impl AsRef for Context { - fn as_ref(&self) -> &CorpusConfiguration { - match self { - Self::Test(context) => context.as_ref().as_ref(), - Self::Benchmark(context) => context.as_ref().as_ref(), - Self::ExportJsonSchema => unreachable!(), - } - } + fn as_ref(&self) -> &CorpusConfiguration { + match self { + Self::Test(context) => context.as_ref().as_ref(), + Self::Benchmark(context) => context.as_ref().as_ref(), + Self::ExportJsonSchema => unreachable!(), + } + } } impl AsRef for Context { - fn as_ref(&self) -> &SolcConfiguration { - match self { - Self::Test(context) => context.as_ref().as_ref(), - Self::Benchmark(context) => context.as_ref().as_ref(), - Self::ExportJsonSchema => unreachable!(), - } - } + fn as_ref(&self) -> 
&SolcConfiguration { + match self { + Self::Test(context) => context.as_ref().as_ref(), + Self::Benchmark(context) => context.as_ref().as_ref(), + Self::ExportJsonSchema => unreachable!(), + } + } } impl AsRef for Context { - fn as_ref(&self) -> &ResolcConfiguration { - match self { - Self::Test(context) => context.as_ref().as_ref(), - Self::Benchmark(context) => context.as_ref().as_ref(), - Self::ExportJsonSchema => unreachable!(), - } - } + fn as_ref(&self) -> &ResolcConfiguration { + match self { + Self::Test(context) => context.as_ref().as_ref(), + Self::Benchmark(context) => context.as_ref().as_ref(), + Self::ExportJsonSchema => unreachable!(), + } + } } impl AsRef for Context { - fn as_ref(&self) -> &GethConfiguration { - match self { - Self::Test(context) => context.as_ref().as_ref(), - Self::Benchmark(context) => context.as_ref().as_ref(), - Self::ExportJsonSchema => unreachable!(), - } - } + fn as_ref(&self) -> &GethConfiguration { + match self { + Self::Test(context) => context.as_ref().as_ref(), + Self::Benchmark(context) => context.as_ref().as_ref(), + Self::ExportJsonSchema => unreachable!(), + } + } } impl AsRef for Context { - fn as_ref(&self) -> &KurtosisConfiguration { - match self { - Self::Test(context) => context.as_ref().as_ref(), - Self::Benchmark(context) => context.as_ref().as_ref(), - Self::ExportJsonSchema => unreachable!(), - } - } + fn as_ref(&self) -> &KurtosisConfiguration { + match self { + Self::Test(context) => context.as_ref().as_ref(), + Self::Benchmark(context) => context.as_ref().as_ref(), + Self::ExportJsonSchema => unreachable!(), + } + } } impl AsRef for Context { - fn as_ref(&self) -> &PolkadotParachainConfiguration { - match self { - Self::Test(context) => context.as_ref().as_ref(), - Self::Benchmark(context) => context.as_ref().as_ref(), - Self::ExportJsonSchema => unreachable!(), - } - } + fn as_ref(&self) -> &PolkadotParachainConfiguration { + match self { + Self::Test(context) => context.as_ref().as_ref(), + 
Self::Benchmark(context) => context.as_ref().as_ref(), + Self::ExportJsonSchema => unreachable!(), + } + } } impl AsRef for Context { - fn as_ref(&self) -> &KitchensinkConfiguration { - match self { - Self::Test(context) => context.as_ref().as_ref(), - Self::Benchmark(context) => context.as_ref().as_ref(), - Self::ExportJsonSchema => unreachable!(), - } - } + fn as_ref(&self) -> &KitchensinkConfiguration { + match self { + Self::Test(context) => context.as_ref().as_ref(), + Self::Benchmark(context) => context.as_ref().as_ref(), + Self::ExportJsonSchema => unreachable!(), + } + } } impl AsRef for Context { - fn as_ref(&self) -> &ReviveDevNodeConfiguration { - match self { - Self::Test(context) => context.as_ref().as_ref(), - Self::Benchmark(context) => context.as_ref().as_ref(), - Self::ExportJsonSchema => unreachable!(), - } - } + fn as_ref(&self) -> &ReviveDevNodeConfiguration { + match self { + Self::Test(context) => context.as_ref().as_ref(), + Self::Benchmark(context) => context.as_ref().as_ref(), + Self::ExportJsonSchema => unreachable!(), + } + } } impl AsRef for Context { - fn as_ref(&self) -> &EthRpcConfiguration { - match self { - Self::Test(context) => context.as_ref().as_ref(), - Self::Benchmark(context) => context.as_ref().as_ref(), - Self::ExportJsonSchema => unreachable!(), - } - } + fn as_ref(&self) -> &EthRpcConfiguration { + match self { + Self::Test(context) => context.as_ref().as_ref(), + Self::Benchmark(context) => context.as_ref().as_ref(), + Self::ExportJsonSchema => unreachable!(), + } + } } impl AsRef for Context { - fn as_ref(&self) -> &GenesisConfiguration { - match self { - Self::Test(context) => context.as_ref().as_ref(), - Self::Benchmark(..) => { - static GENESIS: LazyLock = LazyLock::new(Default::default); - &GENESIS - } - Self::ExportJsonSchema => unreachable!(), - } - } + fn as_ref(&self) -> &GenesisConfiguration { + match self { + Self::Test(context) => context.as_ref().as_ref(), + Self::Benchmark(..) 
=> { + static GENESIS: LazyLock = LazyLock::new(Default::default); + &GENESIS + }, + Self::ExportJsonSchema => unreachable!(), + } + } } impl AsRef for Context { - fn as_ref(&self) -> &WalletConfiguration { - match self { - Self::Test(context) => context.as_ref().as_ref(), - Self::Benchmark(context) => context.as_ref().as_ref(), - Self::ExportJsonSchema => unreachable!(), - } - } + fn as_ref(&self) -> &WalletConfiguration { + match self { + Self::Test(context) => context.as_ref().as_ref(), + Self::Benchmark(context) => context.as_ref().as_ref(), + Self::ExportJsonSchema => unreachable!(), + } + } } impl AsRef for Context { - fn as_ref(&self) -> &ConcurrencyConfiguration { - match self { - Self::Test(context) => context.as_ref().as_ref(), - Self::Benchmark(context) => context.as_ref().as_ref(), - Self::ExportJsonSchema => unreachable!(), - } - } + fn as_ref(&self) -> &ConcurrencyConfiguration { + match self { + Self::Test(context) => context.as_ref().as_ref(), + Self::Benchmark(context) => context.as_ref().as_ref(), + Self::ExportJsonSchema => unreachable!(), + } + } } impl AsRef for Context { - fn as_ref(&self) -> &CompilationConfiguration { - match self { - Self::Test(context) => context.as_ref().as_ref(), - Self::Benchmark(context) => context.as_ref().as_ref(), - Self::ExportJsonSchema => unreachable!(), - } - } + fn as_ref(&self) -> &CompilationConfiguration { + match self { + Self::Test(context) => context.as_ref().as_ref(), + Self::Benchmark(context) => context.as_ref().as_ref(), + Self::ExportJsonSchema => unreachable!(), + } + } } impl AsRef for Context { - fn as_ref(&self) -> &ReportConfiguration { - match self { - Self::Test(context) => context.as_ref().as_ref(), - Self::Benchmark(context) => context.as_ref().as_ref(), - Self::ExportJsonSchema => unreachable!(), - } - } + fn as_ref(&self) -> &ReportConfiguration { + match self { + Self::Test(context) => context.as_ref().as_ref(), + Self::Benchmark(context) => context.as_ref().as_ref(), + 
Self::ExportJsonSchema => unreachable!(), + } + } } #[derive(Clone, Debug, Parser, Serialize)] pub struct TestExecutionContext { - /// The working directory that the program will use for all of the temporary artifacts needed at - /// runtime. - /// - /// If not specified, then a temporary directory will be created and used by the program for all - /// temporary artifacts. - #[clap( + /// The working directory that the program will use for all of the temporary artifacts needed + /// at runtime. + /// + /// If not specified, then a temporary directory will be created and used by the program for + /// all temporary artifacts. + #[clap( short, long, default_value = "", value_hint = ValueHint::DirPath, )] - pub working_directory: WorkingDirectoryConfiguration, + pub working_directory: WorkingDirectoryConfiguration, - /// The set of platforms that the differential tests should run on. - #[arg( + /// The set of platforms that the differential tests should run on. + #[arg( short = 'p', long = "platform", default_values = ["geth-evm-solc", "revive-dev-node-polkavm-resolc"] )] - pub platforms: Vec, + pub platforms: Vec, - /// Configuration parameters for the corpus files to use. - #[clap(flatten, next_help_heading = "Corpus Configuration")] - pub corpus_configuration: CorpusConfiguration, + /// Configuration parameters for the corpus files to use. + #[clap(flatten, next_help_heading = "Corpus Configuration")] + pub corpus_configuration: CorpusConfiguration, - /// Configuration parameters for the solc compiler. - #[clap(flatten, next_help_heading = "Solc Configuration")] - pub solc_configuration: SolcConfiguration, + /// Configuration parameters for the solc compiler. + #[clap(flatten, next_help_heading = "Solc Configuration")] + pub solc_configuration: SolcConfiguration, - /// Configuration parameters for the resolc compiler. 
- #[clap(flatten, next_help_heading = "Resolc Configuration")] - pub resolc_configuration: ResolcConfiguration, + /// Configuration parameters for the resolc compiler. + #[clap(flatten, next_help_heading = "Resolc Configuration")] + pub resolc_configuration: ResolcConfiguration, - /// Configuration parameters for the Polkadot Parachain. - #[clap(flatten, next_help_heading = "Polkadot Parachain Configuration")] - pub polkadot_parachain_configuration: PolkadotParachainConfiguration, + /// Configuration parameters for the Polkadot Parachain. + #[clap(flatten, next_help_heading = "Polkadot Parachain Configuration")] + pub polkadot_parachain_configuration: PolkadotParachainConfiguration, - /// Configuration parameters for the geth node. - #[clap(flatten, next_help_heading = "Geth Configuration")] - pub geth_configuration: GethConfiguration, + /// Configuration parameters for the geth node. + #[clap(flatten, next_help_heading = "Geth Configuration")] + pub geth_configuration: GethConfiguration, - /// Configuration parameters for the lighthouse node. - #[clap(flatten, next_help_heading = "Lighthouse Configuration")] - pub lighthouse_configuration: KurtosisConfiguration, + /// Configuration parameters for the lighthouse node. + #[clap(flatten, next_help_heading = "Lighthouse Configuration")] + pub lighthouse_configuration: KurtosisConfiguration, - /// Configuration parameters for the Kitchensink. - #[clap(flatten, next_help_heading = "Kitchensink Configuration")] - pub kitchensink_configuration: KitchensinkConfiguration, + /// Configuration parameters for the Kitchensink. + #[clap(flatten, next_help_heading = "Kitchensink Configuration")] + pub kitchensink_configuration: KitchensinkConfiguration, - /// Configuration parameters for the Revive Dev Node. - #[clap(flatten, next_help_heading = "Revive Dev Node Configuration")] - pub revive_dev_node_configuration: ReviveDevNodeConfiguration, + /// Configuration parameters for the Revive Dev Node. 
+ #[clap(flatten, next_help_heading = "Revive Dev Node Configuration")] + pub revive_dev_node_configuration: ReviveDevNodeConfiguration, - /// Configuration parameters for the Eth Rpc. - #[clap(flatten, next_help_heading = "Eth RPC Configuration")] - pub eth_rpc_configuration: EthRpcConfiguration, + /// Configuration parameters for the Eth Rpc. + #[clap(flatten, next_help_heading = "Eth RPC Configuration")] + pub eth_rpc_configuration: EthRpcConfiguration, - /// Configuration parameters for the genesis. - #[clap(flatten, next_help_heading = "Genesis Configuration")] - pub genesis_configuration: GenesisConfiguration, + /// Configuration parameters for the genesis. + #[clap(flatten, next_help_heading = "Genesis Configuration")] + pub genesis_configuration: GenesisConfiguration, - /// Configuration parameters for the wallet. - #[clap(flatten, next_help_heading = "Wallet Configuration")] - pub wallet_configuration: WalletConfiguration, + /// Configuration parameters for the wallet. + #[clap(flatten, next_help_heading = "Wallet Configuration")] + pub wallet_configuration: WalletConfiguration, - /// Configuration parameters for concurrency. - #[clap(flatten, next_help_heading = "Concurrency Configuration")] - pub concurrency_configuration: ConcurrencyConfiguration, + /// Configuration parameters for concurrency. + #[clap(flatten, next_help_heading = "Concurrency Configuration")] + pub concurrency_configuration: ConcurrencyConfiguration, - /// Configuration parameters for the compilers and compilation. - #[clap(flatten, next_help_heading = "Compilation Configuration")] - pub compilation_configuration: CompilationConfiguration, + /// Configuration parameters for the compilers and compilation. + #[clap(flatten, next_help_heading = "Compilation Configuration")] + pub compilation_configuration: CompilationConfiguration, - /// Configuration parameters for the report. 
- #[clap(flatten, next_help_heading = "Report Configuration")] - pub report_configuration: ReportConfiguration, + /// Configuration parameters for the report. + #[clap(flatten, next_help_heading = "Report Configuration")] + pub report_configuration: ReportConfiguration, } #[derive(Clone, Debug, Parser, Serialize)] pub struct BenchmarkingContext { - /// The working directory that the program will use for all of the temporary artifacts needed at - /// runtime. - /// - /// If not specified, then a temporary directory will be created and used by the program for all - /// temporary artifacts. - #[clap( + /// The working directory that the program will use for all of the temporary artifacts needed + /// at runtime. + /// + /// If not specified, then a temporary directory will be created and used by the program for + /// all temporary artifacts. + #[clap( short, long, default_value = "", value_hint = ValueHint::DirPath, )] - pub working_directory: WorkingDirectoryConfiguration, + pub working_directory: WorkingDirectoryConfiguration, - /// The set of platforms that the differential tests should run on. - #[arg( + /// The set of platforms that the differential tests should run on. + #[arg( short = 'p', long = "platform", default_values = ["geth-evm-solc", "revive-dev-node-polkavm-resolc"] )] - pub platforms: Vec, + pub platforms: Vec, - /// The default repetition count for any workload specified but that doesn't contain a repeat - /// step. - #[arg(short = 'r', long = "default-repetition-count", default_value_t = 1000)] - pub default_repetition_count: usize, + /// The default repetition count for any workload specified but that doesn't contain a repeat + /// step. + #[arg(short = 'r', long = "default-repetition-count", default_value_t = 1000)] + pub default_repetition_count: usize, - /// Configuration parameters for the corpus files to use. 
- #[clap(flatten, next_help_heading = "Corpus Configuration")] - pub corpus_configuration: CorpusConfiguration, + /// Configuration parameters for the corpus files to use. + #[clap(flatten, next_help_heading = "Corpus Configuration")] + pub corpus_configuration: CorpusConfiguration, - /// Configuration parameters for the solc compiler. - #[clap(flatten, next_help_heading = "Solc Configuration")] - pub solc_configuration: SolcConfiguration, + /// Configuration parameters for the solc compiler. + #[clap(flatten, next_help_heading = "Solc Configuration")] + pub solc_configuration: SolcConfiguration, - /// Configuration parameters for the resolc compiler. - #[clap(flatten, next_help_heading = "Resolc Configuration")] - pub resolc_configuration: ResolcConfiguration, + /// Configuration parameters for the resolc compiler. + #[clap(flatten, next_help_heading = "Resolc Configuration")] + pub resolc_configuration: ResolcConfiguration, - /// Configuration parameters for the geth node. - #[clap(flatten, next_help_heading = "Geth Configuration")] - pub geth_configuration: GethConfiguration, + /// Configuration parameters for the geth node. + #[clap(flatten, next_help_heading = "Geth Configuration")] + pub geth_configuration: GethConfiguration, - /// Configuration parameters for the lighthouse node. - #[clap(flatten, next_help_heading = "Lighthouse Configuration")] - pub lighthouse_configuration: KurtosisConfiguration, + /// Configuration parameters for the lighthouse node. + #[clap(flatten, next_help_heading = "Lighthouse Configuration")] + pub lighthouse_configuration: KurtosisConfiguration, - /// Configuration parameters for the Kitchensink. - #[clap(flatten, next_help_heading = "Kitchensink Configuration")] - pub kitchensink_configuration: KitchensinkConfiguration, + /// Configuration parameters for the Kitchensink. 
+ #[clap(flatten, next_help_heading = "Kitchensink Configuration")] + pub kitchensink_configuration: KitchensinkConfiguration, - /// Configuration parameters for the Polkadot Parachain. - #[clap(flatten, next_help_heading = "Polkadot Parachain Configuration")] - pub polkadot_parachain_configuration: PolkadotParachainConfiguration, + /// Configuration parameters for the Polkadot Parachain. + #[clap(flatten, next_help_heading = "Polkadot Parachain Configuration")] + pub polkadot_parachain_configuration: PolkadotParachainConfiguration, - /// Configuration parameters for the Revive Dev Node. - #[clap(flatten, next_help_heading = "Revive Dev Node Configuration")] - pub revive_dev_node_configuration: ReviveDevNodeConfiguration, + /// Configuration parameters for the Revive Dev Node. + #[clap(flatten, next_help_heading = "Revive Dev Node Configuration")] + pub revive_dev_node_configuration: ReviveDevNodeConfiguration, - /// Configuration parameters for the Eth Rpc. - #[clap(flatten, next_help_heading = "Eth RPC Configuration")] - pub eth_rpc_configuration: EthRpcConfiguration, + /// Configuration parameters for the Eth Rpc. + #[clap(flatten, next_help_heading = "Eth RPC Configuration")] + pub eth_rpc_configuration: EthRpcConfiguration, - /// Configuration parameters for the wallet. - #[clap(flatten, next_help_heading = "Wallet Configuration")] - pub wallet_configuration: WalletConfiguration, + /// Configuration parameters for the wallet. + #[clap(flatten, next_help_heading = "Wallet Configuration")] + pub wallet_configuration: WalletConfiguration, - /// Configuration parameters for concurrency. - #[clap(flatten, next_help_heading = "Concurrency Configuration")] - pub concurrency_configuration: ConcurrencyConfiguration, + /// Configuration parameters for concurrency. + #[clap(flatten, next_help_heading = "Concurrency Configuration")] + pub concurrency_configuration: ConcurrencyConfiguration, - /// Configuration parameters for the compilers and compilation. 
- #[clap(flatten, next_help_heading = "Compilation Configuration")] - pub compilation_configuration: CompilationConfiguration, + /// Configuration parameters for the compilers and compilation. + #[clap(flatten, next_help_heading = "Compilation Configuration")] + pub compilation_configuration: CompilationConfiguration, - /// Configuration parameters for the report. - #[clap(flatten, next_help_heading = "Report Configuration")] - pub report_configuration: ReportConfiguration, + /// Configuration parameters for the report. + #[clap(flatten, next_help_heading = "Report Configuration")] + pub report_configuration: ReportConfiguration, } impl Default for TestExecutionContext { - fn default() -> Self { - Self::parse_from(["execution-context"]) - } + fn default() -> Self { + Self::parse_from(["execution-context"]) + } } impl AsRef for TestExecutionContext { - fn as_ref(&self) -> &WorkingDirectoryConfiguration { - &self.working_directory - } + fn as_ref(&self) -> &WorkingDirectoryConfiguration { + &self.working_directory + } } impl AsRef for TestExecutionContext { - fn as_ref(&self) -> &CorpusConfiguration { - &self.corpus_configuration - } + fn as_ref(&self) -> &CorpusConfiguration { + &self.corpus_configuration + } } impl AsRef for TestExecutionContext { - fn as_ref(&self) -> &SolcConfiguration { - &self.solc_configuration - } + fn as_ref(&self) -> &SolcConfiguration { + &self.solc_configuration + } } impl AsRef for TestExecutionContext { - fn as_ref(&self) -> &ResolcConfiguration { - &self.resolc_configuration - } + fn as_ref(&self) -> &ResolcConfiguration { + &self.resolc_configuration + } } impl AsRef for TestExecutionContext { - fn as_ref(&self) -> &GethConfiguration { - &self.geth_configuration - } + fn as_ref(&self) -> &GethConfiguration { + &self.geth_configuration + } } impl AsRef for TestExecutionContext { - fn as_ref(&self) -> &PolkadotParachainConfiguration { - &self.polkadot_parachain_configuration - } + fn as_ref(&self) -> &PolkadotParachainConfiguration { + 
&self.polkadot_parachain_configuration + } } impl AsRef for TestExecutionContext { - fn as_ref(&self) -> &KurtosisConfiguration { - &self.lighthouse_configuration - } + fn as_ref(&self) -> &KurtosisConfiguration { + &self.lighthouse_configuration + } } impl AsRef for TestExecutionContext { - fn as_ref(&self) -> &KitchensinkConfiguration { - &self.kitchensink_configuration - } + fn as_ref(&self) -> &KitchensinkConfiguration { + &self.kitchensink_configuration + } } impl AsRef for TestExecutionContext { - fn as_ref(&self) -> &ReviveDevNodeConfiguration { - &self.revive_dev_node_configuration - } + fn as_ref(&self) -> &ReviveDevNodeConfiguration { + &self.revive_dev_node_configuration + } } impl AsRef for TestExecutionContext { - fn as_ref(&self) -> &EthRpcConfiguration { - &self.eth_rpc_configuration - } + fn as_ref(&self) -> &EthRpcConfiguration { + &self.eth_rpc_configuration + } } impl AsRef for TestExecutionContext { - fn as_ref(&self) -> &GenesisConfiguration { - &self.genesis_configuration - } + fn as_ref(&self) -> &GenesisConfiguration { + &self.genesis_configuration + } } impl AsRef for TestExecutionContext { - fn as_ref(&self) -> &WalletConfiguration { - &self.wallet_configuration - } + fn as_ref(&self) -> &WalletConfiguration { + &self.wallet_configuration + } } impl AsRef for TestExecutionContext { - fn as_ref(&self) -> &ConcurrencyConfiguration { - &self.concurrency_configuration - } + fn as_ref(&self) -> &ConcurrencyConfiguration { + &self.concurrency_configuration + } } impl AsRef for TestExecutionContext { - fn as_ref(&self) -> &CompilationConfiguration { - &self.compilation_configuration - } + fn as_ref(&self) -> &CompilationConfiguration { + &self.compilation_configuration + } } impl AsRef for TestExecutionContext { - fn as_ref(&self) -> &ReportConfiguration { - &self.report_configuration - } + fn as_ref(&self) -> &ReportConfiguration { + &self.report_configuration + } } impl Default for BenchmarkingContext { - fn default() -> Self { - 
Self::parse_from(["execution-context"]) - } + fn default() -> Self { + Self::parse_from(["execution-context"]) + } } impl AsRef for BenchmarkingContext { - fn as_ref(&self) -> &WorkingDirectoryConfiguration { - &self.working_directory - } + fn as_ref(&self) -> &WorkingDirectoryConfiguration { + &self.working_directory + } } impl AsRef for BenchmarkingContext { - fn as_ref(&self) -> &CorpusConfiguration { - &self.corpus_configuration - } + fn as_ref(&self) -> &CorpusConfiguration { + &self.corpus_configuration + } } impl AsRef for BenchmarkingContext { - fn as_ref(&self) -> &SolcConfiguration { - &self.solc_configuration - } + fn as_ref(&self) -> &SolcConfiguration { + &self.solc_configuration + } } impl AsRef for BenchmarkingContext { - fn as_ref(&self) -> &ResolcConfiguration { - &self.resolc_configuration - } + fn as_ref(&self) -> &ResolcConfiguration { + &self.resolc_configuration + } } impl AsRef for BenchmarkingContext { - fn as_ref(&self) -> &GethConfiguration { - &self.geth_configuration - } + fn as_ref(&self) -> &GethConfiguration { + &self.geth_configuration + } } impl AsRef for BenchmarkingContext { - fn as_ref(&self) -> &KurtosisConfiguration { - &self.lighthouse_configuration - } + fn as_ref(&self) -> &KurtosisConfiguration { + &self.lighthouse_configuration + } } impl AsRef for BenchmarkingContext { - fn as_ref(&self) -> &PolkadotParachainConfiguration { - &self.polkadot_parachain_configuration - } + fn as_ref(&self) -> &PolkadotParachainConfiguration { + &self.polkadot_parachain_configuration + } } impl AsRef for BenchmarkingContext { - fn as_ref(&self) -> &KitchensinkConfiguration { - &self.kitchensink_configuration - } + fn as_ref(&self) -> &KitchensinkConfiguration { + &self.kitchensink_configuration + } } impl AsRef for BenchmarkingContext { - fn as_ref(&self) -> &ReviveDevNodeConfiguration { - &self.revive_dev_node_configuration - } + fn as_ref(&self) -> &ReviveDevNodeConfiguration { + &self.revive_dev_node_configuration + } } impl AsRef for 
BenchmarkingContext { - fn as_ref(&self) -> &EthRpcConfiguration { - &self.eth_rpc_configuration - } + fn as_ref(&self) -> &EthRpcConfiguration { + &self.eth_rpc_configuration + } } impl AsRef for BenchmarkingContext { - fn as_ref(&self) -> &WalletConfiguration { - &self.wallet_configuration - } + fn as_ref(&self) -> &WalletConfiguration { + &self.wallet_configuration + } } impl AsRef for BenchmarkingContext { - fn as_ref(&self) -> &ConcurrencyConfiguration { - &self.concurrency_configuration - } + fn as_ref(&self) -> &ConcurrencyConfiguration { + &self.concurrency_configuration + } } impl AsRef for BenchmarkingContext { - fn as_ref(&self) -> &CompilationConfiguration { - &self.compilation_configuration - } + fn as_ref(&self) -> &CompilationConfiguration { + &self.compilation_configuration + } } impl AsRef for BenchmarkingContext { - fn as_ref(&self) -> &ReportConfiguration { - &self.report_configuration - } + fn as_ref(&self) -> &ReportConfiguration { + &self.report_configuration + } } /// A set of configuration parameters for the corpus files to use for the execution. #[derive(Clone, Debug, Parser, Serialize)] pub struct CorpusConfiguration { - /// A list of test corpus JSON files to be tested. - #[arg(short = 'c', long = "corpus")] - pub paths: Vec, + /// A list of test corpus JSON files to be tested. + #[arg(short = 'c', long = "corpus")] + pub paths: Vec, } /// A set of configuration parameters for Solc. #[derive(Clone, Debug, Parser, Serialize)] pub struct SolcConfiguration { - /// Specifies the default version of the Solc compiler that should be used if there is no - /// override specified by one of the test cases. - #[clap(long = "solc.version", default_value = "0.8.29")] - pub version: Version, + /// Specifies the default version of the Solc compiler that should be used if there is no + /// override specified by one of the test cases. 
+ #[clap(long = "solc.version", default_value = "0.8.29")] + pub version: Version, } /// A set of configuration parameters for Resolc. #[derive(Clone, Debug, Parser, Serialize)] pub struct ResolcConfiguration { - /// Specifies the path of the resolc compiler to be used by the tool. - /// - /// If this is not specified, then the tool assumes that it should use the resolc binary that's - /// provided in the user's $PATH. - #[clap(id = "resolc.path", long = "resolc.path", default_value = "resolc")] - pub path: PathBuf, + /// Specifies the path of the resolc compiler to be used by the tool. + /// + /// If this is not specified, then the tool assumes that it should use the resolc binary that's + /// provided in the user's $PATH. + #[clap(id = "resolc.path", long = "resolc.path", default_value = "resolc")] + pub path: PathBuf, } /// A set of configuration parameters for Polkadot Parachain. #[derive(Clone, Debug, Parser, Serialize)] pub struct PolkadotParachainConfiguration { - /// Specifies the path of the polkadot-parachain node to be used by the tool. - /// - /// If this is not specified, then the tool assumes that it should use the polkadot-parachain binary - /// that's provided in the user's $PATH. - #[clap( - id = "polkadot-parachain.path", - long = "polkadot-parachain.path", - default_value = "polkadot-parachain" - )] - pub path: PathBuf, + /// Specifies the path of the polkadot-parachain node to be used by the tool. + /// + /// If this is not specified, then the tool assumes that it should use the polkadot-parachain + /// binary that's provided in the user's $PATH. + #[clap( + id = "polkadot-parachain.path", + long = "polkadot-parachain.path", + default_value = "polkadot-parachain" + )] + pub path: PathBuf, - /// The amount of time to wait upon startup before considering that the node timed out. - #[clap( + /// The amount of time to wait upon startup before considering that the node timed out. 
+ #[clap( id = "polkadot-parachain.start-timeout-ms", long = "polkadot-parachain.start-timeout-ms", default_value = "5000", value_parser = parse_duration )] - pub start_timeout_ms: Duration, + pub start_timeout_ms: Duration, } /// A set of configuration parameters for Geth. #[derive(Clone, Debug, Parser, Serialize)] pub struct GethConfiguration { - /// Specifies the path of the geth node to be used by the tool. - /// - /// If this is not specified, then the tool assumes that it should use the geth binary that's - /// provided in the user's $PATH. - #[clap(id = "geth.path", long = "geth.path", default_value = "geth")] - pub path: PathBuf, + /// Specifies the path of the geth node to be used by the tool. + /// + /// If this is not specified, then the tool assumes that it should use the geth binary that's + /// provided in the user's $PATH. + #[clap(id = "geth.path", long = "geth.path", default_value = "geth")] + pub path: PathBuf, - /// The amount of time to wait upon startup before considering that the node timed out. - #[clap( + /// The amount of time to wait upon startup before considering that the node timed out. + #[clap( id = "geth.start-timeout-ms", long = "geth.start-timeout-ms", default_value = "30000", value_parser = parse_duration )] - pub start_timeout_ms: Duration, + pub start_timeout_ms: Duration, } /// A set of configuration parameters for kurtosis. #[derive(Clone, Debug, Parser, Serialize)] pub struct KurtosisConfiguration { - /// Specifies the path of the kurtosis node to be used by the tool. - /// - /// If this is not specified, then the tool assumes that it should use the kurtosis binary that's - /// provided in the user's $PATH. - #[clap( - id = "kurtosis.path", - long = "kurtosis.path", - default_value = "kurtosis" - )] - pub path: PathBuf, + /// Specifies the path of the kurtosis node to be used by the tool. 
+ /// + /// If this is not specified, then the tool assumes that it should use the kurtosis binary + /// that's provided in the user's $PATH. + #[clap(id = "kurtosis.path", long = "kurtosis.path", default_value = "kurtosis")] + pub path: PathBuf, } /// A set of configuration parameters for Kitchensink. #[derive(Clone, Debug, Parser, Serialize)] pub struct KitchensinkConfiguration { - /// Specifies the path of the kitchensink node to be used by the tool. - /// - /// If this is not specified, then the tool assumes that it should use the kitchensink binary - /// that's provided in the user's $PATH. - #[clap( - id = "kitchensink.path", - long = "kitchensink.path", - default_value = "substrate-node" - )] - pub path: PathBuf, + /// Specifies the path of the kitchensink node to be used by the tool. + /// + /// If this is not specified, then the tool assumes that it should use the kitchensink binary + /// that's provided in the user's $PATH. + #[clap(id = "kitchensink.path", long = "kitchensink.path", default_value = "substrate-node")] + pub path: PathBuf, - /// The amount of time to wait upon startup before considering that the node timed out. - #[clap( + /// The amount of time to wait upon startup before considering that the node timed out. + #[clap( id = "kitchensink.start-timeout-ms", long = "kitchensink.start-timeout-ms", default_value = "30000", value_parser = parse_duration )] - pub start_timeout_ms: Duration, + pub start_timeout_ms: Duration, } /// A set of configuration parameters for the revive dev node. #[derive(Clone, Debug, Parser, Serialize)] pub struct ReviveDevNodeConfiguration { - /// Specifies the path of the revive dev node to be used by the tool. - /// - /// If this is not specified, then the tool assumes that it should use the revive dev node binary - /// that's provided in the user's $PATH. 
- #[clap( - id = "revive-dev-node.path", - long = "revive-dev-node.path", - default_value = "revive-dev-node" - )] - pub path: PathBuf, + /// Specifies the path of the revive dev node to be used by the tool. + /// + /// If this is not specified, then the tool assumes that it should use the revive dev node + /// binary that's provided in the user's $PATH. + #[clap( + id = "revive-dev-node.path", + long = "revive-dev-node.path", + default_value = "revive-dev-node" + )] + pub path: PathBuf, - /// The amount of time to wait upon startup before considering that the node timed out. - #[clap( + /// The amount of time to wait upon startup before considering that the node timed out. + #[clap( id = "revive-dev-node.start-timeout-ms", long = "revive-dev-node.start-timeout-ms", default_value = "30000", value_parser = parse_duration )] - pub start_timeout_ms: Duration, + pub start_timeout_ms: Duration, } /// A set of configuration parameters for the ETH RPC. #[derive(Clone, Debug, Parser, Serialize)] pub struct EthRpcConfiguration { - /// Specifies the path of the ETH RPC to be used by the tool. - /// - /// If this is not specified, then the tool assumes that it should use the ETH RPC binary - /// that's provided in the user's $PATH. - #[clap(id = "eth-rpc.path", long = "eth-rpc.path", default_value = "eth-rpc")] - pub path: PathBuf, + /// Specifies the path of the ETH RPC to be used by the tool. + /// + /// If this is not specified, then the tool assumes that it should use the ETH RPC binary + /// that's provided in the user's $PATH. + #[clap(id = "eth-rpc.path", long = "eth-rpc.path", default_value = "eth-rpc")] + pub path: PathBuf, - /// The amount of time to wait upon startup before considering that the node timed out. - #[clap( + /// The amount of time to wait upon startup before considering that the node timed out. 
+ #[clap( id = "eth-rpc.start-timeout-ms", long = "eth-rpc.start-timeout-ms", default_value = "30000", value_parser = parse_duration )] - pub start_timeout_ms: Duration, + pub start_timeout_ms: Duration, } /// A set of configuration parameters for the genesis. #[derive(Clone, Debug, Default, Parser, Serialize)] pub struct GenesisConfiguration { - /// Specifies the path of the genesis file to use for the nodes that are started. - /// - /// This is expected to be the path of a JSON geth genesis file. - #[clap(id = "genesis.path", long = "genesis.path")] - path: Option, + /// Specifies the path of the genesis file to use for the nodes that are started. + /// + /// This is expected to be the path of a JSON geth genesis file. + #[clap(id = "genesis.path", long = "genesis.path")] + path: Option, - /// The genesis object found at the provided path. - #[clap(skip)] - #[serde(skip)] - genesis: OnceLock, + /// The genesis object found at the provided path. + #[clap(skip)] + #[serde(skip)] + genesis: OnceLock, } impl GenesisConfiguration { - pub fn genesis(&self) -> anyhow::Result<&Genesis> { - static DEFAULT_GENESIS: LazyLock = LazyLock::new(|| { - let genesis = include_str!("../../../assets/dev-genesis.json"); - serde_json::from_str(genesis).unwrap() - }); + pub fn genesis(&self) -> anyhow::Result<&Genesis> { + static DEFAULT_GENESIS: LazyLock = LazyLock::new(|| { + let genesis = include_str!("../../../assets/dev-genesis.json"); + serde_json::from_str(genesis).unwrap() + }); - match self.genesis.get() { - Some(genesis) => Ok(genesis), - None => { - let genesis = match self.path.as_ref() { - Some(genesis_path) => { - let genesis_content = read_to_string(genesis_path)?; - serde_json::from_str(genesis_content.as_str())? 
- } - None => DEFAULT_GENESIS.clone(), - }; - Ok(self.genesis.get_or_init(|| genesis)) - } - } - } + match self.genesis.get() { + Some(genesis) => Ok(genesis), + None => { + let genesis = match self.path.as_ref() { + Some(genesis_path) => { + let genesis_content = read_to_string(genesis_path)?; + serde_json::from_str(genesis_content.as_str())? + }, + None => DEFAULT_GENESIS.clone(), + }; + Ok(self.genesis.get_or_init(|| genesis)) + }, + } + } } /// A set of configuration parameters for the wallet. #[derive(Clone, Debug, Parser, Serialize)] pub struct WalletConfiguration { - /// The private key of the default signer. - #[clap( - long = "wallet.default-private-key", - default_value = "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d" - )] - #[serde(serialize_with = "serialize_private_key")] - default_key: PrivateKeySigner, + /// The private key of the default signer. + #[clap( + long = "wallet.default-private-key", + default_value = "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d" + )] + #[serde(serialize_with = "serialize_private_key")] + default_key: PrivateKeySigner, - /// This argument controls which private keys the nodes should have access to and be added to - /// its wallet signers. With a value of N, private keys (0, N] will be added to the signer set - /// of the node. - #[clap(long = "wallet.additional-keys", default_value_t = 100_000)] - pub additional_keys: usize, + /// This argument controls which private keys the nodes should have access to and be added to + /// its wallet signers. With a value of N, private keys (0, N] will be added to the signer set + /// of the node. + #[clap(long = "wallet.additional-keys", default_value_t = 100_000)] + pub additional_keys: usize, - /// The wallet object that will be used. - #[clap(skip)] - #[serde(skip)] - wallet: OnceLock>, + /// The wallet object that will be used. 
+ #[clap(skip)] + #[serde(skip)] + wallet: OnceLock>, } impl WalletConfiguration { - pub fn wallet(&self) -> Arc { - self.wallet - .get_or_init(|| { - let mut wallet = EthereumWallet::new(self.default_key.clone()); - for signer in (1..=self.additional_keys) - .map(|id| U256::from(id)) - .map(|id| id.to_be_bytes::<32>()) - .map(|id| PrivateKeySigner::from_bytes(&FixedBytes(id)).unwrap()) - { - wallet.register_signer(signer); - } - Arc::new(wallet) - }) - .clone() - } + pub fn wallet(&self) -> Arc { + self.wallet + .get_or_init(|| { + let mut wallet = EthereumWallet::new(self.default_key.clone()); + for signer in (1..=self.additional_keys) + .map(|id| U256::from(id)) + .map(|id| id.to_be_bytes::<32>()) + .map(|id| PrivateKeySigner::from_bytes(&FixedBytes(id)).unwrap()) + { + wallet.register_signer(signer); + } + Arc::new(wallet) + }) + .clone() + } - pub fn highest_private_key_exclusive(&self) -> U256 { - U256::try_from(self.additional_keys).unwrap() - } + pub fn highest_private_key_exclusive(&self) -> U256 { + U256::try_from(self.additional_keys).unwrap() + } } impl Default for WalletConfiguration { - fn default() -> Self { - let mut config = Self::parse_from::<[&str; 0], &str>([]); - config.additional_keys = 0; - config - // config.default_key = PrivateKeySigner::from_bytes( - // &FixedBytes::from_hex_str( - // "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d", - // ) - // .unwrap(), - // ) - } + fn default() -> Self { + let mut config = Self::parse_from::<[&str; 0], &str>([]); + config.additional_keys = 0; + config + // config.default_key = PrivateKeySigner::from_bytes( + // &FixedBytes::from_hex_str( + // "0x4f3edf983ac636a65a842ce7c78d9aa706d3b113bce9c46f30d7d21715b23b1d", + // ) + // .unwrap(), + // ) + } } fn serialize_private_key(value: &PrivateKeySigner, serializer: S) -> Result where - S: Serializer, + S: Serializer, { - value.to_bytes().encode_hex().serialize(serializer) + value.to_bytes().encode_hex().serialize(serializer) } /// A set 
of configuration for concurrency. #[derive(Clone, Debug, Parser, Serialize)] pub struct ConcurrencyConfiguration { - /// Determines the amount of nodes that will be spawned for each chain. - #[clap(long = "concurrency.number-of-nodes", default_value_t = 5)] - pub number_of_nodes: usize, + /// Determines the amount of nodes that will be spawned for each chain. + #[clap(long = "concurrency.number-of-nodes", default_value_t = 5)] + pub number_of_nodes: usize, - /// Determines the amount of tokio worker threads that will will be used. - #[arg( + /// Determines the amount of tokio worker threads that will will be used. + #[arg( long = "concurrency.number-of-threads", default_value_t = std::thread::available_parallelism() .map(|n| n.get()) .unwrap_or(1) )] - pub number_of_threads: usize, + pub number_of_threads: usize, - /// Determines the amount of concurrent tasks that will be spawned to run tests. - /// - /// Defaults to 10 x the number of nodes. - #[arg(long = "concurrency.number-of-concurrent-tasks")] - number_concurrent_tasks: Option, + /// Determines the amount of concurrent tasks that will be spawned to run tests. + /// + /// Defaults to 10 x the number of nodes. + #[arg(long = "concurrency.number-of-concurrent-tasks")] + number_concurrent_tasks: Option, - /// Determines if the concurrency limit should be ignored or not. - #[arg(long = "concurrency.ignore-concurrency-limit")] - ignore_concurrency_limit: bool, + /// Determines if the concurrency limit should be ignored or not. 
+ #[arg(long = "concurrency.ignore-concurrency-limit")] + ignore_concurrency_limit: bool, } impl ConcurrencyConfiguration { - pub fn concurrency_limit(&self) -> Option { - match self.ignore_concurrency_limit { - true => None, - false => Some( - self.number_concurrent_tasks - .unwrap_or(20 * self.number_of_nodes), - ), - } - } + pub fn concurrency_limit(&self) -> Option { + match self.ignore_concurrency_limit { + true => None, + false => Some(self.number_concurrent_tasks.unwrap_or(20 * self.number_of_nodes)), + } + } } #[derive(Clone, Debug, Parser, Serialize)] pub struct CompilationConfiguration { - /// Controls if the compilation cache should be invalidated or not. - #[arg(long = "compilation.invalidate-cache")] - pub invalidate_compilation_cache: bool, + /// Controls if the compilation cache should be invalidated or not. + #[arg(long = "compilation.invalidate-cache")] + pub invalidate_compilation_cache: bool, } #[derive(Clone, Debug, Parser, Serialize)] pub struct ReportConfiguration { - /// Controls if the compiler input is included in the final report. - #[clap(long = "report.include-compiler-input")] - pub include_compiler_input: bool, + /// Controls if the compiler input is included in the final report. + #[clap(long = "report.include-compiler-input")] + pub include_compiler_input: bool, - /// Controls if the compiler output is included in the final report. - #[clap(long = "report.include-compiler-output")] - pub include_compiler_output: bool, + /// Controls if the compiler output is included in the final report. + #[clap(long = "report.include-compiler-output")] + pub include_compiler_output: bool, } /// Represents the working directory that the program uses. #[derive(Debug, Clone)] pub enum WorkingDirectoryConfiguration { - /// A temporary directory is used as the working directory. This will be removed when dropped. - TemporaryDirectory(Arc), - /// A directory with a path is used as the working directory. 
- Path(PathBuf), + /// A temporary directory is used as the working directory. This will be removed when dropped. + TemporaryDirectory(Arc), + /// A directory with a path is used as the working directory. + Path(PathBuf), } impl WorkingDirectoryConfiguration { - pub fn as_path(&self) -> &Path { - self.as_ref() - } + pub fn as_path(&self) -> &Path { + self.as_ref() + } } impl Deref for WorkingDirectoryConfiguration { - type Target = Path; + type Target = Path; - fn deref(&self) -> &Self::Target { - self.as_path() - } + fn deref(&self) -> &Self::Target { + self.as_path() + } } impl AsRef for WorkingDirectoryConfiguration { - fn as_ref(&self) -> &Path { - match self { - WorkingDirectoryConfiguration::TemporaryDirectory(temp_dir) => temp_dir.path(), - WorkingDirectoryConfiguration::Path(path) => path.as_path(), - } - } + fn as_ref(&self) -> &Path { + match self { + WorkingDirectoryConfiguration::TemporaryDirectory(temp_dir) => temp_dir.path(), + WorkingDirectoryConfiguration::Path(path) => path.as_path(), + } + } } impl Default for WorkingDirectoryConfiguration { - fn default() -> Self { - TempDir::new() - .map(Arc::new) - .map(Self::TemporaryDirectory) - .expect("Failed to create the temporary directory") - } + fn default() -> Self { + TempDir::new() + .map(Arc::new) + .map(Self::TemporaryDirectory) + .expect("Failed to create the temporary directory") + } } impl FromStr for WorkingDirectoryConfiguration { - type Err = anyhow::Error; + type Err = anyhow::Error; - fn from_str(s: &str) -> Result { - match s { - "" => Ok(Default::default()), - _ => Ok(Self::Path(PathBuf::from(s))), - } - } + fn from_str(s: &str) -> Result { + match s { + "" => Ok(Default::default()), + _ => Ok(Self::Path(PathBuf::from(s))), + } + } } impl Display for WorkingDirectoryConfiguration { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - Display::fmt(&self.as_path().display(), f) - } + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + 
Display::fmt(&self.as_path().display(), f) + } } impl Serialize for WorkingDirectoryConfiguration { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - self.as_path().serialize(serializer) - } + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + self.as_path().serialize(serializer) + } } fn parse_duration(s: &str) -> anyhow::Result { - u64::from_str(s) - .map(Duration::from_millis) - .map_err(Into::into) + u64::from_str(s).map(Duration::from_millis).map_err(Into::into) } /// The Solidity compatible node implementation. /// /// This describes the solutions to be tested against on a high level. #[derive( - Clone, - Copy, - Debug, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Serialize, - ValueEnum, - EnumString, - Display, - AsRefStr, - IntoStaticStr, + Clone, + Copy, + Debug, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Serialize, + ValueEnum, + EnumString, + Display, + AsRefStr, + IntoStaticStr, )] #[strum(serialize_all = "kebab-case")] pub enum TestingPlatform { - /// The go-ethereum reference full node EVM implementation. - Geth, - /// The kitchensink runtime provides the PolkaVM (PVM) based node implementation. - Kitchensink, - /// A polkadot/Substrate based network - Zombienet, + /// The go-ethereum reference full node EVM implementation. + Geth, + /// The kitchensink runtime provides the PolkaVM (PVM) based node implementation. 
+ Kitchensink, + /// A polkadot/Substrate based network + Zombienet, } diff --git a/crates/core/src/differential_benchmarks/driver.rs b/crates/core/src/differential_benchmarks/driver.rs index abdc6fd..9529698 100644 --- a/crates/core/src/differential_benchmarks/driver.rs +++ b/crates/core/src/differential_benchmarks/driver.rs @@ -1,270 +1,266 @@ use std::{ - collections::HashMap, - ops::ControlFlow, - sync::{ - Arc, - atomic::{AtomicUsize, Ordering}, - }, - time::Duration, + collections::HashMap, + ops::ControlFlow, + sync::{ + Arc, + atomic::{AtomicUsize, Ordering}, + }, + time::Duration, }; use alloy::{ - hex, - json_abi::JsonAbi, - network::{Ethereum, TransactionBuilder}, - primitives::{Address, TxHash, U256}, - rpc::types::{ - TransactionReceipt, TransactionRequest, - trace::geth::{ - CallFrame, GethDebugBuiltInTracerType, GethDebugTracerConfig, GethDebugTracerType, - GethDebugTracingOptions, - }, - }, + hex, + json_abi::JsonAbi, + network::{Ethereum, TransactionBuilder}, + primitives::{Address, TxHash, U256}, + rpc::types::{ + TransactionReceipt, TransactionRequest, + trace::geth::{ + CallFrame, GethDebugBuiltInTracerType, GethDebugTracerConfig, GethDebugTracerType, + GethDebugTracingOptions, + }, + }, }; use anyhow::{Context as _, Result, bail}; use indexmap::IndexMap; use revive_dt_common::{ - futures::{PollingWaitBehavior, poll}, - types::PrivateKeyAllocator, + futures::{PollingWaitBehavior, poll}, + types::PrivateKeyAllocator, }; use revive_dt_format::{ - metadata::{ContractInstance, ContractPathAndIdent}, - steps::{ - AllocateAccountStep, BalanceAssertionStep, Calldata, EtherValue, FunctionCallStep, Method, - RepeatStep, Step, StepAddress, StepIdx, StepPath, StorageEmptyAssertionStep, - }, - traits::{ResolutionContext, ResolverApi}, + metadata::{ContractInstance, ContractPathAndIdent}, + steps::{ + AllocateAccountStep, BalanceAssertionStep, Calldata, EtherValue, FunctionCallStep, Method, + RepeatStep, Step, StepAddress, StepIdx, StepPath, 
StorageEmptyAssertionStep, + }, + traits::{ResolutionContext, ResolverApi}, }; use tokio::sync::{Mutex, mpsc::UnboundedSender}; use tracing::{Instrument, Span, debug, error, field::display, info, info_span, instrument}; use crate::{ - differential_benchmarks::{ExecutionState, WatcherEvent}, - helpers::{CachedCompiler, TestDefinition, TestPlatformInformation}, + differential_benchmarks::{ExecutionState, WatcherEvent}, + helpers::{CachedCompiler, TestDefinition, TestPlatformInformation}, }; static DRIVER_COUNT: AtomicUsize = AtomicUsize::new(0); /// The differential tests driver for a single platform. pub struct Driver<'a, I> { - /// The id of the driver. - driver_id: usize, + /// The id of the driver. + driver_id: usize, - /// The information of the platform that this driver is for. - platform_information: &'a TestPlatformInformation<'a>, + /// The information of the platform that this driver is for. + platform_information: &'a TestPlatformInformation<'a>, - /// The resolver of the platform. - resolver: Arc, + /// The resolver of the platform. + resolver: Arc, - /// The definition of the test that the driver is instructed to execute. - test_definition: &'a TestDefinition<'a>, + /// The definition of the test that the driver is instructed to execute. + test_definition: &'a TestDefinition<'a>, - /// The private key allocator used by this driver and other drivers when account allocations are - /// needed. - private_key_allocator: Arc>, + /// The private key allocator used by this driver and other drivers when account allocations + /// are needed. + private_key_allocator: Arc>, - /// The execution state associated with the platform. - execution_state: ExecutionState, + /// The execution state associated with the platform. + execution_state: ExecutionState, - /// The send side of the watcher's unbounded channel associated with this driver. - watcher_tx: UnboundedSender, + /// The send side of the watcher's unbounded channel associated with this driver. 
+ watcher_tx: UnboundedSender, - /// The number of steps that were executed on the driver. - steps_executed: usize, + /// The number of steps that were executed on the driver. + steps_executed: usize, - /// This is the queue of steps that are to be executed by the driver for this test case. Each - /// time `execute_step` is called one of the steps is executed. - steps_iterator: I, + /// This is the queue of steps that are to be executed by the driver for this test case. Each + /// time `execute_step` is called one of the steps is executed. + steps_iterator: I, } impl<'a, I> Driver<'a, I> where - I: Iterator, + I: Iterator, { - // region:Constructors & Initialization - pub async fn new( - platform_information: &'a TestPlatformInformation<'a>, - test_definition: &'a TestDefinition<'a>, - private_key_allocator: Arc>, - cached_compiler: &CachedCompiler<'a>, - watcher_tx: UnboundedSender, - steps: I, - ) -> Result { - let mut this = Driver { - driver_id: DRIVER_COUNT.fetch_add(1, Ordering::SeqCst), - platform_information, - resolver: platform_information - .node - .resolver() - .await - .context("Failed to create resolver")?, - test_definition, - private_key_allocator, - execution_state: ExecutionState::empty(), - steps_executed: 0, - steps_iterator: steps, - watcher_tx, - }; - this.init_execution_state(cached_compiler) - .await - .context("Failed to initialize the execution state of the platform")?; - Ok(this) - } + // region:Constructors & Initialization + pub async fn new( + platform_information: &'a TestPlatformInformation<'a>, + test_definition: &'a TestDefinition<'a>, + private_key_allocator: Arc>, + cached_compiler: &CachedCompiler<'a>, + watcher_tx: UnboundedSender, + steps: I, + ) -> Result { + let mut this = Driver { + driver_id: DRIVER_COUNT.fetch_add(1, Ordering::SeqCst), + platform_information, + resolver: platform_information + .node + .resolver() + .await + .context("Failed to create resolver")?, + test_definition, + private_key_allocator, + 
execution_state: ExecutionState::empty(), + steps_executed: 0, + steps_iterator: steps, + watcher_tx, + }; + this.init_execution_state(cached_compiler) + .await + .context("Failed to initialize the execution state of the platform")?; + Ok(this) + } - async fn init_execution_state(&mut self, cached_compiler: &CachedCompiler<'a>) -> Result<()> { - let compiler_output = cached_compiler - .compile_contracts( - self.test_definition.metadata, - self.test_definition.metadata_file_path, - self.test_definition.mode.clone(), - None, - self.platform_information.compiler.as_ref(), - self.platform_information.platform, - &self.platform_information.reporter, - ) - .await - .inspect_err(|err| { - error!( - ?err, - platform_identifier = %self.platform_information.platform.platform_identifier(), - "Pre-linking compilation failed" - ) - }) - .context("Failed to produce the pre-linking compiled contracts")?; + async fn init_execution_state(&mut self, cached_compiler: &CachedCompiler<'a>) -> Result<()> { + let compiler_output = cached_compiler + .compile_contracts( + self.test_definition.metadata, + self.test_definition.metadata_file_path, + self.test_definition.mode.clone(), + None, + self.platform_information.compiler.as_ref(), + self.platform_information.platform, + &self.platform_information.reporter, + ) + .await + .inspect_err(|err| { + error!( + ?err, + platform_identifier = %self.platform_information.platform.platform_identifier(), + "Pre-linking compilation failed" + ) + }) + .context("Failed to produce the pre-linking compiled contracts")?; - let mut deployed_libraries = None::>; - let mut contract_sources = self - .test_definition - .metadata - .contract_sources() - .inspect_err(|err| { - error!( - ?err, - platform_identifier = %self.platform_information.platform.platform_identifier(), - "Failed to retrieve contract sources from metadata" - ) - }) - .context("Failed to get the contract instances from the metadata file")?; - for library_instance in self - .test_definition - 
.metadata - .libraries - .iter() - .flatten() - .flat_map(|(_, map)| map.values()) - { - debug!(%library_instance, "Deploying Library Instance"); + let mut deployed_libraries = None::>; + let mut contract_sources = self + .test_definition + .metadata + .contract_sources() + .inspect_err(|err| { + error!( + ?err, + platform_identifier = %self.platform_information.platform.platform_identifier(), + "Failed to retrieve contract sources from metadata" + ) + }) + .context("Failed to get the contract instances from the metadata file")?; + for library_instance in self + .test_definition + .metadata + .libraries + .iter() + .flatten() + .flat_map(|(_, map)| map.values()) + { + debug!(%library_instance, "Deploying Library Instance"); - let ContractPathAndIdent { - contract_source_path: library_source_path, - contract_ident: library_ident, - } = contract_sources - .remove(library_instance) - .context("Failed to get the contract sources of the contract instance")?; + let ContractPathAndIdent { + contract_source_path: library_source_path, + contract_ident: library_ident, + } = contract_sources + .remove(library_instance) + .context("Failed to get the contract sources of the contract instance")?; - let (code, abi) = compiler_output - .contracts - .get(&library_source_path) - .and_then(|contracts| contracts.get(library_ident.as_str())) - .context("Failed to get the code and abi for the instance")?; + let (code, abi) = compiler_output + .contracts + .get(&library_source_path) + .and_then(|contracts| contracts.get(library_ident.as_str())) + .context("Failed to get the code and abi for the instance")?; - let code = alloy::hex::decode(code)?; + let code = alloy::hex::decode(code)?; - // Getting the deployer address from the cases themselves. This is to ensure - // that we're doing the deployments from different accounts and therefore we're - // not slowed down by the nonce. 
- let deployer_address = self - .test_definition - .case - .steps - .iter() - .filter_map(|step| match step { - Step::FunctionCall(input) => input.caller.as_address().copied(), - Step::BalanceAssertion(..) => None, - Step::StorageEmptyAssertion(..) => None, - Step::Repeat(..) => None, - Step::AllocateAccount(..) => None, - }) - .next() - .unwrap_or(FunctionCallStep::default_caller_address()); - let tx = TransactionBuilder::::with_deploy_code( - TransactionRequest::default().from(deployer_address), - code, - ); - let receipt = self.execute_transaction(tx).await.inspect_err(|err| { - error!( - ?err, - %library_instance, - platform_identifier = %self.platform_information.platform.platform_identifier(), - "Failed to deploy the library" - ) - })?; + // Getting the deployer address from the cases themselves. This is to ensure + // that we're doing the deployments from different accounts and therefore we're + // not slowed down by the nonce. + let deployer_address = self + .test_definition + .case + .steps + .iter() + .filter_map(|step| match step { + Step::FunctionCall(input) => input.caller.as_address().copied(), + Step::BalanceAssertion(..) => None, + Step::StorageEmptyAssertion(..) => None, + Step::Repeat(..) => None, + Step::AllocateAccount(..) 
=> None, + }) + .next() + .unwrap_or(FunctionCallStep::default_caller_address()); + let tx = TransactionBuilder::::with_deploy_code( + TransactionRequest::default().from(deployer_address), + code, + ); + let receipt = self.execute_transaction(tx).await.inspect_err(|err| { + error!( + ?err, + %library_instance, + platform_identifier = %self.platform_information.platform.platform_identifier(), + "Failed to deploy the library" + ) + })?; - debug!( - ?library_instance, - platform_identifier = %self.platform_information.platform.platform_identifier(), - "Deployed library" - ); + debug!( + ?library_instance, + platform_identifier = %self.platform_information.platform.platform_identifier(), + "Deployed library" + ); - let library_address = receipt - .contract_address - .expect("Failed to deploy the library"); + let library_address = receipt.contract_address.expect("Failed to deploy the library"); - deployed_libraries.get_or_insert_default().insert( - library_instance.clone(), - (library_ident.clone(), library_address, abi.clone()), - ); - } + deployed_libraries.get_or_insert_default().insert( + library_instance.clone(), + (library_ident.clone(), library_address, abi.clone()), + ); + } - let compiler_output = cached_compiler - .compile_contracts( - self.test_definition.metadata, - self.test_definition.metadata_file_path, - self.test_definition.mode.clone(), - deployed_libraries.as_ref(), - self.platform_information.compiler.as_ref(), - self.platform_information.platform, - &self.platform_information.reporter, - ) - .await - .inspect_err(|err| { - error!( - ?err, - platform_identifier = %self.platform_information.platform.platform_identifier(), - "Post-linking compilation failed" - ) - }) - .context("Failed to compile the post-link contracts")?; + let compiler_output = cached_compiler + .compile_contracts( + self.test_definition.metadata, + self.test_definition.metadata_file_path, + self.test_definition.mode.clone(), + deployed_libraries.as_ref(), + 
self.platform_information.compiler.as_ref(), + self.platform_information.platform, + &self.platform_information.reporter, + ) + .await + .inspect_err(|err| { + error!( + ?err, + platform_identifier = %self.platform_information.platform.platform_identifier(), + "Post-linking compilation failed" + ) + }) + .context("Failed to compile the post-link contracts")?; - self.execution_state = ExecutionState::new( - compiler_output.contracts, - deployed_libraries.unwrap_or_default(), - ); + self.execution_state = + ExecutionState::new(compiler_output.contracts, deployed_libraries.unwrap_or_default()); - Ok(()) - } - // endregion:Constructors & Initialization + Ok(()) + } + // endregion:Constructors & Initialization - // region:Step Handling - pub async fn execute_all(mut self) -> Result { - while let Some(result) = self.execute_next_step().await { - result? - } - Ok(self.steps_executed) - } + // region:Step Handling + pub async fn execute_all(mut self) -> Result { + while let Some(result) = self.execute_next_step().await { + result? 
+ } + Ok(self.steps_executed) + } - pub async fn execute_next_step(&mut self) -> Option> { - let (step_path, step) = self.steps_iterator.next()?; - info!(%step_path, "Executing Step"); - Some( - self.execute_step(&step_path, &step) - .await - .inspect(|_| info!(%step_path, "Step execution succeeded")) - .inspect_err(|err| error!(%step_path, ?err, "Step execution failed")), - ) - } + pub async fn execute_next_step(&mut self) -> Option> { + let (step_path, step) = self.steps_iterator.next()?; + info!(%step_path, "Executing Step"); + Some( + self.execute_step(&step_path, &step) + .await + .inspect(|_| info!(%step_path, "Step execution succeeded")) + .inspect_err(|err| error!(%step_path, ?err, "Step execution failed")), + ) + } - #[instrument( + #[instrument( level = "info", skip_all, fields( @@ -274,275 +270,258 @@ where ), err(Debug), )] - async fn execute_step(&mut self, step_path: &StepPath, step: &Step) -> Result<()> { - let steps_executed = match step { - Step::FunctionCall(step) => self - .execute_function_call(step_path, step.as_ref()) - .await - .context("Function call step Failed"), - Step::Repeat(step) => self - .execute_repeat_step(step_path, step.as_ref()) - .await - .context("Repetition Step Failed"), - Step::AllocateAccount(step) => self - .execute_account_allocation(step_path, step.as_ref()) - .await - .context("Account Allocation Step Failed"), - // The following steps are disabled in the benchmarking driver. - Step::BalanceAssertion(..) | Step::StorageEmptyAssertion(..) 
=> Ok(0), - }?; - self.steps_executed += steps_executed; - Ok(()) - } + async fn execute_step(&mut self, step_path: &StepPath, step: &Step) -> Result<()> { + let steps_executed = match step { + Step::FunctionCall(step) => self + .execute_function_call(step_path, step.as_ref()) + .await + .context("Function call step Failed"), + Step::Repeat(step) => self + .execute_repeat_step(step_path, step.as_ref()) + .await + .context("Repetition Step Failed"), + Step::AllocateAccount(step) => self + .execute_account_allocation(step_path, step.as_ref()) + .await + .context("Account Allocation Step Failed"), + // The following steps are disabled in the benchmarking driver. + Step::BalanceAssertion(..) | Step::StorageEmptyAssertion(..) => Ok(0), + }?; + self.steps_executed += steps_executed; + Ok(()) + } - #[instrument(level = "info", skip_all, fields(driver_id = self.driver_id))] - pub async fn execute_function_call( - &mut self, - _: &StepPath, - step: &FunctionCallStep, - ) -> Result { - let deployment_receipts = self - .handle_function_call_contract_deployment(step) - .await - .context("Failed to deploy contracts for the function call step")?; - let execution_receipt = self - .handle_function_call_execution(step, deployment_receipts) - .await - .context("Failed to handle the function call execution")?; - let tracing_result = self - .handle_function_call_call_frame_tracing(execution_receipt.transaction_hash) - .await - .context("Failed to handle the function call call frame tracing")?; - self.handle_function_call_variable_assignment(step, &tracing_result) - .await - .context("Failed to handle function call variable assignment")?; - Ok(1) - } + #[instrument(level = "info", skip_all, fields(driver_id = self.driver_id))] + pub async fn execute_function_call( + &mut self, + _: &StepPath, + step: &FunctionCallStep, + ) -> Result { + let deployment_receipts = self + .handle_function_call_contract_deployment(step) + .await + .context("Failed to deploy contracts for the function call 
step")?; + let execution_receipt = self + .handle_function_call_execution(step, deployment_receipts) + .await + .context("Failed to handle the function call execution")?; + let tracing_result = self + .handle_function_call_call_frame_tracing(execution_receipt.transaction_hash) + .await + .context("Failed to handle the function call call frame tracing")?; + self.handle_function_call_variable_assignment(step, &tracing_result) + .await + .context("Failed to handle function call variable assignment")?; + Ok(1) + } - async fn handle_function_call_contract_deployment( - &mut self, - step: &FunctionCallStep, - ) -> Result> { - let mut instances_we_must_deploy = IndexMap::::new(); - for instance in step.find_all_contract_instances().into_iter() { - if !self - .execution_state - .deployed_contracts - .contains_key(&instance) - { - instances_we_must_deploy.entry(instance).or_insert(false); - } - } - if let Method::Deployer = step.method { - instances_we_must_deploy.swap_remove(&step.instance); - instances_we_must_deploy.insert(step.instance.clone(), true); - } + async fn handle_function_call_contract_deployment( + &mut self, + step: &FunctionCallStep, + ) -> Result> { + let mut instances_we_must_deploy = IndexMap::::new(); + for instance in step.find_all_contract_instances().into_iter() { + if !self.execution_state.deployed_contracts.contains_key(&instance) { + instances_we_must_deploy.entry(instance).or_insert(false); + } + } + if let Method::Deployer = step.method { + instances_we_must_deploy.swap_remove(&step.instance); + instances_we_must_deploy.insert(step.instance.clone(), true); + } - let mut receipts = HashMap::new(); - for (instance, deploy_with_constructor_arguments) in instances_we_must_deploy.into_iter() { - let calldata = deploy_with_constructor_arguments.then_some(&step.calldata); - let value = deploy_with_constructor_arguments - .then_some(step.value) - .flatten(); + let mut receipts = HashMap::new(); + for (instance, deploy_with_constructor_arguments) in 
instances_we_must_deploy.into_iter() { + let calldata = deploy_with_constructor_arguments.then_some(&step.calldata); + let value = deploy_with_constructor_arguments.then_some(step.value).flatten(); - let caller = { - let context = self.default_resolution_context(); - step.caller - .resolve_address(self.resolver.as_ref(), context) - .await? - }; - if let (_, _, Some(receipt)) = self - .get_or_deploy_contract_instance(&instance, caller, calldata, value) - .await - .context("Failed to get or deploy contract instance during input execution")? - { - receipts.insert(instance.clone(), receipt); - } - } + let caller = { + let context = self.default_resolution_context(); + step.caller.resolve_address(self.resolver.as_ref(), context).await? + }; + if let (_, _, Some(receipt)) = self + .get_or_deploy_contract_instance(&instance, caller, calldata, value) + .await + .context("Failed to get or deploy contract instance during input execution")? + { + receipts.insert(instance.clone(), receipt); + } + } - Ok(receipts) - } + Ok(receipts) + } - async fn handle_function_call_execution( - &mut self, - step: &FunctionCallStep, - mut deployment_receipts: HashMap, - ) -> Result { - match step.method { - // This step was already executed when `handle_step` was called. We just need to - // lookup the transaction receipt in this case and continue on. - Method::Deployer => deployment_receipts - .remove(&step.instance) - .context("Failed to find deployment receipt for constructor call"), - Method::Fallback | Method::FunctionName(_) => { - let tx = step - .as_transaction(self.resolver.as_ref(), self.default_resolution_context()) - .await?; - self.execute_transaction(tx).await - } - } - } + async fn handle_function_call_execution( + &mut self, + step: &FunctionCallStep, + mut deployment_receipts: HashMap, + ) -> Result { + match step.method { + // This step was already executed when `handle_step` was called. We just need to + // lookup the transaction receipt in this case and continue on. 
+ Method::Deployer => deployment_receipts + .remove(&step.instance) + .context("Failed to find deployment receipt for constructor call"), + Method::Fallback | Method::FunctionName(_) => { + let tx = step + .as_transaction(self.resolver.as_ref(), self.default_resolution_context()) + .await?; + self.execute_transaction(tx).await + }, + } + } - async fn handle_function_call_call_frame_tracing( - &mut self, - tx_hash: TxHash, - ) -> Result { - self.platform_information - .node - .trace_transaction( - tx_hash, - GethDebugTracingOptions { - tracer: Some(GethDebugTracerType::BuiltInTracer( - GethDebugBuiltInTracerType::CallTracer, - )), - tracer_config: GethDebugTracerConfig(serde_json::json! {{ - "onlyTopCall": true, - "withLog": false, - "withStorage": false, - "withMemory": false, - "withStack": false, - "withReturnData": true - }}), - ..Default::default() - }, - ) - .await - .map(|trace| { - trace - .try_into_call_frame() - .expect("Impossible - we requested a callframe trace so we must get it back") - }) - } + async fn handle_function_call_call_frame_tracing( + &mut self, + tx_hash: TxHash, + ) -> Result { + self.platform_information + .node + .trace_transaction( + tx_hash, + GethDebugTracingOptions { + tracer: Some(GethDebugTracerType::BuiltInTracer( + GethDebugBuiltInTracerType::CallTracer, + )), + tracer_config: GethDebugTracerConfig(serde_json::json! 
{{ + "onlyTopCall": true, + "withLog": false, + "withStorage": false, + "withMemory": false, + "withStack": false, + "withReturnData": true + }}), + ..Default::default() + }, + ) + .await + .map(|trace| { + trace + .try_into_call_frame() + .expect("Impossible - we requested a callframe trace so we must get it back") + }) + } - async fn handle_function_call_variable_assignment( - &mut self, - step: &FunctionCallStep, - tracing_result: &CallFrame, - ) -> Result<()> { - let Some(ref assignments) = step.variable_assignments else { - return Ok(()); - }; + async fn handle_function_call_variable_assignment( + &mut self, + step: &FunctionCallStep, + tracing_result: &CallFrame, + ) -> Result<()> { + let Some(ref assignments) = step.variable_assignments else { + return Ok(()); + }; - // Handling the return data variable assignments. - for (variable_name, output_word) in assignments.return_data.iter().zip( - tracing_result - .output - .as_ref() - .unwrap_or_default() - .to_vec() - .chunks(32), - ) { - let value = U256::from_be_slice(output_word); - self.execution_state - .variables - .insert(variable_name.clone(), value); - tracing::info!( - variable_name, - variable_value = hex::encode(value.to_be_bytes::<32>()), - "Assigned variable" - ); - } + // Handling the return data variable assignments. + for (variable_name, output_word) in assignments + .return_data + .iter() + .zip(tracing_result.output.as_ref().unwrap_or_default().to_vec().chunks(32)) + { + let value = U256::from_be_slice(output_word); + self.execution_state.variables.insert(variable_name.clone(), value); + tracing::info!( + variable_name, + variable_value = hex::encode(value.to_be_bytes::<32>()), + "Assigned variable" + ); + } - Ok(()) - } + Ok(()) + } - #[instrument(level = "info", skip_all, fields(driver_id = self.driver_id))] - pub async fn execute_balance_assertion( - &mut self, - _: &StepPath, - _: &BalanceAssertionStep, - ) -> anyhow::Result { - // Kept empty intentionally for the benchmark driver. 
- Ok(1) - } + #[instrument(level = "info", skip_all, fields(driver_id = self.driver_id))] + pub async fn execute_balance_assertion( + &mut self, + _: &StepPath, + _: &BalanceAssertionStep, + ) -> anyhow::Result { + // Kept empty intentionally for the benchmark driver. + Ok(1) + } - #[instrument(level = "info", skip_all, fields(driver_id = self.driver_id), err(Debug))] - async fn execute_storage_empty_assertion_step( - &mut self, - _: &StepPath, - _: &StorageEmptyAssertionStep, - ) -> Result { - // Kept empty intentionally for the benchmark driver. - Ok(1) - } + #[instrument(level = "info", skip_all, fields(driver_id = self.driver_id), err(Debug))] + async fn execute_storage_empty_assertion_step( + &mut self, + _: &StepPath, + _: &StorageEmptyAssertionStep, + ) -> Result { + // Kept empty intentionally for the benchmark driver. + Ok(1) + } - #[instrument(level = "info", skip_all, fields(driver_id = self.driver_id), err(Debug))] - async fn execute_repeat_step( - &mut self, - step_path: &StepPath, - step: &RepeatStep, - ) -> Result { - let tasks = (0..step.repeat) - .map(|_| Driver { - driver_id: DRIVER_COUNT.fetch_add(1, Ordering::SeqCst), - platform_information: self.platform_information, - resolver: self.resolver.clone(), - test_definition: self.test_definition, - private_key_allocator: self.private_key_allocator.clone(), - execution_state: self.execution_state.clone(), - steps_executed: 0, - steps_iterator: { - let steps = step - .steps - .iter() - .cloned() - .enumerate() - .map(|(step_idx, step)| { - let step_idx = StepIdx::new(step_idx); - let step_path = step_path.append(step_idx); - (step_path, step) - }) - .collect::>(); - steps.into_iter() - }, - watcher_tx: self.watcher_tx.clone(), - }) - .map(|driver| driver.execute_all()); + #[instrument(level = "info", skip_all, fields(driver_id = self.driver_id), err(Debug))] + async fn execute_repeat_step( + &mut self, + step_path: &StepPath, + step: &RepeatStep, + ) -> Result { + let tasks = (0..step.repeat) + 
.map(|_| Driver { + driver_id: DRIVER_COUNT.fetch_add(1, Ordering::SeqCst), + platform_information: self.platform_information, + resolver: self.resolver.clone(), + test_definition: self.test_definition, + private_key_allocator: self.private_key_allocator.clone(), + execution_state: self.execution_state.clone(), + steps_executed: 0, + steps_iterator: { + let steps = step + .steps + .iter() + .cloned() + .enumerate() + .map(|(step_idx, step)| { + let step_idx = StepIdx::new(step_idx); + let step_path = step_path.append(step_idx); + (step_path, step) + }) + .collect::>(); + steps.into_iter() + }, + watcher_tx: self.watcher_tx.clone(), + }) + .map(|driver| driver.execute_all()); - // TODO: Determine how we want to know the `ignore_block_before` and if it's through the - // receipt and how this would impact the architecture and the possibility of us not waiting - // for receipts in the future. - self.watcher_tx - .send(WatcherEvent::RepetitionStartEvent { - ignore_block_before: 0, - }) - .context("Failed to send message on the watcher's tx")?; + // TODO: Determine how we want to know the `ignore_block_before` and if it's through the + // receipt and how this would impact the architecture and the possibility of us not waiting + // for receipts in the future. 
+ self.watcher_tx + .send(WatcherEvent::RepetitionStartEvent { ignore_block_before: 0 }) + .context("Failed to send message on the watcher's tx")?; - let res = futures::future::try_join_all(tasks) - .await - .context("Repetition execution failed")?; - Ok(res.into_iter().sum()) - } + let res = futures::future::try_join_all(tasks) + .await + .context("Repetition execution failed")?; + Ok(res.into_iter().sum()) + } - #[instrument(level = "info", fields(driver_id = self.driver_id), skip_all, err(Debug))] - pub async fn execute_account_allocation( - &mut self, - _: &StepPath, - step: &AllocateAccountStep, - ) -> Result { - let Some(variable_name) = step.variable_name.strip_prefix("$VARIABLE:") else { - bail!("Account allocation must start with $VARIABLE:"); - }; + #[instrument(level = "info", fields(driver_id = self.driver_id), skip_all, err(Debug))] + pub async fn execute_account_allocation( + &mut self, + _: &StepPath, + step: &AllocateAccountStep, + ) -> Result { + let Some(variable_name) = step.variable_name.strip_prefix("$VARIABLE:") else { + bail!("Account allocation must start with $VARIABLE:"); + }; - let private_key = self - .private_key_allocator - .lock() - .await - .allocate() - .context("Account allocation through the private key allocator failed")?; - let account = private_key.address(); - let variable = U256::from_be_slice(account.0.as_slice()); + let private_key = self + .private_key_allocator + .lock() + .await + .allocate() + .context("Account allocation through the private key allocator failed")?; + let account = private_key.address(); + let variable = U256::from_be_slice(account.0.as_slice()); - self.execution_state - .variables - .insert(variable_name.to_string(), variable); + self.execution_state.variables.insert(variable_name.to_string(), variable); - Ok(1) - } - // endregion:Step Handling + Ok(1) + } + // endregion:Step Handling - // region:Contract Deployment - #[instrument( + // region:Contract Deployment + #[instrument( level = "info", 
skip_all, fields( @@ -553,39 +532,37 @@ where ), err(Debug), )] - async fn get_or_deploy_contract_instance( - &mut self, - contract_instance: &ContractInstance, - deployer: Address, - calldata: Option<&Calldata>, - value: Option, - ) -> Result<(Address, JsonAbi, Option)> { - if let Some((_, address, abi)) = self - .execution_state - .deployed_contracts - .get(contract_instance) - { - info!( + async fn get_or_deploy_contract_instance( + &mut self, + contract_instance: &ContractInstance, + deployer: Address, + calldata: Option<&Calldata>, + value: Option, + ) -> Result<(Address, JsonAbi, Option)> { + if let Some((_, address, abi)) = + self.execution_state.deployed_contracts.get(contract_instance) + { + info!( - %address, - "Contract instance already deployed." - ); - Ok((*address, abi.clone(), None)) - } else { - info!("Contract instance requires deployment."); - let (address, abi, receipt) = self - .deploy_contract(contract_instance, deployer, calldata, value) - .await - .context("Failed to deploy contract")?; - info!( - %address, - "Contract instance has been deployed." - ); - Ok((address, abi, Some(receipt))) - } - } + %address, + "Contract instance already deployed." + ); + Ok((*address, abi.clone(), None)) + } else { + info!("Contract instance requires deployment."); + let (address, abi, receipt) = self + .deploy_contract(contract_instance, deployer, calldata, value) + .await + .context("Failed to deploy contract")?; + info!( + %address, + "Contract instance has been deployed." 
+ ); + Ok((address, abi, Some(receipt))) + } + } - #[instrument( + #[instrument( level = "info", skip_all, fields( @@ -596,175 +573,160 @@ where ), err(Debug), )] - async fn deploy_contract( - &mut self, - contract_instance: &ContractInstance, - deployer: Address, - calldata: Option<&Calldata>, - value: Option, - ) -> Result<(Address, JsonAbi, TransactionReceipt)> { - let Some(ContractPathAndIdent { - contract_source_path, - contract_ident, - }) = self - .test_definition - .metadata - .contract_sources()? - .remove(contract_instance) - else { - anyhow::bail!( - "Contract source not found for instance {:?}", - contract_instance - ) - }; + async fn deploy_contract( + &mut self, + contract_instance: &ContractInstance, + deployer: Address, + calldata: Option<&Calldata>, + value: Option, + ) -> Result<(Address, JsonAbi, TransactionReceipt)> { + let Some(ContractPathAndIdent { contract_source_path, contract_ident }) = + self.test_definition.metadata.contract_sources()?.remove(contract_instance) + else { + anyhow::bail!("Contract source not found for instance {:?}", contract_instance) + }; - let Some((code, abi)) = self - .execution_state - .compiled_contracts - .get(&contract_source_path) - .and_then(|source_file_contracts| source_file_contracts.get(contract_ident.as_ref())) - .cloned() - else { - anyhow::bail!( - "Failed to find information for contract {:?}", - contract_instance - ) - }; + let Some((code, abi)) = self + .execution_state + .compiled_contracts + .get(&contract_source_path) + .and_then(|source_file_contracts| source_file_contracts.get(contract_ident.as_ref())) + .cloned() + else { + anyhow::bail!("Failed to find information for contract {:?}", contract_instance) + }; - let mut code = match alloy::hex::decode(&code) { - Ok(code) => code, - Err(error) => { - tracing::error!( - ?error, - contract_source_path = contract_source_path.display().to_string(), - contract_ident = contract_ident.as_ref(), - "Failed to hex-decode byte code - This could possibly mean 
that the bytecode requires linking" - ); - anyhow::bail!("Failed to hex-decode the byte code {}", error) - } - }; + let mut code = match alloy::hex::decode(&code) { + Ok(code) => code, + Err(error) => { + tracing::error!( + ?error, + contract_source_path = contract_source_path.display().to_string(), + contract_ident = contract_ident.as_ref(), + "Failed to hex-decode byte code - This could possibly mean that the bytecode requires linking" + ); + anyhow::bail!("Failed to hex-decode the byte code {}", error) + }, + }; - if let Some(calldata) = calldata { - let calldata = calldata - .calldata(self.resolver.as_ref(), self.default_resolution_context()) - .await?; - code.extend(calldata); - } + if let Some(calldata) = calldata { + let calldata = calldata + .calldata(self.resolver.as_ref(), self.default_resolution_context()) + .await?; + code.extend(calldata); + } - let tx = { - let tx = TransactionRequest::default().from(deployer); - let tx = match value { - Some(ref value) => tx.value(value.into_inner()), - _ => tx, - }; - TransactionBuilder::::with_deploy_code(tx, code) - }; + let tx = { + let tx = TransactionRequest::default().from(deployer); + let tx = match value { + Some(ref value) => tx.value(value.into_inner()), + _ => tx, + }; + TransactionBuilder::::with_deploy_code(tx, code) + }; - let receipt = match self.execute_transaction(tx).await { - Ok(receipt) => receipt, - Err(error) => { - tracing::error!(?error, "Contract deployment transaction failed."); - return Err(error); - } - }; + let receipt = match self.execute_transaction(tx).await { + Ok(receipt) => receipt, + Err(error) => { + tracing::error!(?error, "Contract deployment transaction failed."); + return Err(error); + }, + }; - let Some(address) = receipt.contract_address else { - anyhow::bail!("Contract deployment didn't return an address"); - }; - tracing::info!( - instance_name = ?contract_instance, - instance_address = ?address, - "Deployed contract" - ); - self.platform_information - .reporter - 
.report_contract_deployed_event(contract_instance.clone(), address)?; + let Some(address) = receipt.contract_address else { + anyhow::bail!("Contract deployment didn't return an address"); + }; + tracing::info!( + instance_name = ?contract_instance, + instance_address = ?address, + "Deployed contract" + ); + self.platform_information + .reporter + .report_contract_deployed_event(contract_instance.clone(), address)?; - self.execution_state.deployed_contracts.insert( - contract_instance.clone(), - (contract_ident, address, abi.clone()), - ); + self.execution_state + .deployed_contracts + .insert(contract_instance.clone(), (contract_ident, address, abi.clone())); - Ok((address, abi, receipt)) - } + Ok((address, abi, receipt)) + } - #[instrument(level = "info", fields(driver_id = self.driver_id), skip_all)] - async fn step_address_auto_deployment( - &mut self, - step_address: &StepAddress, - ) -> Result
{ - match step_address { - StepAddress::Address(address) => Ok(*address), - StepAddress::ResolvableAddress(resolvable) => { - let Some(instance) = resolvable - .strip_suffix(".address") - .map(ContractInstance::new) - else { - bail!("Not an address variable"); - }; + #[instrument(level = "info", fields(driver_id = self.driver_id), skip_all)] + async fn step_address_auto_deployment( + &mut self, + step_address: &StepAddress, + ) -> Result
{ + match step_address { + StepAddress::Address(address) => Ok(*address), + StepAddress::ResolvableAddress(resolvable) => { + let Some(instance) = resolvable.strip_suffix(".address").map(ContractInstance::new) + else { + bail!("Not an address variable"); + }; - self.get_or_deploy_contract_instance( - &instance, - FunctionCallStep::default_caller_address(), - None, - None, - ) - .await - .map(|v| v.0) - } - } - } - // endregion:Contract Deployment + self.get_or_deploy_contract_instance( + &instance, + FunctionCallStep::default_caller_address(), + None, + None, + ) + .await + .map(|v| v.0) + }, + } + } + // endregion:Contract Deployment - // region:Resolution & Resolver - fn default_resolution_context(&self) -> ResolutionContext<'_> { - ResolutionContext::default() - .with_deployed_contracts(&self.execution_state.deployed_contracts) - .with_variables(&self.execution_state.variables) - } - // endregion:Resolution & Resolver + // region:Resolution & Resolver + fn default_resolution_context(&self) -> ResolutionContext<'_> { + ResolutionContext::default() + .with_deployed_contracts(&self.execution_state.deployed_contracts) + .with_variables(&self.execution_state.variables) + } + // endregion:Resolution & Resolver - // region:Transaction Execution - /// Executes the transaction on the driver's node with some custom waiting logic for the receipt - #[instrument( + // region:Transaction Execution + /// Executes the transaction on the driver's node with some custom waiting logic for the receipt + #[instrument( level = "info", skip_all, fields(driver_id = self.driver_id, transaction_hash = tracing::field::Empty) )] - async fn execute_transaction( - &self, - transaction: TransactionRequest, - ) -> anyhow::Result { - let node = self.platform_information.node; - let transaction_hash = node - .submit_transaction(transaction) - .await - .context("Failed to submit transaction")?; - Span::current().record("transaction_hash", display(transaction_hash)); + async fn execute_transaction( 
+ &self, + transaction: TransactionRequest, + ) -> anyhow::Result { + let node = self.platform_information.node; + let transaction_hash = node + .submit_transaction(transaction) + .await + .context("Failed to submit transaction")?; + Span::current().record("transaction_hash", display(transaction_hash)); - info!("Submitted transaction"); - self.watcher_tx - .send(WatcherEvent::SubmittedTransaction { transaction_hash }) - .context("Failed to send the transaction hash to the watcher")?; + info!("Submitted transaction"); + self.watcher_tx + .send(WatcherEvent::SubmittedTransaction { transaction_hash }) + .context("Failed to send the transaction hash to the watcher")?; - info!("Starting to poll for transaction receipt"); - poll( - Duration::from_secs(30 * 60), - PollingWaitBehavior::Constant(Duration::from_secs(1)), - || { - async move { - match node.get_receipt(transaction_hash).await { - Ok(receipt) => { - info!("Polling succeeded, receipt found"); - Ok(ControlFlow::Break(receipt)) - } - Err(_) => Ok(ControlFlow::Continue(())), - } - } - .instrument(info_span!("Polling for receipt")) - }, - ) - .await - } - // endregion:Transaction Execution + info!("Starting to poll for transaction receipt"); + poll( + Duration::from_secs(30 * 60), + PollingWaitBehavior::Constant(Duration::from_secs(1)), + || { + async move { + match node.get_receipt(transaction_hash).await { + Ok(receipt) => { + info!("Polling succeeded, receipt found"); + Ok(ControlFlow::Break(receipt)) + }, + Err(_) => Ok(ControlFlow::Continue(())), + } + } + .instrument(info_span!("Polling for receipt")) + }, + ) + .await + } + // endregion:Transaction Execution } diff --git a/crates/core/src/differential_benchmarks/entry_point.rs b/crates/core/src/differential_benchmarks/entry_point.rs index 7702d52..82b9daf 100644 --- a/crates/core/src/differential_benchmarks/entry_point.rs +++ b/crates/core/src/differential_benchmarks/entry_point.rs @@ -14,164 +14,157 @@ use revive_dt_config::{BenchmarkingContext, Context}; 
use revive_dt_report::Reporter; use crate::{ - differential_benchmarks::{Driver, Watcher, WatcherEvent}, - helpers::{CachedCompiler, NodePool, collect_metadata_files, create_test_definitions_stream}, + differential_benchmarks::{Driver, Watcher, WatcherEvent}, + helpers::{CachedCompiler, NodePool, collect_metadata_files, create_test_definitions_stream}, }; /// Handles the differential testing executing it according to the information defined in the /// context #[instrument(level = "info", err(Debug), skip_all)] pub async fn handle_differential_benchmarks( - mut context: BenchmarkingContext, - reporter: Reporter, + mut context: BenchmarkingContext, + reporter: Reporter, ) -> anyhow::Result<()> { - // A bit of a hack but we need to override the number of nodes specified through the CLI since - // benchmarks can only be run on a single node. Perhaps in the future we'd have a cleaner way to - // do this. But, for the time being, we need to override the cli arguments. - if context.concurrency_configuration.number_of_nodes != 1 { - warn!( - specified_number_of_nodes = context.concurrency_configuration.number_of_nodes, - updated_number_of_nodes = 1, - "Invalid number of nodes specified through the CLI. Benchmarks can only be run on a single node. Updated the arguments." - ); - context.concurrency_configuration.number_of_nodes = 1; - }; - let full_context = Context::Benchmark(Box::new(context.clone())); + // A bit of a hack but we need to override the number of nodes specified through the CLI since + // benchmarks can only be run on a single node. Perhaps in the future we'd have a cleaner way to + // do this. But, for the time being, we need to override the cli arguments. + if context.concurrency_configuration.number_of_nodes != 1 { + warn!( + specified_number_of_nodes = context.concurrency_configuration.number_of_nodes, + updated_number_of_nodes = 1, + "Invalid number of nodes specified through the CLI. Benchmarks can only be run on a single node. Updated the arguments." 
+ ); + context.concurrency_configuration.number_of_nodes = 1; + }; + let full_context = Context::Benchmark(Box::new(context.clone())); - // Discover all of the metadata files that are defined in the context. - let metadata_files = collect_metadata_files(&context) - .context("Failed to collect metadata files for differential testing")?; - info!(len = metadata_files.len(), "Discovered metadata files"); + // Discover all of the metadata files that are defined in the context. + let metadata_files = collect_metadata_files(&context) + .context("Failed to collect metadata files for differential testing")?; + info!(len = metadata_files.len(), "Discovered metadata files"); - // Discover the list of platforms that the tests should run on based on the context. - let platforms = context - .platforms - .iter() - .copied() - .map(Into::<&dyn Platform>::into) - .collect::>(); + // Discover the list of platforms that the tests should run on based on the context. + let platforms = context + .platforms + .iter() + .copied() + .map(Into::<&dyn Platform>::into) + .collect::>(); - // Starting the nodes of the various platforms specified in the context. Note that we use the - // node pool since it contains all of the code needed to spawn nodes from A to Z and therefore - // it's the preferred way for us to start nodes even when we're starting just a single node. The - // added overhead from it is quite small (performance wise) since it's involved only when we're - // creating the test definitions, but it might have other maintenance overhead as it obscures - // the fact that only a single node is spawned. - let platforms_and_nodes = { - let mut map = BTreeMap::new(); + // Starting the nodes of the various platforms specified in the context. Note that we use the + // node pool since it contains all of the code needed to spawn nodes from A to Z and therefore + // it's the preferred way for us to start nodes even when we're starting just a single node. 
The + // added overhead from it is quite small (performance wise) since it's involved only when we're + // creating the test definitions, but it might have other maintenance overhead as it obscures + // the fact that only a single node is spawned. + let platforms_and_nodes = { + let mut map = BTreeMap::new(); - for platform in platforms.iter() { - let platform_identifier = platform.platform_identifier(); + for platform in platforms.iter() { + let platform_identifier = platform.platform_identifier(); - let node_pool = NodePool::new(full_context.clone(), *platform) - .await - .inspect_err(|err| { - error!( - ?err, - %platform_identifier, - "Failed to initialize the node pool for the platform." - ) - }) - .context("Failed to initialize the node pool")?; + let node_pool = NodePool::new(full_context.clone(), *platform) + .await + .inspect_err(|err| { + error!( + ?err, + %platform_identifier, + "Failed to initialize the node pool for the platform." + ) + }) + .context("Failed to initialize the node pool")?; - map.insert(platform_identifier, (*platform, node_pool)); - } + map.insert(platform_identifier, (*platform, node_pool)); + } - map - }; - info!("Spawned the platform nodes"); + map + }; + info!("Spawned the platform nodes"); - // Preparing test definitions for the execution. - let test_definitions = create_test_definitions_stream( - &full_context, - metadata_files.iter(), - &platforms_and_nodes, - reporter.clone(), - ) - .await - .collect::>() - .await; - info!(len = test_definitions.len(), "Created test definitions"); + // Preparing test definitions for the execution. + let test_definitions = create_test_definitions_stream( + &full_context, + metadata_files.iter(), + &platforms_and_nodes, + reporter.clone(), + ) + .await + .collect::>() + .await; + info!(len = test_definitions.len(), "Created test definitions"); - // Creating the objects that will be shared between the various runs. 
The cached compiler is the - // only one at the current moment of time that's safe to share between runs. - let cached_compiler = CachedCompiler::new( - context - .working_directory - .as_path() - .join("compilation_cache"), - context - .compilation_configuration - .invalidate_compilation_cache, - ) - .await - .map(Arc::new) - .context("Failed to initialize cached compiler")?; + // Creating the objects that will be shared between the various runs. The cached compiler is the + // only one at the current moment of time that's safe to share between runs. + let cached_compiler = CachedCompiler::new( + context.working_directory.as_path().join("compilation_cache"), + context.compilation_configuration.invalidate_compilation_cache, + ) + .await + .map(Arc::new) + .context("Failed to initialize cached compiler")?; - // Note: we do not want to run all of the workloads concurrently on all platforms. Rather, we'd - // like to run all of the workloads for one platform, and then the next sequentially as we'd - // like for the effect of concurrency to be minimized when we're doing the benchmarking. - for platform in platforms.iter() { - let platform_identifier = platform.platform_identifier(); + // Note: we do not want to run all of the workloads concurrently on all platforms. Rather, we'd + // like to run all of the workloads for one platform, and then the next sequentially as we'd + // like for the effect of concurrency to be minimized when we're doing the benchmarking. 
+ for platform in platforms.iter() { + let platform_identifier = platform.platform_identifier(); - let span = info_span!("Benchmarking for the platform", %platform_identifier); - let _guard = span.enter(); + let span = info_span!("Benchmarking for the platform", %platform_identifier); + let _guard = span.enter(); - for test_definition in test_definitions.iter() { - let platform_information = &test_definition.platforms[&platform_identifier]; + for test_definition in test_definitions.iter() { + let platform_information = &test_definition.platforms[&platform_identifier]; - let span = info_span!( - "Executing workload", - metadata_file_path = %test_definition.metadata_file_path.display(), - case_idx = %test_definition.case_idx, - mode = %test_definition.mode, - ); - let _guard = span.enter(); + let span = info_span!( + "Executing workload", + metadata_file_path = %test_definition.metadata_file_path.display(), + case_idx = %test_definition.case_idx, + mode = %test_definition.mode, + ); + let _guard = span.enter(); - // Initializing all of the components requires to execute this particular workload. 
- let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new( - context.wallet_configuration.highest_private_key_exclusive(), - ))); - let (watcher, watcher_tx) = Watcher::new( - platform_identifier, - platform_information - .node - .subscribe_to_full_blocks_information() - .await - .context("Failed to subscribe to full blocks information from the node")?, - ); - let driver = Driver::new( - platform_information, - test_definition, - private_key_allocator, - cached_compiler.as_ref(), - watcher_tx.clone(), - test_definition - .case - .steps_iterator_for_benchmarks(context.default_repetition_count) - .enumerate() - .map(|(step_idx, step)| -> (StepPath, Step) { - (StepPath::new(vec![StepIdx::new(step_idx)]), step) - }), - ) - .await - .context("Failed to create the benchmarks driver")?; + // Initializing all of the components requires to execute this particular workload. + let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new( + context.wallet_configuration.highest_private_key_exclusive(), + ))); + let (watcher, watcher_tx) = Watcher::new( + platform_identifier, + platform_information + .node + .subscribe_to_full_blocks_information() + .await + .context("Failed to subscribe to full blocks information from the node")?, + ); + let driver = Driver::new( + platform_information, + test_definition, + private_key_allocator, + cached_compiler.as_ref(), + watcher_tx.clone(), + test_definition + .case + .steps_iterator_for_benchmarks(context.default_repetition_count) + .enumerate() + .map(|(step_idx, step)| -> (StepPath, Step) { + (StepPath::new(vec![StepIdx::new(step_idx)]), step) + }), + ) + .await + .context("Failed to create the benchmarks driver")?; - futures::future::try_join( - watcher.run(), - driver.execute_all().inspect(|_| { - info!("All transactions submitted - driver completed execution"); - watcher_tx - .send(WatcherEvent::AllTransactionsSubmitted) - .unwrap() - }), - ) - .await - .context("Failed to run the driver and executor") - 
.inspect(|(_, steps_executed)| info!(steps_executed, "Workload Execution Succeeded")) - .inspect_err(|err| error!(?err, "Workload Execution Failed"))?; - } - } + futures::future::try_join( + watcher.run(), + driver.execute_all().inspect(|_| { + info!("All transactions submitted - driver completed execution"); + watcher_tx.send(WatcherEvent::AllTransactionsSubmitted).unwrap() + }), + ) + .await + .context("Failed to run the driver and executor") + .inspect(|(_, steps_executed)| info!(steps_executed, "Workload Execution Succeeded")) + .inspect_err(|err| error!(?err, "Workload Execution Failed"))?; + } + } - Ok(()) + Ok(()) } diff --git a/crates/core/src/differential_benchmarks/execution_state.rs b/crates/core/src/differential_benchmarks/execution_state.rs index 501526f..f9a4fd5 100644 --- a/crates/core/src/differential_benchmarks/execution_state.rs +++ b/crates/core/src/differential_benchmarks/execution_state.rs @@ -1,8 +1,8 @@ use std::{collections::HashMap, path::PathBuf}; use alloy::{ - json_abi::JsonAbi, - primitives::{Address, U256}, + json_abi::JsonAbi, + primitives::{Address, U256}, }; use revive_dt_format::metadata::{ContractIdent, ContractInstance}; @@ -10,34 +10,31 @@ use revive_dt_format::metadata::{ContractIdent, ContractInstance}; #[derive(Clone)] /// The state associated with the test execution of one of the workloads. pub struct ExecutionState { - /// The compiled contracts, these contracts have been compiled and have had the libraries linked - /// against them and therefore they're ready to be deployed on-demand. - pub compiled_contracts: HashMap>, + /// The compiled contracts, these contracts have been compiled and have had the libraries + /// linked against them and therefore they're ready to be deployed on-demand. + pub compiled_contracts: HashMap>, - /// A map of all of the deployed contracts and information about them. - pub deployed_contracts: HashMap, + /// A map of all of the deployed contracts and information about them. 
+ pub deployed_contracts: HashMap, - /// This map stores the variables used for each one of the cases contained in the metadata file. - pub variables: HashMap, + /// This map stores the variables used for each one of the cases contained in the metadata + /// file. + pub variables: HashMap, } impl ExecutionState { - pub fn new( - compiled_contracts: HashMap>, - deployed_contracts: HashMap, - ) -> Self { - Self { - compiled_contracts, - deployed_contracts, - variables: Default::default(), - } - } + pub fn new( + compiled_contracts: HashMap>, + deployed_contracts: HashMap, + ) -> Self { + Self { compiled_contracts, deployed_contracts, variables: Default::default() } + } - pub fn empty() -> Self { - Self { - compiled_contracts: Default::default(), - deployed_contracts: Default::default(), - variables: Default::default(), - } - } + pub fn empty() -> Self { + Self { + compiled_contracts: Default::default(), + deployed_contracts: Default::default(), + variables: Default::default(), + } + } } diff --git a/crates/core/src/differential_benchmarks/watcher.rs b/crates/core/src/differential_benchmarks/watcher.rs index 12ea840..d948278 100644 --- a/crates/core/src/differential_benchmarks/watcher.rs +++ b/crates/core/src/differential_benchmarks/watcher.rs @@ -6,8 +6,8 @@ use futures::{Stream, StreamExt}; use revive_dt_common::types::PlatformIdentifier; use revive_dt_node_interaction::MinedBlockInformation; use tokio::sync::{ - RwLock, - mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel}, + RwLock, + mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel}, }; use tracing::{info, instrument}; @@ -15,193 +15,175 @@ use tracing::{info, instrument}; /// and MUST NOT be re-used between workloads since it holds important internal state for a given /// workload and is not designed for reuse. pub struct Watcher { - /// The identifier of the platform that this watcher is for. 
- platform_identifier: PlatformIdentifier, + /// The identifier of the platform that this watcher is for. + platform_identifier: PlatformIdentifier, - /// The receive side of the channel that all of the drivers and various other parts of the code - /// send events to the watcher on. - rx: UnboundedReceiver, + /// The receive side of the channel that all of the drivers and various other parts of the code + /// send events to the watcher on. + rx: UnboundedReceiver, - /// This is a stream of the blocks that were mined by the node. This is for a single platform - /// and a single node from that platform. - blocks_stream: Pin>>, + /// This is a stream of the blocks that were mined by the node. This is for a single platform + /// and a single node from that platform. + blocks_stream: Pin>>, } impl Watcher { - pub fn new( - platform_identifier: PlatformIdentifier, - blocks_stream: Pin>>, - ) -> (Self, UnboundedSender) { - let (tx, rx) = unbounded_channel::(); - ( - Self { - platform_identifier, - rx, - blocks_stream, - }, - tx, - ) - } + pub fn new( + platform_identifier: PlatformIdentifier, + blocks_stream: Pin>>, + ) -> (Self, UnboundedSender) { + let (tx, rx) = unbounded_channel::(); + (Self { platform_identifier, rx, blocks_stream }, tx) + } - #[instrument(level = "info", skip_all)] - pub async fn run(mut self) -> Result<()> { - // The first event that the watcher receives must be a `RepetitionStartEvent` that informs - // the watcher of the last block number that it should ignore and what the block number is - // for the first important block that it should look for. 
- let ignore_block_before = loop { - let Some(WatcherEvent::RepetitionStartEvent { - ignore_block_before, - }) = self.rx.recv().await - else { - continue; - }; - break ignore_block_before; - }; + #[instrument(level = "info", skip_all)] + pub async fn run(mut self) -> Result<()> { + // The first event that the watcher receives must be a `RepetitionStartEvent` that informs + // the watcher of the last block number that it should ignore and what the block number is + // for the first important block that it should look for. + let ignore_block_before = loop { + let Some(WatcherEvent::RepetitionStartEvent { ignore_block_before }) = + self.rx.recv().await + else { + continue; + }; + break ignore_block_before; + }; - // This is the set of the transaction hashes that the watcher should be looking for and - // watch for them in the blocks. The watcher will keep watching for blocks until it sees - // that all of the transactions that it was watching for has been seen in the mined blocks. - let watch_for_transaction_hashes = Arc::new(RwLock::new(HashSet::::new())); + // This is the set of the transaction hashes that the watcher should be looking for and + // watch for them in the blocks. The watcher will keep watching for blocks until it sees + // that all of the transactions that it was watching for has been seen in the mined blocks. + let watch_for_transaction_hashes = Arc::new(RwLock::new(HashSet::::new())); - // A boolean that keeps track of whether all of the transactions were submitted or if more - // txs are expected to come through the receive side of the channel. We do not want to rely - // on the channel closing alone for the watcher to know that all of the transactions were - // submitted and for there to be an explicit event sent by the core orchestrator that - // informs the watcher that no further transactions are to be expected and that it can - // safely ignore the channel. 
- let all_transactions_submitted = Arc::new(RwLock::new(false)); + // A boolean that keeps track of whether all of the transactions were submitted or if more + // txs are expected to come through the receive side of the channel. We do not want to rely + // on the channel closing alone for the watcher to know that all of the transactions were + // submitted and for there to be an explicit event sent by the core orchestrator that + // informs the watcher that no further transactions are to be expected and that it can + // safely ignore the channel. + let all_transactions_submitted = Arc::new(RwLock::new(false)); - let watcher_event_watching_task = { - let watch_for_transaction_hashes = watch_for_transaction_hashes.clone(); - let all_transactions_submitted = all_transactions_submitted.clone(); - async move { - while let Some(watcher_event) = self.rx.recv().await { - match watcher_event { - // Subsequent repetition starts are ignored since certain workloads can - // contain nested repetitions and therefore there's no use in doing any - // action if the repetitions are nested. - WatcherEvent::RepetitionStartEvent { .. 
} => {} - WatcherEvent::SubmittedTransaction { transaction_hash } => { - watch_for_transaction_hashes - .write() - .await - .insert(transaction_hash); - } - WatcherEvent::AllTransactionsSubmitted => { - *all_transactions_submitted.write().await = true; - self.rx.close(); - info!("Watcher's Events Watching Task Finished"); - break; - } - } - } - } - }; - let block_information_watching_task = { - let watch_for_transaction_hashes = watch_for_transaction_hashes.clone(); - let all_transactions_submitted = all_transactions_submitted.clone(); - let mut blocks_information_stream = self.blocks_stream; - async move { - let mut mined_blocks_information = Vec::new(); + let watcher_event_watching_task = { + let watch_for_transaction_hashes = watch_for_transaction_hashes.clone(); + let all_transactions_submitted = all_transactions_submitted.clone(); + async move { + while let Some(watcher_event) = self.rx.recv().await { + match watcher_event { + // Subsequent repetition starts are ignored since certain workloads can + // contain nested repetitions and therefore there's no use in doing any + // action if the repetitions are nested. + WatcherEvent::RepetitionStartEvent { .. 
} => {}, + WatcherEvent::SubmittedTransaction { transaction_hash } => { + watch_for_transaction_hashes.write().await.insert(transaction_hash); + }, + WatcherEvent::AllTransactionsSubmitted => { + *all_transactions_submitted.write().await = true; + self.rx.close(); + info!("Watcher's Events Watching Task Finished"); + break; + }, + } + } + } + }; + let block_information_watching_task = { + let watch_for_transaction_hashes = watch_for_transaction_hashes.clone(); + let all_transactions_submitted = all_transactions_submitted.clone(); + let mut blocks_information_stream = self.blocks_stream; + async move { + let mut mined_blocks_information = Vec::new(); - while let Some(block) = blocks_information_stream.next().await { - // If the block number is equal to or less than the last block before the - // repetition then we ignore it and continue on to the next block. - if block.block_number <= ignore_block_before { - continue; - } + while let Some(block) = blocks_information_stream.next().await { + // If the block number is equal to or less than the last block before the + // repetition then we ignore it and continue on to the next block. + if block.block_number <= ignore_block_before { + continue; + } - if *all_transactions_submitted.read().await - && watch_for_transaction_hashes.read().await.is_empty() - { - break; - } + if *all_transactions_submitted.read().await && + watch_for_transaction_hashes.read().await.is_empty() + { + break; + } - info!( - remaining_transactions = watch_for_transaction_hashes.read().await.len(), - block_tx_count = block.transaction_hashes.len(), - "Observed a block" - ); + info!( + remaining_transactions = watch_for_transaction_hashes.read().await.len(), + block_tx_count = block.transaction_hashes.len(), + "Observed a block" + ); - // Remove all of the transaction hashes observed in this block from the txs we - // are currently watching for. 
- let mut watch_for_transaction_hashes = - watch_for_transaction_hashes.write().await; - for tx_hash in block.transaction_hashes.iter() { - watch_for_transaction_hashes.remove(tx_hash); - } + // Remove all of the transaction hashes observed in this block from the txs we + // are currently watching for. + let mut watch_for_transaction_hashes = + watch_for_transaction_hashes.write().await; + for tx_hash in block.transaction_hashes.iter() { + watch_for_transaction_hashes.remove(tx_hash); + } - mined_blocks_information.push(block); - } + mined_blocks_information.push(block); + } - info!("Watcher's Block Watching Task Finished"); - mined_blocks_information - } - }; + info!("Watcher's Block Watching Task Finished"); + mined_blocks_information + } + }; - let (_, mined_blocks_information) = - futures::future::join(watcher_event_watching_task, block_information_watching_task) - .await; + let (_, mined_blocks_information) = + futures::future::join(watcher_event_watching_task, block_information_watching_task) + .await; - // region:TEMPORARY - { - // TODO: The following core is TEMPORARY and will be removed once we have proper - // reporting in place and then it can be removed. This serves as as way of doing some - // very simple reporting for the time being. - use std::io::Write; + // region:TEMPORARY + { + // TODO: The following core is TEMPORARY and will be removed once we have proper + // reporting in place and then it can be removed. This serves as as way of doing some + // very simple reporting for the time being. + use std::io::Write; - let mut stderr = std::io::stderr().lock(); - writeln!( - stderr, - "Watcher information for {}", - self.platform_identifier - )?; - writeln!( - stderr, - "block_number,block_timestamp,mined_gas,block_gas_limit,tx_count" - )?; - for block in mined_blocks_information { - writeln!( - stderr, - "{},{},{},{},{}", - block.block_number, - block.block_timestamp, - block.mined_gas, - block.block_gas_limit, - block.transaction_hashes.len() - )? 
- } - } - // endregion:TEMPORARY + let mut stderr = std::io::stderr().lock(); + writeln!(stderr, "Watcher information for {}", self.platform_identifier)?; + writeln!(stderr, "block_number,block_timestamp,mined_gas,block_gas_limit,tx_count")?; + for block in mined_blocks_information { + writeln!( + stderr, + "{},{},{},{},{}", + block.block_number, + block.block_timestamp, + block.mined_gas, + block.block_gas_limit, + block.transaction_hashes.len() + )? + } + } + // endregion:TEMPORARY - Ok(()) - } + Ok(()) + } } #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub enum WatcherEvent { - /// Informs the watcher that it should begin watching for the blocks mined by the platforms. - /// Before the watcher receives this event it will not be watching for the mined blocks. The - /// reason behind this is that we do not want the initialization transactions (e.g., contract - /// deployments) to be included in the overall TPS and GPS measurements since these blocks will - /// most likely only contain a single transaction since they're just being used for - /// initialization. - RepetitionStartEvent { - /// This is the block number of the last block seen before the repetition started. This is - /// used to instruct the watcher to ignore all block prior to this block when it starts - /// streaming the blocks. - ignore_block_before: BlockNumber, - }, + /// Informs the watcher that it should begin watching for the blocks mined by the platforms. + /// Before the watcher receives this event it will not be watching for the mined blocks. The + /// reason behind this is that we do not want the initialization transactions (e.g., contract + /// deployments) to be included in the overall TPS and GPS measurements since these blocks will + /// most likely only contain a single transaction since they're just being used for + /// initialization. + RepetitionStartEvent { + /// This is the block number of the last block seen before the repetition started. 
This is + /// used to instruct the watcher to ignore all block prior to this block when it starts + /// streaming the blocks. + ignore_block_before: BlockNumber, + }, - /// Informs the watcher that a transaction was submitted and that the watcher should watch for a - /// transaction with this hash in the blocks that it watches. - SubmittedTransaction { - /// The hash of the submitted transaction. - transaction_hash: TxHash, - }, + /// Informs the watcher that a transaction was submitted and that the watcher should watch for a + /// transaction with this hash in the blocks that it watches. + SubmittedTransaction { + /// The hash of the submitted transaction. + transaction_hash: TxHash, + }, - /// Informs the watcher that all of the transactions of this benchmark have been submitted and - /// that it can expect to receive no further transaction hashes and not even watch the channel - /// any longer. - AllTransactionsSubmitted, + /// Informs the watcher that all of the transactions of this benchmark have been submitted and + /// that it can expect to receive no further transaction hashes and not even watch the channel + /// any longer. 
+ AllTransactionsSubmitted, } diff --git a/crates/core/src/differential_tests/driver.rs b/crates/core/src/differential_tests/driver.rs index 3be2fd7..ae73063 100644 --- a/crates/core/src/differential_tests/driver.rs +++ b/crates/core/src/differential_tests/driver.rs @@ -1,327 +1,317 @@ use std::{ - collections::{BTreeMap, HashMap}, - sync::Arc, + collections::{BTreeMap, HashMap}, + sync::Arc, }; use alloy::{ - consensus::EMPTY_ROOT_HASH, - hex, - json_abi::JsonAbi, - network::{Ethereum, TransactionBuilder}, - primitives::{Address, TxHash, U256}, - rpc::types::{ - TransactionReceipt, TransactionRequest, - trace::geth::{ - CallFrame, GethDebugBuiltInTracerType, GethDebugTracerConfig, GethDebugTracerType, - GethDebugTracingOptions, - }, - }, + consensus::EMPTY_ROOT_HASH, + hex, + json_abi::JsonAbi, + network::{Ethereum, TransactionBuilder}, + primitives::{Address, TxHash, U256}, + rpc::types::{ + TransactionReceipt, TransactionRequest, + trace::geth::{ + CallFrame, GethDebugBuiltInTracerType, GethDebugTracerConfig, GethDebugTracerType, + GethDebugTracingOptions, + }, + }, }; use anyhow::{Context as _, Result, bail}; use futures::TryStreamExt; use indexmap::IndexMap; use revive_dt_common::types::{PlatformIdentifier, PrivateKeyAllocator}; use revive_dt_format::{ - metadata::{ContractInstance, ContractPathAndIdent}, - steps::{ - AllocateAccountStep, BalanceAssertionStep, Calldata, EtherValue, Expected, ExpectedOutput, - FunctionCallStep, Method, RepeatStep, Step, StepAddress, StepIdx, StepPath, - StorageEmptyAssertionStep, - }, - traits::ResolutionContext, + metadata::{ContractInstance, ContractPathAndIdent}, + steps::{ + AllocateAccountStep, BalanceAssertionStep, Calldata, EtherValue, Expected, ExpectedOutput, + FunctionCallStep, Method, RepeatStep, Step, StepAddress, StepIdx, StepPath, + StorageEmptyAssertionStep, + }, + traits::ResolutionContext, }; use tokio::sync::Mutex; use tracing::{error, info, instrument}; use crate::{ - differential_tests::ExecutionState, - 
helpers::{CachedCompiler, TestDefinition, TestPlatformInformation}, + differential_tests::ExecutionState, + helpers::{CachedCompiler, TestDefinition, TestPlatformInformation}, }; type StepsIterator = std::vec::IntoIter<(StepPath, Step)>; pub struct Driver<'a, I> { - /// The drivers for the various platforms that we're executing the tests on. - platform_drivers: BTreeMap>, + /// The drivers for the various platforms that we're executing the tests on. + platform_drivers: BTreeMap>, } impl<'a, I> Driver<'a, I> where I: Iterator {} impl<'a> Driver<'a, StepsIterator> { - // region:Constructors - pub async fn new_root( - test_definition: &'a TestDefinition<'a>, - private_key_allocator: Arc>, - cached_compiler: &CachedCompiler<'a>, - ) -> Result { - let platform_drivers = futures::future::try_join_all(test_definition.platforms.iter().map( - |(identifier, information)| { - let identifier = *identifier; - let private_key_allocator = private_key_allocator.clone(); - async move { - Self::create_platform_driver( - identifier, - information, - test_definition, - private_key_allocator, - cached_compiler, - ) - .await - .map(|driver| (identifier, driver)) - } - }, - )) - .await - .context("Failed to create the drivers for the various platforms")? - .into_iter() - .collect::>(); + // region:Constructors + pub async fn new_root( + test_definition: &'a TestDefinition<'a>, + private_key_allocator: Arc>, + cached_compiler: &CachedCompiler<'a>, + ) -> Result { + let platform_drivers = futures::future::try_join_all(test_definition.platforms.iter().map( + |(identifier, information)| { + let identifier = *identifier; + let private_key_allocator = private_key_allocator.clone(); + async move { + Self::create_platform_driver( + identifier, + information, + test_definition, + private_key_allocator, + cached_compiler, + ) + .await + .map(|driver| (identifier, driver)) + } + }, + )) + .await + .context("Failed to create the drivers for the various platforms")? 
+ .into_iter() + .collect::>(); - Ok(Self { platform_drivers }) - } + Ok(Self { platform_drivers }) + } - async fn create_platform_driver( - identifier: PlatformIdentifier, - information: &'a TestPlatformInformation<'a>, - test_definition: &'a TestDefinition<'a>, - private_key_allocator: Arc>, - cached_compiler: &CachedCompiler<'a>, - ) -> Result> { - let steps: Vec<(StepPath, Step)> = test_definition - .case - .steps_iterator() - .enumerate() - .map(|(step_idx, step)| -> (StepPath, Step) { - (StepPath::new(vec![StepIdx::new(step_idx)]), step) - }) - .collect(); - let steps_iterator: StepsIterator = steps.into_iter(); + async fn create_platform_driver( + identifier: PlatformIdentifier, + information: &'a TestPlatformInformation<'a>, + test_definition: &'a TestDefinition<'a>, + private_key_allocator: Arc>, + cached_compiler: &CachedCompiler<'a>, + ) -> Result> { + let steps: Vec<(StepPath, Step)> = test_definition + .case + .steps_iterator() + .enumerate() + .map(|(step_idx, step)| -> (StepPath, Step) { + (StepPath::new(vec![StepIdx::new(step_idx)]), step) + }) + .collect(); + let steps_iterator: StepsIterator = steps.into_iter(); - PlatformDriver::new( - information, - test_definition, - private_key_allocator, - cached_compiler, - steps_iterator, - ) - .await - .context(format!("Failed to create driver for {identifier}")) - } - // endregion:Constructors + PlatformDriver::new( + information, + test_definition, + private_key_allocator, + cached_compiler, + steps_iterator, + ) + .await + .context(format!("Failed to create driver for {identifier}")) + } + // endregion:Constructors - // region:Execution - pub async fn execute_all(mut self) -> Result { - let platform_drivers = std::mem::take(&mut self.platform_drivers); - let results = futures::future::try_join_all( - platform_drivers - .into_values() - .map(|driver| driver.execute_all()), - ) - .await - .context("Failed to execute all of the steps on the driver")?; - Ok(results.first().copied().unwrap_or_default()) - } 
- // endregion:Execution + // region:Execution + pub async fn execute_all(mut self) -> Result { + let platform_drivers = std::mem::take(&mut self.platform_drivers); + let results = futures::future::try_join_all( + platform_drivers.into_values().map(|driver| driver.execute_all()), + ) + .await + .context("Failed to execute all of the steps on the driver")?; + Ok(results.first().copied().unwrap_or_default()) + } + // endregion:Execution } /// The differential tests driver for a single platform. pub struct PlatformDriver<'a, I> { - /// The information of the platform that this driver is for. - platform_information: &'a TestPlatformInformation<'a>, + /// The information of the platform that this driver is for. + platform_information: &'a TestPlatformInformation<'a>, - /// The definition of the test that the driver is instructed to execute. - test_definition: &'a TestDefinition<'a>, + /// The definition of the test that the driver is instructed to execute. + test_definition: &'a TestDefinition<'a>, - /// The private key allocator used by this driver and other drivers when account allocations are - /// needed. - private_key_allocator: Arc>, + /// The private key allocator used by this driver and other drivers when account allocations + /// are needed. + private_key_allocator: Arc>, - /// The execution state associated with the platform. - execution_state: ExecutionState, + /// The execution state associated with the platform. + execution_state: ExecutionState, - /// The number of steps that were executed on the driver. - steps_executed: usize, + /// The number of steps that were executed on the driver. + steps_executed: usize, - /// This is the queue of steps that are to be executed by the driver for this test case. Each - /// time `execute_step` is called one of the steps is executed. - steps_iterator: I, + /// This is the queue of steps that are to be executed by the driver for this test case. Each + /// time `execute_step` is called one of the steps is executed. 
+ steps_iterator: I, } impl<'a, I> PlatformDriver<'a, I> where - I: Iterator, + I: Iterator, { - // region:Constructors & Initialization + // region:Constructors & Initialization - pub async fn new( - platform_information: &'a TestPlatformInformation<'a>, - test_definition: &'a TestDefinition<'a>, - private_key_allocator: Arc>, - cached_compiler: &CachedCompiler<'a>, - steps: I, - ) -> Result { - let execution_state = - Self::init_execution_state(platform_information, test_definition, cached_compiler) - .await - .context("Failed to initialize the execution state of the platform")?; - Ok(PlatformDriver { - platform_information, - test_definition, - private_key_allocator, - execution_state, - steps_executed: 0, - steps_iterator: steps, - }) - } + pub async fn new( + platform_information: &'a TestPlatformInformation<'a>, + test_definition: &'a TestDefinition<'a>, + private_key_allocator: Arc>, + cached_compiler: &CachedCompiler<'a>, + steps: I, + ) -> Result { + let execution_state = + Self::init_execution_state(platform_information, test_definition, cached_compiler) + .await + .context("Failed to initialize the execution state of the platform")?; + Ok(PlatformDriver { + platform_information, + test_definition, + private_key_allocator, + execution_state, + steps_executed: 0, + steps_iterator: steps, + }) + } - async fn init_execution_state( - platform_information: &'a TestPlatformInformation<'a>, - test_definition: &'a TestDefinition<'a>, - cached_compiler: &CachedCompiler<'a>, - ) -> Result { - let compiler_output = cached_compiler - .compile_contracts( - test_definition.metadata, - test_definition.metadata_file_path, - test_definition.mode.clone(), - None, - platform_information.compiler.as_ref(), - platform_information.platform, - &platform_information.reporter, - ) - .await - .inspect_err(|err| { - error!( - ?err, - platform_identifier = %platform_information.platform.platform_identifier(), - "Pre-linking compilation failed" - ) - }) - .context("Failed to produce 
the pre-linking compiled contracts")?; + async fn init_execution_state( + platform_information: &'a TestPlatformInformation<'a>, + test_definition: &'a TestDefinition<'a>, + cached_compiler: &CachedCompiler<'a>, + ) -> Result { + let compiler_output = cached_compiler + .compile_contracts( + test_definition.metadata, + test_definition.metadata_file_path, + test_definition.mode.clone(), + None, + platform_information.compiler.as_ref(), + platform_information.platform, + &platform_information.reporter, + ) + .await + .inspect_err(|err| { + error!( + ?err, + platform_identifier = %platform_information.platform.platform_identifier(), + "Pre-linking compilation failed" + ) + }) + .context("Failed to produce the pre-linking compiled contracts")?; - let mut deployed_libraries = None::>; - let mut contract_sources = test_definition - .metadata - .contract_sources() - .inspect_err(|err| { - error!( - ?err, - platform_identifier = %platform_information.platform.platform_identifier(), - "Failed to retrieve contract sources from metadata" - ) - }) - .context("Failed to get the contract instances from the metadata file")?; - for library_instance in test_definition - .metadata - .libraries - .iter() - .flatten() - .flat_map(|(_, map)| map.values()) - { - let ContractPathAndIdent { - contract_source_path: library_source_path, - contract_ident: library_ident, - } = contract_sources - .remove(library_instance) - .context("Failed to get the contract sources of the contract instance")?; + let mut deployed_libraries = None::>; + let mut contract_sources = test_definition + .metadata + .contract_sources() + .inspect_err(|err| { + error!( + ?err, + platform_identifier = %platform_information.platform.platform_identifier(), + "Failed to retrieve contract sources from metadata" + ) + }) + .context("Failed to get the contract instances from the metadata file")?; + for library_instance in test_definition + .metadata + .libraries + .iter() + .flatten() + .flat_map(|(_, map)| map.values()) + { 
+ let ContractPathAndIdent { + contract_source_path: library_source_path, + contract_ident: library_ident, + } = contract_sources + .remove(library_instance) + .context("Failed to get the contract sources of the contract instance")?; - let (code, abi) = compiler_output - .contracts - .get(&library_source_path) - .and_then(|contracts| contracts.get(library_ident.as_str())) - .context("Failed to get the code and abi for the instance")?; + let (code, abi) = compiler_output + .contracts + .get(&library_source_path) + .and_then(|contracts| contracts.get(library_ident.as_str())) + .context("Failed to get the code and abi for the instance")?; - let code = alloy::hex::decode(code)?; + let code = alloy::hex::decode(code)?; - // Getting the deployer address from the cases themselves. This is to ensure - // that we're doing the deployments from different accounts and therefore we're - // not slowed down by the nonce. - let deployer_address = test_definition - .case - .steps - .iter() - .filter_map(|step| match step { - Step::FunctionCall(input) => input.caller.as_address().copied(), - Step::BalanceAssertion(..) => None, - Step::StorageEmptyAssertion(..) => None, - Step::Repeat(..) => None, - Step::AllocateAccount(..) => None, - }) - .next() - .unwrap_or(FunctionCallStep::default_caller_address()); - let tx = TransactionBuilder::::with_deploy_code( - TransactionRequest::default().from(deployer_address), - code, - ); - let receipt = platform_information - .node - .execute_transaction(tx) - .await - .inspect_err(|err| { - error!( - ?err, - %library_instance, - platform_identifier = %platform_information.platform.platform_identifier(), - "Failed to deploy the library" - ) - })?; + // Getting the deployer address from the cases themselves. This is to ensure + // that we're doing the deployments from different accounts and therefore we're + // not slowed down by the nonce. 
+ let deployer_address = test_definition + .case + .steps + .iter() + .filter_map(|step| match step { + Step::FunctionCall(input) => input.caller.as_address().copied(), + Step::BalanceAssertion(..) => None, + Step::StorageEmptyAssertion(..) => None, + Step::Repeat(..) => None, + Step::AllocateAccount(..) => None, + }) + .next() + .unwrap_or(FunctionCallStep::default_caller_address()); + let tx = TransactionBuilder::::with_deploy_code( + TransactionRequest::default().from(deployer_address), + code, + ); + let receipt = + platform_information.node.execute_transaction(tx).await.inspect_err(|err| { + error!( + ?err, + %library_instance, + platform_identifier = %platform_information.platform.platform_identifier(), + "Failed to deploy the library" + ) + })?; - let library_address = receipt - .contract_address - .expect("Failed to deploy the library"); + let library_address = receipt.contract_address.expect("Failed to deploy the library"); - deployed_libraries.get_or_insert_default().insert( - library_instance.clone(), - (library_ident.clone(), library_address, abi.clone()), - ); - } + deployed_libraries.get_or_insert_default().insert( + library_instance.clone(), + (library_ident.clone(), library_address, abi.clone()), + ); + } - let compiler_output = cached_compiler - .compile_contracts( - test_definition.metadata, - test_definition.metadata_file_path, - test_definition.mode.clone(), - deployed_libraries.as_ref(), - platform_information.compiler.as_ref(), - platform_information.platform, - &platform_information.reporter, - ) - .await - .inspect_err(|err| { - error!( - ?err, - platform_identifier = %platform_information.platform.platform_identifier(), - "Pre-linking compilation failed" - ) - }) - .context("Failed to compile the post-link contracts")?; + let compiler_output = cached_compiler + .compile_contracts( + test_definition.metadata, + test_definition.metadata_file_path, + test_definition.mode.clone(), + deployed_libraries.as_ref(), + 
platform_information.compiler.as_ref(), + platform_information.platform, + &platform_information.reporter, + ) + .await + .inspect_err(|err| { + error!( + ?err, + platform_identifier = %platform_information.platform.platform_identifier(), + "Pre-linking compilation failed" + ) + }) + .context("Failed to compile the post-link contracts")?; - Ok(ExecutionState::new( - compiler_output.contracts, - deployed_libraries.unwrap_or_default(), - )) - } - // endregion:Constructors & Initialization + Ok(ExecutionState::new(compiler_output.contracts, deployed_libraries.unwrap_or_default())) + } + // endregion:Constructors & Initialization - // region:Step Handling - pub async fn execute_all(mut self) -> Result { - while let Some(result) = self.execute_next_step().await { - result? - } - Ok(self.steps_executed) - } + // region:Step Handling + pub async fn execute_all(mut self) -> Result { + while let Some(result) = self.execute_next_step().await { + result? + } + Ok(self.steps_executed) + } - pub async fn execute_next_step(&mut self) -> Option> { - let (step_path, step) = self.steps_iterator.next()?; - info!(%step_path, "Executing Step"); - Some( - self.execute_step(&step_path, &step) - .await - .inspect(|_| info!(%step_path, "Step execution succeeded")) - .inspect_err(|err| error!(%step_path, ?err, "Step execution failed")), - ) - } + pub async fn execute_next_step(&mut self) -> Option> { + let (step_path, step) = self.steps_iterator.next()?; + info!(%step_path, "Executing Step"); + Some( + self.execute_step(&step_path, &step) + .await + .inspect(|_| info!(%step_path, "Step execution succeeded")) + .inspect_err(|err| error!(%step_path, ?err, "Step execution failed")), + ) + } - #[instrument( + #[instrument( level = "info", skip_all, fields( @@ -331,536 +321,505 @@ where ), err(Debug), )] - async fn execute_step(&mut self, step_path: &StepPath, step: &Step) -> Result<()> { - let steps_executed = match step { - Step::FunctionCall(step) => self - .execute_function_call(step_path, 
step.as_ref()) - .await - .context("Function call step Failed"), - Step::BalanceAssertion(step) => self - .execute_balance_assertion(step_path, step.as_ref()) - .await - .context("Balance Assertion Step Failed"), - Step::StorageEmptyAssertion(step) => self - .execute_storage_empty_assertion_step(step_path, step.as_ref()) - .await - .context("Storage Empty Assertion Step Failed"), - Step::Repeat(step) => self - .execute_repeat_step(step_path, step.as_ref()) - .await - .context("Repetition Step Failed"), - Step::AllocateAccount(step) => self - .execute_account_allocation(step_path, step.as_ref()) - .await - .context("Account Allocation Step Failed"), - }?; - self.steps_executed += steps_executed; - Ok(()) - } + async fn execute_step(&mut self, step_path: &StepPath, step: &Step) -> Result<()> { + let steps_executed = match step { + Step::FunctionCall(step) => self + .execute_function_call(step_path, step.as_ref()) + .await + .context("Function call step Failed"), + Step::BalanceAssertion(step) => self + .execute_balance_assertion(step_path, step.as_ref()) + .await + .context("Balance Assertion Step Failed"), + Step::StorageEmptyAssertion(step) => self + .execute_storage_empty_assertion_step(step_path, step.as_ref()) + .await + .context("Storage Empty Assertion Step Failed"), + Step::Repeat(step) => self + .execute_repeat_step(step_path, step.as_ref()) + .await + .context("Repetition Step Failed"), + Step::AllocateAccount(step) => self + .execute_account_allocation(step_path, step.as_ref()) + .await + .context("Account Allocation Step Failed"), + }?; + self.steps_executed += steps_executed; + Ok(()) + } - #[instrument(level = "info", skip_all)] - pub async fn execute_function_call( - &mut self, - _: &StepPath, - step: &FunctionCallStep, - ) -> Result { - let deployment_receipts = self - .handle_function_call_contract_deployment(step) - .await - .context("Failed to deploy contracts for the function call step")?; - let execution_receipt = self - 
.handle_function_call_execution(step, deployment_receipts) - .await - .context("Failed to handle the function call execution")?; - let tracing_result = self - .handle_function_call_call_frame_tracing(execution_receipt.transaction_hash) - .await - .context("Failed to handle the function call call frame tracing")?; - self.handle_function_call_variable_assignment(step, &tracing_result) - .await - .context("Failed to handle function call variable assignment")?; - self.handle_function_call_assertions(step, &execution_receipt, &tracing_result) - .await - .context("Failed to handle function call assertions")?; - Ok(1) - } + #[instrument(level = "info", skip_all)] + pub async fn execute_function_call( + &mut self, + _: &StepPath, + step: &FunctionCallStep, + ) -> Result { + let deployment_receipts = self + .handle_function_call_contract_deployment(step) + .await + .context("Failed to deploy contracts for the function call step")?; + let execution_receipt = self + .handle_function_call_execution(step, deployment_receipts) + .await + .context("Failed to handle the function call execution")?; + let tracing_result = self + .handle_function_call_call_frame_tracing(execution_receipt.transaction_hash) + .await + .context("Failed to handle the function call call frame tracing")?; + self.handle_function_call_variable_assignment(step, &tracing_result) + .await + .context("Failed to handle function call variable assignment")?; + self.handle_function_call_assertions(step, &execution_receipt, &tracing_result) + .await + .context("Failed to handle function call assertions")?; + Ok(1) + } - #[instrument(level = "debug", skip_all)] - async fn handle_function_call_contract_deployment( - &mut self, - step: &FunctionCallStep, - ) -> Result> { - let mut instances_we_must_deploy = IndexMap::::new(); - for instance in step.find_all_contract_instances().into_iter() { - if !self - .execution_state - .deployed_contracts - .contains_key(&instance) - { - 
instances_we_must_deploy.entry(instance).or_insert(false); - } - } - if let Method::Deployer = step.method { - instances_we_must_deploy.swap_remove(&step.instance); - instances_we_must_deploy.insert(step.instance.clone(), true); - } + #[instrument(level = "debug", skip_all)] + async fn handle_function_call_contract_deployment( + &mut self, + step: &FunctionCallStep, + ) -> Result> { + let mut instances_we_must_deploy = IndexMap::::new(); + for instance in step.find_all_contract_instances().into_iter() { + if !self.execution_state.deployed_contracts.contains_key(&instance) { + instances_we_must_deploy.entry(instance).or_insert(false); + } + } + if let Method::Deployer = step.method { + instances_we_must_deploy.swap_remove(&step.instance); + instances_we_must_deploy.insert(step.instance.clone(), true); + } - let mut receipts = HashMap::new(); - for (instance, deploy_with_constructor_arguments) in instances_we_must_deploy.into_iter() { - let calldata = deploy_with_constructor_arguments.then_some(&step.calldata); - let value = deploy_with_constructor_arguments - .then_some(step.value) - .flatten(); + let mut receipts = HashMap::new(); + for (instance, deploy_with_constructor_arguments) in instances_we_must_deploy.into_iter() { + let calldata = deploy_with_constructor_arguments.then_some(&step.calldata); + let value = deploy_with_constructor_arguments.then_some(step.value).flatten(); - let caller = { - let context = self.default_resolution_context(); - let resolver = self.platform_information.node.resolver().await?; - let resolved = step - .caller - .resolve_address(resolver.as_ref(), context) - .await?; - self.platform_information - .node - .resolve_signer_or_default(resolved) - }; - if let (_, _, Some(receipt)) = self - .get_or_deploy_contract_instance(&instance, caller, calldata, value) - .await - .context("Failed to get or deploy contract instance during input execution")? 
- { - receipts.insert(instance.clone(), receipt); - } - } + let caller = { + let context = self.default_resolution_context(); + let resolver = self.platform_information.node.resolver().await?; + let resolved = step.caller.resolve_address(resolver.as_ref(), context).await?; + self.platform_information.node.resolve_signer_or_default(resolved) + }; + if let (_, _, Some(receipt)) = self + .get_or_deploy_contract_instance(&instance, caller, calldata, value) + .await + .context("Failed to get or deploy contract instance during input execution")? + { + receipts.insert(instance.clone(), receipt); + } + } - Ok(receipts) - } + Ok(receipts) + } - #[instrument(level = "debug", skip_all)] - async fn handle_function_call_execution( - &mut self, - step: &FunctionCallStep, - mut deployment_receipts: HashMap, - ) -> Result { - match step.method { - // This step was already executed when `handle_step` was called. We just need to - // lookup the transaction receipt in this case and continue on. - Method::Deployer => deployment_receipts - .remove(&step.instance) - .context("Failed to find deployment receipt for constructor call"), - Method::Fallback | Method::FunctionName(_) => { - let resolver = self.platform_information.node.resolver().await?; - let mut tx = match step - .as_transaction(resolver.as_ref(), self.default_resolution_context()) - .await - { - Ok(tx) => tx, - Err(err) => { - return Err(err); - } - }; + #[instrument(level = "debug", skip_all)] + async fn handle_function_call_execution( + &mut self, + step: &FunctionCallStep, + mut deployment_receipts: HashMap, + ) -> Result { + match step.method { + // This step was already executed when `handle_step` was called. We just need to + // lookup the transaction receipt in this case and continue on. 
+ Method::Deployer => deployment_receipts + .remove(&step.instance) + .context("Failed to find deployment receipt for constructor call"), + Method::Fallback | Method::FunctionName(_) => { + let resolver = self.platform_information.node.resolver().await?; + let mut tx = match step + .as_transaction(resolver.as_ref(), self.default_resolution_context()) + .await + { + Ok(tx) => tx, + Err(err) => { + return Err(err); + }, + }; - // Resolve the signer to ensure we use an address that has keys - if let Some(from) = tx.from { - tx.from = Some( - self.platform_information - .node - .resolve_signer_or_default(from), - ); - } + // Resolve the signer to ensure we use an address that has keys + if let Some(from) = tx.from { + tx.from = Some(self.platform_information.node.resolve_signer_or_default(from)); + } - self.platform_information.node.execute_transaction(tx).await - } - } - } + self.platform_information.node.execute_transaction(tx).await + }, + } + } - #[instrument(level = "debug", skip_all)] - async fn handle_function_call_call_frame_tracing( - &mut self, - tx_hash: TxHash, - ) -> Result { - self.platform_information - .node - .trace_transaction( - tx_hash, - GethDebugTracingOptions { - tracer: Some(GethDebugTracerType::BuiltInTracer( - GethDebugBuiltInTracerType::CallTracer, - )), - tracer_config: GethDebugTracerConfig(serde_json::json! 
{{ - "onlyTopCall": true, - "withLog": false, - "withStorage": false, - "withMemory": false, - "withStack": false, - "withReturnData": true - }}), - ..Default::default() - }, - ) - .await - .map(|trace| { - trace - .try_into_call_frame() - .expect("Impossible - we requested a callframe trace so we must get it back") - }) - } + #[instrument(level = "debug", skip_all)] + async fn handle_function_call_call_frame_tracing( + &mut self, + tx_hash: TxHash, + ) -> Result { + self.platform_information + .node + .trace_transaction( + tx_hash, + GethDebugTracingOptions { + tracer: Some(GethDebugTracerType::BuiltInTracer( + GethDebugBuiltInTracerType::CallTracer, + )), + tracer_config: GethDebugTracerConfig(serde_json::json! {{ + "onlyTopCall": true, + "withLog": false, + "withStorage": false, + "withMemory": false, + "withStack": false, + "withReturnData": true + }}), + ..Default::default() + }, + ) + .await + .map(|trace| { + trace + .try_into_call_frame() + .expect("Impossible - we requested a callframe trace so we must get it back") + }) + } - #[instrument(level = "debug", skip_all)] - async fn handle_function_call_variable_assignment( - &mut self, - step: &FunctionCallStep, - tracing_result: &CallFrame, - ) -> Result<()> { - let Some(ref assignments) = step.variable_assignments else { - return Ok(()); - }; + #[instrument(level = "debug", skip_all)] + async fn handle_function_call_variable_assignment( + &mut self, + step: &FunctionCallStep, + tracing_result: &CallFrame, + ) -> Result<()> { + let Some(ref assignments) = step.variable_assignments else { + return Ok(()); + }; - // Handling the return data variable assignments. 
- for (variable_name, output_word) in assignments.return_data.iter().zip( - tracing_result - .output - .as_ref() - .unwrap_or_default() - .to_vec() - .chunks(32), - ) { - let value = U256::from_be_slice(output_word); - self.execution_state - .variables - .insert(variable_name.clone(), value); - tracing::info!( - variable_name, - variable_value = hex::encode(value.to_be_bytes::<32>()), - "Assigned variable" - ); - } + // Handling the return data variable assignments. + for (variable_name, output_word) in assignments + .return_data + .iter() + .zip(tracing_result.output.as_ref().unwrap_or_default().to_vec().chunks(32)) + { + let value = U256::from_be_slice(output_word); + self.execution_state.variables.insert(variable_name.clone(), value); + tracing::info!( + variable_name, + variable_value = hex::encode(value.to_be_bytes::<32>()), + "Assigned variable" + ); + } - Ok(()) - } + Ok(()) + } - #[instrument(level = "debug", skip_all)] - async fn handle_function_call_assertions( - &mut self, - step: &FunctionCallStep, - receipt: &TransactionReceipt, - tracing_result: &CallFrame, - ) -> Result<()> { - // Resolving the `step.expected` into a series of expectations that we can then assert on. - let mut expectations = match step { - FunctionCallStep { - expected: Some(Expected::Calldata(calldata)), - .. - } => vec![ExpectedOutput::new().with_calldata(calldata.clone())], - FunctionCallStep { - expected: Some(Expected::Expected(expected)), - .. - } => vec![expected.clone()], - FunctionCallStep { - expected: Some(Expected::ExpectedMany(expected)), - .. - } => expected.clone(), - FunctionCallStep { expected: None, .. } => vec![ExpectedOutput::new().with_success()], - }; + #[instrument(level = "debug", skip_all)] + async fn handle_function_call_assertions( + &mut self, + step: &FunctionCallStep, + receipt: &TransactionReceipt, + tracing_result: &CallFrame, + ) -> Result<()> { + // Resolving the `step.expected` into a series of expectations that we can then assert on. 
+ let mut expectations = match step { + FunctionCallStep { expected: Some(Expected::Calldata(calldata)), .. } => + vec![ExpectedOutput::new().with_calldata(calldata.clone())], + FunctionCallStep { expected: Some(Expected::Expected(expected)), .. } => + vec![expected.clone()], + FunctionCallStep { expected: Some(Expected::ExpectedMany(expected)), .. } => + expected.clone(), + FunctionCallStep { expected: None, .. } => vec![ExpectedOutput::new().with_success()], + }; - // This is a bit of a special case and we have to support it separately on it's own. If it's - // a call to the deployer method, then the tests will assert that it "returns" the address - // of the contract. Deployments do not return the address of the contract but the runtime - // code of the contracts. Therefore, this assertion would always fail. So, we replace it - // with an assertion of "check if it succeeded" - if let Method::Deployer = &step.method { - for expectation in expectations.iter_mut() { - expectation.return_data = None; - } - } + // This is a bit of a special case and we have to support it separately on it's own. If it's + // a call to the deployer method, then the tests will assert that it "returns" the address + // of the contract. Deployments do not return the address of the contract but the runtime + // code of the contracts. Therefore, this assertion would always fail. 
So, we replace it + // with an assertion of "check if it succeeded" + if let Method::Deployer = &step.method { + for expectation in expectations.iter_mut() { + expectation.return_data = None; + } + } - futures::stream::iter(expectations.into_iter().map(Ok)) - .try_for_each_concurrent(None, |expectation| async { - self.handle_function_call_assertion_item(receipt, tracing_result, expectation) - .await - }) - .await - } + futures::stream::iter(expectations.into_iter().map(Ok)) + .try_for_each_concurrent(None, |expectation| async { + self.handle_function_call_assertion_item(receipt, tracing_result, expectation) + .await + }) + .await + } - #[instrument(level = "debug", skip_all)] - async fn handle_function_call_assertion_item( - &self, - receipt: &TransactionReceipt, - tracing_result: &CallFrame, - assertion: ExpectedOutput, - ) -> Result<()> { - let resolver = self - .platform_information - .node - .resolver() - .await - .context("Failed to create the resolver for the node")?; + #[instrument(level = "debug", skip_all)] + async fn handle_function_call_assertion_item( + &self, + receipt: &TransactionReceipt, + tracing_result: &CallFrame, + assertion: ExpectedOutput, + ) -> Result<()> { + let resolver = self + .platform_information + .node + .resolver() + .await + .context("Failed to create the resolver for the node")?; - if let Some(ref version_requirement) = assertion.compiler_version { - if !version_requirement.matches(self.platform_information.compiler.version()) { - return Ok(()); - } - } + if let Some(ref version_requirement) = assertion.compiler_version { + if !version_requirement.matches(self.platform_information.compiler.version()) { + return Ok(()); + } + } - let resolution_context = self - .default_resolution_context() - .with_block_number(receipt.block_number.as_ref()) - .with_transaction_hash(&receipt.transaction_hash); + let resolution_context = self + .default_resolution_context() + .with_block_number(receipt.block_number.as_ref()) + 
.with_transaction_hash(&receipt.transaction_hash); - // Handling the receipt state assertion. - let expected = !assertion.exception; - let actual = receipt.status(); - if actual != expected { - tracing::error!( - expected, - actual, - ?receipt, - ?tracing_result, - "Transaction status assertion failed" - ); - anyhow::bail!( - "Transaction status assertion failed - Expected {expected} but got {actual}", - ); - } + // Handling the receipt state assertion. + let expected = !assertion.exception; + let actual = receipt.status(); + if actual != expected { + tracing::error!( + expected, + actual, + ?receipt, + ?tracing_result, + "Transaction status assertion failed" + ); + anyhow::bail!( + "Transaction status assertion failed - Expected {expected} but got {actual}", + ); + } - // Handling the calldata assertion - if let Some(ref expected_calldata) = assertion.return_data { - let expected = expected_calldata; - let actual = &tracing_result.output.as_ref().unwrap_or_default(); - if !expected - .is_equivalent(actual, resolver.as_ref(), resolution_context) - .await - .context("Failed to resolve calldata equivalence for return data assertion")? - { - tracing::error!( - ?receipt, - ?expected, - %actual, - "Calldata assertion failed" - ); - anyhow::bail!("Calldata assertion failed - Expected {expected:?} but got {actual}",); - } - } + // Handling the calldata assertion + if let Some(ref expected_calldata) = assertion.return_data { + let expected = expected_calldata; + let actual = &tracing_result.output.as_ref().unwrap_or_default(); + if !expected + .is_equivalent(actual, resolver.as_ref(), resolution_context) + .await + .context("Failed to resolve calldata equivalence for return data assertion")? 
+ { + tracing::error!( + ?receipt, + ?expected, + %actual, + "Calldata assertion failed" + ); + anyhow::bail!("Calldata assertion failed - Expected {expected:?} but got {actual}",); + } + } - // Handling the events assertion - if let Some(ref expected_events) = assertion.events { - // Handling the events length assertion. - let expected = expected_events.len(); - let actual = receipt.logs().len(); - if actual != expected { - tracing::error!(expected, actual, "Event count assertion failed",); - anyhow::bail!( - "Event count assertion failed - Expected {expected} but got {actual}", - ); - } + // Handling the events assertion + if let Some(ref expected_events) = assertion.events { + // Handling the events length assertion. + let expected = expected_events.len(); + let actual = receipt.logs().len(); + if actual != expected { + tracing::error!(expected, actual, "Event count assertion failed",); + anyhow::bail!( + "Event count assertion failed - Expected {expected} but got {actual}", + ); + } - // Handling the events assertion. - for (event_idx, (expected_event, actual_event)) in - expected_events.iter().zip(receipt.logs()).enumerate() - { - // Handling the emitter assertion. - if let Some(ref expected_address) = expected_event.address { - let expected = expected_address - .resolve_address(resolver.as_ref(), resolution_context) - .await?; - let actual = actual_event.address(); - if actual != expected { - tracing::error!( - event_idx, - %expected, - %actual, - "Event emitter assertion failed", - ); - anyhow::bail!( - "Event emitter assertion failed - Expected {expected} but got {actual}", - ); - } - } + // Handling the events assertion. + for (event_idx, (expected_event, actual_event)) in + expected_events.iter().zip(receipt.logs()).enumerate() + { + // Handling the emitter assertion. 
+ if let Some(ref expected_address) = expected_event.address { + let expected = expected_address + .resolve_address(resolver.as_ref(), resolution_context) + .await?; + let actual = actual_event.address(); + if actual != expected { + tracing::error!( + event_idx, + %expected, + %actual, + "Event emitter assertion failed", + ); + anyhow::bail!( + "Event emitter assertion failed - Expected {expected} but got {actual}", + ); + } + } - // Handling the topics assertion. - for (expected, actual) in expected_event - .topics - .as_slice() - .iter() - .zip(actual_event.topics()) - { - let expected = Calldata::new_compound([expected]); - if !expected - .is_equivalent(&actual.0, resolver.as_ref(), resolution_context) - .await - .context("Failed to resolve event topic equivalence")? - { - tracing::error!( - event_idx, - ?receipt, - ?expected, - ?actual, - "Event topics assertion failed", - ); - anyhow::bail!( - "Event topics assertion failed - Expected {expected:?} but got {actual:?}", - ); - } - } + // Handling the topics assertion. + for (expected, actual) in + expected_event.topics.as_slice().iter().zip(actual_event.topics()) + { + let expected = Calldata::new_compound([expected]); + if !expected + .is_equivalent(&actual.0, resolver.as_ref(), resolution_context) + .await + .context("Failed to resolve event topic equivalence")? + { + tracing::error!( + event_idx, + ?receipt, + ?expected, + ?actual, + "Event topics assertion failed", + ); + anyhow::bail!( + "Event topics assertion failed - Expected {expected:?} but got {actual:?}", + ); + } + } - // Handling the values assertion. - let expected = &expected_event.values; - let actual = &actual_event.data().data; - if !expected - .is_equivalent(&actual.0, resolver.as_ref(), resolution_context) - .await - .context("Failed to resolve event value equivalence")? 
- { - tracing::error!( - event_idx, - ?receipt, - ?expected, - ?actual, - "Event value assertion failed", - ); - anyhow::bail!( - "Event value assertion failed - Expected {expected:?} but got {actual:?}", - ); - } - } - } + // Handling the values assertion. + let expected = &expected_event.values; + let actual = &actual_event.data().data; + if !expected + .is_equivalent(&actual.0, resolver.as_ref(), resolution_context) + .await + .context("Failed to resolve event value equivalence")? + { + tracing::error!( + event_idx, + ?receipt, + ?expected, + ?actual, + "Event value assertion failed", + ); + anyhow::bail!( + "Event value assertion failed - Expected {expected:?} but got {actual:?}", + ); + } + } + } - Ok(()) - } + Ok(()) + } - #[instrument(level = "info", skip_all)] - pub async fn execute_balance_assertion( - &mut self, - _: &StepPath, - step: &BalanceAssertionStep, - ) -> anyhow::Result { - self.step_address_auto_deployment(&step.address) - .await - .context("Failed to perform auto-deployment for the step address")?; + #[instrument(level = "info", skip_all)] + pub async fn execute_balance_assertion( + &mut self, + _: &StepPath, + step: &BalanceAssertionStep, + ) -> anyhow::Result { + self.step_address_auto_deployment(&step.address) + .await + .context("Failed to perform auto-deployment for the step address")?; - let resolver = self.platform_information.node.resolver().await?; - let address = step - .address - .resolve_address(resolver.as_ref(), self.default_resolution_context()) - .await?; + let resolver = self.platform_information.node.resolver().await?; + let address = step + .address + .resolve_address(resolver.as_ref(), self.default_resolution_context()) + .await?; - let balance = self.platform_information.node.balance_of(address).await?; + let balance = self.platform_information.node.balance_of(address).await?; - let expected = step.expected_balance; - let actual = balance; - if expected != actual { - tracing::error!(%expected, %actual, %address, "Balance 
assertion failed"); - anyhow::bail!( - "Balance assertion failed - Expected {} but got {} for {} resolved to {}", - expected, - actual, - address, - address, - ) - } + let expected = step.expected_balance; + let actual = balance; + if expected != actual { + tracing::error!(%expected, %actual, %address, "Balance assertion failed"); + anyhow::bail!( + "Balance assertion failed - Expected {} but got {} for {} resolved to {}", + expected, + actual, + address, + address, + ) + } - Ok(1) - } + Ok(1) + } - #[instrument(level = "info", skip_all, err(Debug))] - async fn execute_storage_empty_assertion_step( - &mut self, - _: &StepPath, - step: &StorageEmptyAssertionStep, - ) -> Result { - self.step_address_auto_deployment(&step.address) - .await - .context("Failed to perform auto-deployment for the step address")?; + #[instrument(level = "info", skip_all, err(Debug))] + async fn execute_storage_empty_assertion_step( + &mut self, + _: &StepPath, + step: &StorageEmptyAssertionStep, + ) -> Result { + self.step_address_auto_deployment(&step.address) + .await + .context("Failed to perform auto-deployment for the step address")?; - let resolver = self.platform_information.node.resolver().await?; - let address = step - .address - .resolve_address(resolver.as_ref(), self.default_resolution_context()) - .await?; + let resolver = self.platform_information.node.resolver().await?; + let address = step + .address + .resolve_address(resolver.as_ref(), self.default_resolution_context()) + .await?; - let storage = self - .platform_information - .node - .latest_state_proof(address, Default::default()) - .await?; - let is_empty = storage.storage_hash == EMPTY_ROOT_HASH; + let storage = self + .platform_information + .node + .latest_state_proof(address, Default::default()) + .await?; + let is_empty = storage.storage_hash == EMPTY_ROOT_HASH; - let expected = step.is_storage_empty; - let actual = is_empty; + let expected = step.is_storage_empty; + let actual = is_empty; - if expected != actual 
{ - tracing::error!(%expected, %actual, %address, "Storage Empty Assertion failed"); - anyhow::bail!( - "Storage Empty Assertion failed - Expected {} but got {} for {} resolved to {}", - expected, - actual, - address, - address, - ) - }; + if expected != actual { + tracing::error!(%expected, %actual, %address, "Storage Empty Assertion failed"); + anyhow::bail!( + "Storage Empty Assertion failed - Expected {} but got {} for {} resolved to {}", + expected, + actual, + address, + address, + ) + }; - Ok(1) - } + Ok(1) + } - #[instrument(level = "info", skip_all, err(Debug))] - async fn execute_repeat_step( - &mut self, - step_path: &StepPath, - step: &RepeatStep, - ) -> Result { - let tasks = (0..step.repeat) - .map(|_| PlatformDriver { - platform_information: self.platform_information, - test_definition: self.test_definition, - private_key_allocator: self.private_key_allocator.clone(), - execution_state: self.execution_state.clone(), - steps_executed: 0, - steps_iterator: { - let steps: Vec<(StepPath, Step)> = step - .steps - .iter() - .cloned() - .enumerate() - .map(|(step_idx, step)| { - let step_idx = StepIdx::new(step_idx); - let step_path = step_path.append(step_idx); - (step_path, step) - }) - .collect(); - steps.into_iter() - }, - }) - .map(|driver| driver.execute_all()) - .collect::>(); - let res = futures::future::try_join_all(tasks) - .await - .context("Repetition execution failed")?; - Ok(res.first().copied().unwrap_or_default()) - } + #[instrument(level = "info", skip_all, err(Debug))] + async fn execute_repeat_step( + &mut self, + step_path: &StepPath, + step: &RepeatStep, + ) -> Result { + let tasks = (0..step.repeat) + .map(|_| PlatformDriver { + platform_information: self.platform_information, + test_definition: self.test_definition, + private_key_allocator: self.private_key_allocator.clone(), + execution_state: self.execution_state.clone(), + steps_executed: 0, + steps_iterator: { + let steps: Vec<(StepPath, Step)> = step + .steps + .iter() + 
.cloned() + .enumerate() + .map(|(step_idx, step)| { + let step_idx = StepIdx::new(step_idx); + let step_path = step_path.append(step_idx); + (step_path, step) + }) + .collect(); + steps.into_iter() + }, + }) + .map(|driver| driver.execute_all()) + .collect::>(); + let res = futures::future::try_join_all(tasks) + .await + .context("Repetition execution failed")?; + Ok(res.first().copied().unwrap_or_default()) + } - #[instrument(level = "info", skip_all, err(Debug))] - pub async fn execute_account_allocation( - &mut self, - _: &StepPath, - step: &AllocateAccountStep, - ) -> Result { - let Some(variable_name) = step.variable_name.strip_prefix("$VARIABLE:") else { - bail!("Account allocation must start with $VARIABLE:"); - }; + #[instrument(level = "info", skip_all, err(Debug))] + pub async fn execute_account_allocation( + &mut self, + _: &StepPath, + step: &AllocateAccountStep, + ) -> Result { + let Some(variable_name) = step.variable_name.strip_prefix("$VARIABLE:") else { + bail!("Account allocation must start with $VARIABLE:"); + }; - let private_key = self.private_key_allocator.lock().await.allocate()?; - let account = private_key.address(); - let variable = U256::from_be_slice(account.0.as_slice()); + let private_key = self.private_key_allocator.lock().await.allocate()?; + let account = private_key.address(); + let variable = U256::from_be_slice(account.0.as_slice()); - self.execution_state - .variables - .insert(variable_name.to_string(), variable); + self.execution_state.variables.insert(variable_name.to_string(), variable); - Ok(1) - } - // endregion:Step Handling + Ok(1) + } + // endregion:Step Handling - // region:Contract Deployment - #[instrument( + // region:Contract Deployment + #[instrument( level = "info", skip_all, fields( @@ -869,40 +828,38 @@ where ), err(Debug), )] - async fn get_or_deploy_contract_instance( - &mut self, - contract_instance: &ContractInstance, - deployer: Address, - calldata: Option<&Calldata>, - value: Option, - ) -> 
Result<(Address, JsonAbi, Option)> { - if let Some((_, address, abi)) = self - .execution_state - .deployed_contracts - .get(contract_instance) - { - info!( + async fn get_or_deploy_contract_instance( + &mut self, + contract_instance: &ContractInstance, + deployer: Address, + calldata: Option<&Calldata>, + value: Option, + ) -> Result<(Address, JsonAbi, Option)> { + if let Some((_, address, abi)) = + self.execution_state.deployed_contracts.get(contract_instance) + { + info!( - %address, - "Contract instance already deployed." - ); - Ok((*address, abi.clone(), None)) - } else { - info!("Contract instance requires deployment."); + %address, + "Contract instance already deployed." + ); + Ok((*address, abi.clone(), None)) + } else { + info!("Contract instance requires deployment."); - let (address, abi, receipt) = self - .deploy_contract(contract_instance, deployer, calldata, value) - .await - .context("Failed to deploy contract")?; - info!( - %address, - "Contract instance has been deployed." - ); - Ok((address, abi, Some(receipt))) - } - } + let (address, abi, receipt) = self + .deploy_contract(contract_instance, deployer, calldata, value) + .await + .context("Failed to deploy contract")?; + info!( + %address, + "Contract instance has been deployed." + ); + Ok((address, abi, Some(receipt))) + } + } - #[instrument( + #[instrument( level = "info", skip_all, fields( @@ -911,136 +868,117 @@ where ), err(Debug), )] - async fn deploy_contract( - &mut self, - contract_instance: &ContractInstance, - deployer: Address, - calldata: Option<&Calldata>, - value: Option, - ) -> Result<(Address, JsonAbi, TransactionReceipt)> { - let Some(ContractPathAndIdent { - contract_source_path, - contract_ident, - }) = self - .test_definition - .metadata - .contract_sources()? 
- .remove(contract_instance) - else { - anyhow::bail!( - "Contract source not found for instance {:?}", - contract_instance - ) - }; + async fn deploy_contract( + &mut self, + contract_instance: &ContractInstance, + deployer: Address, + calldata: Option<&Calldata>, + value: Option, + ) -> Result<(Address, JsonAbi, TransactionReceipt)> { + let Some(ContractPathAndIdent { contract_source_path, contract_ident }) = + self.test_definition.metadata.contract_sources()?.remove(contract_instance) + else { + anyhow::bail!("Contract source not found for instance {:?}", contract_instance) + }; - let Some((code, abi)) = self - .execution_state - .compiled_contracts - .get(&contract_source_path) - .and_then(|source_file_contracts| source_file_contracts.get(contract_ident.as_ref())) - .cloned() - else { - anyhow::bail!( - "Failed to find information for contract {:?}", - contract_instance - ) - }; + let Some((code, abi)) = self + .execution_state + .compiled_contracts + .get(&contract_source_path) + .and_then(|source_file_contracts| source_file_contracts.get(contract_ident.as_ref())) + .cloned() + else { + anyhow::bail!("Failed to find information for contract {:?}", contract_instance) + }; - let mut code = match alloy::hex::decode(&code) { - Ok(code) => code, - Err(error) => { - tracing::error!( - ?error, - contract_source_path = contract_source_path.display().to_string(), - contract_ident = contract_ident.as_ref(), - "Failed to hex-decode byte code - This could possibly mean that the bytecode requires linking" - ); - anyhow::bail!("Failed to hex-decode the byte code {}", error) - } - }; + let mut code = match alloy::hex::decode(&code) { + Ok(code) => code, + Err(error) => { + tracing::error!( + ?error, + contract_source_path = contract_source_path.display().to_string(), + contract_ident = contract_ident.as_ref(), + "Failed to hex-decode byte code - This could possibly mean that the bytecode requires linking" + ); + anyhow::bail!("Failed to hex-decode the byte code {}", error) + 
}, + }; - if let Some(calldata) = calldata { - let resolver = self.platform_information.node.resolver().await?; - let calldata = calldata - .calldata(resolver.as_ref(), self.default_resolution_context()) - .await?; - code.extend(calldata); - } + if let Some(calldata) = calldata { + let resolver = self.platform_information.node.resolver().await?; + let calldata = + calldata.calldata(resolver.as_ref(), self.default_resolution_context()).await?; + code.extend(calldata); + } - let tx = { - let deployer = self - .platform_information - .node - .resolve_signer_or_default(deployer); - let tx = TransactionRequest::default().from(deployer); - let tx = match value { - Some(ref value) => tx.value(value.into_inner()), - _ => tx, - }; - TransactionBuilder::::with_deploy_code(tx, code) - }; + let tx = { + let deployer = self.platform_information.node.resolve_signer_or_default(deployer); + let tx = TransactionRequest::default().from(deployer); + let tx = match value { + Some(ref value) => tx.value(value.into_inner()), + _ => tx, + }; + TransactionBuilder::::with_deploy_code(tx, code) + }; - let receipt = match self.platform_information.node.execute_transaction(tx).await { - Ok(receipt) => receipt, - Err(error) => { - tracing::error!(?error, "Contract deployment transaction failed."); - return Err(error); - } - }; + let receipt = match self.platform_information.node.execute_transaction(tx).await { + Ok(receipt) => receipt, + Err(error) => { + tracing::error!(?error, "Contract deployment transaction failed."); + return Err(error); + }, + }; - let Some(address) = receipt.contract_address else { - anyhow::bail!("Contract deployment didn't return an address"); - }; - tracing::info!( - instance_name = ?contract_instance, - instance_address = ?address, - "Deployed contract" - ); - self.platform_information - .reporter - .report_contract_deployed_event(contract_instance.clone(), address)?; + let Some(address) = receipt.contract_address else { + anyhow::bail!("Contract deployment didn't 
return an address"); + }; + tracing::info!( + instance_name = ?contract_instance, + instance_address = ?address, + "Deployed contract" + ); + self.platform_information + .reporter + .report_contract_deployed_event(contract_instance.clone(), address)?; - self.execution_state.deployed_contracts.insert( - contract_instance.clone(), - (contract_ident, address, abi.clone()), - ); + self.execution_state + .deployed_contracts + .insert(contract_instance.clone(), (contract_ident, address, abi.clone())); - Ok((address, abi, receipt)) - } + Ok((address, abi, receipt)) + } - #[instrument(level = "info", skip_all)] - async fn step_address_auto_deployment( - &mut self, - step_address: &StepAddress, - ) -> Result
{ - match step_address { - StepAddress::Address(address) => Ok(*address), - StepAddress::ResolvableAddress(resolvable) => { - let Some(instance) = resolvable - .strip_suffix(".address") - .map(ContractInstance::new) - else { - bail!("Not an address variable"); - }; + #[instrument(level = "info", skip_all)] + async fn step_address_auto_deployment( + &mut self, + step_address: &StepAddress, + ) -> Result
{ + match step_address { + StepAddress::Address(address) => Ok(*address), + StepAddress::ResolvableAddress(resolvable) => { + let Some(instance) = resolvable.strip_suffix(".address").map(ContractInstance::new) + else { + bail!("Not an address variable"); + }; - self.get_or_deploy_contract_instance( - &instance, - FunctionCallStep::default_caller_address(), - None, - None, - ) - .await - .map(|v| v.0) - } - } - } - // endregion:Contract Deployment + self.get_or_deploy_contract_instance( + &instance, + FunctionCallStep::default_caller_address(), + None, + None, + ) + .await + .map(|v| v.0) + }, + } + } + // endregion:Contract Deployment - // region:Resolution & Resolver - fn default_resolution_context(&self) -> ResolutionContext<'_> { - ResolutionContext::default() - .with_deployed_contracts(&self.execution_state.deployed_contracts) - .with_variables(&self.execution_state.variables) - } - // endregion:Resolution & Resolver + // region:Resolution & Resolver + fn default_resolution_context(&self) -> ResolutionContext<'_> { + ResolutionContext::default() + .with_deployed_contracts(&self.execution_state.deployed_contracts) + .with_variables(&self.execution_state.variables) + } + // endregion:Resolution & Resolver } diff --git a/crates/core/src/differential_tests/entry_point.rs b/crates/core/src/differential_tests/entry_point.rs index 142bfa2..eca17ae 100644 --- a/crates/core/src/differential_tests/entry_point.rs +++ b/crates/core/src/differential_tests/entry_point.rs @@ -1,10 +1,10 @@ //! The main entry point into differential testing. 
use std::{ - collections::{BTreeMap, BTreeSet}, - io::{BufWriter, Write, stderr}, - sync::Arc, - time::{Duration, Instant}, + collections::{BTreeMap, BTreeSet}, + io::{BufWriter, Write, stderr}, + sync::Arc, + time::{Duration, Instant}, }; use crate::Platform; @@ -18,260 +18,249 @@ use revive_dt_config::{Context, TestExecutionContext}; use revive_dt_report::{Reporter, ReporterEvent, TestCaseStatus}; use crate::{ - differential_tests::Driver, - helpers::{CachedCompiler, NodePool, collect_metadata_files, create_test_definitions_stream}, + differential_tests::Driver, + helpers::{CachedCompiler, NodePool, collect_metadata_files, create_test_definitions_stream}, }; /// Handles the differential testing executing it according to the information defined in the /// context #[instrument(level = "info", err(Debug), skip_all)] pub async fn handle_differential_tests( - context: TestExecutionContext, - reporter: Reporter, + context: TestExecutionContext, + reporter: Reporter, ) -> anyhow::Result<()> { - let reporter_clone = reporter.clone(); + let reporter_clone = reporter.clone(); - // Discover all of the metadata files that are defined in the context. - let metadata_files = collect_metadata_files(&context) - .context("Failed to collect metadata files for differential testing")?; - info!(len = metadata_files.len(), "Discovered metadata files"); + // Discover all of the metadata files that are defined in the context. + let metadata_files = collect_metadata_files(&context) + .context("Failed to collect metadata files for differential testing")?; + info!(len = metadata_files.len(), "Discovered metadata files"); - // Discover the list of platforms that the tests should run on based on the context. - let platforms = context - .platforms - .iter() - .copied() - .map(Into::<&dyn Platform>::into) - .collect::>(); + // Discover the list of platforms that the tests should run on based on the context. 
+ let platforms = context + .platforms + .iter() + .copied() + .map(Into::<&dyn Platform>::into) + .collect::>(); - // Starting the nodes of the various platforms specified in the context. - let platforms_and_nodes = { - let mut map = BTreeMap::new(); + // Starting the nodes of the various platforms specified in the context. + let platforms_and_nodes = { + let mut map = BTreeMap::new(); - for platform in platforms.iter() { - let platform_identifier = platform.platform_identifier(); + for platform in platforms.iter() { + let platform_identifier = platform.platform_identifier(); - let context = Context::Test(Box::new(context.clone())); - let node_pool = NodePool::new(context, *platform) - .await - .inspect_err(|err| { - error!( - ?err, - %platform_identifier, - "Failed to initialize the node pool for the platform." - ) - }) - .context("Failed to initialize the node pool")?; + let context = Context::Test(Box::new(context.clone())); + let node_pool = NodePool::new(context, *platform) + .await + .inspect_err(|err| { + error!( + ?err, + %platform_identifier, + "Failed to initialize the node pool for the platform." + ) + }) + .context("Failed to initialize the node pool")?; - map.insert(platform_identifier, (*platform, node_pool)); - } + map.insert(platform_identifier, (*platform, node_pool)); + } - map - }; - info!("Spawned the platform nodes"); + map + }; + info!("Spawned the platform nodes"); - // Preparing test definitions. - let full_context = Context::Test(Box::new(context.clone())); - let test_definitions = create_test_definitions_stream( - &full_context, - metadata_files.iter(), - &platforms_and_nodes, - reporter.clone(), - ) - .await - .collect::>() - .await; - info!(len = test_definitions.len(), "Created test definitions"); + // Preparing test definitions. 
+ let full_context = Context::Test(Box::new(context.clone())); + let test_definitions = create_test_definitions_stream( + &full_context, + metadata_files.iter(), + &platforms_and_nodes, + reporter.clone(), + ) + .await + .collect::>() + .await; + info!(len = test_definitions.len(), "Created test definitions"); - // Creating everything else required for the driver to run. - let cached_compiler = CachedCompiler::new( - context - .working_directory - .as_path() - .join("compilation_cache"), - context - .compilation_configuration - .invalidate_compilation_cache, - ) - .await - .map(Arc::new) - .context("Failed to initialize cached compiler")?; - let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new( - context.wallet_configuration.highest_private_key_exclusive(), - ))); + // Creating everything else required for the driver to run. + let cached_compiler = CachedCompiler::new( + context.working_directory.as_path().join("compilation_cache"), + context.compilation_configuration.invalidate_compilation_cache, + ) + .await + .map(Arc::new) + .context("Failed to initialize cached compiler")?; + let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new( + context.wallet_configuration.highest_private_key_exclusive(), + ))); - // Creating the driver and executing all of the steps. - let semaphore = context - .concurrency_configuration - .concurrency_limit() - .map(Semaphore::new) - .map(Arc::new); - let running_task_list = Arc::new(RwLock::new(BTreeSet::::new())); - let driver_task = futures::future::join_all(test_definitions.iter().enumerate().map( - |(test_id, test_definition)| { - let running_task_list = running_task_list.clone(); - let semaphore = semaphore.clone(); + // Creating the driver and executing all of the steps. 
+ let semaphore = context + .concurrency_configuration + .concurrency_limit() + .map(Semaphore::new) + .map(Arc::new); + let running_task_list = Arc::new(RwLock::new(BTreeSet::::new())); + let driver_task = futures::future::join_all(test_definitions.iter().enumerate().map( + |(test_id, test_definition)| { + let running_task_list = running_task_list.clone(); + let semaphore = semaphore.clone(); - let private_key_allocator = private_key_allocator.clone(); - let cached_compiler = cached_compiler.clone(); - let mode = test_definition.mode.clone(); - let span = info_span!( - "Executing Test Case", - test_id, - metadata_file_path = %test_definition.metadata_file_path.display(), - case_idx = %test_definition.case_idx, - mode = %mode, - ); - async move { - let permit = match semaphore.as_ref() { - Some(semaphore) => Some(semaphore.acquire().await.expect("Can't fail")), - None => None, - }; + let private_key_allocator = private_key_allocator.clone(); + let cached_compiler = cached_compiler.clone(); + let mode = test_definition.mode.clone(); + let span = info_span!( + "Executing Test Case", + test_id, + metadata_file_path = %test_definition.metadata_file_path.display(), + case_idx = %test_definition.case_idx, + mode = %mode, + ); + async move { + let permit = match semaphore.as_ref() { + Some(semaphore) => Some(semaphore.acquire().await.expect("Can't fail")), + None => None, + }; - running_task_list.write().await.insert(test_id); - let driver = match Driver::new_root( - test_definition, - private_key_allocator, - &cached_compiler, - ) - .await - { - Ok(driver) => driver, - Err(error) => { - test_definition - .reporter - .report_test_failed_event(format!("{error:#}")) - .expect("Can't fail"); - error!("Test Case Failed"); - drop(permit); - running_task_list.write().await.remove(&test_id); - return; - } - }; - info!("Created the driver for the test case"); + running_task_list.write().await.insert(test_id); + let driver = match Driver::new_root( + test_definition, + 
private_key_allocator, + &cached_compiler, + ) + .await + { + Ok(driver) => driver, + Err(error) => { + test_definition + .reporter + .report_test_failed_event(format!("{error:#}")) + .expect("Can't fail"); + error!("Test Case Failed"); + drop(permit); + running_task_list.write().await.remove(&test_id); + return; + }, + }; + info!("Created the driver for the test case"); - match driver.execute_all().await { - Ok(steps_executed) => test_definition - .reporter - .report_test_succeeded_event(steps_executed) - .expect("Can't fail"), - Err(error) => { - test_definition - .reporter - .report_test_failed_event(format!("{error:#}")) - .expect("Can't fail"); - error!("Test Case Failed"); - } - }; - info!("Finished the execution of the test case"); - drop(permit); - running_task_list.write().await.remove(&test_id); - } - .instrument(span) - }, - )) - .inspect(|_| { - info!("Finished executing all test cases"); - reporter_clone - .report_completion_event() - .expect("Can't fail") - }); - let cli_reporting_task = start_cli_reporting_task(reporter); + match driver.execute_all().await { + Ok(steps_executed) => test_definition + .reporter + .report_test_succeeded_event(steps_executed) + .expect("Can't fail"), + Err(error) => { + test_definition + .reporter + .report_test_failed_event(format!("{error:#}")) + .expect("Can't fail"); + error!("Test Case Failed"); + }, + }; + info!("Finished the execution of the test case"); + drop(permit); + running_task_list.write().await.remove(&test_id); + } + .instrument(span) + }, + )) + .inspect(|_| { + info!("Finished executing all test cases"); + reporter_clone.report_completion_event().expect("Can't fail") + }); + let cli_reporting_task = start_cli_reporting_task(reporter); - tokio::task::spawn(async move { - loop { - let remaining_tasks = running_task_list.read().await; - info!( - count = remaining_tasks.len(), - ?remaining_tasks, - "Remaining Tests" - ); - tokio::time::sleep(Duration::from_secs(10)).await - } - }); + 
tokio::task::spawn(async move { + loop { + let remaining_tasks = running_task_list.read().await; + info!(count = remaining_tasks.len(), ?remaining_tasks, "Remaining Tests"); + tokio::time::sleep(Duration::from_secs(10)).await + } + }); - futures::future::join(driver_task, cli_reporting_task).await; + futures::future::join(driver_task, cli_reporting_task).await; - Ok(()) + Ok(()) } #[allow(irrefutable_let_patterns, clippy::uninlined_format_args)] async fn start_cli_reporting_task(reporter: Reporter) { - let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail"); - drop(reporter); + let mut aggregator_events_rx = reporter.subscribe().await.expect("Can't fail"); + drop(reporter); - let start = Instant::now(); + let start = Instant::now(); - const GREEN: &str = "\x1B[32m"; - const RED: &str = "\x1B[31m"; - const GREY: &str = "\x1B[90m"; - const COLOR_RESET: &str = "\x1B[0m"; - const BOLD: &str = "\x1B[1m"; - const BOLD_RESET: &str = "\x1B[22m"; + const GREEN: &str = "\x1B[32m"; + const RED: &str = "\x1B[31m"; + const GREY: &str = "\x1B[90m"; + const COLOR_RESET: &str = "\x1B[0m"; + const BOLD: &str = "\x1B[1m"; + const BOLD_RESET: &str = "\x1B[22m"; - let mut number_of_successes = 0; - let mut number_of_failures = 0; + let mut number_of_successes = 0; + let mut number_of_failures = 0; - let mut buf = BufWriter::new(stderr()); - while let Ok(event) = aggregator_events_rx.recv().await { - let ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted { - metadata_file_path, - mode, - case_status, - } = event - else { - continue; - }; + let mut buf = BufWriter::new(stderr()); + while let Ok(event) = aggregator_events_rx.recv().await { + let ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted { + metadata_file_path, + mode, + case_status, + } = event + else { + continue; + }; - let _ = writeln!(buf, "{} - {}", mode, metadata_file_path.display()); - for (case_idx, case_status) in case_status.into_iter() { - let _ = write!(buf, "\tCase 
Index {case_idx:>3}: "); - let _ = match case_status { - TestCaseStatus::Succeeded { steps_executed } => { - number_of_successes += 1; - writeln!( - buf, - "{}{}Case Succeeded{} - Steps Executed: {}{}", - GREEN, BOLD, BOLD_RESET, steps_executed, COLOR_RESET - ) - } - TestCaseStatus::Failed { reason } => { - number_of_failures += 1; - writeln!( - buf, - "{}{}Case Failed{} - Reason: {}{}", - RED, - BOLD, - BOLD_RESET, - reason.trim(), - COLOR_RESET, - ) - } - TestCaseStatus::Ignored { reason, .. } => writeln!( - buf, - "{}{}Case Ignored{} - Reason: {}{}", - GREY, - BOLD, - BOLD_RESET, - reason.trim(), - COLOR_RESET, - ), - }; - } - let _ = writeln!(buf); - } + let _ = writeln!(buf, "{} - {}", mode, metadata_file_path.display()); + for (case_idx, case_status) in case_status.into_iter() { + let _ = write!(buf, "\tCase Index {case_idx:>3}: "); + let _ = match case_status { + TestCaseStatus::Succeeded { steps_executed } => { + number_of_successes += 1; + writeln!( + buf, + "{}{}Case Succeeded{} - Steps Executed: {}{}", + GREEN, BOLD, BOLD_RESET, steps_executed, COLOR_RESET + ) + }, + TestCaseStatus::Failed { reason } => { + number_of_failures += 1; + writeln!( + buf, + "{}{}Case Failed{} - Reason: {}{}", + RED, + BOLD, + BOLD_RESET, + reason.trim(), + COLOR_RESET, + ) + }, + TestCaseStatus::Ignored { reason, .. } => writeln!( + buf, + "{}{}Case Ignored{} - Reason: {}{}", + GREY, + BOLD, + BOLD_RESET, + reason.trim(), + COLOR_RESET, + ), + }; + } + let _ = writeln!(buf); + } - // Summary at the end. - let _ = writeln!( - buf, - "{} cases: {}{}{} cases succeeded, {}{}{} cases failed in {} seconds", - number_of_successes + number_of_failures, - GREEN, - number_of_successes, - COLOR_RESET, - RED, - number_of_failures, - COLOR_RESET, - start.elapsed().as_secs() - ); + // Summary at the end. 
+ let _ = writeln!( + buf, + "{} cases: {}{}{} cases succeeded, {}{}{} cases failed in {} seconds", + number_of_successes + number_of_failures, + GREEN, + number_of_successes, + COLOR_RESET, + RED, + number_of_failures, + COLOR_RESET, + start.elapsed().as_secs() + ); } diff --git a/crates/core/src/differential_tests/execution_state.rs b/crates/core/src/differential_tests/execution_state.rs index 5cae329..a5707b9 100644 --- a/crates/core/src/differential_tests/execution_state.rs +++ b/crates/core/src/differential_tests/execution_state.rs @@ -1,8 +1,8 @@ use std::{collections::HashMap, path::PathBuf}; use alloy::{ - json_abi::JsonAbi, - primitives::{Address, U256}, + json_abi::JsonAbi, + primitives::{Address, U256}, }; use revive_dt_format::metadata::{ContractIdent, ContractInstance}; @@ -10,26 +10,23 @@ use revive_dt_format::metadata::{ContractIdent, ContractInstance}; #[derive(Clone)] /// The state associated with the test execution of one of the tests. pub struct ExecutionState { - /// The compiled contracts, these contracts have been compiled and have had the libraries linked - /// against them and therefore they're ready to be deployed on-demand. - pub compiled_contracts: HashMap>, + /// The compiled contracts, these contracts have been compiled and have had the libraries + /// linked against them and therefore they're ready to be deployed on-demand. + pub compiled_contracts: HashMap>, - /// A map of all of the deployed contracts and information about them. - pub deployed_contracts: HashMap, + /// A map of all of the deployed contracts and information about them. + pub deployed_contracts: HashMap, - /// This map stores the variables used for each one of the cases contained in the metadata file. - pub variables: HashMap, + /// This map stores the variables used for each one of the cases contained in the metadata + /// file. 
+ pub variables: HashMap, } impl ExecutionState { - pub fn new( - compiled_contracts: HashMap>, - deployed_contracts: HashMap, - ) -> Self { - Self { - compiled_contracts, - deployed_contracts, - variables: Default::default(), - } - } + pub fn new( + compiled_contracts: HashMap>, + deployed_contracts: HashMap, + ) -> Self { + Self { compiled_contracts, deployed_contracts, variables: Default::default() } + } } diff --git a/crates/core/src/helpers/cached_compiler.rs b/crates/core/src/helpers/cached_compiler.rs index f0c3044..2ea81de 100644 --- a/crates/core/src/helpers/cached_compiler.rs +++ b/crates/core/src/helpers/cached_compiler.rs @@ -2,10 +2,10 @@ //! be reused between runs. use std::{ - borrow::Cow, - collections::HashMap, - path::{Path, PathBuf}, - sync::{Arc, LazyLock}, + borrow::Cow, + collections::HashMap, + path::{Path, PathBuf}, + sync::{Arc, LazyLock}, }; use crate::Platform; @@ -23,33 +23,30 @@ use tokio::sync::{Mutex, RwLock, Semaphore}; use tracing::{Instrument, debug, debug_span, instrument}; pub struct CachedCompiler<'a> { - /// The cache that stores the compiled contracts. - artifacts_cache: ArtifactsCache, + /// The cache that stores the compiled contracts. + artifacts_cache: ArtifactsCache, - /// This is a mechanism that the cached compiler uses so that if multiple compilation requests - /// come in for the same contract we never compile all of them and only compile it once and all - /// other tasks that request this same compilation concurrently get the cached version. - cache_key_lock: RwLock, Arc>>>, + /// This is a mechanism that the cached compiler uses so that if multiple compilation requests + /// come in for the same contract we never compile all of them and only compile it once and all + /// other tasks that request this same compilation concurrently get the cached version. 
+ cache_key_lock: RwLock, Arc>>>, } impl<'a> CachedCompiler<'a> { - pub async fn new(path: impl AsRef, invalidate_cache: bool) -> Result { - let mut cache = ArtifactsCache::new(path); - if invalidate_cache { - cache = cache - .with_invalidated_cache() - .await - .context("Failed to invalidate compilation cache directory")?; - } - Ok(Self { - artifacts_cache: cache, - cache_key_lock: Default::default(), - }) - } + pub async fn new(path: impl AsRef, invalidate_cache: bool) -> Result { + let mut cache = ArtifactsCache::new(path); + if invalidate_cache { + cache = cache + .with_invalidated_cache() + .await + .context("Failed to invalidate compilation cache directory")?; + } + Ok(Self { artifacts_cache: cache, cache_key_lock: Default::default() }) + } - /// Compiles or gets the compilation artifacts from the cache. - #[allow(clippy::too_many_arguments)] - #[instrument( + /// Compiles or gets the compilation artifacts from the cache. + #[allow(clippy::too_many_arguments)] + #[instrument( level = "debug", skip_all, fields( @@ -59,317 +56,309 @@ impl<'a> CachedCompiler<'a> { ), err )] - pub async fn compile_contracts( - &self, - metadata: &'a Metadata, - metadata_file_path: &'a Path, - mode: Cow<'a, Mode>, - deployed_libraries: Option<&HashMap>, - compiler: &dyn SolidityCompiler, - platform: &dyn Platform, - reporter: &ExecutionSpecificReporter, - ) -> Result { - let cache_key = CacheKey { - compiler_identifier: platform.compiler_identifier(), - compiler_version: compiler.version().clone(), - metadata_file_path, - solc_mode: mode.clone(), - }; + pub async fn compile_contracts( + &self, + metadata: &'a Metadata, + metadata_file_path: &'a Path, + mode: Cow<'a, Mode>, + deployed_libraries: Option<&HashMap>, + compiler: &dyn SolidityCompiler, + platform: &dyn Platform, + reporter: &ExecutionSpecificReporter, + ) -> Result { + let cache_key = CacheKey { + compiler_identifier: platform.compiler_identifier(), + compiler_version: compiler.version().clone(), + metadata_file_path, + 
solc_mode: mode.clone(), + }; - let compilation_callback = || { - async move { - compile_contracts( - metadata - .directory() - .context("Failed to get metadata directory while preparing compilation")?, - metadata - .files_to_compile() - .context("Failed to enumerate files to compile from metadata")?, - &mode, - deployed_libraries, - compiler, - reporter, - ) - .map(|compilation_result| compilation_result.map(CacheValue::new)) - .await - } - .instrument(debug_span!( - "Running compilation for the cache key", - cache_key.compiler_identifier = %cache_key.compiler_identifier, - cache_key.compiler_version = %cache_key.compiler_version, - cache_key.metadata_file_path = %cache_key.metadata_file_path.display(), - cache_key.solc_mode = %cache_key.solc_mode, - )) - }; + let compilation_callback = || { + async move { + compile_contracts( + metadata + .directory() + .context("Failed to get metadata directory while preparing compilation")?, + metadata + .files_to_compile() + .context("Failed to enumerate files to compile from metadata")?, + &mode, + deployed_libraries, + compiler, + reporter, + ) + .map(|compilation_result| compilation_result.map(CacheValue::new)) + .await + } + .instrument(debug_span!( + "Running compilation for the cache key", + cache_key.compiler_identifier = %cache_key.compiler_identifier, + cache_key.compiler_version = %cache_key.compiler_version, + cache_key.metadata_file_path = %cache_key.metadata_file_path.display(), + cache_key.solc_mode = %cache_key.solc_mode, + )) + }; - let compiled_contracts = match deployed_libraries { - // If deployed libraries have been specified then we will re-compile the contract as it - // means that linking is required in this case. - Some(_) => { - debug!("Deployed libraries defined, recompilation must take place"); - debug!("Cache miss"); - compilation_callback() - .await - .context("Compilation callback for deployed libraries failed")? 
- .compiler_output - } - // If no deployed libraries are specified then we can follow the cached flow and attempt - // to lookup the compilation artifacts in the cache. - None => { - debug!("Deployed libraries undefined, attempting to make use of cache"); + let compiled_contracts = match deployed_libraries { + // If deployed libraries have been specified then we will re-compile the contract as it + // means that linking is required in this case. + Some(_) => { + debug!("Deployed libraries defined, recompilation must take place"); + debug!("Cache miss"); + compilation_callback() + .await + .context("Compilation callback for deployed libraries failed")? + .compiler_output + }, + // If no deployed libraries are specified then we can follow the cached flow and attempt + // to lookup the compilation artifacts in the cache. + None => { + debug!("Deployed libraries undefined, attempting to make use of cache"); - // Lock this specific cache key such that we do not get inconsistent state. We want - // that when multiple cases come in asking for the compilation artifacts then they - // don't all trigger a compilation if there's a cache miss. Hence, the lock here. - let read_guard = self.cache_key_lock.read().await; - let mutex = match read_guard.get(&cache_key).cloned() { - Some(value) => { - drop(read_guard); - value - } - None => { - drop(read_guard); - self.cache_key_lock - .write() - .await - .entry(cache_key.clone()) - .or_default() - .clone() - } - }; - let _guard = mutex.lock().await; + // Lock this specific cache key such that we do not get inconsistent state. We want + // that when multiple cases come in asking for the compilation artifacts then they + // don't all trigger a compilation if there's a cache miss. Hence, the lock here. 
+ let read_guard = self.cache_key_lock.read().await; + let mutex = match read_guard.get(&cache_key).cloned() { + Some(value) => { + drop(read_guard); + value + }, + None => { + drop(read_guard); + self.cache_key_lock + .write() + .await + .entry(cache_key.clone()) + .or_default() + .clone() + }, + }; + let _guard = mutex.lock().await; - match self.artifacts_cache.get(&cache_key).await { - Some(cache_value) => { - if deployed_libraries.is_some() { - reporter - .report_post_link_contracts_compilation_succeeded_event( - compiler.version().clone(), - compiler.path(), - true, - None, - cache_value.compiler_output.clone(), - ) - .expect("Can't happen"); - } else { - reporter - .report_pre_link_contracts_compilation_succeeded_event( - compiler.version().clone(), - compiler.path(), - true, - None, - cache_value.compiler_output.clone(), - ) - .expect("Can't happen"); - } - cache_value.compiler_output - } - None => { - let compiler_output = compilation_callback() - .await - .context("Compilation callback failed (cache miss path)")? 
- .compiler_output; - self.artifacts_cache - .insert( - &cache_key, - &CacheValue { - compiler_output: compiler_output.clone(), - }, - ) - .await - .context( - "Failed to write the cached value of the compilation artifacts", - )?; - compiler_output - } - } - } - }; + match self.artifacts_cache.get(&cache_key).await { + Some(cache_value) => { + if deployed_libraries.is_some() { + reporter + .report_post_link_contracts_compilation_succeeded_event( + compiler.version().clone(), + compiler.path(), + true, + None, + cache_value.compiler_output.clone(), + ) + .expect("Can't happen"); + } else { + reporter + .report_pre_link_contracts_compilation_succeeded_event( + compiler.version().clone(), + compiler.path(), + true, + None, + cache_value.compiler_output.clone(), + ) + .expect("Can't happen"); + } + cache_value.compiler_output + }, + None => { + let compiler_output = compilation_callback() + .await + .context("Compilation callback failed (cache miss path)")? + .compiler_output; + self.artifacts_cache + .insert( + &cache_key, + &CacheValue { compiler_output: compiler_output.clone() }, + ) + .await + .context( + "Failed to write the cached value of the compilation artifacts", + )?; + compiler_output + }, + } + }, + }; - Ok(compiled_contracts) - } + Ok(compiled_contracts) + } } async fn compile_contracts( - metadata_directory: impl AsRef, - mut files_to_compile: impl Iterator, - mode: &Mode, - deployed_libraries: Option<&HashMap>, - compiler: &dyn SolidityCompiler, - reporter: &ExecutionSpecificReporter, + metadata_directory: impl AsRef, + mut files_to_compile: impl Iterator, + mode: &Mode, + deployed_libraries: Option<&HashMap>, + compiler: &dyn SolidityCompiler, + reporter: &ExecutionSpecificReporter, ) -> Result { - // Puts a limit on how many compilations we can perform at any given instance which helps us - // with some of the errors we've been seeing with high concurrency on MacOS (we have not tried - // it on Linux so we don't know if these issues also persist there 
or not.) - static SPAWN_GATE: LazyLock = LazyLock::new(|| Semaphore::new(5)); - let _permit = SPAWN_GATE.acquire().await?; + // Puts a limit on how many compilations we can perform at any given instance which helps us + // with some of the errors we've been seeing with high concurrency on MacOS (we have not tried + // it on Linux so we don't know if these issues also persist there or not.) + static SPAWN_GATE: LazyLock = LazyLock::new(|| Semaphore::new(5)); + let _permit = SPAWN_GATE.acquire().await?; - let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref()) - .with_allowed_extension("sol") - .with_use_cached_fs(true) - .collect::>(); + let all_sources_in_dir = FilesWithExtensionIterator::new(metadata_directory.as_ref()) + .with_allowed_extension("sol") + .with_use_cached_fs(true) + .collect::>(); - let compilation = Compiler::new() - .with_allow_path(metadata_directory) - // Handling the modes - .with_optimization(mode.optimize_setting) - .with_pipeline(mode.pipeline) - // Adding the contract sources to the compiler. - .try_then(|compiler| { - files_to_compile.try_fold(compiler, |compiler, path| compiler.with_source(path)) - })? - // Adding the deployed libraries to the compiler. - .then(|compiler| { - deployed_libraries - .iter() - .flat_map(|value| value.iter()) - .map(|(instance, (ident, address, abi))| (instance, ident, address, abi)) - .flat_map(|(_, ident, address, _)| { - all_sources_in_dir - .iter() - .map(move |path| (ident, address, path)) - }) - .fold(compiler, |compiler, (ident, address, path)| { - compiler.with_library(path, ident.as_str(), *address) - }) - }); + let compilation = Compiler::new() + .with_allow_path(metadata_directory) + // Handling the modes + .with_optimization(mode.optimize_setting) + .with_pipeline(mode.pipeline) + // Adding the contract sources to the compiler. + .try_then(|compiler| { + files_to_compile.try_fold(compiler, |compiler, path| compiler.with_source(path)) + })? 
+ // Adding the deployed libraries to the compiler. + .then(|compiler| { + deployed_libraries + .iter() + .flat_map(|value| value.iter()) + .map(|(instance, (ident, address, abi))| (instance, ident, address, abi)) + .flat_map(|(_, ident, address, _)| { + all_sources_in_dir.iter().map(move |path| (ident, address, path)) + }) + .fold(compiler, |compiler, (ident, address, path)| { + compiler.with_library(path, ident.as_str(), *address) + }) + }); - let input = compilation.input().clone(); - let output = compilation.try_build(compiler).await; + let input = compilation.input().clone(); + let output = compilation.try_build(compiler).await; - match (output.as_ref(), deployed_libraries.is_some()) { - (Ok(output), true) => { - reporter - .report_post_link_contracts_compilation_succeeded_event( - compiler.version().clone(), - compiler.path(), - false, - input, - output.clone(), - ) - .expect("Can't happen"); - } - (Ok(output), false) => { - reporter - .report_pre_link_contracts_compilation_succeeded_event( - compiler.version().clone(), - compiler.path(), - false, - input, - output.clone(), - ) - .expect("Can't happen"); - } - (Err(err), true) => { - reporter - .report_post_link_contracts_compilation_failed_event( - compiler.version().clone(), - compiler.path().to_path_buf(), - input, - format!("{err:#}"), - ) - .expect("Can't happen"); - } - (Err(err), false) => { - reporter - .report_pre_link_contracts_compilation_failed_event( - compiler.version().clone(), - compiler.path().to_path_buf(), - input, - format!("{err:#}"), - ) - .expect("Can't happen"); - } - } + match (output.as_ref(), deployed_libraries.is_some()) { + (Ok(output), true) => { + reporter + .report_post_link_contracts_compilation_succeeded_event( + compiler.version().clone(), + compiler.path(), + false, + input, + output.clone(), + ) + .expect("Can't happen"); + }, + (Ok(output), false) => { + reporter + .report_pre_link_contracts_compilation_succeeded_event( + compiler.version().clone(), + compiler.path(), + 
false, + input, + output.clone(), + ) + .expect("Can't happen"); + }, + (Err(err), true) => { + reporter + .report_post_link_contracts_compilation_failed_event( + compiler.version().clone(), + compiler.path().to_path_buf(), + input, + format!("{err:#}"), + ) + .expect("Can't happen"); + }, + (Err(err), false) => { + reporter + .report_pre_link_contracts_compilation_failed_event( + compiler.version().clone(), + compiler.path().to_path_buf(), + input, + format!("{err:#}"), + ) + .expect("Can't happen"); + }, + } - output + output } struct ArtifactsCache { - path: PathBuf, + path: PathBuf, } impl ArtifactsCache { - pub fn new(path: impl AsRef) -> Self { - Self { - path: path.as_ref().to_path_buf(), - } - } + pub fn new(path: impl AsRef) -> Self { + Self { path: path.as_ref().to_path_buf() } + } - #[instrument(level = "debug", skip_all, err)] - pub async fn with_invalidated_cache(self) -> Result { - cacache::clear(self.path.as_path()) - .await - .map_err(Into::::into) - .with_context(|| format!("Failed to clear cache at {}", self.path.display()))?; - Ok(self) - } + #[instrument(level = "debug", skip_all, err)] + pub async fn with_invalidated_cache(self) -> Result { + cacache::clear(self.path.as_path()) + .await + .map_err(Into::::into) + .with_context(|| format!("Failed to clear cache at {}", self.path.display()))?; + Ok(self) + } - #[instrument(level = "debug", skip_all, err)] - pub async fn insert(&self, key: &CacheKey<'_>, value: &CacheValue) -> Result<()> { - let key = bson::to_vec(key).context("Failed to serialize cache key (bson)")?; - let value = bson::to_vec(value).context("Failed to serialize cache value (bson)")?; - cacache::write(self.path.as_path(), key.encode_hex(), value) - .await - .with_context(|| { - format!("Failed to write cache entry under {}", self.path.display()) - })?; - Ok(()) - } + #[instrument(level = "debug", skip_all, err)] + pub async fn insert(&self, key: &CacheKey<'_>, value: &CacheValue) -> Result<()> { + let key = 
bson::to_vec(key).context("Failed to serialize cache key (bson)")?; + let value = bson::to_vec(value).context("Failed to serialize cache value (bson)")?; + cacache::write(self.path.as_path(), key.encode_hex(), value) + .await + .with_context(|| { + format!("Failed to write cache entry under {}", self.path.display()) + })?; + Ok(()) + } - pub async fn get(&self, key: &CacheKey<'_>) -> Option { - let key = bson::to_vec(key).ok()?; - let value = cacache::read(self.path.as_path(), key.encode_hex()) - .await - .ok()?; - let value = bson::from_slice::(&value).ok()?; - Some(value) - } + pub async fn get(&self, key: &CacheKey<'_>) -> Option { + let key = bson::to_vec(key).ok()?; + let value = cacache::read(self.path.as_path(), key.encode_hex()).await.ok()?; + let value = bson::from_slice::(&value).ok()?; + Some(value) + } - #[instrument(level = "debug", skip_all, err)] - pub async fn get_or_insert_with( - &self, - key: &CacheKey<'_>, - callback: impl AsyncFnOnce() -> Result, - ) -> Result { - match self.get(key).await { - Some(value) => { - debug!("Cache hit"); - Ok(value) - } - None => { - debug!("Cache miss"); - let value = callback().await?; - self.insert(key, &value).await?; - Ok(value) - } - } - } + #[instrument(level = "debug", skip_all, err)] + pub async fn get_or_insert_with( + &self, + key: &CacheKey<'_>, + callback: impl AsyncFnOnce() -> Result, + ) -> Result { + match self.get(key).await { + Some(value) => { + debug!("Cache hit"); + Ok(value) + }, + None => { + debug!("Cache miss"); + let value = callback().await?; + self.insert(key, &value).await?; + Ok(value) + }, + } + } } #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize)] struct CacheKey<'a> { - /// The identifier of the used compiler. - compiler_identifier: CompilerIdentifier, + /// The identifier of the used compiler. + compiler_identifier: CompilerIdentifier, - /// The version of the compiler that was used to compile the artifacts. 
- compiler_version: Version, + /// The version of the compiler that was used to compile the artifacts. + compiler_version: Version, - /// The path of the metadata file that the compilation artifacts are for. - metadata_file_path: &'a Path, + /// The path of the metadata file that the compilation artifacts are for. + metadata_file_path: &'a Path, - /// The mode that the compilation artifacts where compiled with. - solc_mode: Cow<'a, Mode>, + /// The mode that the compilation artifacts where compiled with. + solc_mode: Cow<'a, Mode>, } #[derive(Clone, Debug, Serialize, Deserialize)] struct CacheValue { - /// The compiler output from the compilation run. - compiler_output: CompilerOutput, + /// The compiler output from the compilation run. + compiler_output: CompilerOutput, } impl CacheValue { - pub fn new(compiler_output: CompilerOutput) -> Self { - Self { compiler_output } - } + pub fn new(compiler_output: CompilerOutput) -> Self { + Self { compiler_output } + } } diff --git a/crates/core/src/helpers/metadata.rs b/crates/core/src/helpers/metadata.rs index 60f351b..adca03f 100644 --- a/crates/core/src/helpers/metadata.rs +++ b/crates/core/src/helpers/metadata.rs @@ -6,28 +6,28 @@ use tracing::{info, info_span, instrument}; /// corpus files and produces a map containing all of the [`MetadataFile`]s discovered. 
#[instrument(level = "debug", name = "Collecting Corpora", skip_all)] pub fn collect_metadata_files( - context: impl AsRef, + context: impl AsRef, ) -> anyhow::Result> { - let mut metadata_files = Vec::new(); + let mut metadata_files = Vec::new(); - let corpus_configuration = AsRef::::as_ref(&context); - for path in &corpus_configuration.paths { - let span = info_span!("Processing corpus file", path = %path.display()); - let _guard = span.enter(); + let corpus_configuration = AsRef::::as_ref(&context); + for path in &corpus_configuration.paths { + let span = info_span!("Processing corpus file", path = %path.display()); + let _guard = span.enter(); - let corpus = Corpus::try_from_path(path)?; - info!( - name = corpus.name(), - number_of_contained_paths = corpus.path_count(), - "Deserialized corpus file" - ); - metadata_files.extend(corpus.enumerate_tests()); - } + let corpus = Corpus::try_from_path(path)?; + info!( + name = corpus.name(), + number_of_contained_paths = corpus.path_count(), + "Deserialized corpus file" + ); + metadata_files.extend(corpus.enumerate_tests()); + } - // There's a possibility that there are certain paths that all lead to the same metadata files - // and therefore it's important that we sort them and then deduplicate them. - metadata_files.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path)); - metadata_files.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path); + // There's a possibility that there are certain paths that all lead to the same metadata files + // and therefore it's important that we sort them and then deduplicate them. 
+ metadata_files.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path)); + metadata_files.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path); - Ok(metadata_files) + Ok(metadata_files) } diff --git a/crates/core/src/helpers/pool.rs b/crates/core/src/helpers/pool.rs index 939a7e9..8e957ec 100644 --- a/crates/core/src/helpers/pool.rs +++ b/crates/core/src/helpers/pool.rs @@ -10,50 +10,45 @@ use revive_dt_node_interaction::EthereumNode; /// The node pool starts one or more [Node] which then can be accessed /// in a round robbin fashion. pub struct NodePool { - next: AtomicUsize, - nodes: Vec>, + next: AtomicUsize, + nodes: Vec>, } impl NodePool { - /// Create a new Pool. This will start as many nodes as there are workers in `config`. - pub async fn new(context: Context, platform: &dyn Platform) -> anyhow::Result { - let concurrency_configuration = AsRef::::as_ref(&context); - let nodes = concurrency_configuration.number_of_nodes; + /// Create a new Pool. This will start as many nodes as there are workers in `config`. + pub async fn new(context: Context, platform: &dyn Platform) -> anyhow::Result { + let concurrency_configuration = AsRef::::as_ref(&context); + let nodes = concurrency_configuration.number_of_nodes; - let mut handles = Vec::with_capacity(nodes); - for _ in 0..nodes { - let context = context.clone(); - handles.push(platform.new_node(context)?); - } + let mut handles = Vec::with_capacity(nodes); + for _ in 0..nodes { + let context = context.clone(); + handles.push(platform.new_node(context)?); + } - let mut nodes = Vec::with_capacity(nodes); - for handle in handles { - nodes.push( - handle - .join() - .map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error)) - .context("Failed to join node spawn thread")? 
- .context("Node failed to spawn")?, - ); - } + let mut nodes = Vec::with_capacity(nodes); + for handle in handles { + nodes.push( + handle + .join() + .map_err(|error| anyhow::anyhow!("failed to spawn node: {:?}", error)) + .context("Failed to join node spawn thread")? + .context("Node failed to spawn")?, + ); + } - let pre_transactions_tasks = nodes - .iter_mut() - .map(|node| node.pre_transactions()) - .collect::>(); - futures::future::try_join_all(pre_transactions_tasks) - .await - .context("Failed to run the pre-transactions task")?; + let pre_transactions_tasks = + nodes.iter_mut().map(|node| node.pre_transactions()).collect::>(); + futures::future::try_join_all(pre_transactions_tasks) + .await + .context("Failed to run the pre-transactions task")?; - Ok(Self { - nodes, - next: Default::default(), - }) - } + Ok(Self { nodes, next: Default::default() }) + } - /// Get a handle to the next node. - pub fn round_robbin(&self) -> &dyn EthereumNode { - let current = self.next.fetch_add(1, Ordering::SeqCst) % self.nodes.len(); - self.nodes.get(current).unwrap().as_ref() - } + /// Get a handle to the next node. 
+ pub fn round_robbin(&self) -> &dyn EthereumNode { + let current = self.next.fetch_add(1, Ordering::SeqCst) % self.nodes.len(); + self.nodes.get(current).unwrap().as_ref() + } } diff --git a/crates/core/src/helpers/test.rs b/crates/core/src/helpers/test.rs index dce60c4..a440df5 100644 --- a/crates/core/src/helpers/test.rs +++ b/crates/core/src/helpers/test.rs @@ -9,8 +9,8 @@ use serde_json::{Value, json}; use revive_dt_compiler::{Mode, SolidityCompiler}; use revive_dt_format::{ - case::{Case, CaseIdx}, - metadata::MetadataFile, + case::{Case, CaseIdx}, + metadata::MetadataFile, }; use revive_dt_node_interaction::EthereumNode; use revive_dt_report::{ExecutionSpecificReporter, Reporter, TestSpecificReporter, TestSpecifier}; @@ -19,154 +19,145 @@ use tracing::{debug, error, info}; use crate::{Platform, helpers::NodePool}; pub async fn create_test_definitions_stream<'a>( - // This is only required for creating the compiler objects and is not used anywhere else in the - // function. - context: &Context, - metadata_files: impl IntoIterator, - platforms_and_nodes: &'a BTreeMap, - reporter: Reporter, + // This is only required for creating the compiler objects and is not used anywhere else in the + // function. + context: &Context, + metadata_files: impl IntoIterator, + platforms_and_nodes: &'a BTreeMap, + reporter: Reporter, ) -> impl Stream> { - stream::iter( - metadata_files - .into_iter() - // Flatten over the cases. - .flat_map(|metadata_file| { - metadata_file - .cases - .iter() - .enumerate() - .map(move |(case_idx, case)| (metadata_file, case_idx, case)) - }) - // Flatten over the modes, prefer the case modes over the metadata file modes. - .flat_map(move |(metadata_file, case_idx, case)| { - let reporter = reporter.clone(); + stream::iter( + metadata_files + .into_iter() + // Flatten over the cases. 
+ .flat_map(|metadata_file| { + metadata_file + .cases + .iter() + .enumerate() + .map(move |(case_idx, case)| (metadata_file, case_idx, case)) + }) + // Flatten over the modes, prefer the case modes over the metadata file modes. + .flat_map(move |(metadata_file, case_idx, case)| { + let reporter = reporter.clone(); - let modes = case.modes.as_ref().or(metadata_file.modes.as_ref()); - let modes = match modes { - Some(modes) => EitherIter::A( - ParsedMode::many_to_modes(modes.iter()).map(Cow::<'static, _>::Owned), - ), - None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)), - }; + let modes = case.modes.as_ref().or(metadata_file.modes.as_ref()); + let modes = match modes { + Some(modes) => EitherIter::A( + ParsedMode::many_to_modes(modes.iter()).map(Cow::<'static, _>::Owned), + ), + None => EitherIter::B(Mode::all().map(Cow::<'static, _>::Borrowed)), + }; - modes.into_iter().map(move |mode| { - ( - metadata_file, - case_idx, - case, - mode.clone(), - reporter.test_specific_reporter(Arc::new(TestSpecifier { - solc_mode: mode.as_ref().clone(), - metadata_file_path: metadata_file.metadata_file_path.clone(), - case_idx: CaseIdx::new(case_idx), - })), - ) - }) - }) - // Inform the reporter of each one of the test cases that were discovered which we expect to - // run. - .inspect(|(_, _, _, _, reporter)| { - reporter - .report_test_case_discovery_event() - .expect("Can't fail"); - }), - ) - // Creating the Test Definition objects from all of the various objects we have and creating - // their required dependencies (e.g., compiler). 
- .filter_map( - move |(metadata_file, case_idx, case, mode, reporter)| async move { - let mut platforms = BTreeMap::new(); - for (platform, node_pool) in platforms_and_nodes.values() { - let node = node_pool.round_robbin(); - let compiler = platform - .new_compiler(context.clone(), mode.version.clone().map(Into::into)) - .await - .inspect_err(|err| { - error!( - ?err, - platform_identifier = %platform.platform_identifier(), - "Failed to instantiate the compiler" - ) - }) - .ok()?; + modes.into_iter().map(move |mode| { + ( + metadata_file, + case_idx, + case, + mode.clone(), + reporter.test_specific_reporter(Arc::new(TestSpecifier { + solc_mode: mode.as_ref().clone(), + metadata_file_path: metadata_file.metadata_file_path.clone(), + case_idx: CaseIdx::new(case_idx), + })), + ) + }) + }) + // Inform the reporter of each one of the test cases that were discovered which we + // expect to run. + .inspect(|(_, _, _, _, reporter)| { + reporter.report_test_case_discovery_event().expect("Can't fail"); + }), + ) + // Creating the Test Definition objects from all of the various objects we have and creating + // their required dependencies (e.g., compiler). 
+ .filter_map(move |(metadata_file, case_idx, case, mode, reporter)| async move { + let mut platforms = BTreeMap::new(); + for (platform, node_pool) in platforms_and_nodes.values() { + let node = node_pool.round_robbin(); + let compiler = platform + .new_compiler(context.clone(), mode.version.clone().map(Into::into)) + .await + .inspect_err(|err| { + error!( + ?err, + platform_identifier = %platform.platform_identifier(), + "Failed to instantiate the compiler" + ) + }) + .ok()?; - reporter - .report_node_assigned_event( - node.id(), - platform.platform_identifier(), - node.connection_string(), - ) - .expect("Can't fail"); + reporter + .report_node_assigned_event( + node.id(), + platform.platform_identifier(), + node.connection_string(), + ) + .expect("Can't fail"); - let reporter = - reporter.execution_specific_reporter(node.id(), platform.platform_identifier()); + let reporter = + reporter.execution_specific_reporter(node.id(), platform.platform_identifier()); - platforms.insert( - platform.platform_identifier(), - TestPlatformInformation { - platform: *platform, - node, - compiler, - reporter, - }, - ); - } + platforms.insert( + platform.platform_identifier(), + TestPlatformInformation { platform: *platform, node, compiler, reporter }, + ); + } - Some(TestDefinition { - /* Metadata file information */ - metadata: metadata_file, - metadata_file_path: metadata_file.metadata_file_path.as_path(), + Some(TestDefinition { + /* Metadata file information */ + metadata: metadata_file, + metadata_file_path: metadata_file.metadata_file_path.as_path(), - /* Mode Information */ - mode: mode.clone(), + /* Mode Information */ + mode: mode.clone(), - /* Case Information */ - case_idx: CaseIdx::new(case_idx), - case, + /* Case Information */ + case_idx: CaseIdx::new(case_idx), + case, - /* Platform and Node Assignment Information */ - platforms, + /* Platform and Node Assignment Information */ + platforms, - /* Reporter */ - reporter, - }) - }, - ) - // Filter out the test cases 
which are incompatible or that can't run in the current setup. - .filter_map(move |test| async move { - match test.check_compatibility() { - Ok(()) => Some(test), - Err((reason, additional_information)) => { - debug!( - metadata_file_path = %test.metadata.metadata_file_path.display(), - case_idx = %test.case_idx, - mode = %test.mode, - reason, - additional_information = - serde_json::to_string(&additional_information).unwrap(), - "Ignoring Test Case" - ); - test.reporter - .report_test_ignored_event( - reason.to_string(), - additional_information - .into_iter() - .map(|(k, v)| (k.into(), v)) - .collect::>(), - ) - .expect("Can't fail"); - None - } - } - }) - .inspect(|test| { - info!( - metadata_file_path = %test.metadata_file_path.display(), - case_idx = %test.case_idx, - mode = %test.mode, - "Created a test case definition" - ); - }) + /* Reporter */ + reporter, + }) + }) + // Filter out the test cases which are incompatible or that can't run in the current setup. + .filter_map(move |test| async move { + match test.check_compatibility() { + Ok(()) => Some(test), + Err((reason, additional_information)) => { + debug!( + metadata_file_path = %test.metadata.metadata_file_path.display(), + case_idx = %test.case_idx, + mode = %test.mode, + reason, + additional_information = + serde_json::to_string(&additional_information).unwrap(), + "Ignoring Test Case" + ); + test.reporter + .report_test_ignored_event( + reason.to_string(), + additional_information + .into_iter() + .map(|(k, v)| (k.into(), v)) + .collect::>(), + ) + .expect("Can't fail"); + None + }, + } + }) + .inspect(|test| { + info!( + metadata_file_path = %test.metadata_file_path.display(), + case_idx = %test.case_idx, + mode = %test.mode, + "Created a test case definition" + ); + }) } /// This is a full description of a differential test to run alongside the full metadata file, the @@ -174,146 +165,142 @@ pub async fn create_test_definitions_stream<'a>( /// these platforms that they should run on, the compilers 
to use, and everything else needed making /// it a complete description. pub struct TestDefinition<'a> { - /* Metadata file information */ - pub metadata: &'a MetadataFile, - pub metadata_file_path: &'a Path, + /* Metadata file information */ + pub metadata: &'a MetadataFile, + pub metadata_file_path: &'a Path, - /* Mode Information */ - pub mode: Cow<'a, Mode>, + /* Mode Information */ + pub mode: Cow<'a, Mode>, - /* Case Information */ - pub case_idx: CaseIdx, - pub case: &'a Case, + /* Case Information */ + pub case_idx: CaseIdx, + pub case: &'a Case, - /* Platform and Node Assignment Information */ - pub platforms: BTreeMap>, + /* Platform and Node Assignment Information */ + pub platforms: BTreeMap>, - /* Reporter */ - pub reporter: TestSpecificReporter, + /* Reporter */ + pub reporter: TestSpecificReporter, } impl<'a> TestDefinition<'a> { - /// Checks if this test can be ran with the current configuration. - pub fn check_compatibility(&self) -> TestCheckFunctionResult { - self.check_metadata_file_ignored()?; - self.check_case_file_ignored()?; - self.check_target_compatibility()?; - self.check_evm_version_compatibility()?; - self.check_compiler_compatibility()?; - Ok(()) - } + /// Checks if this test can be ran with the current configuration. + pub fn check_compatibility(&self) -> TestCheckFunctionResult { + self.check_metadata_file_ignored()?; + self.check_case_file_ignored()?; + self.check_target_compatibility()?; + self.check_evm_version_compatibility()?; + self.check_compiler_compatibility()?; + Ok(()) + } - /// Checks if the metadata file is ignored or not. - fn check_metadata_file_ignored(&self) -> TestCheckFunctionResult { - if self.metadata.ignore.is_some_and(|ignore| ignore) { - Err(("Metadata file is ignored.", indexmap! {})) - } else { - Ok(()) - } - } + /// Checks if the metadata file is ignored or not. 
+ fn check_metadata_file_ignored(&self) -> TestCheckFunctionResult { + if self.metadata.ignore.is_some_and(|ignore| ignore) { + Err(("Metadata file is ignored.", indexmap! {})) + } else { + Ok(()) + } + } - /// Checks if the case file is ignored or not. - fn check_case_file_ignored(&self) -> TestCheckFunctionResult { - if self.case.ignore.is_some_and(|ignore| ignore) { - Err(("Case is ignored.", indexmap! {})) - } else { - Ok(()) - } - } + /// Checks if the case file is ignored or not. + fn check_case_file_ignored(&self) -> TestCheckFunctionResult { + if self.case.ignore.is_some_and(|ignore| ignore) { + Err(("Case is ignored.", indexmap! {})) + } else { + Ok(()) + } + } - /// Checks if the platforms all support the desired targets in the metadata file. - fn check_target_compatibility(&self) -> TestCheckFunctionResult { - let mut error_map = indexmap! { - "test_desired_targets" => json!(self.metadata.targets.as_ref()), - }; - let mut is_allowed = true; - for (_, platform_information) in self.platforms.iter() { - let is_allowed_for_platform = match self.metadata.targets.as_ref() { - None => true, - Some(required_vm_identifiers) => { - required_vm_identifiers.contains(&platform_information.platform.vm_identifier()) - } - }; - is_allowed &= is_allowed_for_platform; - error_map.insert( - platform_information.platform.platform_identifier().into(), - json!(is_allowed_for_platform), - ); - } + /// Checks if the platforms all support the desired targets in the metadata file. + fn check_target_compatibility(&self) -> TestCheckFunctionResult { + let mut error_map = indexmap! 
{ + "test_desired_targets" => json!(self.metadata.targets.as_ref()), + }; + let mut is_allowed = true; + for (_, platform_information) in self.platforms.iter() { + let is_allowed_for_platform = match self.metadata.targets.as_ref() { + None => true, + Some(required_vm_identifiers) => + required_vm_identifiers.contains(&platform_information.platform.vm_identifier()), + }; + is_allowed &= is_allowed_for_platform; + error_map.insert( + platform_information.platform.platform_identifier().into(), + json!(is_allowed_for_platform), + ); + } - if is_allowed { - Ok(()) - } else { - Err(( - "One of the platforms do do not support the targets allowed by the test.", - error_map, - )) - } - } + if is_allowed { + Ok(()) + } else { + Err(( + "One of the platforms do do not support the targets allowed by the test.", + error_map, + )) + } + } - // Checks for the compatibility of the EVM version with the platforms specified. - fn check_evm_version_compatibility(&self) -> TestCheckFunctionResult { - let Some(evm_version_requirement) = self.metadata.required_evm_version else { - return Ok(()); - }; + // Checks for the compatibility of the EVM version with the platforms specified. + fn check_evm_version_compatibility(&self) -> TestCheckFunctionResult { + let Some(evm_version_requirement) = self.metadata.required_evm_version else { + return Ok(()); + }; - let mut error_map = indexmap! { - "test_desired_evm_version" => json!(self.metadata.required_evm_version), - }; - let mut is_allowed = true; - for (_, platform_information) in self.platforms.iter() { - let is_allowed_for_platform = - evm_version_requirement.matches(&platform_information.node.evm_version()); - is_allowed &= is_allowed_for_platform; - error_map.insert( - platform_information.platform.platform_identifier().into(), - json!(is_allowed_for_platform), - ); - } + let mut error_map = indexmap! 
{ + "test_desired_evm_version" => json!(self.metadata.required_evm_version), + }; + let mut is_allowed = true; + for (_, platform_information) in self.platforms.iter() { + let is_allowed_for_platform = + evm_version_requirement.matches(&platform_information.node.evm_version()); + is_allowed &= is_allowed_for_platform; + error_map.insert( + platform_information.platform.platform_identifier().into(), + json!(is_allowed_for_platform), + ); + } - if is_allowed { - Ok(()) - } else { - Err(( - "EVM version is incompatible for the platforms specified", - error_map, - )) - } - } + if is_allowed { + Ok(()) + } else { + Err(("EVM version is incompatible for the platforms specified", error_map)) + } + } - /// Checks if the platforms compilers support the mode that the test is for. - fn check_compiler_compatibility(&self) -> TestCheckFunctionResult { - let mut error_map = indexmap! { - "test_desired_evm_version" => json!(self.metadata.required_evm_version), - }; - let mut is_allowed = true; - for (_, platform_information) in self.platforms.iter() { - let is_allowed_for_platform = platform_information - .compiler - .supports_mode(self.mode.optimize_setting, self.mode.pipeline); - is_allowed &= is_allowed_for_platform; - error_map.insert( - platform_information.platform.platform_identifier().into(), - json!(is_allowed_for_platform), - ); - } + /// Checks if the platforms compilers support the mode that the test is for. + fn check_compiler_compatibility(&self) -> TestCheckFunctionResult { + let mut error_map = indexmap! 
{ + "test_desired_evm_version" => json!(self.metadata.required_evm_version), + }; + let mut is_allowed = true; + for (_, platform_information) in self.platforms.iter() { + let is_allowed_for_platform = platform_information + .compiler + .supports_mode(self.mode.optimize_setting, self.mode.pipeline); + is_allowed &= is_allowed_for_platform; + error_map.insert( + platform_information.platform.platform_identifier().into(), + json!(is_allowed_for_platform), + ); + } - if is_allowed { - Ok(()) - } else { - Err(( - "Compilers do not support this mode either for the provided platforms.", - error_map, - )) - } - } + if is_allowed { + Ok(()) + } else { + Err(( + "Compilers do not support this mode either for the provided platforms.", + error_map, + )) + } + } } pub struct TestPlatformInformation<'a> { - pub platform: &'a dyn Platform, - pub node: &'a dyn EthereumNode, - pub compiler: Box, - pub reporter: ExecutionSpecificReporter, + pub platform: &'a dyn Platform, + pub node: &'a dyn EthereumNode, + pub compiler: Box, + pub reporter: ExecutionSpecificReporter, } type TestCheckFunctionResult = Result<(), (&'static str, IndexMap<&'static str, Value>)>; diff --git a/crates/core/src/lib.rs b/crates/core/src/lib.rs index 954064c..de81ce4 100644 --- a/crates/core/src/lib.rs +++ b/crates/core/src/lib.rs @@ -7,8 +7,8 @@ pub mod differential_tests; pub mod helpers; use std::{ - pin::Pin, - thread::{self, JoinHandle}, + pin::Pin, + thread::{self, JoinHandle}, }; use alloy::genesis::Genesis; @@ -17,11 +17,11 @@ use revive_dt_common::types::*; use revive_dt_compiler::{SolidityCompiler, revive_resolc::Resolc, solc::Solc}; use revive_dt_config::*; use revive_dt_node::{ - Node, - node_implementations::{ - geth::GethNode, lighthouse_geth::LighthouseGethNode, substrate::SubstrateNode, - zombienet::ZombieNode, - }, + Node, + node_implementations::{ + geth::GethNode, lighthouse_geth::LighthouseGethNode, substrate::SubstrateNode, + zombienet::ZombieNode, + }, }; use 
revive_dt_node_interaction::EthereumNode; use tracing::info; @@ -32,506 +32,473 @@ pub use helpers::CachedCompiler; /// A trait that describes the interface for the platforms that are supported by the tool. #[allow(clippy::type_complexity)] pub trait Platform { - /// Returns the identifier of this platform. This is a combination of the node and the compiler - /// used. - fn platform_identifier(&self) -> PlatformIdentifier; + /// Returns the identifier of this platform. This is a combination of the node and the compiler + /// used. + fn platform_identifier(&self) -> PlatformIdentifier; - /// Returns a full identifier for the platform. - fn full_identifier(&self) -> (NodeIdentifier, VmIdentifier, CompilerIdentifier) { - ( - self.node_identifier(), - self.vm_identifier(), - self.compiler_identifier(), - ) - } + /// Returns a full identifier for the platform. + fn full_identifier(&self) -> (NodeIdentifier, VmIdentifier, CompilerIdentifier) { + (self.node_identifier(), self.vm_identifier(), self.compiler_identifier()) + } - /// Returns the identifier of the node used. - fn node_identifier(&self) -> NodeIdentifier; + /// Returns the identifier of the node used. + fn node_identifier(&self) -> NodeIdentifier; - /// Returns the identifier of the vm used. - fn vm_identifier(&self) -> VmIdentifier; + /// Returns the identifier of the vm used. + fn vm_identifier(&self) -> VmIdentifier; - /// Returns the identifier of the compiler used. - fn compiler_identifier(&self) -> CompilerIdentifier; + /// Returns the identifier of the compiler used. + fn compiler_identifier(&self) -> CompilerIdentifier; - /// Creates a new node for the platform by spawning a new thread, creating the node object, - /// initializing it, spawning it, and waiting for it to start up. 
- fn new_node( - &self, - context: Context, - ) -> anyhow::Result>>>; + /// Creates a new node for the platform by spawning a new thread, creating the node object, + /// initializing it, spawning it, and waiting for it to start up. + fn new_node( + &self, + context: Context, + ) -> anyhow::Result>>>; - /// Creates a new compiler for the provided platform - fn new_compiler( - &self, - context: Context, - version: Option, - ) -> Pin>>>>; + /// Creates a new compiler for the provided platform + fn new_compiler( + &self, + context: Context, + version: Option, + ) -> Pin>>>>; } #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)] pub struct GethEvmSolcPlatform; impl Platform for GethEvmSolcPlatform { - fn platform_identifier(&self) -> PlatformIdentifier { - PlatformIdentifier::GethEvmSolc - } + fn platform_identifier(&self) -> PlatformIdentifier { + PlatformIdentifier::GethEvmSolc + } - fn node_identifier(&self) -> NodeIdentifier { - NodeIdentifier::Geth - } + fn node_identifier(&self) -> NodeIdentifier { + NodeIdentifier::Geth + } - fn vm_identifier(&self) -> VmIdentifier { - VmIdentifier::Evm - } + fn vm_identifier(&self) -> VmIdentifier { + VmIdentifier::Evm + } - fn compiler_identifier(&self) -> CompilerIdentifier { - CompilerIdentifier::Solc - } + fn compiler_identifier(&self) -> CompilerIdentifier { + CompilerIdentifier::Solc + } - fn new_node( - &self, - context: Context, - ) -> anyhow::Result>>> { - let genesis_configuration = AsRef::::as_ref(&context); - let genesis = genesis_configuration.genesis()?.clone(); - Ok(thread::spawn(move || { - let node = GethNode::new(context); - let node = spawn_node::(node, genesis)?; - Ok(Box::new(node) as Box<_>) - })) - } + fn new_node( + &self, + context: Context, + ) -> anyhow::Result>>> { + let genesis_configuration = AsRef::::as_ref(&context); + let genesis = genesis_configuration.genesis()?.clone(); + Ok(thread::spawn(move || { + let node = GethNode::new(context); + let node = spawn_node::(node, 
genesis)?; + Ok(Box::new(node) as Box<_>) + })) + } - fn new_compiler( - &self, - context: Context, - version: Option, - ) -> Pin>>>> { - Box::pin(async move { - let compiler = Solc::new(context, version).await; - compiler.map(|compiler| Box::new(compiler) as Box) - }) - } + fn new_compiler( + &self, + context: Context, + version: Option, + ) -> Pin>>>> { + Box::pin(async move { + let compiler = Solc::new(context, version).await; + compiler.map(|compiler| Box::new(compiler) as Box) + }) + } } #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)] pub struct LighthouseGethEvmSolcPlatform; impl Platform for LighthouseGethEvmSolcPlatform { - fn platform_identifier(&self) -> PlatformIdentifier { - PlatformIdentifier::LighthouseGethEvmSolc - } + fn platform_identifier(&self) -> PlatformIdentifier { + PlatformIdentifier::LighthouseGethEvmSolc + } - fn node_identifier(&self) -> NodeIdentifier { - NodeIdentifier::LighthouseGeth - } + fn node_identifier(&self) -> NodeIdentifier { + NodeIdentifier::LighthouseGeth + } - fn vm_identifier(&self) -> VmIdentifier { - VmIdentifier::Evm - } + fn vm_identifier(&self) -> VmIdentifier { + VmIdentifier::Evm + } - fn compiler_identifier(&self) -> CompilerIdentifier { - CompilerIdentifier::Solc - } + fn compiler_identifier(&self) -> CompilerIdentifier { + CompilerIdentifier::Solc + } - fn new_node( - &self, - context: Context, - ) -> anyhow::Result>>> { - let genesis_configuration = AsRef::::as_ref(&context); - let genesis = genesis_configuration.genesis()?.clone(); - Ok(thread::spawn(move || { - let node = LighthouseGethNode::new(context); - let node = spawn_node::(node, genesis)?; - Ok(Box::new(node) as Box<_>) - })) - } + fn new_node( + &self, + context: Context, + ) -> anyhow::Result>>> { + let genesis_configuration = AsRef::::as_ref(&context); + let genesis = genesis_configuration.genesis()?.clone(); + Ok(thread::spawn(move || { + let node = LighthouseGethNode::new(context); + let node = spawn_node::(node, 
genesis)?; + Ok(Box::new(node) as Box<_>) + })) + } - fn new_compiler( - &self, - context: Context, - version: Option, - ) -> Pin>>>> { - Box::pin(async move { - let compiler = Solc::new(context, version).await; - compiler.map(|compiler| Box::new(compiler) as Box) - }) - } + fn new_compiler( + &self, + context: Context, + version: Option, + ) -> Pin>>>> { + Box::pin(async move { + let compiler = Solc::new(context, version).await; + compiler.map(|compiler| Box::new(compiler) as Box) + }) + } } #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)] pub struct KitchensinkPolkavmResolcPlatform; impl Platform for KitchensinkPolkavmResolcPlatform { - fn platform_identifier(&self) -> PlatformIdentifier { - PlatformIdentifier::KitchensinkPolkavmResolc - } + fn platform_identifier(&self) -> PlatformIdentifier { + PlatformIdentifier::KitchensinkPolkavmResolc + } - fn node_identifier(&self) -> NodeIdentifier { - NodeIdentifier::Kitchensink - } + fn node_identifier(&self) -> NodeIdentifier { + NodeIdentifier::Kitchensink + } - fn vm_identifier(&self) -> VmIdentifier { - VmIdentifier::PolkaVM - } + fn vm_identifier(&self) -> VmIdentifier { + VmIdentifier::PolkaVM + } - fn compiler_identifier(&self) -> CompilerIdentifier { - CompilerIdentifier::Resolc - } + fn compiler_identifier(&self) -> CompilerIdentifier { + CompilerIdentifier::Resolc + } - fn new_node( - &self, - context: Context, - ) -> anyhow::Result>>> { - let genesis_configuration = AsRef::::as_ref(&context); - let kitchensink_path = AsRef::::as_ref(&context) - .path - .clone(); - let genesis = genesis_configuration.genesis()?.clone(); - Ok(thread::spawn(move || { - let node = SubstrateNode::new( - kitchensink_path, - SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND, - context, - ); - let node = spawn_node(node, genesis)?; - Ok(Box::new(node) as Box<_>) - })) - } + fn new_node( + &self, + context: Context, + ) -> anyhow::Result>>> { + let genesis_configuration = AsRef::::as_ref(&context); + let 
kitchensink_path = AsRef::::as_ref(&context).path.clone(); + let genesis = genesis_configuration.genesis()?.clone(); + Ok(thread::spawn(move || { + let node = SubstrateNode::new( + kitchensink_path, + SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND, + context, + ); + let node = spawn_node(node, genesis)?; + Ok(Box::new(node) as Box<_>) + })) + } - fn new_compiler( - &self, - context: Context, - version: Option, - ) -> Pin>>>> { - Box::pin(async move { - let compiler = Resolc::new(context, version).await; - compiler.map(|compiler| Box::new(compiler) as Box) - }) - } + fn new_compiler( + &self, + context: Context, + version: Option, + ) -> Pin>>>> { + Box::pin(async move { + let compiler = Resolc::new(context, version).await; + compiler.map(|compiler| Box::new(compiler) as Box) + }) + } } #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)] pub struct KitchensinkRevmSolcPlatform; impl Platform for KitchensinkRevmSolcPlatform { - fn platform_identifier(&self) -> PlatformIdentifier { - PlatformIdentifier::KitchensinkRevmSolc - } + fn platform_identifier(&self) -> PlatformIdentifier { + PlatformIdentifier::KitchensinkRevmSolc + } - fn node_identifier(&self) -> NodeIdentifier { - NodeIdentifier::Kitchensink - } + fn node_identifier(&self) -> NodeIdentifier { + NodeIdentifier::Kitchensink + } - fn vm_identifier(&self) -> VmIdentifier { - VmIdentifier::Evm - } + fn vm_identifier(&self) -> VmIdentifier { + VmIdentifier::Evm + } - fn compiler_identifier(&self) -> CompilerIdentifier { - CompilerIdentifier::Solc - } + fn compiler_identifier(&self) -> CompilerIdentifier { + CompilerIdentifier::Solc + } - fn new_node( - &self, - context: Context, - ) -> anyhow::Result>>> { - let genesis_configuration = AsRef::::as_ref(&context); - let kitchensink_path = AsRef::::as_ref(&context) - .path - .clone(); - let genesis = genesis_configuration.genesis()?.clone(); - Ok(thread::spawn(move || { - let node = SubstrateNode::new( - kitchensink_path, - 
SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND, - context, - ); - let node = spawn_node(node, genesis)?; - Ok(Box::new(node) as Box<_>) - })) - } + fn new_node( + &self, + context: Context, + ) -> anyhow::Result>>> { + let genesis_configuration = AsRef::::as_ref(&context); + let kitchensink_path = AsRef::::as_ref(&context).path.clone(); + let genesis = genesis_configuration.genesis()?.clone(); + Ok(thread::spawn(move || { + let node = SubstrateNode::new( + kitchensink_path, + SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND, + context, + ); + let node = spawn_node(node, genesis)?; + Ok(Box::new(node) as Box<_>) + })) + } - fn new_compiler( - &self, - context: Context, - version: Option, - ) -> Pin>>>> { - Box::pin(async move { - let compiler = Solc::new(context, version).await; - compiler.map(|compiler| Box::new(compiler) as Box) - }) - } + fn new_compiler( + &self, + context: Context, + version: Option, + ) -> Pin>>>> { + Box::pin(async move { + let compiler = Solc::new(context, version).await; + compiler.map(|compiler| Box::new(compiler) as Box) + }) + } } #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)] pub struct ReviveDevNodePolkavmResolcPlatform; impl Platform for ReviveDevNodePolkavmResolcPlatform { - fn platform_identifier(&self) -> PlatformIdentifier { - PlatformIdentifier::ReviveDevNodePolkavmResolc - } + fn platform_identifier(&self) -> PlatformIdentifier { + PlatformIdentifier::ReviveDevNodePolkavmResolc + } - fn node_identifier(&self) -> NodeIdentifier { - NodeIdentifier::ReviveDevNode - } + fn node_identifier(&self) -> NodeIdentifier { + NodeIdentifier::ReviveDevNode + } - fn vm_identifier(&self) -> VmIdentifier { - VmIdentifier::PolkaVM - } + fn vm_identifier(&self) -> VmIdentifier { + VmIdentifier::PolkaVM + } - fn compiler_identifier(&self) -> CompilerIdentifier { - CompilerIdentifier::Resolc - } + fn compiler_identifier(&self) -> CompilerIdentifier { + CompilerIdentifier::Resolc + } - fn new_node( - &self, - 
context: Context, - ) -> anyhow::Result>>> { - let genesis_configuration = AsRef::::as_ref(&context); - let revive_dev_node_path = AsRef::::as_ref(&context) - .path - .clone(); - let genesis = genesis_configuration.genesis()?.clone(); - Ok(thread::spawn(move || { - let node = SubstrateNode::new( - revive_dev_node_path, - SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND, - context, - ); - let node = spawn_node(node, genesis)?; - Ok(Box::new(node) as Box<_>) - })) - } + fn new_node( + &self, + context: Context, + ) -> anyhow::Result>>> { + let genesis_configuration = AsRef::::as_ref(&context); + let revive_dev_node_path = + AsRef::::as_ref(&context).path.clone(); + let genesis = genesis_configuration.genesis()?.clone(); + Ok(thread::spawn(move || { + let node = SubstrateNode::new( + revive_dev_node_path, + SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND, + context, + ); + let node = spawn_node(node, genesis)?; + Ok(Box::new(node) as Box<_>) + })) + } - fn new_compiler( - &self, - context: Context, - version: Option, - ) -> Pin>>>> { - Box::pin(async move { - let compiler = Resolc::new(context, version).await; - compiler.map(|compiler| Box::new(compiler) as Box) - }) - } + fn new_compiler( + &self, + context: Context, + version: Option, + ) -> Pin>>>> { + Box::pin(async move { + let compiler = Resolc::new(context, version).await; + compiler.map(|compiler| Box::new(compiler) as Box) + }) + } } #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)] pub struct ReviveDevNodeRevmSolcPlatform; impl Platform for ReviveDevNodeRevmSolcPlatform { - fn platform_identifier(&self) -> PlatformIdentifier { - PlatformIdentifier::ReviveDevNodeRevmSolc - } + fn platform_identifier(&self) -> PlatformIdentifier { + PlatformIdentifier::ReviveDevNodeRevmSolc + } - fn node_identifier(&self) -> NodeIdentifier { - NodeIdentifier::ReviveDevNode - } + fn node_identifier(&self) -> NodeIdentifier { + NodeIdentifier::ReviveDevNode + } - fn vm_identifier(&self) 
-> VmIdentifier { - VmIdentifier::Evm - } + fn vm_identifier(&self) -> VmIdentifier { + VmIdentifier::Evm + } - fn compiler_identifier(&self) -> CompilerIdentifier { - CompilerIdentifier::Solc - } + fn compiler_identifier(&self) -> CompilerIdentifier { + CompilerIdentifier::Solc + } - fn new_node( - &self, - context: Context, - ) -> anyhow::Result>>> { - let genesis_configuration = AsRef::::as_ref(&context); - let revive_dev_node_path = AsRef::::as_ref(&context) - .path - .clone(); - let genesis = genesis_configuration.genesis()?.clone(); - Ok(thread::spawn(move || { - let node = SubstrateNode::new( - revive_dev_node_path, - SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND, - context, - ); - let node = spawn_node(node, genesis)?; - Ok(Box::new(node) as Box<_>) - })) - } + fn new_node( + &self, + context: Context, + ) -> anyhow::Result>>> { + let genesis_configuration = AsRef::::as_ref(&context); + let revive_dev_node_path = + AsRef::::as_ref(&context).path.clone(); + let genesis = genesis_configuration.genesis()?.clone(); + Ok(thread::spawn(move || { + let node = SubstrateNode::new( + revive_dev_node_path, + SubstrateNode::REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND, + context, + ); + let node = spawn_node(node, genesis)?; + Ok(Box::new(node) as Box<_>) + })) + } - fn new_compiler( - &self, - context: Context, - version: Option, - ) -> Pin>>>> { - Box::pin(async move { - let compiler = Solc::new(context, version).await; - compiler.map(|compiler| Box::new(compiler) as Box) - }) - } + fn new_compiler( + &self, + context: Context, + version: Option, + ) -> Pin>>>> { + Box::pin(async move { + let compiler = Solc::new(context, version).await; + compiler.map(|compiler| Box::new(compiler) as Box) + }) + } } #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Default, Hash)] pub struct ZombienetPolkavmResolcPlatform; impl Platform for ZombienetPolkavmResolcPlatform { - fn platform_identifier(&self) -> PlatformIdentifier { - 
PlatformIdentifier::ZombienetPolkavmResolc - } + fn platform_identifier(&self) -> PlatformIdentifier { + PlatformIdentifier::ZombienetPolkavmResolc + } - fn node_identifier(&self) -> NodeIdentifier { - NodeIdentifier::Zombienet - } + fn node_identifier(&self) -> NodeIdentifier { + NodeIdentifier::Zombienet + } - fn vm_identifier(&self) -> VmIdentifier { - VmIdentifier::PolkaVM - } + fn vm_identifier(&self) -> VmIdentifier { + VmIdentifier::PolkaVM + } - fn compiler_identifier(&self) -> CompilerIdentifier { - CompilerIdentifier::Resolc - } + fn compiler_identifier(&self) -> CompilerIdentifier { + CompilerIdentifier::Resolc + } - fn new_node( - &self, - context: Context, - ) -> anyhow::Result>>> { - let genesis_configuration = AsRef::::as_ref(&context); - let polkadot_parachain_path = AsRef::::as_ref(&context) - .path - .clone(); - let genesis = genesis_configuration.genesis()?.clone(); - Ok(thread::spawn(move || { - let node = ZombieNode::new(polkadot_parachain_path, context); - let node = spawn_node(node, genesis)?; - Ok(Box::new(node) as Box<_>) - })) - } + fn new_node( + &self, + context: Context, + ) -> anyhow::Result>>> { + let genesis_configuration = AsRef::::as_ref(&context); + let polkadot_parachain_path = + AsRef::::as_ref(&context).path.clone(); + let genesis = genesis_configuration.genesis()?.clone(); + Ok(thread::spawn(move || { + let node = ZombieNode::new(polkadot_parachain_path, context); + let node = spawn_node(node, genesis)?; + Ok(Box::new(node) as Box<_>) + })) + } - fn new_compiler( - &self, - context: Context, - version: Option, - ) -> Pin>>>> { - Box::pin(async move { - let compiler = Solc::new(context, version).await; - compiler.map(|compiler| Box::new(compiler) as Box) - }) - } + fn new_compiler( + &self, + context: Context, + version: Option, + ) -> Pin>>>> { + Box::pin(async move { + let compiler = Solc::new(context, version).await; + compiler.map(|compiler| Box::new(compiler) as Box) + }) + } } #[derive(Clone, Copy, Debug, PartialEq, Eq, 
PartialOrd, Ord, Default, Hash)] pub struct ZombienetRevmSolcPlatform; impl Platform for ZombienetRevmSolcPlatform { - fn platform_identifier(&self) -> PlatformIdentifier { - PlatformIdentifier::ZombienetRevmSolc - } + fn platform_identifier(&self) -> PlatformIdentifier { + PlatformIdentifier::ZombienetRevmSolc + } - fn node_identifier(&self) -> NodeIdentifier { - NodeIdentifier::Zombienet - } + fn node_identifier(&self) -> NodeIdentifier { + NodeIdentifier::Zombienet + } - fn vm_identifier(&self) -> VmIdentifier { - VmIdentifier::Evm - } + fn vm_identifier(&self) -> VmIdentifier { + VmIdentifier::Evm + } - fn compiler_identifier(&self) -> CompilerIdentifier { - CompilerIdentifier::Solc - } + fn compiler_identifier(&self) -> CompilerIdentifier { + CompilerIdentifier::Solc + } - fn new_node( - &self, - context: Context, - ) -> anyhow::Result>>> { - let genesis_configuration = AsRef::::as_ref(&context); - let polkadot_parachain_path = AsRef::::as_ref(&context) - .path - .clone(); - let genesis = genesis_configuration.genesis()?.clone(); - Ok(thread::spawn(move || { - let node = ZombieNode::new(polkadot_parachain_path, context); - let node = spawn_node(node, genesis)?; - Ok(Box::new(node) as Box<_>) - })) - } + fn new_node( + &self, + context: Context, + ) -> anyhow::Result>>> { + let genesis_configuration = AsRef::::as_ref(&context); + let polkadot_parachain_path = + AsRef::::as_ref(&context).path.clone(); + let genesis = genesis_configuration.genesis()?.clone(); + Ok(thread::spawn(move || { + let node = ZombieNode::new(polkadot_parachain_path, context); + let node = spawn_node(node, genesis)?; + Ok(Box::new(node) as Box<_>) + })) + } - fn new_compiler( - &self, - context: Context, - version: Option, - ) -> Pin>>>> { - Box::pin(async move { - let compiler = Solc::new(context, version).await; - compiler.map(|compiler| Box::new(compiler) as Box) - }) - } + fn new_compiler( + &self, + context: Context, + version: Option, + ) -> Pin>>>> { + Box::pin(async move { + let 
compiler = Solc::new(context, version).await; + compiler.map(|compiler| Box::new(compiler) as Box) + }) + } } impl From for Box { - fn from(value: PlatformIdentifier) -> Self { - match value { - PlatformIdentifier::GethEvmSolc => Box::new(GethEvmSolcPlatform) as Box<_>, - PlatformIdentifier::LighthouseGethEvmSolc => { - Box::new(LighthouseGethEvmSolcPlatform) as Box<_> - } - PlatformIdentifier::KitchensinkPolkavmResolc => { - Box::new(KitchensinkPolkavmResolcPlatform) as Box<_> - } - PlatformIdentifier::KitchensinkRevmSolc => { - Box::new(KitchensinkRevmSolcPlatform) as Box<_> - } - PlatformIdentifier::ReviveDevNodePolkavmResolc => { - Box::new(ReviveDevNodePolkavmResolcPlatform) as Box<_> - } - PlatformIdentifier::ReviveDevNodeRevmSolc => { - Box::new(ReviveDevNodeRevmSolcPlatform) as Box<_> - } - PlatformIdentifier::ZombienetPolkavmResolc => { - Box::new(ZombienetPolkavmResolcPlatform) as Box<_> - } - PlatformIdentifier::ZombienetRevmSolc => Box::new(ZombienetRevmSolcPlatform) as Box<_>, - } - } + fn from(value: PlatformIdentifier) -> Self { + match value { + PlatformIdentifier::GethEvmSolc => Box::new(GethEvmSolcPlatform) as Box<_>, + PlatformIdentifier::LighthouseGethEvmSolc => + Box::new(LighthouseGethEvmSolcPlatform) as Box<_>, + PlatformIdentifier::KitchensinkPolkavmResolc => + Box::new(KitchensinkPolkavmResolcPlatform) as Box<_>, + PlatformIdentifier::KitchensinkRevmSolc => + Box::new(KitchensinkRevmSolcPlatform) as Box<_>, + PlatformIdentifier::ReviveDevNodePolkavmResolc => + Box::new(ReviveDevNodePolkavmResolcPlatform) as Box<_>, + PlatformIdentifier::ReviveDevNodeRevmSolc => + Box::new(ReviveDevNodeRevmSolcPlatform) as Box<_>, + PlatformIdentifier::ZombienetPolkavmResolc => + Box::new(ZombienetPolkavmResolcPlatform) as Box<_>, + PlatformIdentifier::ZombienetRevmSolc => Box::new(ZombienetRevmSolcPlatform) as Box<_>, + } + } } impl From for &dyn Platform { - fn from(value: PlatformIdentifier) -> Self { - match value { - PlatformIdentifier::GethEvmSolc => 
&GethEvmSolcPlatform as &dyn Platform, - PlatformIdentifier::LighthouseGethEvmSolc => { - &LighthouseGethEvmSolcPlatform as &dyn Platform - } - PlatformIdentifier::KitchensinkPolkavmResolc => { - &KitchensinkPolkavmResolcPlatform as &dyn Platform - } - PlatformIdentifier::KitchensinkRevmSolc => { - &KitchensinkRevmSolcPlatform as &dyn Platform - } - PlatformIdentifier::ReviveDevNodePolkavmResolc => { - &ReviveDevNodePolkavmResolcPlatform as &dyn Platform - } - PlatformIdentifier::ReviveDevNodeRevmSolc => { - &ReviveDevNodeRevmSolcPlatform as &dyn Platform - } - PlatformIdentifier::ZombienetPolkavmResolc => { - &ZombienetPolkavmResolcPlatform as &dyn Platform - } - PlatformIdentifier::ZombienetRevmSolc => &ZombienetRevmSolcPlatform as &dyn Platform, - } - } + fn from(value: PlatformIdentifier) -> Self { + match value { + PlatformIdentifier::GethEvmSolc => &GethEvmSolcPlatform as &dyn Platform, + PlatformIdentifier::LighthouseGethEvmSolc => + &LighthouseGethEvmSolcPlatform as &dyn Platform, + PlatformIdentifier::KitchensinkPolkavmResolc => + &KitchensinkPolkavmResolcPlatform as &dyn Platform, + PlatformIdentifier::KitchensinkRevmSolc => + &KitchensinkRevmSolcPlatform as &dyn Platform, + PlatformIdentifier::ReviveDevNodePolkavmResolc => + &ReviveDevNodePolkavmResolcPlatform as &dyn Platform, + PlatformIdentifier::ReviveDevNodeRevmSolc => + &ReviveDevNodeRevmSolcPlatform as &dyn Platform, + PlatformIdentifier::ZombienetPolkavmResolc => + &ZombienetPolkavmResolcPlatform as &dyn Platform, + PlatformIdentifier::ZombienetRevmSolc => &ZombienetRevmSolcPlatform as &dyn Platform, + } + } } fn spawn_node( - mut node: T, - genesis: Genesis, + mut node: T, + genesis: Genesis, ) -> anyhow::Result { - info!( - id = node.id(), - connection_string = node.connection_string(), - "Spawning node" - ); - node.spawn(genesis) - .context("Failed to spawn node process")?; - info!( - id = node.id(), - connection_string = node.connection_string(), - "Spawned node" - ); - Ok(node) + info!(id = 
node.id(), connection_string = node.connection_string(), "Spawning node"); + node.spawn(genesis).context("Failed to spawn node process")?; + info!(id = node.id(), connection_string = node.connection_string(), "Spawned node"); + Ok(node) } diff --git a/crates/core/src/main.rs b/crates/core/src/main.rs index 12cd8bb..fd4992b 100644 --- a/crates/core/src/main.rs +++ b/crates/core/src/main.rs @@ -13,69 +13,69 @@ use revive_dt_core::Platform; use revive_dt_format::metadata::Metadata; use crate::{ - differential_benchmarks::handle_differential_benchmarks, - differential_tests::handle_differential_tests, + differential_benchmarks::handle_differential_benchmarks, + differential_tests::handle_differential_tests, }; fn main() -> anyhow::Result<()> { - let (writer, _guard) = tracing_appender::non_blocking::NonBlockingBuilder::default() - .lossy(false) - // Assuming that each line contains 255 characters and that each character is one byte, then - // this means that our buffer is about 4GBs large. - .buffered_lines_limit(0x1000000) - .thread_name("buffered writer") - .finish(std::io::stdout()); + let (writer, _guard) = tracing_appender::non_blocking::NonBlockingBuilder::default() + .lossy(false) + // Assuming that each line contains 255 characters and that each character is one byte, then + // this means that our buffer is about 4GBs large. 
+ .buffered_lines_limit(0x1000000) + .thread_name("buffered writer") + .finish(std::io::stdout()); - let subscriber = FmtSubscriber::builder() - .with_writer(writer) - .with_thread_ids(false) - .with_thread_names(false) - .with_env_filter(EnvFilter::from_default_env()) - .with_ansi(false) - .pretty() - .finish(); - tracing::subscriber::set_global_default(subscriber)?; - info!("Differential testing tool is starting"); + let subscriber = FmtSubscriber::builder() + .with_writer(writer) + .with_thread_ids(false) + .with_thread_names(false) + .with_env_filter(EnvFilter::from_default_env()) + .with_ansi(false) + .pretty() + .finish(); + tracing::subscriber::set_global_default(subscriber)?; + info!("Differential testing tool is starting"); - let context = Context::try_parse()?; - let (reporter, report_aggregator_task) = ReportAggregator::new(context.clone()).into_task(); + let context = Context::try_parse()?; + let (reporter, report_aggregator_task) = ReportAggregator::new(context.clone()).into_task(); - match context { - Context::Test(context) => tokio::runtime::Builder::new_multi_thread() - .worker_threads(context.concurrency_configuration.number_of_threads) - .enable_all() - .build() - .expect("Failed building the Runtime") - .block_on(async move { - let differential_tests_handling_task = - handle_differential_tests(*context, reporter); + match context { + Context::Test(context) => tokio::runtime::Builder::new_multi_thread() + .worker_threads(context.concurrency_configuration.number_of_threads) + .enable_all() + .build() + .expect("Failed building the Runtime") + .block_on(async move { + let differential_tests_handling_task = + handle_differential_tests(*context, reporter); - futures::future::try_join(differential_tests_handling_task, report_aggregator_task) - .await?; + futures::future::try_join(differential_tests_handling_task, report_aggregator_task) + .await?; - Ok(()) - }), - Context::Benchmark(context) => tokio::runtime::Builder::new_multi_thread() - 
.worker_threads(context.concurrency_configuration.number_of_threads) - .enable_all() - .build() - .expect("Failed building the Runtime") - .block_on(async move { - let differential_benchmarks_handling_task = - handle_differential_benchmarks(*context, reporter); + Ok(()) + }), + Context::Benchmark(context) => tokio::runtime::Builder::new_multi_thread() + .worker_threads(context.concurrency_configuration.number_of_threads) + .enable_all() + .build() + .expect("Failed building the Runtime") + .block_on(async move { + let differential_benchmarks_handling_task = + handle_differential_benchmarks(*context, reporter); - futures::future::try_join( - differential_benchmarks_handling_task, - report_aggregator_task, - ) - .await?; + futures::future::try_join( + differential_benchmarks_handling_task, + report_aggregator_task, + ) + .await?; - Ok(()) - }), - Context::ExportJsonSchema => { - let schema = schema_for!(Metadata); - println!("{}", serde_json::to_string_pretty(&schema).unwrap()); - Ok(()) - } - } + Ok(()) + }), + Context::ExportJsonSchema => { + let schema = schema_for!(Metadata); + println!("{}", serde_json::to_string_pretty(&schema).unwrap()); + Ok(()) + }, + } } diff --git a/crates/format/src/case.rs b/crates/format/src/case.rs index 7c48279..039f9c7 100644 --- a/crates/format/src/case.rs +++ b/crates/format/src/case.rs @@ -7,108 +7,102 @@ use crate::{mode::ParsedMode, steps::*}; #[derive(Debug, Default, Serialize, Deserialize, Clone, Eq, PartialEq, JsonSchema)] pub struct Case { - /// An optional name of the test case. - #[serde(skip_serializing_if = "Option::is_none")] - pub name: Option, + /// An optional name of the test case. + #[serde(skip_serializing_if = "Option::is_none")] + pub name: Option, - /// An optional comment on the case which has no impact on the execution in any way. - #[serde(skip_serializing_if = "Option::is_none")] - pub comment: Option, + /// An optional comment on the case which has no impact on the execution in any way. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub comment: Option, - /// This represents a mode that has been parsed from test metadata. - /// - /// Mode strings can take the following form (in pseudo-regex): - /// - /// ```text - /// [YEILV][+-]? (M[0123sz])? ? - /// ``` - /// - /// If this is provided then it takes higher priority than the modes specified in the metadata - /// file. - #[serde(skip_serializing_if = "Option::is_none")] - pub modes: Option>, + /// This represents a mode that has been parsed from test metadata. + /// + /// Mode strings can take the following form (in pseudo-regex): + /// + /// ```text + /// [YEILV][+-]? (M[0123sz])? ? + /// ``` + /// + /// If this is provided then it takes higher priority than the modes specified in the metadata + /// file. + #[serde(skip_serializing_if = "Option::is_none")] + pub modes: Option>, - /// The set of steps to run as part of this test case. - #[serde(rename = "inputs")] - pub steps: Vec, + /// The set of steps to run as part of this test case. + #[serde(rename = "inputs")] + pub steps: Vec, - /// An optional name of the group of tests that this test belongs to. - #[serde(skip_serializing_if = "Option::is_none")] - pub group: Option, + /// An optional name of the group of tests that this test belongs to. + #[serde(skip_serializing_if = "Option::is_none")] + pub group: Option, - /// An optional set of expectations and assertions to make about the transaction after it ran. - /// - /// If this is not specified then the only assertion that will be ran is that the transaction - /// was successful. - /// - /// This expectation that's on the case itself will be attached to the final step of the case. - #[serde(skip_serializing_if = "Option::is_none")] - pub expected: Option, + /// An optional set of expectations and assertions to make about the transaction after it ran. + /// + /// If this is not specified then the only assertion that will be ran is that the transaction + /// was successful. 
+ /// + /// This expectation that's on the case itself will be attached to the final step of the case. + #[serde(skip_serializing_if = "Option::is_none")] + pub expected: Option, - /// An optional boolean which defines if the case as a whole should be ignored. If null then the - /// case will not be ignored. - #[serde(skip_serializing_if = "Option::is_none")] - pub ignore: Option, + /// An optional boolean which defines if the case as a whole should be ignored. If null then + /// the case will not be ignored. + #[serde(skip_serializing_if = "Option::is_none")] + pub ignore: Option, } impl Case { - pub fn steps_iterator(&self) -> impl Iterator { - let steps_len = self.steps.len(); - self.steps - .clone() - .into_iter() - .enumerate() - .map(move |(idx, mut step)| { - let Step::FunctionCall(ref mut input) = step else { - return step; - }; + pub fn steps_iterator(&self) -> impl Iterator { + let steps_len = self.steps.len(); + self.steps.clone().into_iter().enumerate().map(move |(idx, mut step)| { + let Step::FunctionCall(ref mut input) = step else { + return step; + }; - if idx + 1 == steps_len { - if input.expected.is_none() { - input.expected = self.expected.clone(); - } + if idx + 1 == steps_len { + if input.expected.is_none() { + input.expected = self.expected.clone(); + } - // TODO: What does it mean for us to have an `expected` field on the case itself - // but the final input also has an expected field that doesn't match the one on - // the case? What are we supposed to do with that final expected field on the - // case? + // TODO: What does it mean for us to have an `expected` field on the case itself + // but the final input also has an expected field that doesn't match the one on + // the case? What are we supposed to do with that final expected field on the + // case? 
- step - } else { - step - } - }) - } + step + } else { + step + } + }) + } - pub fn steps_iterator_for_benchmarks( - &self, - default_repeat_count: usize, - ) -> Box + '_> { - let contains_repeat = self - .steps_iterator() - .any(|step| matches!(&step, Step::Repeat(..))); - if contains_repeat { - Box::new(self.steps_iterator()) as Box<_> - } else { - Box::new(std::iter::once(Step::Repeat(Box::new(RepeatStep { - comment: None, - repeat: default_repeat_count, - steps: self.steps_iterator().collect(), - })))) as Box<_> - } - } + pub fn steps_iterator_for_benchmarks( + &self, + default_repeat_count: usize, + ) -> Box + '_> { + let contains_repeat = self.steps_iterator().any(|step| matches!(&step, Step::Repeat(..))); + if contains_repeat { + Box::new(self.steps_iterator()) as Box<_> + } else { + Box::new(std::iter::once(Step::Repeat(Box::new(RepeatStep { + comment: None, + repeat: default_repeat_count, + steps: self.steps_iterator().collect(), + })))) as Box<_> + } + } - pub fn solc_modes(&self) -> Vec { - match &self.modes { - Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(), - None => Mode::all().cloned().collect(), - } - } + pub fn solc_modes(&self) -> Vec { + match &self.modes { + Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(), + None => Mode::all().cloned().collect(), + } + } } define_wrapper_type!( - /// A wrapper type for the index of test cases found in metadata file. - #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] - #[serde(transparent)] - pub struct CaseIdx(usize) impl Display, FromStr; + /// A wrapper type for the index of test cases found in metadata file. 
+ #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] + #[serde(transparent)] + pub struct CaseIdx(usize) impl Display, FromStr; ); diff --git a/crates/format/src/corpus.rs b/crates/format/src/corpus.rs index 62e81f6..3e40f7e 100644 --- a/crates/format/src/corpus.rs +++ b/crates/format/src/corpus.rs @@ -1,6 +1,6 @@ use std::{ - fs::File, - path::{Path, PathBuf}, + fs::File, + path::{Path, PathBuf}, }; use revive_dt_common::iterators::FilesWithExtensionIterator; @@ -13,119 +13,111 @@ use anyhow::Context as _; #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] #[serde(untagged)] pub enum Corpus { - SinglePath { name: String, path: PathBuf }, - MultiplePaths { name: String, paths: Vec }, + SinglePath { name: String, path: PathBuf }, + MultiplePaths { name: String, paths: Vec }, } impl Corpus { - pub fn try_from_path(file_path: impl AsRef) -> anyhow::Result { - let mut corpus = File::open(file_path.as_ref()) - .map_err(anyhow::Error::from) - .and_then(|file| serde_json::from_reader::<_, Corpus>(file).map_err(Into::into)) - .with_context(|| { - format!( - "Failed to open and deserialize corpus file at {}", - file_path.as_ref().display() - ) - })?; + pub fn try_from_path(file_path: impl AsRef) -> anyhow::Result { + let mut corpus = File::open(file_path.as_ref()) + .map_err(anyhow::Error::from) + .and_then(|file| serde_json::from_reader::<_, Corpus>(file).map_err(Into::into)) + .with_context(|| { + format!( + "Failed to open and deserialize corpus file at {}", + file_path.as_ref().display() + ) + })?; - let corpus_directory = file_path - .as_ref() - .canonicalize() - .context("Failed to canonicalize the path to the corpus file")? - .parent() - .context("Corpus file has no parent")? - .to_path_buf(); + let corpus_directory = file_path + .as_ref() + .canonicalize() + .context("Failed to canonicalize the path to the corpus file")? + .parent() + .context("Corpus file has no parent")? 
+ .to_path_buf(); - for path in corpus.paths_iter_mut() { - *path = corpus_directory.join(path.as_path()) - } + for path in corpus.paths_iter_mut() { + *path = corpus_directory.join(path.as_path()) + } - Ok(corpus) - } + Ok(corpus) + } - pub fn enumerate_tests(&self) -> Vec { - let mut tests = self - .paths_iter() - .flat_map(|root_path| { - if !root_path.is_dir() { - Box::new(std::iter::once(root_path.to_path_buf())) - as Box> - } else { - Box::new( - FilesWithExtensionIterator::new(root_path) - .with_use_cached_fs(true) - .with_allowed_extension("sol") - .with_allowed_extension("json"), - ) - } - .map(move |metadata_file_path| (root_path, metadata_file_path)) - }) - .filter_map(|(root_path, metadata_file_path)| { - Metadata::try_from_file(&metadata_file_path) - .or_else(|| { - debug!( - discovered_from = %root_path.display(), - metadata_file_path = %metadata_file_path.display(), - "Skipping file since it doesn't contain valid metadata" - ); - None - }) - .map(|metadata| MetadataFile { - metadata_file_path, - corpus_file_path: root_path.to_path_buf(), - content: metadata, - }) - .inspect(|metadata_file| { - debug!( - metadata_file_path = %metadata_file.relative_path().display(), - "Loaded metadata file" - ) - }) - }) - .collect::>(); - tests.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path)); - tests.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path); - info!( - len = tests.len(), - corpus_name = self.name(), - "Found tests in Corpus" - ); - tests - } + pub fn enumerate_tests(&self) -> Vec { + let mut tests = self + .paths_iter() + .flat_map(|root_path| { + if !root_path.is_dir() { + Box::new(std::iter::once(root_path.to_path_buf())) + as Box> + } else { + Box::new( + FilesWithExtensionIterator::new(root_path) + .with_use_cached_fs(true) + .with_allowed_extension("sol") + .with_allowed_extension("json"), + ) + } + .map(move |metadata_file_path| (root_path, metadata_file_path)) + }) + .filter_map(|(root_path, metadata_file_path)| { + 
Metadata::try_from_file(&metadata_file_path) + .or_else(|| { + debug!( + discovered_from = %root_path.display(), + metadata_file_path = %metadata_file_path.display(), + "Skipping file since it doesn't contain valid metadata" + ); + None + }) + .map(|metadata| MetadataFile { + metadata_file_path, + corpus_file_path: root_path.to_path_buf(), + content: metadata, + }) + .inspect(|metadata_file| { + debug!( + metadata_file_path = %metadata_file.relative_path().display(), + "Loaded metadata file" + ) + }) + }) + .collect::>(); + tests.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path)); + tests.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path); + info!(len = tests.len(), corpus_name = self.name(), "Found tests in Corpus"); + tests + } - pub fn name(&self) -> &str { - match self { - Corpus::SinglePath { name, .. } | Corpus::MultiplePaths { name, .. } => name.as_str(), - } - } + pub fn name(&self) -> &str { + match self { + Corpus::SinglePath { name, .. } | Corpus::MultiplePaths { name, .. } => name.as_str(), + } + } - pub fn paths_iter(&self) -> impl Iterator { - match self { - Corpus::SinglePath { path, .. } => { - Box::new(std::iter::once(path.as_path())) as Box> - } - Corpus::MultiplePaths { paths, .. } => { - Box::new(paths.iter().map(|path| path.as_path())) as Box> - } - } - } + pub fn paths_iter(&self) -> impl Iterator { + match self { + Corpus::SinglePath { path, .. } => + Box::new(std::iter::once(path.as_path())) as Box>, + Corpus::MultiplePaths { paths, .. } => + Box::new(paths.iter().map(|path| path.as_path())) as Box>, + } + } - pub fn paths_iter_mut(&mut self) -> impl Iterator { - match self { - Corpus::SinglePath { path, .. } => { - Box::new(std::iter::once(path)) as Box> - } - Corpus::MultiplePaths { paths, .. } => { - Box::new(paths.iter_mut()) as Box> - } - } - } + pub fn paths_iter_mut(&mut self) -> impl Iterator { + match self { + Corpus::SinglePath { path, .. 
} => + Box::new(std::iter::once(path)) as Box>, + Corpus::MultiplePaths { paths, .. } => + Box::new(paths.iter_mut()) as Box>, + } + } - pub fn path_count(&self) -> usize { - match self { - Corpus::SinglePath { .. } => 1, - Corpus::MultiplePaths { paths, .. } => paths.len(), - } - } + pub fn path_count(&self) -> usize { + match self { + Corpus::SinglePath { .. } => 1, + Corpus::MultiplePaths { paths, .. } => paths.len(), + } + } } diff --git a/crates/format/src/metadata.rs b/crates/format/src/metadata.rs index 7632866..076db73 100644 --- a/crates/format/src/metadata.rs +++ b/crates/format/src/metadata.rs @@ -1,11 +1,11 @@ use std::{ - cmp::Ordering, - collections::BTreeMap, - fmt::Display, - fs::File, - ops::Deref, - path::{Path, PathBuf}, - str::FromStr, + cmp::Ordering, + collections::BTreeMap, + fmt::Display, + fs::File, + ops::Deref, + path::{Path, PathBuf}, + str::FromStr, }; use schemars::JsonSchema; @@ -13,10 +13,10 @@ use serde::{Deserialize, Serialize}; use revive_common::EVMVersion; use revive_dt_common::{ - cached_fs::read_to_string, - iterators::FilesWithExtensionIterator, - macros::define_wrapper_type, - types::{Mode, VmIdentifier}, + cached_fs::read_to_string, + iterators::FilesWithExtensionIterator, + macros::define_wrapper_type, + types::{Mode, VmIdentifier}, }; use tracing::error; @@ -28,35 +28,33 @@ pub const SOLIDITY_CASE_COMMENT_MARKER: &str = "//!"; #[derive(Debug, Default, Deserialize, Clone, Eq, PartialEq)] pub struct MetadataFile { - /// The path of the metadata file. This will either be a JSON or solidity file. - pub metadata_file_path: PathBuf, + /// The path of the metadata file. This will either be a JSON or solidity file. + pub metadata_file_path: PathBuf, - /// This is the path contained within the corpus file. This could either be the path of some dir - /// or could be the actual metadata file path. - pub corpus_file_path: PathBuf, + /// This is the path contained within the corpus file. 
This could either be the path of some + /// dir or could be the actual metadata file path. + pub corpus_file_path: PathBuf, - /// The metadata contained within the file. - pub content: Metadata, + /// The metadata contained within the file. + pub content: Metadata, } impl MetadataFile { - pub fn relative_path(&self) -> &Path { - if self.corpus_file_path.is_file() { - &self.corpus_file_path - } else { - self.metadata_file_path - .strip_prefix(&self.corpus_file_path) - .unwrap() - } - } + pub fn relative_path(&self) -> &Path { + if self.corpus_file_path.is_file() { + &self.corpus_file_path + } else { + self.metadata_file_path.strip_prefix(&self.corpus_file_path).unwrap() + } + } } impl Deref for MetadataFile { - type Target = Metadata; + type Target = Metadata; - fn deref(&self) -> &Self::Target { - &self.content - } + fn deref(&self) -> &Self::Target { + &self.content + } } /// A MatterLabs metadata file. @@ -69,249 +67,238 @@ impl Deref for MetadataFile { /// of steps and assertions that should be performed as part of the test case. #[derive(Debug, Default, Serialize, Deserialize, JsonSchema, Clone, Eq, PartialEq)] pub struct Metadata { - /// This is an optional comment on the metadata file which has no impact on the execution in any - /// way. - #[serde(skip_serializing_if = "Option::is_none")] - pub comment: Option, + /// This is an optional comment on the metadata file which has no impact on the execution in + /// any way. + #[serde(skip_serializing_if = "Option::is_none")] + pub comment: Option, - /// An optional boolean which defines if the metadata file as a whole should be ignored. If null - /// then the metadata file will not be ignored. - #[serde(skip_serializing_if = "Option::is_none")] - pub ignore: Option, + /// An optional boolean which defines if the metadata file as a whole should be ignored. If + /// null then the metadata file will not be ignored. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub ignore: Option, - /// An optional vector of targets that this Metadata file's cases can be executed on. As an - /// example, if we wish for the metadata file's cases to only be run on PolkaVM then we'd - /// specify a target of "PolkaVM" in here. - #[serde(skip_serializing_if = "Option::is_none")] - pub targets: Option>, + /// An optional vector of targets that this Metadata file's cases can be executed on. As an + /// example, if we wish for the metadata file's cases to only be run on PolkaVM then we'd + /// specify a target of "PolkaVM" in here. + #[serde(skip_serializing_if = "Option::is_none")] + pub targets: Option>, - /// A vector of the test cases and workloads contained within the metadata file. This is their - /// primary description. - pub cases: Vec, + /// A vector of the test cases and workloads contained within the metadata file. This is their + /// primary description. + pub cases: Vec, - /// A map of all of the contracts that the test requires to run. - /// - /// This is a map where the key is the name of the contract instance and the value is the - /// contract's path and ident in the file. - /// - /// If any contract is to be used by the test then it must be included in here first so that the - /// framework is aware of its path, compiles it, and prepares it. - #[serde(skip_serializing_if = "Option::is_none")] - pub contracts: Option>, + /// A map of all of the contracts that the test requires to run. + /// + /// This is a map where the key is the name of the contract instance and the value is the + /// contract's path and ident in the file. + /// + /// If any contract is to be used by the test then it must be included in here first so that + /// the framework is aware of its path, compiles it, and prepares it. + #[serde(skip_serializing_if = "Option::is_none")] + pub contracts: Option>, - /// The set of libraries that this metadata file requires. 
- #[serde(skip_serializing_if = "Option::is_none")] - pub libraries: Option>>, + /// The set of libraries that this metadata file requires. + #[serde(skip_serializing_if = "Option::is_none")] + pub libraries: Option>>, - /// This represents a mode that has been parsed from test metadata. - /// - /// Mode strings can take the following form (in pseudo-regex): - /// - /// ```text - /// [YEILV][+-]? (M[0123sz])? ? - /// ``` - #[serde(skip_serializing_if = "Option::is_none")] - pub modes: Option>, + /// This represents a mode that has been parsed from test metadata. + /// + /// Mode strings can take the following form (in pseudo-regex): + /// + /// ```text + /// [YEILV][+-]? (M[0123sz])? ? + /// ``` + #[serde(skip_serializing_if = "Option::is_none")] + pub modes: Option>, - #[serde(skip_serializing_if = "Option::is_none")] - #[schemars(skip)] - pub file_path: Option, + #[serde(skip_serializing_if = "Option::is_none")] + #[schemars(skip)] + pub file_path: Option, - /// This field specifies an EVM version requirement that the test case has where the test might - /// be run of the evm version of the nodes match the evm version specified here. - #[serde(skip_serializing_if = "Option::is_none")] - pub required_evm_version: Option, + /// This field specifies an EVM version requirement that the test case has where the test might + /// be run of the evm version of the nodes match the evm version specified here. + #[serde(skip_serializing_if = "Option::is_none")] + pub required_evm_version: Option, - /// A set of compilation directives that will be passed to the compiler whenever the contracts - /// for the test are being compiled. Note that this differs from the [`Mode`]s in that a [`Mode`] - /// is just a filter for when a test can run whereas this is an instruction to the compiler. 
- #[serde(skip_serializing_if = "Option::is_none")] - pub compiler_directives: Option, + /// A set of compilation directives that will be passed to the compiler whenever the contracts + /// for the test are being compiled. Note that this differs from the [`Mode`]s in that a + /// [`Mode`] is just a filter for when a test can run whereas this is an instruction to the + /// compiler. + #[serde(skip_serializing_if = "Option::is_none")] + pub compiler_directives: Option, } impl Metadata { - /// Returns the modes that we should test from this metadata. - pub fn solc_modes(&self) -> Vec { - match &self.modes { - Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(), - None => Mode::all().cloned().collect(), - } - } + /// Returns the modes that we should test from this metadata. + pub fn solc_modes(&self) -> Vec { + match &self.modes { + Some(modes) => ParsedMode::many_to_modes(modes.iter()).collect(), + None => Mode::all().cloned().collect(), + } + } - /// Returns the base directory of this metadata. - pub fn directory(&self) -> anyhow::Result { - Ok(self - .file_path - .as_ref() - .and_then(|path| path.parent()) - .ok_or_else(|| anyhow::anyhow!("metadata invalid file path: {:?}", self.file_path))? - .to_path_buf()) - } + /// Returns the base directory of this metadata. + pub fn directory(&self) -> anyhow::Result { + Ok(self + .file_path + .as_ref() + .and_then(|path| path.parent()) + .ok_or_else(|| anyhow::anyhow!("metadata invalid file path: {:?}", self.file_path))? 
+ .to_path_buf()) + } - /// Returns the contract sources with canonicalized paths for the files - pub fn contract_sources( - &self, - ) -> anyhow::Result> { - let directory = self.directory()?; - let mut sources = BTreeMap::new(); - let Some(contracts) = &self.contracts else { - return Ok(sources); - }; + /// Returns the contract sources with canonicalized paths for the files + pub fn contract_sources( + &self, + ) -> anyhow::Result> { + let directory = self.directory()?; + let mut sources = BTreeMap::new(); + let Some(contracts) = &self.contracts else { + return Ok(sources); + }; - for ( - alias, - ContractPathAndIdent { - contract_source_path, - contract_ident, - }, - ) in contracts - { - let alias = alias.clone(); - let absolute_path = directory - .join(contract_source_path) - .canonicalize() - .map_err(|error| { - anyhow::anyhow!( - "Failed to canonicalize contract source path '{}': {error}", - directory.join(contract_source_path).display() - ) - })?; - let contract_ident = contract_ident.clone(); + for (alias, ContractPathAndIdent { contract_source_path, contract_ident }) in contracts { + let alias = alias.clone(); + let absolute_path = + directory.join(contract_source_path).canonicalize().map_err(|error| { + anyhow::anyhow!( + "Failed to canonicalize contract source path '{}': {error}", + directory.join(contract_source_path).display() + ) + })?; + let contract_ident = contract_ident.clone(); - sources.insert( - alias, - ContractPathAndIdent { - contract_source_path: absolute_path, - contract_ident, - }, - ); - } + sources.insert( + alias, + ContractPathAndIdent { contract_source_path: absolute_path, contract_ident }, + ); + } - Ok(sources) - } + Ok(sources) + } - /// Try to parse the test metadata struct from the given file at `path`. - /// - /// Returns `None` if `path` didn't contain a test metadata or case definition. - /// - /// # Panics - /// Expects the supplied `path` to be a file. 
- pub fn try_from_file(path: &Path) -> Option { - assert!(path.is_file(), "not a file: {}", path.display()); + /// Try to parse the test metadata struct from the given file at `path`. + /// + /// Returns `None` if `path` didn't contain a test metadata or case definition. + /// + /// # Panics + /// Expects the supplied `path` to be a file. + pub fn try_from_file(path: &Path) -> Option { + assert!(path.is_file(), "not a file: {}", path.display()); - let file_extension = path.extension()?; + let file_extension = path.extension()?; - if file_extension == METADATA_FILE_EXTENSION { - return Self::try_from_json(path); - } + if file_extension == METADATA_FILE_EXTENSION { + return Self::try_from_json(path); + } - if file_extension == SOLIDITY_CASE_FILE_EXTENSION { - return Self::try_from_solidity(path); - } + if file_extension == SOLIDITY_CASE_FILE_EXTENSION { + return Self::try_from_solidity(path); + } - None - } + None + } - fn try_from_json(path: &Path) -> Option { - let file = File::open(path) - .inspect_err(|err| error!(path = %path.display(), %err, "Failed to open file")) - .ok()?; + fn try_from_json(path: &Path) -> Option { + let file = File::open(path) + .inspect_err(|err| error!(path = %path.display(), %err, "Failed to open file")) + .ok()?; - match serde_json::from_reader::<_, Metadata>(file) { - Ok(mut metadata) => { - metadata.file_path = Some(path.to_path_buf()); - Some(metadata) - } - Err(err) => { - error!(path = %path.display(), %err, "Deserialization of metadata failed"); - None - } - } - } + match serde_json::from_reader::<_, Metadata>(file) { + Ok(mut metadata) => { + metadata.file_path = Some(path.to_path_buf()); + Some(metadata) + }, + Err(err) => { + error!(path = %path.display(), %err, "Deserialization of metadata failed"); + None + }, + } + } - fn try_from_solidity(path: &Path) -> Option { - let spec = read_to_string(path) - .inspect_err(|err| error!(path = %path.display(), %err, "Failed to read file content")) - .ok()? 
- .lines() - .filter_map(|line| line.strip_prefix(SOLIDITY_CASE_COMMENT_MARKER)) - .fold(String::new(), |mut buf, string| { - buf.push_str(string); - buf - }); + fn try_from_solidity(path: &Path) -> Option { + let spec = read_to_string(path) + .inspect_err(|err| error!(path = %path.display(), %err, "Failed to read file content")) + .ok()? + .lines() + .filter_map(|line| line.strip_prefix(SOLIDITY_CASE_COMMENT_MARKER)) + .fold(String::new(), |mut buf, string| { + buf.push_str(string); + buf + }); - if spec.is_empty() { - return None; - } + if spec.is_empty() { + return None; + } - match serde_json::from_str::(&spec) { - Ok(mut metadata) => { - metadata.file_path = Some(path.to_path_buf()); - metadata.contracts = Some( - [( - ContractInstance::new("Test"), - ContractPathAndIdent { - contract_source_path: path.to_path_buf(), - contract_ident: ContractIdent::new("Test"), - }, - )] - .into(), - ); - Some(metadata) - } - Err(err) => { - error!(path = %path.display(), %err, "Failed to deserialize metadata"); - None - } - } - } + match serde_json::from_str::(&spec) { + Ok(mut metadata) => { + metadata.file_path = Some(path.to_path_buf()); + metadata.contracts = Some( + [( + ContractInstance::new("Test"), + ContractPathAndIdent { + contract_source_path: path.to_path_buf(), + contract_ident: ContractIdent::new("Test"), + }, + )] + .into(), + ); + Some(metadata) + }, + Err(err) => { + error!(path = %path.display(), %err, "Failed to deserialize metadata"); + None + }, + } + } - /// Returns an iterator over all of the solidity files that needs to be compiled for this - /// [`Metadata`] object - /// - /// Note: if the metadata is contained within a solidity file then this is the only file that - /// we wish to compile since this is a self-contained test. Otherwise, if it's a JSON file - /// then we need to compile all of the contracts that are in the directory since imports are - /// allowed in there. 
- pub fn files_to_compile(&self) -> anyhow::Result>> { - let Some(ref metadata_file_path) = self.file_path else { - anyhow::bail!("The metadata file path is not defined"); - }; - if metadata_file_path - .extension() - .is_some_and(|extension| extension.eq_ignore_ascii_case("sol")) - { - Ok(Box::new(std::iter::once(metadata_file_path.clone()))) - } else { - Ok(Box::new( - FilesWithExtensionIterator::new(self.directory()?) - .with_allowed_extension("sol") - .with_use_cached_fs(true), - )) - } - } + /// Returns an iterator over all of the solidity files that needs to be compiled for this + /// [`Metadata`] object + /// + /// Note: if the metadata is contained within a solidity file then this is the only file that + /// we wish to compile since this is a self-contained test. Otherwise, if it's a JSON file + /// then we need to compile all of the contracts that are in the directory since imports are + /// allowed in there. + pub fn files_to_compile(&self) -> anyhow::Result>> { + let Some(ref metadata_file_path) = self.file_path else { + anyhow::bail!("The metadata file path is not defined"); + }; + if metadata_file_path + .extension() + .is_some_and(|extension| extension.eq_ignore_ascii_case("sol")) + { + Ok(Box::new(std::iter::once(metadata_file_path.clone()))) + } else { + Ok(Box::new( + FilesWithExtensionIterator::new(self.directory()?) + .with_allowed_extension("sol") + .with_use_cached_fs(true), + )) + } + } } define_wrapper_type!( - /// Represents a contract instance found a metadata file. - /// - /// Typically, this is used as the key to the "contracts" field of metadata files. - #[derive( - Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema - )] - #[serde(transparent)] - pub struct ContractInstance(String) impl Display; + /// Represents a contract instance found a metadata file. + /// + /// Typically, this is used as the key to the "contracts" field of metadata files. 
+ #[derive( + Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema + )] + #[serde(transparent)] + pub struct ContractInstance(String) impl Display; ); define_wrapper_type!( - /// Represents a contract identifier found a metadata file. - /// - /// A contract identifier is the name of the contract in the source code. - #[derive( - Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema - )] - #[serde(transparent)] - pub struct ContractIdent(String) impl Display; + /// Represents a contract identifier found a metadata file. + /// + /// A contract identifier is the name of the contract in the source code. + #[derive( + Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema + )] + #[serde(transparent)] + pub struct ContractIdent(String) impl Display; ); /// Represents an identifier used for contracts. @@ -322,82 +309,78 @@ define_wrapper_type!( /// ${path}:${contract_ident} /// ``` #[derive( - Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema, + Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema, )] #[serde(try_from = "String", into = "String")] pub struct ContractPathAndIdent { - /// The path of the contract source code relative to the directory containing the metadata file. - pub contract_source_path: PathBuf, + /// The path of the contract source code relative to the directory containing the metadata + /// file. + pub contract_source_path: PathBuf, - /// The identifier of the contract. - pub contract_ident: ContractIdent, + /// The identifier of the contract. 
+ pub contract_ident: ContractIdent, } impl Display for ContractPathAndIdent { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!( - f, - "{}:{}", - self.contract_source_path.display(), - self.contract_ident.as_ref() - ) - } + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(f, "{}:{}", self.contract_source_path.display(), self.contract_ident.as_ref()) + } } impl FromStr for ContractPathAndIdent { - type Err = anyhow::Error; + type Err = anyhow::Error; - fn from_str(s: &str) -> Result { - let mut splitted_string = s.split(":").peekable(); - let mut path = None::; - let mut identifier = None::; - loop { - let Some(next_item) = splitted_string.next() else { - break; - }; - if splitted_string.peek().is_some() { - match path { - Some(ref mut path) => { - path.push(':'); - path.push_str(next_item); - } - None => path = Some(next_item.to_owned()), - } - } else { - identifier = Some(next_item.to_owned()) - } - } - match (path, identifier) { - (Some(path), Some(identifier)) => Ok(Self { - contract_source_path: PathBuf::from(path), - contract_ident: ContractIdent::new(identifier), - }), - (None, Some(path)) | (Some(path), None) => { - let Some(identifier) = path.split(".").next().map(ToOwned::to_owned) else { - anyhow::bail!("Failed to find identifier"); - }; - Ok(Self { - contract_source_path: PathBuf::from(path), - contract_ident: ContractIdent::new(identifier), - }) - } - (None, None) => anyhow::bail!("Failed to find the path and identifier"), - } - } + fn from_str(s: &str) -> Result { + let mut splitted_string = s.split(":").peekable(); + let mut path = None::; + let mut identifier = None::; + loop { + let Some(next_item) = splitted_string.next() else { + break; + }; + if splitted_string.peek().is_some() { + match path { + Some(ref mut path) => { + path.push(':'); + path.push_str(next_item); + }, + None => path = Some(next_item.to_owned()), + } + } else { + identifier = Some(next_item.to_owned()) + } + } + match 
(path, identifier) { + (Some(path), Some(identifier)) => Ok(Self { + contract_source_path: PathBuf::from(path), + contract_ident: ContractIdent::new(identifier), + }), + (None, Some(path)) | (Some(path), None) => { + let Some(identifier) = path.split(".").next().map(ToOwned::to_owned) else { + anyhow::bail!("Failed to find identifier"); + }; + Ok(Self { + contract_source_path: PathBuf::from(path), + contract_ident: ContractIdent::new(identifier), + }) + }, + (None, None) => anyhow::bail!("Failed to find the path and identifier"), + } + } } impl TryFrom for ContractPathAndIdent { - type Error = anyhow::Error; + type Error = anyhow::Error; - fn try_from(value: String) -> Result { - Self::from_str(&value) - } + fn try_from(value: String) -> Result { + Self::from_str(&value) + } } impl From for String { - fn from(value: ContractPathAndIdent) -> Self { - value.to_string() - } + fn from(value: ContractPathAndIdent) -> Self { + value.to_string() + } } /// An EVM version requirement that the test case has. This gets serialized and deserialized from @@ -407,128 +390,104 @@ impl From for String { /// When specified, the framework will only run the test if the node's EVM version matches that /// required by the metadata file. 
#[derive( - Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema, + Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema, )] #[serde(try_from = "String", into = "String")] pub struct EvmVersionRequirement { - ordering: Ordering, - or_equal: bool, - evm_version: EVMVersion, + ordering: Ordering, + or_equal: bool, + evm_version: EVMVersion, } impl EvmVersionRequirement { - pub fn new_greater_than_or_equals(version: EVMVersion) -> Self { - Self { - ordering: Ordering::Greater, - or_equal: true, - evm_version: version, - } - } + pub fn new_greater_than_or_equals(version: EVMVersion) -> Self { + Self { ordering: Ordering::Greater, or_equal: true, evm_version: version } + } - pub fn new_greater_than(version: EVMVersion) -> Self { - Self { - ordering: Ordering::Greater, - or_equal: false, - evm_version: version, - } - } + pub fn new_greater_than(version: EVMVersion) -> Self { + Self { ordering: Ordering::Greater, or_equal: false, evm_version: version } + } - pub fn new_equals(version: EVMVersion) -> Self { - Self { - ordering: Ordering::Equal, - or_equal: false, - evm_version: version, - } - } + pub fn new_equals(version: EVMVersion) -> Self { + Self { ordering: Ordering::Equal, or_equal: false, evm_version: version } + } - pub fn new_less_than(version: EVMVersion) -> Self { - Self { - ordering: Ordering::Less, - or_equal: false, - evm_version: version, - } - } + pub fn new_less_than(version: EVMVersion) -> Self { + Self { ordering: Ordering::Less, or_equal: false, evm_version: version } + } - pub fn new_less_than_or_equals(version: EVMVersion) -> Self { - Self { - ordering: Ordering::Less, - or_equal: true, - evm_version: version, - } - } + pub fn new_less_than_or_equals(version: EVMVersion) -> Self { + Self { ordering: Ordering::Less, or_equal: true, evm_version: version } + } - pub fn matches(&self, other: &EVMVersion) -> bool { - let ordering = other.cmp(&self.evm_version); - ordering == 
self.ordering || (self.or_equal && matches!(ordering, Ordering::Equal)) - } + pub fn matches(&self, other: &EVMVersion) -> bool { + let ordering = other.cmp(&self.evm_version); + ordering == self.ordering || (self.or_equal && matches!(ordering, Ordering::Equal)) + } } impl Display for EvmVersionRequirement { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let Self { - ordering, - or_equal, - evm_version, - } = self; - match ordering { - Ordering::Less => write!(f, "<")?, - Ordering::Equal => write!(f, "=")?, - Ordering::Greater => write!(f, ">")?, - } - if *or_equal && !matches!(ordering, Ordering::Equal) { - write!(f, "=")?; - } - write!(f, "{evm_version}") - } + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let Self { ordering, or_equal, evm_version } = self; + match ordering { + Ordering::Less => write!(f, "<")?, + Ordering::Equal => write!(f, "=")?, + Ordering::Greater => write!(f, ">")?, + } + if *or_equal && !matches!(ordering, Ordering::Equal) { + write!(f, "=")?; + } + write!(f, "{evm_version}") + } } impl FromStr for EvmVersionRequirement { - type Err = anyhow::Error; + type Err = anyhow::Error; - fn from_str(s: &str) -> Result { - match s.as_bytes() { - [b'>', b'=', remaining @ ..] => Ok(Self { - ordering: Ordering::Greater, - or_equal: true, - evm_version: str::from_utf8(remaining)?.try_into()?, - }), - [b'>', remaining @ ..] => Ok(Self { - ordering: Ordering::Greater, - or_equal: false, - evm_version: str::from_utf8(remaining)?.try_into()?, - }), - [b'<', b'=', remaining @ ..] => Ok(Self { - ordering: Ordering::Less, - or_equal: true, - evm_version: str::from_utf8(remaining)?.try_into()?, - }), - [b'<', remaining @ ..] => Ok(Self { - ordering: Ordering::Less, - or_equal: false, - evm_version: str::from_utf8(remaining)?.try_into()?, - }), - [b'=', remaining @ ..] 
=> Ok(Self { - ordering: Ordering::Equal, - or_equal: false, - evm_version: str::from_utf8(remaining)?.try_into()?, - }), - _ => anyhow::bail!("Invalid EVM version requirement {s}"), - } - } + fn from_str(s: &str) -> Result { + match s.as_bytes() { + [b'>', b'=', remaining @ ..] => Ok(Self { + ordering: Ordering::Greater, + or_equal: true, + evm_version: str::from_utf8(remaining)?.try_into()?, + }), + [b'>', remaining @ ..] => Ok(Self { + ordering: Ordering::Greater, + or_equal: false, + evm_version: str::from_utf8(remaining)?.try_into()?, + }), + [b'<', b'=', remaining @ ..] => Ok(Self { + ordering: Ordering::Less, + or_equal: true, + evm_version: str::from_utf8(remaining)?.try_into()?, + }), + [b'<', remaining @ ..] => Ok(Self { + ordering: Ordering::Less, + or_equal: false, + evm_version: str::from_utf8(remaining)?.try_into()?, + }), + [b'=', remaining @ ..] => Ok(Self { + ordering: Ordering::Equal, + or_equal: false, + evm_version: str::from_utf8(remaining)?.try_into()?, + }), + _ => anyhow::bail!("Invalid EVM version requirement {s}"), + } + } } impl TryFrom for EvmVersionRequirement { - type Error = anyhow::Error; + type Error = anyhow::Error; - fn try_from(value: String) -> Result { - value.parse() - } + fn try_from(value: String) -> Result { + value.parse() + } } impl From for String { - fn from(value: EvmVersionRequirement) -> Self { - value.to_string() - } + fn from(value: EvmVersionRequirement) -> Self { + value.to_string() + } } /// A set of compilation directives that will be passed to the compiler whenever the contracts for @@ -536,88 +495,85 @@ impl From for String { /// just a filter for when a test can run whereas this is an instruction to the compiler. /// Defines how the compiler should handle revert strings. 
#[derive( - Clone, - Debug, - Copy, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Default, - Serialize, - Deserialize, - JsonSchema, + Clone, + Debug, + Copy, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Default, + Serialize, + Deserialize, + JsonSchema, )] pub struct CompilationDirectives { - /// Defines how the revert strings should be handled. - pub revert_string_handling: Option, + /// Defines how the revert strings should be handled. + pub revert_string_handling: Option, } /// Defines how the compiler should handle revert strings. #[derive( - Clone, - Debug, - Copy, - PartialEq, - Eq, - PartialOrd, - Ord, - Hash, - Default, - Serialize, - Deserialize, - JsonSchema, + Clone, + Debug, + Copy, + PartialEq, + Eq, + PartialOrd, + Ord, + Hash, + Default, + Serialize, + Deserialize, + JsonSchema, )] #[serde(rename_all = "camelCase")] pub enum RevertString { - /// The default handling of the revert strings. - #[default] - Default, - /// The debug handling of the revert strings. - Debug, - /// Strip the revert strings. - Strip, - /// Provide verbose debug strings for the revert string. - VerboseDebug, + /// The default handling of the revert strings. + #[default] + Default, + /// The debug handling of the revert strings. + Debug, + /// Strip the revert strings. + Strip, + /// Provide verbose debug strings for the revert string. 
+ VerboseDebug, } #[cfg(test)] mod test { - use super::*; + use super::*; - #[test] - fn contract_identifier_respects_roundtrip_property() { - // Arrange - let string = "ERC20/ERC20.sol:ERC20"; + #[test] + fn contract_identifier_respects_roundtrip_property() { + // Arrange + let string = "ERC20/ERC20.sol:ERC20"; - // Act - let identifier = ContractPathAndIdent::from_str(string); + // Act + let identifier = ContractPathAndIdent::from_str(string); - // Assert - let identifier = identifier.expect("Failed to parse"); - assert_eq!( - identifier.contract_source_path.display().to_string(), - "ERC20/ERC20.sol" - ); - assert_eq!(identifier.contract_ident, "ERC20".to_owned().into()); + // Assert + let identifier = identifier.expect("Failed to parse"); + assert_eq!(identifier.contract_source_path.display().to_string(), "ERC20/ERC20.sol"); + assert_eq!(identifier.contract_ident, "ERC20".to_owned().into()); - // Act - let reserialized = identifier.to_string(); + // Act + let reserialized = identifier.to_string(); - // Assert - assert_eq!(string, reserialized); - } + // Assert + assert_eq!(string, reserialized); + } - #[test] - fn complex_metadata_file_can_be_deserialized() { - // Arrange - const JSON: &str = include_str!("../../../assets/test_metadata.json"); + #[test] + fn complex_metadata_file_can_be_deserialized() { + // Arrange + const JSON: &str = include_str!("../../../assets/test_metadata.json"); - // Act - let metadata = serde_json::from_str::(JSON); + // Act + let metadata = serde_json::from_str::(JSON); - // Assert - metadata.expect("Failed to deserialize metadata"); - } + // Assert + metadata.expect("Failed to deserialize metadata"); + } } diff --git a/crates/format/src/mode.rs b/crates/format/src/mode.rs index a684140..3553f1a 100644 --- a/crates/format/src/mode.rs +++ b/crates/format/src/mode.rs @@ -1,8 +1,8 @@ use anyhow::Context as _; use regex::Regex; use revive_dt_common::{ - iterators::EitherIter, - types::{Mode, ModeOptimizerSetting, ModePipeline}, + 
iterators::EitherIter, + types::{Mode, ModeOptimizerSetting, ModePipeline}, }; use schemars::JsonSchema; use serde::{Deserialize, Serialize}; @@ -20,17 +20,17 @@ use std::{collections::HashSet, fmt::Display, str::FromStr, sync::LazyLock}; #[derive(Clone, Debug, PartialEq, Eq, Hash, Deserialize, Serialize, JsonSchema)] #[serde(try_from = "String", into = "String")] pub struct ParsedMode { - pub pipeline: Option, - pub optimize_flag: Option, - pub optimize_setting: Option, - pub version: Option, + pub pipeline: Option, + pub optimize_flag: Option, + pub optimize_setting: Option, + pub version: Option, } impl FromStr for ParsedMode { - type Err = anyhow::Error; - fn from_str(s: &str) -> Result { - static REGEX: LazyLock = LazyLock::new(|| { - Regex::new(r"(?x) + type Err = anyhow::Error; + fn from_str(s: &str) -> Result { + static REGEX: LazyLock = LazyLock::new(|| { + Regex::new(r"(?x) ^ (?:(?P[YEILV])(?P[+-])?)? # Pipeline to use eg Y, E+, E- \s* @@ -39,218 +39,204 @@ impl FromStr for ParsedMode { (?P[>=<]*\d+(?:\.\d+)*)? 
# Optional semver version eg >=0.8.0, 0.7, <0.8 $ ").unwrap() - }); + }); - let Some(caps) = REGEX.captures(s) else { - anyhow::bail!("Cannot parse mode '{s}' from string"); - }; + let Some(caps) = REGEX.captures(s) else { + anyhow::bail!("Cannot parse mode '{s}' from string"); + }; - let pipeline = match caps.name("pipeline") { - Some(m) => Some( - ModePipeline::from_str(m.as_str()) - .context("Failed to parse mode pipeline from string")?, - ), - None => None, - }; + let pipeline = match caps.name("pipeline") { + Some(m) => Some( + ModePipeline::from_str(m.as_str()) + .context("Failed to parse mode pipeline from string")?, + ), + None => None, + }; - let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+"); + let optimize_flag = caps.name("optimize_flag").map(|m| m.as_str() == "+"); - let optimize_setting = match caps.name("optimize_setting") { - Some(m) => Some( - ModeOptimizerSetting::from_str(m.as_str()) - .context("Failed to parse optimizer setting from string")?, - ), - None => None, - }; + let optimize_setting = match caps.name("optimize_setting") { + Some(m) => Some( + ModeOptimizerSetting::from_str(m.as_str()) + .context("Failed to parse optimizer setting from string")?, + ), + None => None, + }; - let version = match caps.name("version") { - Some(m) => Some( - semver::VersionReq::parse(m.as_str()) - .map_err(|e| { - anyhow::anyhow!( - "Cannot parse the version requirement '{}': {e}", - m.as_str() - ) - }) - .context("Failed to parse semver requirement from mode string")?, - ), - None => None, - }; + let version = match caps.name("version") { + Some(m) => Some( + semver::VersionReq::parse(m.as_str()) + .map_err(|e| { + anyhow::anyhow!( + "Cannot parse the version requirement '{}': {e}", + m.as_str() + ) + }) + .context("Failed to parse semver requirement from mode string")?, + ), + None => None, + }; - Ok(ParsedMode { - pipeline, - optimize_flag, - optimize_setting, - version, - }) - } + Ok(ParsedMode { pipeline, optimize_flag, 
optimize_setting, version }) + } } impl Display for ParsedMode { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - let mut has_written = false; + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + let mut has_written = false; - if let Some(pipeline) = self.pipeline { - pipeline.fmt(f)?; - if let Some(optimize_flag) = self.optimize_flag { - f.write_str(if optimize_flag { "+" } else { "-" })?; - } - has_written = true; - } + if let Some(pipeline) = self.pipeline { + pipeline.fmt(f)?; + if let Some(optimize_flag) = self.optimize_flag { + f.write_str(if optimize_flag { "+" } else { "-" })?; + } + has_written = true; + } - if let Some(optimize_setting) = self.optimize_setting { - if has_written { - f.write_str(" ")?; - } - optimize_setting.fmt(f)?; - has_written = true; - } + if let Some(optimize_setting) = self.optimize_setting { + if has_written { + f.write_str(" ")?; + } + optimize_setting.fmt(f)?; + has_written = true; + } - if let Some(version) = &self.version { - if has_written { - f.write_str(" ")?; - } - version.fmt(f)?; - } + if let Some(version) = &self.version { + if has_written { + f.write_str(" ")?; + } + version.fmt(f)?; + } - Ok(()) - } + Ok(()) + } } impl From for String { - fn from(parsed_mode: ParsedMode) -> Self { - parsed_mode.to_string() - } + fn from(parsed_mode: ParsedMode) -> Self { + parsed_mode.to_string() + } } impl TryFrom for ParsedMode { - type Error = anyhow::Error; - fn try_from(value: String) -> Result { - ParsedMode::from_str(&value) - } + type Error = anyhow::Error; + fn try_from(value: String) -> Result { + ParsedMode::from_str(&value) + } } impl ParsedMode { - /// This takes a [`ParsedMode`] and expands it into a list of [`Mode`]s that we should try. 
- pub fn to_modes(&self) -> impl Iterator { - let pipeline_iter = self.pipeline.as_ref().map_or_else( - || EitherIter::A(ModePipeline::test_cases()), - |p| EitherIter::B(std::iter::once(*p)), - ); + /// This takes a [`ParsedMode`] and expands it into a list of [`Mode`]s that we should try. + pub fn to_modes(&self) -> impl Iterator { + let pipeline_iter = self.pipeline.as_ref().map_or_else( + || EitherIter::A(ModePipeline::test_cases()), + |p| EitherIter::B(std::iter::once(*p)), + ); - let optimize_flag_setting = self.optimize_flag.map(|flag| { - if flag { - ModeOptimizerSetting::M3 - } else { - ModeOptimizerSetting::M0 - } - }); + let optimize_flag_setting = self + .optimize_flag + .map(|flag| if flag { ModeOptimizerSetting::M3 } else { ModeOptimizerSetting::M0 }); - let optimize_flag_iter = match optimize_flag_setting { - Some(setting) => EitherIter::A(std::iter::once(setting)), - None => EitherIter::B(ModeOptimizerSetting::test_cases()), - }; + let optimize_flag_iter = match optimize_flag_setting { + Some(setting) => EitherIter::A(std::iter::once(setting)), + None => EitherIter::B(ModeOptimizerSetting::test_cases()), + }; - let optimize_settings_iter = self.optimize_setting.as_ref().map_or_else( - || EitherIter::A(optimize_flag_iter), - |s| EitherIter::B(std::iter::once(*s)), - ); + let optimize_settings_iter = self.optimize_setting.as_ref().map_or_else( + || EitherIter::A(optimize_flag_iter), + |s| EitherIter::B(std::iter::once(*s)), + ); - pipeline_iter.flat_map(move |pipeline| { - optimize_settings_iter - .clone() - .map(move |optimize_setting| Mode { - pipeline, - optimize_setting, - version: self.version.clone(), - }) - }) - } + pipeline_iter.flat_map(move |pipeline| { + optimize_settings_iter.clone().map(move |optimize_setting| Mode { + pipeline, + optimize_setting, + version: self.version.clone(), + }) + }) + } - /// Return a set of [`Mode`]s that correspond to the given [`ParsedMode`]s. - /// This avoids any duplicate entries. 
- pub fn many_to_modes<'a>( - parsed: impl Iterator, - ) -> impl Iterator { - let modes: HashSet<_> = parsed.flat_map(|p| p.to_modes()).collect(); - modes.into_iter() - } + /// Return a set of [`Mode`]s that correspond to the given [`ParsedMode`]s. + /// This avoids any duplicate entries. + pub fn many_to_modes<'a>( + parsed: impl Iterator, + ) -> impl Iterator { + let modes: HashSet<_> = parsed.flat_map(|p| p.to_modes()).collect(); + modes.into_iter() + } } #[cfg(test)] mod tests { - use super::*; + use super::*; - #[test] - fn test_parsed_mode_from_str() { - let strings = vec![ - ("Mz", "Mz"), - ("Y", "Y"), - ("Y+", "Y+"), - ("Y-", "Y-"), - ("E", "E"), - ("E+", "E+"), - ("E-", "E-"), - ("Y M0", "Y M0"), - ("Y M1", "Y M1"), - ("Y M2", "Y M2"), - ("Y M3", "Y M3"), - ("Y Ms", "Y Ms"), - ("Y Mz", "Y Mz"), - ("E M0", "E M0"), - ("E M1", "E M1"), - ("E M2", "E M2"), - ("E M3", "E M3"), - ("E Ms", "E Ms"), - ("E Mz", "E Mz"), - // When stringifying semver again, 0.8.0 becomes ^0.8.0 (same meaning) - ("Y 0.8.0", "Y ^0.8.0"), - ("E+ 0.8.0", "E+ ^0.8.0"), - ("Y M3 >=0.8.0", "Y M3 >=0.8.0"), - ("E Mz <0.7.0", "E Mz <0.7.0"), - // We can parse +- _and_ M1/M2 but the latter takes priority. - ("Y+ M1 0.8.0", "Y+ M1 ^0.8.0"), - ("E- M2 0.7.0", "E- M2 ^0.7.0"), - // We don't see this in the wild but it is parsed. 
- ("<=0.8", "<=0.8"), - ]; + #[test] + fn test_parsed_mode_from_str() { + let strings = vec![ + ("Mz", "Mz"), + ("Y", "Y"), + ("Y+", "Y+"), + ("Y-", "Y-"), + ("E", "E"), + ("E+", "E+"), + ("E-", "E-"), + ("Y M0", "Y M0"), + ("Y M1", "Y M1"), + ("Y M2", "Y M2"), + ("Y M3", "Y M3"), + ("Y Ms", "Y Ms"), + ("Y Mz", "Y Mz"), + ("E M0", "E M0"), + ("E M1", "E M1"), + ("E M2", "E M2"), + ("E M3", "E M3"), + ("E Ms", "E Ms"), + ("E Mz", "E Mz"), + // When stringifying semver again, 0.8.0 becomes ^0.8.0 (same meaning) + ("Y 0.8.0", "Y ^0.8.0"), + ("E+ 0.8.0", "E+ ^0.8.0"), + ("Y M3 >=0.8.0", "Y M3 >=0.8.0"), + ("E Mz <0.7.0", "E Mz <0.7.0"), + // We can parse +- _and_ M1/M2 but the latter takes priority. + ("Y+ M1 0.8.0", "Y+ M1 ^0.8.0"), + ("E- M2 0.7.0", "E- M2 ^0.7.0"), + // We don't see this in the wild but it is parsed. + ("<=0.8", "<=0.8"), + ]; - for (actual, expected) in strings { - let parsed = ParsedMode::from_str(actual) - .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'")); - assert_eq!( - expected, - parsed.to_string(), - "Mode string '{actual}' did not parse to '{expected}': got '{parsed}'" - ); - } - } + for (actual, expected) in strings { + let parsed = ParsedMode::from_str(actual) + .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'")); + assert_eq!( + expected, + parsed.to_string(), + "Mode string '{actual}' did not parse to '{expected}': got '{parsed}'" + ); + } + } - #[test] - fn test_parsed_mode_to_test_modes() { - let strings = vec![ - ("Mz", vec!["Y Mz", "E Mz"]), - ("Y", vec!["Y M0", "Y M3"]), - ("E", vec!["E M0", "E M3"]), - ("Y+", vec!["Y M3"]), - ("Y-", vec!["Y M0"]), - ("Y <=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8"]), - ( - "<=0.8", - vec!["Y M0 <=0.8", "Y M3 <=0.8", "E M0 <=0.8", "E M3 <=0.8"], - ), - ]; + #[test] + fn test_parsed_mode_to_test_modes() { + let strings = vec![ + ("Mz", vec!["Y Mz", "E Mz"]), + ("Y", vec!["Y M0", "Y M3"]), + ("E", vec!["E M0", "E M3"]), + ("Y+", vec!["Y M3"]), + ("Y-", vec!["Y M0"]), 
+ ("Y <=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8"]), + ("<=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8", "E M0 <=0.8", "E M3 <=0.8"]), + ]; - for (actual, expected) in strings { - let parsed = ParsedMode::from_str(actual) - .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'")); - let expected_set: HashSet<_> = expected.into_iter().map(|s| s.to_owned()).collect(); - let actual_set: HashSet<_> = parsed.to_modes().map(|m| m.to_string()).collect(); + for (actual, expected) in strings { + let parsed = ParsedMode::from_str(actual) + .unwrap_or_else(|_| panic!("Failed to parse mode string '{actual}'")); + let expected_set: HashSet<_> = expected.into_iter().map(|s| s.to_owned()).collect(); + let actual_set: HashSet<_> = parsed.to_modes().map(|m| m.to_string()).collect(); - assert_eq!( - expected_set, actual_set, - "Mode string '{actual}' did not expand to '{expected_set:?}': got '{actual_set:?}'" - ); - } - } + assert_eq!( + expected_set, actual_set, + "Mode string '{actual}' did not expand to '{expected_set:?}': got '{actual_set:?}'" + ); + } + } } diff --git a/crates/format/src/steps.rs b/crates/format/src/steps.rs index 44024a9..91f240b 100644 --- a/crates/format/src/steps.rs +++ b/crates/format/src/steps.rs @@ -1,11 +1,11 @@ use std::{collections::HashMap, fmt::Display, str::FromStr}; use alloy::{ - eips::BlockNumberOrTag, - json_abi::Function, - network::TransactionBuilder, - primitives::{Address, Bytes, FixedBytes, U256, utils::parse_units}, - rpc::types::TransactionRequest, + eips::BlockNumberOrTag, + json_abi::Function, + network::TransactionBuilder, + primitives::{Address, Bytes, FixedBytes, U256, utils::parse_units}, + rpc::types::TransactionRequest, }; use anyhow::Context as _; use futures::{FutureExt, StreamExt, TryFutureExt, TryStreamExt, stream}; @@ -17,8 +17,8 @@ use revive_dt_common::macros::define_wrapper_type; use tracing::{Instrument, info_span, instrument}; use crate::{ - metadata::ContractInstance, - traits::{ResolutionContext, ResolverApi}, + 
metadata::ContractInstance, + traits::{ResolutionContext, ResolverApi}, }; /// A test step. @@ -28,200 +28,195 @@ use crate::{ #[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, JsonSchema)] #[serde(untagged)] pub enum Step { - /// A function call or an invocation to some function on some smart contract. - FunctionCall(Box), + /// A function call or an invocation to some function on some smart contract. + FunctionCall(Box), - /// A step for performing a balance assertion on some account or contract. - BalanceAssertion(Box), + /// A step for performing a balance assertion on some account or contract. + BalanceAssertion(Box), - /// A step for asserting that the storage of some contract or account is empty. - StorageEmptyAssertion(Box), + /// A step for asserting that the storage of some contract or account is empty. + StorageEmptyAssertion(Box), - /// A special step for repeating a bunch of steps a certain number of times. - Repeat(Box), + /// A special step for repeating a bunch of steps a certain number of times. + Repeat(Box), - /// A step type that allows for a new account address to be allocated and to later on be used - /// as the caller in another step. - AllocateAccount(Box), + /// A step type that allows for a new account address to be allocated and to later on be used + /// as the caller in another step. 
+ AllocateAccount(Box), } define_wrapper_type!( - #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] - pub struct StepIdx(usize) impl Display, FromStr; + #[derive(Clone, Copy, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] + pub struct StepIdx(usize) impl Display, FromStr; ); define_wrapper_type!( - #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] - #[serde(try_from = "String", into = "String")] - pub struct StepPath(Vec); + #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] + #[serde(try_from = "String", into = "String")] + pub struct StepPath(Vec); ); impl StepPath { - pub fn from_iterator(path: impl IntoIterator>) -> Self { - Self(path.into_iter().map(|value| value.into()).collect()) - } + pub fn from_iterator(path: impl IntoIterator>) -> Self { + Self(path.into_iter().map(|value| value.into()).collect()) + } - pub fn increment(&self) -> Self { - let mut this = self.clone(); - if let Some(last) = this.last_mut() { - last.0 += 1 - } - this - } + pub fn increment(&self) -> Self { + let mut this = self.clone(); + if let Some(last) = this.last_mut() { + last.0 += 1 + } + this + } - pub fn append(&self, step_idx: impl Into) -> Self { - let mut this = self.clone(); - this.0.push(step_idx.into()); - this - } + pub fn append(&self, step_idx: impl Into) -> Self { + let mut this = self.clone(); + this.0.push(step_idx.into()); + this + } } impl Display for StepPath { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - self.0 - .iter() - .map(|idx| idx.to_string()) - .collect::>() - .join(".") - .fmt(f) - } + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + self.0.iter().map(|idx| idx.to_string()).collect::>().join(".").fmt(f) + } } impl FromStr for StepPath { - type Err = anyhow::Error; + type Err = anyhow::Error; - fn from_str(s: &str) -> Result { - s.split(".") - .map(StepIdx::from_str) - .collect::>>() - .map(Self) - } + fn from_str(s: &str) -> Result { + 
s.split(".") + .map(StepIdx::from_str) + .collect::>>() + .map(Self) + } } impl From for String { - fn from(value: StepPath) -> Self { - value.to_string() - } + fn from(value: StepPath) -> Self { + value.to_string() + } } impl TryFrom for StepPath { - type Error = anyhow::Error; + type Error = anyhow::Error; - fn try_from(value: String) -> Result { - value.parse() - } + fn try_from(value: String) -> Result { + value.parse() + } } /// This is an input step which is a transaction description that the framework translates into a /// transaction and executes on the nodes. #[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)] pub struct FunctionCallStep { - /// The address of the account performing the call and paying the fees for it. - #[serde(default = "FunctionCallStep::default_caller")] - #[schemars(with = "String")] - pub caller: StepAddress, + /// The address of the account performing the call and paying the fees for it. + #[serde(default = "FunctionCallStep::default_caller")] + #[schemars(with = "String")] + pub caller: StepAddress, - /// An optional comment on the step which has no impact on the execution in any way. - #[serde(skip_serializing_if = "Option::is_none")] - pub comment: Option, + /// An optional comment on the step which has no impact on the execution in any way. + #[serde(skip_serializing_if = "Option::is_none")] + pub comment: Option, - /// The contract instance that's being called in this transaction step. - #[serde(default = "FunctionCallStep::default_instance")] - pub instance: ContractInstance, + /// The contract instance that's being called in this transaction step. + #[serde(default = "FunctionCallStep::default_instance")] + pub instance: ContractInstance, - /// The method that's being called in this step. - pub method: Method, + /// The method that's being called in this step. + pub method: Method, - /// The calldata that the function should be invoked with. 
- #[serde(default)] - pub calldata: Calldata, + /// The calldata that the function should be invoked with. + #[serde(default)] + pub calldata: Calldata, - /// A set of assertions and expectations to have for the transaction. - #[serde(skip_serializing_if = "Option::is_none")] - pub expected: Option, + /// A set of assertions and expectations to have for the transaction. + #[serde(skip_serializing_if = "Option::is_none")] + pub expected: Option, - /// An optional value to provide as part of the transaction. - #[serde(skip_serializing_if = "Option::is_none")] - pub value: Option, + /// An optional value to provide as part of the transaction. + #[serde(skip_serializing_if = "Option::is_none")] + pub value: Option, - #[serde(skip_serializing_if = "Option::is_none")] - #[schemars(skip)] - pub storage: Option>, + #[serde(skip_serializing_if = "Option::is_none")] + #[schemars(skip)] + pub storage: Option>, - /// Variable assignment to perform in the framework allowing us to reference them again later on - /// during the execution. - #[serde(skip_serializing_if = "Option::is_none")] - pub variable_assignments: Option, + /// Variable assignment to perform in the framework allowing us to reference them again later + /// on during the execution. + #[serde(skip_serializing_if = "Option::is_none")] + pub variable_assignments: Option, } /// This represents a balance assertion step where the framework needs to query the balance of some /// account or contract and assert that it's some amount. #[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, JsonSchema)] pub struct BalanceAssertionStep { - /// An optional comment on the balance assertion. - #[serde(skip_serializing_if = "Option::is_none")] - pub comment: Option, + /// An optional comment on the balance assertion. + #[serde(skip_serializing_if = "Option::is_none")] + pub comment: Option, - /// The address that the balance assertion should be done on. 
- /// - /// This is a string which will be resolved into an address when being processed. Therefore, - /// this could be a normal hex address, a variable such as `Test.address`, or perhaps even a - /// full on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are - /// followed in the calldata. - pub address: StepAddress, + /// The address that the balance assertion should be done on. + /// + /// This is a string which will be resolved into an address when being processed. Therefore, + /// this could be a normal hex address, a variable such as `Test.address`, or perhaps even a + /// full on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are + /// followed in the calldata. + pub address: StepAddress, - /// The amount of balance to assert that the account or contract has. This is a 256 bit string - /// that's serialized and deserialized into a decimal string. - #[schemars(with = "String")] - pub expected_balance: U256, + /// The amount of balance to assert that the account or contract has. This is a 256 bit string + /// that's serialized and deserialized into a decimal string. + #[schemars(with = "String")] + pub expected_balance: U256, } /// This represents an assertion for the storage of some contract or account and whether it's empty /// or not. #[derive(Clone, Debug, Serialize, Deserialize, Eq, PartialEq, JsonSchema)] pub struct StorageEmptyAssertionStep { - /// An optional comment on the storage empty assertion. - #[serde(skip_serializing_if = "Option::is_none")] - pub comment: Option, + /// An optional comment on the storage empty assertion. + #[serde(skip_serializing_if = "Option::is_none")] + pub comment: Option, - /// The address that the balance assertion should be done on. - /// - /// This is a string which will be resolved into an address when being processed. 
Therefore, - /// this could be a normal hex address, a variable such as `Test.address`, or perhaps even a - /// full on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are - /// followed in the calldata. - pub address: StepAddress, + /// The address that the balance assertion should be done on. + /// + /// This is a string which will be resolved into an address when being processed. Therefore, + /// this could be a normal hex address, a variable such as `Test.address`, or perhaps even a + /// full on variable like `$VARIABLE:Uniswap`. It follows the same resolution rules that are + /// followed in the calldata. + pub address: StepAddress, - /// A boolean of whether the storage of the address is empty or not. - pub is_storage_empty: bool, + /// A boolean of whether the storage of the address is empty or not. + pub is_storage_empty: bool, } /// This represents a repetition step which is a special step type that allows for a sequence of /// steps to be repeated (on different drivers) a certain number of times. #[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)] pub struct RepeatStep { - /// An optional comment on the repetition step. - #[serde(skip_serializing_if = "Option::is_none")] - pub comment: Option, + /// An optional comment on the repetition step. + #[serde(skip_serializing_if = "Option::is_none")] + pub comment: Option, - /// The number of repetitions that the steps should be repeated for. - pub repeat: usize, + /// The number of repetitions that the steps should be repeated for. + pub repeat: usize, - /// The sequence of steps to repeat for the above defined number of repetitions. - pub steps: Vec, + /// The sequence of steps to repeat for the above defined number of repetitions. + pub steps: Vec, } #[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)] pub struct AllocateAccountStep { - /// An optional comment on the account allocation step. 
- #[serde(skip_serializing_if = "Option::is_none")] - pub comment: Option, + /// An optional comment on the account allocation step. + #[serde(skip_serializing_if = "Option::is_none")] + pub comment: Option, - /// An instruction to allocate a new account with the value being the variable name of that - /// account. This must start with `$VARIABLE:` and then be followed by the variable name of the - /// account. - #[serde(rename = "allocate_account")] - pub variable_name: String, + /// An instruction to allocate a new account with the value being the variable name of that + /// account. This must start with `$VARIABLE:` and then be followed by the variable name of the + /// account. + #[serde(rename = "allocate_account")] + pub variable_name: String, } /// A set of expectations and assertions to make about the transaction after it ran. @@ -231,46 +226,46 @@ pub struct AllocateAccountStep { #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, Eq, PartialEq)] #[serde(untagged)] pub enum Expected { - /// An assertion that the transaction succeeded and returned the provided set of data. - Calldata(Calldata), - /// A more complex assertion. - Expected(ExpectedOutput), - /// A set of assertions. - ExpectedMany(Vec), + /// An assertion that the transaction succeeded and returned the provided set of data. + Calldata(Calldata), + /// A more complex assertion. + Expected(ExpectedOutput), + /// A set of assertions. + ExpectedMany(Vec), } /// A set of assertions to run on the transaction. #[derive(Clone, Debug, Default, Serialize, Deserialize, JsonSchema, Eq, PartialEq)] pub struct ExpectedOutput { - /// An optional compiler version that's required in order for this assertion to run. - #[serde(skip_serializing_if = "Option::is_none")] - #[schemars(with = "Option")] - pub compiler_version: Option, + /// An optional compiler version that's required in order for this assertion to run. 
+ #[serde(skip_serializing_if = "Option::is_none")] + #[schemars(with = "Option")] + pub compiler_version: Option, - /// An optional field of the expected returns from the invocation. - #[serde(skip_serializing_if = "Option::is_none")] - pub return_data: Option, + /// An optional field of the expected returns from the invocation. + #[serde(skip_serializing_if = "Option::is_none")] + pub return_data: Option, - /// An optional set of assertions to run on the emitted events from the transaction. - #[serde(skip_serializing_if = "Option::is_none")] - pub events: Option>, + /// An optional set of assertions to run on the emitted events from the transaction. + #[serde(skip_serializing_if = "Option::is_none")] + pub events: Option>, - /// A boolean which defines whether we expect the transaction to succeed or fail. - #[serde(default)] - pub exception: bool, + /// A boolean which defines whether we expect the transaction to succeed or fail. + #[serde(default)] + pub exception: bool, } #[derive(Clone, Debug, Default, Serialize, Deserialize, JsonSchema, Eq, PartialEq)] pub struct Event { - /// An optional field of the address of the emitter of the event. - #[serde(skip_serializing_if = "Option::is_none")] - pub address: Option, + /// An optional field of the address of the emitter of the event. + #[serde(skip_serializing_if = "Option::is_none")] + pub address: Option, - /// The set of topics to expect the event to have. - pub topics: Vec, + /// The set of topics to expect the event to have. + pub topics: Vec, - /// The set of values to expect the event to have. - pub values: Calldata, + /// The set of values to expect the event to have. + pub values: Calldata, } /// A type definition for the calldata supported by the testing framework. 
@@ -327,74 +322,74 @@ pub struct Event { #[derive(Clone, Debug, Serialize, Deserialize, JsonSchema, Eq, PartialEq)] #[serde(untagged)] pub enum Calldata { - Single(#[schemars(with = "String")] Bytes), - Compound(Vec), + Single(#[schemars(with = "String")] Bytes), + Compound(Vec), } define_wrapper_type! { - /// This represents an item in the [`Calldata::Compound`] variant. Each item will be resolved - /// according to the resolution rules of the tool. - #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema)] - #[serde(transparent)] - pub struct CalldataItem(String) impl Display; + /// This represents an item in the [`Calldata::Compound`] variant. Each item will be resolved + /// according to the resolution rules of the tool. + #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize, JsonSchema)] + #[serde(transparent)] + pub struct CalldataItem(String) impl Display; } #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] enum CalldataToken { - Item(T), - Operation(Operation), + Item(T), + Operation(Operation), } #[derive(Clone, Copy, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] enum Operation { - Addition, - Subtraction, - Multiplication, - Division, - BitwiseAnd, - BitwiseOr, - BitwiseXor, - ShiftLeft, - ShiftRight, + Addition, + Subtraction, + Multiplication, + Division, + BitwiseAnd, + BitwiseOr, + BitwiseXor, + ShiftLeft, + ShiftRight, } /// Specify how the contract is called. #[derive(Debug, Default, Serialize, Deserialize, JsonSchema, Clone, Eq, PartialEq)] pub enum Method { - /// Initiate a deploy transaction, calling contracts constructor. - /// - /// Indicated by `#deployer`. - #[serde(rename = "#deployer")] - Deployer, + /// Initiate a deploy transaction, calling contracts constructor. + /// + /// Indicated by `#deployer`. + #[serde(rename = "#deployer")] + Deployer, - /// Does not calculate and insert a function selector. 
- /// - /// Indicated by `#fallback`. - #[default] - #[serde(rename = "#fallback")] - Fallback, + /// Does not calculate and insert a function selector. + /// + /// Indicated by `#fallback`. + #[default] + #[serde(rename = "#fallback")] + Fallback, - /// Call the public function with the given name. - #[serde(untagged)] - FunctionName(String), + /// Call the public function with the given name. + #[serde(untagged)] + FunctionName(String), } define_wrapper_type!( - /// Defines an Ether value. - /// - /// This is an unsigned 256 bit integer that's followed by some denomination which can either be - /// eth, ether, gwei, or wei. - #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, JsonSchema)] - #[schemars(with = "String")] - pub struct EtherValue(U256) impl Display; + /// Defines an Ether value. + /// + /// This is an unsigned 256 bit integer that's followed by some denomination which can either be + /// eth, ether, gwei, or wei. + #[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord, Hash, JsonSchema)] + #[schemars(with = "String")] + pub struct EtherValue(U256) impl Display; ); #[derive(Clone, Debug, Default, Serialize, Deserialize, Eq, PartialEq, JsonSchema)] pub struct VariableAssignments { - /// A vector of the variable names to assign to the return data. - /// - /// Example: `UniswapV3PoolAddress` - pub return_data: Vec, + /// A vector of the variable names to assign to the return data. + /// + /// Example: `UniswapV3PoolAddress` + pub return_data: Vec, } /// An address type that might either be an address literal or a resolvable address. 
@@ -402,641 +397,627 @@ pub struct VariableAssignments { #[schemars(with = "String")] #[serde(untagged)] pub enum StepAddress { - Address(Address), - ResolvableAddress(String), + Address(Address), + ResolvableAddress(String), } impl Default for StepAddress { - fn default() -> Self { - Self::Address(Default::default()) - } + fn default() -> Self { + Self::Address(Default::default()) + } } impl Display for StepAddress { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - StepAddress::Address(address) => Display::fmt(address, f), - StepAddress::ResolvableAddress(address) => Display::fmt(address, f), - } - } + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + StepAddress::Address(address) => Display::fmt(address, f), + StepAddress::ResolvableAddress(address) => Display::fmt(address, f), + } + } } impl StepAddress { - pub fn as_address(&self) -> Option<&Address> { - match self { - StepAddress::Address(address) => Some(address), - StepAddress::ResolvableAddress(_) => None, - } - } + pub fn as_address(&self) -> Option<&Address> { + match self { + StepAddress::Address(address) => Some(address), + StepAddress::ResolvableAddress(_) => None, + } + } - pub fn as_resolvable_address(&self) -> Option<&str> { - match self { - StepAddress::ResolvableAddress(address) => Some(address), - StepAddress::Address(..) => None, - } - } + pub fn as_resolvable_address(&self) -> Option<&str> { + match self { + StepAddress::ResolvableAddress(address) => Some(address), + StepAddress::Address(..) => None, + } + } - pub async fn resolve_address( - &self, - resolver: &(impl ResolverApi + ?Sized), - context: ResolutionContext<'_>, - ) -> anyhow::Result
{ - match self { - StepAddress::Address(address) => Ok(*address), - StepAddress::ResolvableAddress(address) => Ok(Address::from_slice( - Calldata::new_compound([address]) - .calldata(resolver, context) - .await? - .get(12..32) - .expect("Can't fail"), - )), - } - } + pub async fn resolve_address( + &self, + resolver: &(impl ResolverApi + ?Sized), + context: ResolutionContext<'_>, + ) -> anyhow::Result
{ + match self { + StepAddress::Address(address) => Ok(*address), + StepAddress::ResolvableAddress(address) => Ok(Address::from_slice( + Calldata::new_compound([address]) + .calldata(resolver, context) + .await? + .get(12..32) + .expect("Can't fail"), + )), + } + } } impl FunctionCallStep { - pub const fn default_caller_address() -> Address { - Address(FixedBytes(alloy::hex!( - "0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1" - ))) - } + pub const fn default_caller_address() -> Address { + Address(FixedBytes(alloy::hex!("0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1"))) + } - pub const fn default_caller() -> StepAddress { - StepAddress::Address(Self::default_caller_address()) - } + pub const fn default_caller() -> StepAddress { + StepAddress::Address(Self::default_caller_address()) + } - fn default_instance() -> ContractInstance { - ContractInstance::new("Test") - } + fn default_instance() -> ContractInstance { + ContractInstance::new("Test") + } - pub async fn encoded_input( - &self, - resolver: &(impl ResolverApi + ?Sized), - context: ResolutionContext<'_>, - ) -> anyhow::Result { - match self.method { - Method::Deployer | Method::Fallback => { - let calldata = self - .calldata - .calldata(resolver, context) - .await - .context("Failed to produce calldata for deployer/fallback method")?; + pub async fn encoded_input( + &self, + resolver: &(impl ResolverApi + ?Sized), + context: ResolutionContext<'_>, + ) -> anyhow::Result { + match self.method { + Method::Deployer | Method::Fallback => { + let calldata = self + .calldata + .calldata(resolver, context) + .await + .context("Failed to produce calldata for deployer/fallback method")?; - Ok(calldata.into()) - } - Method::FunctionName(ref function_name) => { - let Some(abi) = context.deployed_contract_abi(&self.instance) else { - anyhow::bail!("ABI for instance '{}' not found", self.instance.as_ref()); - }; + Ok(calldata.into()) + }, + Method::FunctionName(ref function_name) => { + let Some(abi) = 
context.deployed_contract_abi(&self.instance) else { + anyhow::bail!("ABI for instance '{}' not found", self.instance.as_ref()); + }; - // We follow the same logic that's implemented in the matter-labs-tester where they resolve - // the function name into a function selector and they assume that he function doesn't have - // any existing overloads. - // Overloads are handled by providing the full function signature in the "function - // name". - // https://github.com/matter-labs/era-compiler-tester/blob/1dfa7d07cba0734ca97e24704f12dd57f6990c2c/compiler_tester/src/test/case/input/mod.rs#L158-L190 - let selector = - if function_name.contains('(') && function_name.contains(')') { - Function::parse(function_name) - .context( - "Failed to parse the provided function name into a function signature", - )? - .selector() - } else { - abi.functions() - .find(|function| function.signature().starts_with(function_name)) - .ok_or_else(|| { - anyhow::anyhow!( - "Function with name {:?} not found in ABI for the instance {:?}", - function_name, - &self.instance - ) - }) - .with_context(|| format!( - "Failed to resolve function selector for {:?} on instance {:?}", - function_name, &self.instance - ))? - .selector() - }; + // We follow the same logic that's implemented in the matter-labs-tester where they + // resolve the function name into a function selector and they assume that the + // function doesn't have any existing overloads. + // Overloads are handled by providing the full function signature in the "function + // name". + // https://github.com/matter-labs/era-compiler-tester/blob/1dfa7d07cba0734ca97e24704f12dd57f6990c2c/compiler_tester/src/test/case/input/mod.rs#L158-L190 + let selector = if function_name.contains('(') && function_name.contains(')') { + Function::parse(function_name) + .context( + "Failed to parse the provided function name into a function signature", + )?
+ .selector() + } else { + abi.functions() + .find(|function| function.signature().starts_with(function_name)) + .ok_or_else(|| { + anyhow::anyhow!( + "Function with name {:?} not found in ABI for the instance {:?}", + function_name, + &self.instance + ) + }) + .with_context(|| { + format!( + "Failed to resolve function selector for {:?} on instance {:?}", + function_name, &self.instance + ) + })? + .selector() + }; - // Allocating a vector that we will be using for the calldata. The vector size will be: - // 4 bytes for the function selector. - // function.inputs.len() * 32 bytes for the arguments (each argument is a U256). - // - // We're using indices in the following code in order to avoid the need for us to allocate - // a new buffer for each one of the resolved arguments. - let mut calldata = Vec::::with_capacity(4 + self.calldata.size_requirement()); - calldata.extend(selector.0); - self.calldata - .calldata_into_slice(&mut calldata, resolver, context) - .await - .context("Failed to append encoded argument to calldata buffer")?; + // Allocating a vector that we will be using for the calldata. The vector size will + // be: 4 bytes for the function selector. + // function.inputs.len() * 32 bytes for the arguments (each argument is a U256). + // + // We're using indices in the following code in order to avoid the need for us to + // allocate a new buffer for each one of the resolved arguments. + let mut calldata = Vec::::with_capacity(4 + self.calldata.size_requirement()); + calldata.extend(selector.0); + self.calldata + .calldata_into_slice(&mut calldata, resolver, context) + .await + .context("Failed to append encoded argument to calldata buffer")?; - Ok(calldata.into()) - } - } - } + Ok(calldata.into()) + }, + } + } - /// Parse this input into a legacy transaction. 
- pub async fn as_transaction( - &self, - resolver: &(impl ResolverApi + ?Sized), - context: ResolutionContext<'_>, - ) -> anyhow::Result { - let input_data = self - .encoded_input(resolver, context) - .await - .context("Failed to encode input bytes for transaction request")?; - let caller = self.caller.resolve_address(resolver, context).await?; - let transaction_request = TransactionRequest::default().from(caller).value( - self.value - .map(|value| value.into_inner()) - .unwrap_or_default(), - ); - match self.method { - Method::Deployer => Ok(transaction_request.with_deploy_code(input_data)), - _ => Ok(transaction_request - .to(context - .deployed_contract_address(&self.instance) - .context("Failed to get the contract address") - .copied()?) - .input(input_data.into())), - } - } + /// Parse this input into a legacy transaction. + pub async fn as_transaction( + &self, + resolver: &(impl ResolverApi + ?Sized), + context: ResolutionContext<'_>, + ) -> anyhow::Result { + let input_data = self + .encoded_input(resolver, context) + .await + .context("Failed to encode input bytes for transaction request")?; + let caller = self.caller.resolve_address(resolver, context).await?; + let transaction_request = TransactionRequest::default() + .from(caller) + .value(self.value.map(|value| value.into_inner()).unwrap_or_default()); + match self.method { + Method::Deployer => Ok(transaction_request.with_deploy_code(input_data)), + _ => Ok(transaction_request + .to(context + .deployed_contract_address(&self.instance) + .context("Failed to get the contract address") + .copied()?) 
+ .input(input_data.into())), + } + } - pub fn find_all_contract_instances(&self) -> Vec { - let mut vec = Vec::new(); - vec.push(self.instance.clone()); + pub fn find_all_contract_instances(&self) -> Vec { + let mut vec = Vec::new(); + vec.push(self.instance.clone()); - self.calldata.find_all_contract_instances(&mut vec); + self.calldata.find_all_contract_instances(&mut vec); - vec - } + vec + } } impl ExpectedOutput { - pub fn new() -> Self { - Default::default() - } + pub fn new() -> Self { + Default::default() + } - pub fn with_success(mut self) -> Self { - self.exception = false; - self - } + pub fn with_success(mut self) -> Self { + self.exception = false; + self + } - pub fn with_failure(mut self) -> Self { - self.exception = true; - self - } + pub fn with_failure(mut self) -> Self { + self.exception = true; + self + } - pub fn with_calldata(mut self, calldata: Calldata) -> Self { - self.return_data = Some(calldata); - self - } + pub fn with_calldata(mut self, calldata: Calldata) -> Self { + self.return_data = Some(calldata); + self + } } impl Default for Calldata { - fn default() -> Self { - Self::Compound(Default::default()) - } + fn default() -> Self { + Self::Compound(Default::default()) + } } impl Calldata { - pub fn new_single(item: impl Into) -> Self { - Self::Single(item.into()) - } + pub fn new_single(item: impl Into) -> Self { + Self::Single(item.into()) + } - pub fn new_compound(items: impl IntoIterator>) -> Self { - Self::Compound( - items - .into_iter() - .map(|item| item.as_ref().to_owned()) - .map(CalldataItem::new) - .collect(), - ) - } + pub fn new_compound(items: impl IntoIterator>) -> Self { + Self::Compound( + items + .into_iter() + .map(|item| item.as_ref().to_owned()) + .map(CalldataItem::new) + .collect(), + ) + } - pub fn find_all_contract_instances(&self, vec: &mut Vec) { - if let Calldata::Compound(compound) = self { - for item in compound { - if let Some(instance) = - item.strip_suffix(CalldataToken::<()>::ADDRESS_VARIABLE_SUFFIX) 
- { - vec.push(ContractInstance::new(instance)) - } - } - } - } + pub fn find_all_contract_instances(&self, vec: &mut Vec) { + if let Calldata::Compound(compound) = self { + for item in compound { + if let Some(instance) = + item.strip_suffix(CalldataToken::<()>::ADDRESS_VARIABLE_SUFFIX) + { + vec.push(ContractInstance::new(instance)) + } + } + } + } - pub async fn calldata( - &self, - resolver: &(impl ResolverApi + ?Sized), - context: ResolutionContext<'_>, - ) -> anyhow::Result> { - let mut buffer = Vec::::with_capacity(self.size_requirement()); - self.calldata_into_slice(&mut buffer, resolver, context) - .await?; - Ok(buffer) - } + pub async fn calldata( + &self, + resolver: &(impl ResolverApi + ?Sized), + context: ResolutionContext<'_>, + ) -> anyhow::Result> { + let mut buffer = Vec::::with_capacity(self.size_requirement()); + self.calldata_into_slice(&mut buffer, resolver, context).await?; + Ok(buffer) + } - pub async fn calldata_into_slice( - &self, - buffer: &mut Vec, - resolver: &(impl ResolverApi + ?Sized), - context: ResolutionContext<'_>, - ) -> anyhow::Result<()> { - match self { - Calldata::Single(bytes) => { - buffer.extend_from_slice(bytes); - } - Calldata::Compound(items) => { - let resolved = stream::iter(items.iter().enumerate()) - .map(|(arg_idx, arg)| async move { - arg.resolve(resolver, context) - .instrument(info_span!("Resolving argument", %arg, arg_idx)) - .map_ok(|value| value.to_be_bytes::<32>()) - .await - }) - .buffered(0xFF) - .try_collect::>() - .await - .context("Failed to resolve one or more calldata arguments")?; + pub async fn calldata_into_slice( + &self, + buffer: &mut Vec, + resolver: &(impl ResolverApi + ?Sized), + context: ResolutionContext<'_>, + ) -> anyhow::Result<()> { + match self { + Calldata::Single(bytes) => { + buffer.extend_from_slice(bytes); + }, + Calldata::Compound(items) => { + let resolved = stream::iter(items.iter().enumerate()) + .map(|(arg_idx, arg)| async move { + arg.resolve(resolver, context) + 
.instrument(info_span!("Resolving argument", %arg, arg_idx)) + .map_ok(|value| value.to_be_bytes::<32>()) + .await + }) + .buffered(0xFF) + .try_collect::>() + .await + .context("Failed to resolve one or more calldata arguments")?; - buffer.extend(resolved.into_iter().flatten()); - } - }; - Ok(()) - } + buffer.extend(resolved.into_iter().flatten()); + }, + }; + Ok(()) + } - pub fn size_requirement(&self) -> usize { - match self { - Calldata::Single(single) => single.len(), - Calldata::Compound(items) => items.len() * 32, - } - } + pub fn size_requirement(&self) -> usize { + match self { + Calldata::Single(single) => single.len(), + Calldata::Compound(items) => items.len() * 32, + } + } - /// Checks if this [`Calldata`] is equivalent to the passed calldata bytes. - pub async fn is_equivalent( - &self, - other: &[u8], - resolver: &(impl ResolverApi + ?Sized), - context: ResolutionContext<'_>, - ) -> anyhow::Result { - match self { - Calldata::Single(calldata) => Ok(calldata == other), - Calldata::Compound(items) => { - stream::iter(items.iter().zip(other.chunks(32))) - .map(|(this, other)| async move { - // The matterlabs format supports wildcards and therefore we - // also need to support them. - if this.as_ref() == "*" { - return Ok::<_, anyhow::Error>(true); - } + /// Checks if this [`Calldata`] is equivalent to the passed calldata bytes. + pub async fn is_equivalent( + &self, + other: &[u8], + resolver: &(impl ResolverApi + ?Sized), + context: ResolutionContext<'_>, + ) -> anyhow::Result { + match self { + Calldata::Single(calldata) => Ok(calldata == other), + Calldata::Compound(items) => { + stream::iter(items.iter().zip(other.chunks(32))) + .map(|(this, other)| async move { + // The matterlabs format supports wildcards and therefore we + // also need to support them. 
+ if this.as_ref() == "*" { + return Ok::<_, anyhow::Error>(true); + } - let other = if other.len() < 32 { - let mut vec = other.to_vec(); - vec.resize(32, 0); - std::borrow::Cow::Owned(vec) - } else { - std::borrow::Cow::Borrowed(other) - }; + let other = if other.len() < 32 { + let mut vec = other.to_vec(); + vec.resize(32, 0); + std::borrow::Cow::Owned(vec) + } else { + std::borrow::Cow::Borrowed(other) + }; - let this = this - .resolve(resolver, context) - .await - .context("Failed to resolve calldata item during equivalence check")?; - let other = U256::from_be_slice(&other); - Ok(this == other) - }) - .buffered(0xFF) - .all(|v| async move { v.is_ok_and(|v| v) }) - .map(Ok) - .await - } - } - } + let this = this + .resolve(resolver, context) + .await + .context("Failed to resolve calldata item during equivalence check")?; + let other = U256::from_be_slice(&other); + Ok(this == other) + }) + .buffered(0xFF) + .all(|v| async move { v.is_ok_and(|v| v) }) + .map(Ok) + .await + }, + } + } } impl CalldataItem { - #[instrument(level = "info", skip_all, err)] - async fn resolve( - &self, - resolver: &(impl ResolverApi + ?Sized), - context: ResolutionContext<'_>, - ) -> anyhow::Result { - let mut stack = Vec::>::new(); + #[instrument(level = "info", skip_all, err)] + async fn resolve( + &self, + resolver: &(impl ResolverApi + ?Sized), + context: ResolutionContext<'_>, + ) -> anyhow::Result { + let mut stack = Vec::>::new(); - for token in self - .calldata_tokens() - .map(|token| token.resolve(resolver, context)) - { - let token = token.await?; - let new_token = match token { - CalldataToken::Item(_) => token, - CalldataToken::Operation(operation) => { - let right_operand = stack - .pop() - .and_then(CalldataToken::into_item) - .context("Invalid calldata arithmetic operation")?; - let left_operand = stack - .pop() - .and_then(CalldataToken::into_item) - .context("Invalid calldata arithmetic operation")?; + for token in self.calldata_tokens().map(|token| 
token.resolve(resolver, context)) { + let token = token.await?; + let new_token = match token { + CalldataToken::Item(_) => token, + CalldataToken::Operation(operation) => { + let right_operand = stack + .pop() + .and_then(CalldataToken::into_item) + .context("Invalid calldata arithmetic operation")?; + let left_operand = stack + .pop() + .and_then(CalldataToken::into_item) + .context("Invalid calldata arithmetic operation")?; - let result = match operation { - Operation::Addition => left_operand.checked_add(right_operand), - Operation::Subtraction => left_operand.checked_sub(right_operand), - Operation::Multiplication => left_operand.checked_mul(right_operand), - Operation::Division => left_operand.checked_div(right_operand), - Operation::BitwiseAnd => Some(left_operand & right_operand), - Operation::BitwiseOr => Some(left_operand | right_operand), - Operation::BitwiseXor => Some(left_operand ^ right_operand), - Operation::ShiftLeft => { - Some(left_operand << usize::try_from(right_operand)?) - } - Operation::ShiftRight => { - Some(left_operand >> usize::try_from(right_operand)?) 
- } - } - .context("Invalid calldata arithmetic operation - Invalid operation")?; + let result = match operation { + Operation::Addition => left_operand.checked_add(right_operand), + Operation::Subtraction => left_operand.checked_sub(right_operand), + Operation::Multiplication => left_operand.checked_mul(right_operand), + Operation::Division => left_operand.checked_div(right_operand), + Operation::BitwiseAnd => Some(left_operand & right_operand), + Operation::BitwiseOr => Some(left_operand | right_operand), + Operation::BitwiseXor => Some(left_operand ^ right_operand), + Operation::ShiftLeft => + Some(left_operand << usize::try_from(right_operand)?), + Operation::ShiftRight => + Some(left_operand >> usize::try_from(right_operand)?), + } + .context("Invalid calldata arithmetic operation - Invalid operation")?; - CalldataToken::Item(result) - } - }; - stack.push(new_token) - } + CalldataToken::Item(result) + }, + }; + stack.push(new_token) + } - match stack.as_slice() { - // Empty stack means that we got an empty compound calldata which we resolve to zero. - [] => Ok(U256::ZERO), - [CalldataToken::Item(item)] => Ok(*item), - _ => Err(anyhow::anyhow!( - "Invalid calldata arithmetic operation - Invalid stack" - )), - } - } + match stack.as_slice() { + // Empty stack means that we got an empty compound calldata which we resolve to zero. 
+ [] => Ok(U256::ZERO), + [CalldataToken::Item(item)] => Ok(*item), + _ => Err(anyhow::anyhow!("Invalid calldata arithmetic operation - Invalid stack")), + } + } - fn calldata_tokens(&self) -> impl Iterator> { - self.0.split(' ').map(|item| match item { - "+" => CalldataToken::Operation(Operation::Addition), - "-" => CalldataToken::Operation(Operation::Subtraction), - "/" => CalldataToken::Operation(Operation::Division), - "*" => CalldataToken::Operation(Operation::Multiplication), - "&" => CalldataToken::Operation(Operation::BitwiseAnd), - "|" => CalldataToken::Operation(Operation::BitwiseOr), - "^" => CalldataToken::Operation(Operation::BitwiseXor), - "<<" => CalldataToken::Operation(Operation::ShiftLeft), - ">>" => CalldataToken::Operation(Operation::ShiftRight), - _ => CalldataToken::Item(item), - }) - } + fn calldata_tokens(&self) -> impl Iterator> { + self.0.split(' ').map(|item| match item { + "+" => CalldataToken::Operation(Operation::Addition), + "-" => CalldataToken::Operation(Operation::Subtraction), + "/" => CalldataToken::Operation(Operation::Division), + "*" => CalldataToken::Operation(Operation::Multiplication), + "&" => CalldataToken::Operation(Operation::BitwiseAnd), + "|" => CalldataToken::Operation(Operation::BitwiseOr), + "^" => CalldataToken::Operation(Operation::BitwiseXor), + "<<" => CalldataToken::Operation(Operation::ShiftLeft), + ">>" => CalldataToken::Operation(Operation::ShiftRight), + _ => CalldataToken::Item(item), + }) + } } impl CalldataToken { - const ADDRESS_VARIABLE_SUFFIX: &str = ".address"; - const NEGATIVE_VALUE_PREFIX: char = '-'; - const HEX_LITERAL_PREFIX: &str = "0x"; - const CHAIN_VARIABLE: &str = "$CHAIN_ID"; - const GAS_LIMIT_VARIABLE: &str = "$GAS_LIMIT"; - const COINBASE_VARIABLE: &str = "$COINBASE"; - const DIFFICULTY_VARIABLE: &str = "$DIFFICULTY"; - const BLOCK_BASE_FEE_VARIABLE: &str = "$BASE_FEE"; - const BLOCK_HASH_VARIABLE_PREFIX: &str = "$BLOCK_HASH"; - const BLOCK_NUMBER_VARIABLE: &str = "$BLOCK_NUMBER"; - 
const BLOCK_TIMESTAMP_VARIABLE: &str = "$BLOCK_TIMESTAMP"; - const TRANSACTION_GAS_PRICE: &str = "$TRANSACTION_GAS_PRICE"; - const VARIABLE_PREFIX: &str = "$VARIABLE:"; + const ADDRESS_VARIABLE_SUFFIX: &str = ".address"; + const NEGATIVE_VALUE_PREFIX: char = '-'; + const HEX_LITERAL_PREFIX: &str = "0x"; + const CHAIN_VARIABLE: &str = "$CHAIN_ID"; + const GAS_LIMIT_VARIABLE: &str = "$GAS_LIMIT"; + const COINBASE_VARIABLE: &str = "$COINBASE"; + const DIFFICULTY_VARIABLE: &str = "$DIFFICULTY"; + const BLOCK_BASE_FEE_VARIABLE: &str = "$BASE_FEE"; + const BLOCK_HASH_VARIABLE_PREFIX: &str = "$BLOCK_HASH"; + const BLOCK_NUMBER_VARIABLE: &str = "$BLOCK_NUMBER"; + const BLOCK_TIMESTAMP_VARIABLE: &str = "$BLOCK_TIMESTAMP"; + const TRANSACTION_GAS_PRICE: &str = "$TRANSACTION_GAS_PRICE"; + const VARIABLE_PREFIX: &str = "$VARIABLE:"; - fn into_item(self) -> Option { - match self { - CalldataToken::Item(item) => Some(item), - CalldataToken::Operation(_) => None, - } - } + fn into_item(self) -> Option { + match self { + CalldataToken::Item(item) => Some(item), + CalldataToken::Operation(_) => None, + } + } } impl> CalldataToken { - /// This function takes in the string calldata argument provided in the JSON input and resolves - /// it into a [`U256`] which is later used to construct the calldata. - /// - /// # Note - /// - /// This piece of code is taken from the matter-labs-tester repository which is licensed under - /// MIT or Apache. 
The original source code can be found here: - /// https://github.com/matter-labs/era-compiler-tester/blob/0ed598a27f6eceee7008deab3ff2311075a2ec69/compiler_tester/src/test/case/input/value.rs#L43-L146 - async fn resolve( - self, - resolver: &(impl ResolverApi + ?Sized), - context: ResolutionContext<'_>, - ) -> anyhow::Result> { - match self { - Self::Item(item) => { - let item = item.as_ref(); - let value = if let Some(instance) = item.strip_suffix(Self::ADDRESS_VARIABLE_SUFFIX) - { - context - .deployed_contract_address(&ContractInstance::new(instance)) - .ok_or_else(|| anyhow::anyhow!("Instance `{}` not found", instance)) - .map(AsRef::as_ref) - .map(U256::from_be_slice) - } else if let Some(value) = item.strip_prefix(Self::NEGATIVE_VALUE_PREFIX) { - let value = U256::from_str_radix(value, 10).map_err(|error| { - anyhow::anyhow!("Invalid decimal literal after `-`: {}", error) - })?; - if value > U256::ONE << 255u8 { - anyhow::bail!("Decimal literal after `-` is too big"); - } - let value = value - .checked_sub(U256::ONE) - .ok_or_else(|| anyhow::anyhow!("`-0` is invalid literal"))?; - Ok(U256::MAX.checked_sub(value).expect("Always valid")) - } else if let Some(value) = item.strip_prefix(Self::HEX_LITERAL_PREFIX) { - U256::from_str_radix(value, 16) - .map_err(|error| anyhow::anyhow!("Invalid hexadecimal literal: {}", error)) - } else if item == Self::CHAIN_VARIABLE { - resolver.chain_id().await.map(U256::from) - } else if item == Self::TRANSACTION_GAS_PRICE { - context - .transaction_hash() - .context("No transaction hash provided to get the transaction gas price") - .map(|tx_hash| resolver.transaction_gas_price(*tx_hash))? 
- .await - .map(U256::from) - } else if item == Self::GAS_LIMIT_VARIABLE { - resolver - .block_gas_limit(context.resolve_block_number(BlockNumberOrTag::Latest)) - .await - .map(U256::from) - } else if item == Self::COINBASE_VARIABLE { - resolver - .block_coinbase(context.resolve_block_number(BlockNumberOrTag::Latest)) - .await - .map(|address| U256::from_be_slice(address.as_ref())) - } else if item == Self::DIFFICULTY_VARIABLE { - resolver - .block_difficulty(context.resolve_block_number(BlockNumberOrTag::Latest)) - .await - } else if item == Self::BLOCK_BASE_FEE_VARIABLE { - resolver - .block_base_fee(context.resolve_block_number(BlockNumberOrTag::Latest)) - .await - .map(U256::from) - } else if item.starts_with(Self::BLOCK_HASH_VARIABLE_PREFIX) { - let offset: u64 = item - .split(':') - .next_back() - .and_then(|value| value.parse().ok()) - .unwrap_or_default(); + /// This function takes in the string calldata argument provided in the JSON input and resolves + /// it into a [`U256`] which is later used to construct the calldata. + /// + /// # Note + /// + /// This piece of code is taken from the matter-labs-tester repository which is licensed under + /// MIT or Apache. 
The original source code can be found here: + /// https://github.com/matter-labs/era-compiler-tester/blob/0ed598a27f6eceee7008deab3ff2311075a2ec69/compiler_tester/src/test/case/input/value.rs#L43-L146 + async fn resolve( + self, + resolver: &(impl ResolverApi + ?Sized), + context: ResolutionContext<'_>, + ) -> anyhow::Result> { + match self { + Self::Item(item) => { + let item = item.as_ref(); + let value = if let Some(instance) = item.strip_suffix(Self::ADDRESS_VARIABLE_SUFFIX) + { + context + .deployed_contract_address(&ContractInstance::new(instance)) + .ok_or_else(|| anyhow::anyhow!("Instance `{}` not found", instance)) + .map(AsRef::as_ref) + .map(U256::from_be_slice) + } else if let Some(value) = item.strip_prefix(Self::NEGATIVE_VALUE_PREFIX) { + let value = U256::from_str_radix(value, 10).map_err(|error| { + anyhow::anyhow!("Invalid decimal literal after `-`: {}", error) + })?; + if value > U256::ONE << 255u8 { + anyhow::bail!("Decimal literal after `-` is too big"); + } + let value = value + .checked_sub(U256::ONE) + .ok_or_else(|| anyhow::anyhow!("`-0` is invalid literal"))?; + Ok(U256::MAX.checked_sub(value).expect("Always valid")) + } else if let Some(value) = item.strip_prefix(Self::HEX_LITERAL_PREFIX) { + U256::from_str_radix(value, 16) + .map_err(|error| anyhow::anyhow!("Invalid hexadecimal literal: {}", error)) + } else if item == Self::CHAIN_VARIABLE { + resolver.chain_id().await.map(U256::from) + } else if item == Self::TRANSACTION_GAS_PRICE { + context + .transaction_hash() + .context("No transaction hash provided to get the transaction gas price") + .map(|tx_hash| resolver.transaction_gas_price(*tx_hash))? 
+ .await + .map(U256::from) + } else if item == Self::GAS_LIMIT_VARIABLE { + resolver + .block_gas_limit(context.resolve_block_number(BlockNumberOrTag::Latest)) + .await + .map(U256::from) + } else if item == Self::COINBASE_VARIABLE { + resolver + .block_coinbase(context.resolve_block_number(BlockNumberOrTag::Latest)) + .await + .map(|address| U256::from_be_slice(address.as_ref())) + } else if item == Self::DIFFICULTY_VARIABLE { + resolver + .block_difficulty(context.resolve_block_number(BlockNumberOrTag::Latest)) + .await + } else if item == Self::BLOCK_BASE_FEE_VARIABLE { + resolver + .block_base_fee(context.resolve_block_number(BlockNumberOrTag::Latest)) + .await + .map(U256::from) + } else if item.starts_with(Self::BLOCK_HASH_VARIABLE_PREFIX) { + let offset: u64 = item + .split(':') + .next_back() + .and_then(|value| value.parse().ok()) + .unwrap_or_default(); - let current_block_number = match context.tip_block_number() { - Some(block_number) => *block_number, - None => resolver.last_block_number().await.context( - "Failed to query last block number while resolving $BLOCK_HASH", - )?, - }; - let desired_block_number = current_block_number.saturating_sub(offset); + let current_block_number = match context.tip_block_number() { + Some(block_number) => *block_number, + None => resolver.last_block_number().await.context( + "Failed to query last block number while resolving $BLOCK_HASH", + )?, + }; + let desired_block_number = current_block_number.saturating_sub(offset); - let block_hash = resolver - .block_hash(desired_block_number.into()) - .await - .context("Failed to resolve block hash for desired block number")?; + let block_hash = resolver + .block_hash(desired_block_number.into()) + .await + .context("Failed to resolve block hash for desired block number")?; - Ok(U256::from_be_bytes(block_hash.0)) - } else if item == Self::BLOCK_NUMBER_VARIABLE { - let current_block_number = match context.tip_block_number() { - Some(block_number) => *block_number, - None => 
resolver.last_block_number().await.context( - "Failed to query last block number while resolving $BLOCK_NUMBER", - )?, - }; - Ok(U256::from(current_block_number)) - } else if item == Self::BLOCK_TIMESTAMP_VARIABLE { - resolver - .block_timestamp(context.resolve_block_number(BlockNumberOrTag::Latest)) - .await - .map(U256::from) - } else if let Some(variable_name) = item.strip_prefix(Self::VARIABLE_PREFIX) { - context - .variable(variable_name) - .context("Variable lookup failed") - .copied() - } else { - U256::from_str_radix(item, 10) - .map_err(|error| anyhow::anyhow!("Invalid decimal literal: {}", error)) - }; - value.map(CalldataToken::Item) - } - Self::Operation(operation) => Ok(CalldataToken::Operation(operation)), - } - } + Ok(U256::from_be_bytes(block_hash.0)) + } else if item == Self::BLOCK_NUMBER_VARIABLE { + let current_block_number = match context.tip_block_number() { + Some(block_number) => *block_number, + None => resolver.last_block_number().await.context( + "Failed to query last block number while resolving $BLOCK_NUMBER", + )?, + }; + Ok(U256::from(current_block_number)) + } else if item == Self::BLOCK_TIMESTAMP_VARIABLE { + resolver + .block_timestamp(context.resolve_block_number(BlockNumberOrTag::Latest)) + .await + .map(U256::from) + } else if let Some(variable_name) = item.strip_prefix(Self::VARIABLE_PREFIX) { + context.variable(variable_name).context("Variable lookup failed").copied() + } else { + U256::from_str_radix(item, 10) + .map_err(|error| anyhow::anyhow!("Invalid decimal literal: {}", error)) + }; + value.map(CalldataToken::Item) + }, + Self::Operation(operation) => Ok(CalldataToken::Operation(operation)), + } + } } impl Serialize for EtherValue { - fn serialize(&self, serializer: S) -> Result - where - S: serde::Serializer, - { - format!("{} wei", self.0).serialize(serializer) - } + fn serialize(&self, serializer: S) -> Result + where + S: serde::Serializer, + { + format!("{} wei", self.0).serialize(serializer) + } } impl<'de> 
Deserialize<'de> for EtherValue { - fn deserialize(deserializer: D) -> Result - where - D: serde::Deserializer<'de>, - { - let string = String::deserialize(deserializer)?; - let mut splitted = string.split(' '); - let (Some(value), Some(unit)) = (splitted.next(), splitted.next()) else { - return Err(serde::de::Error::custom("Failed to parse the value")); - }; - let parsed = parse_units(value, unit.replace("eth", "ether")) - .map_err(|_| serde::de::Error::custom("Failed to parse units"))? - .into(); - Ok(Self(parsed)) - } + fn deserialize(deserializer: D) -> Result + where + D: serde::Deserializer<'de>, + { + let string = String::deserialize(deserializer)?; + let mut splitted = string.split(' '); + let (Some(value), Some(unit)) = (splitted.next(), splitted.next()) else { + return Err(serde::de::Error::custom("Failed to parse the value")); + }; + let parsed = parse_units(value, unit.replace("eth", "ether")) + .map_err(|_| serde::de::Error::custom("Failed to parse units"))? + .into(); + Ok(Self(parsed)) + } } #[cfg(test)] mod tests { - use alloy::{ - eips::BlockNumberOrTag, - json_abi::JsonAbi, - primitives::{BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, address}, - sol_types::SolValue, - }; - use std::{collections::HashMap, pin::Pin}; + use alloy::{ + eips::BlockNumberOrTag, + json_abi::JsonAbi, + primitives::{BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, address}, + sol_types::SolValue, + }; + use std::{collections::HashMap, pin::Pin}; - use super::*; - use crate::metadata::ContractIdent; + use super::*; + use crate::metadata::ContractIdent; - struct MockResolver; + struct MockResolver; - impl ResolverApi for MockResolver { - fn chain_id(&self) -> Pin> + '_>> { - Box::pin(async move { Ok(0x123) }) - } + impl ResolverApi for MockResolver { + fn chain_id(&self) -> Pin> + '_>> { + Box::pin(async move { Ok(0x123) }) + } - fn block_gas_limit( - &self, - _: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { Ok(0x1234) }) - } + fn 
block_gas_limit( + &self, + _: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { Ok(0x1234) }) + } - fn block_coinbase( - &self, - _: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { Ok(Address::ZERO) }) - } + fn block_coinbase( + &self, + _: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { Ok(Address::ZERO) }) + } - fn block_difficulty( - &self, - _: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { Ok(U256::from(0x12345u128)) }) - } + fn block_difficulty( + &self, + _: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { Ok(U256::from(0x12345u128)) }) + } - fn block_base_fee( - &self, - _: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { Ok(0x100) }) - } + fn block_base_fee( + &self, + _: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { Ok(0x100) }) + } - fn block_hash( - &self, - _: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { Ok([0xEE; 32].into()) }) - } + fn block_hash( + &self, + _: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { Ok([0xEE; 32].into()) }) + } - fn block_timestamp( - &self, - _: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { Ok(0x123456) }) - } + fn block_timestamp( + &self, + _: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { Ok(0x123456) }) + } - fn last_block_number( - &self, - ) -> Pin> + '_>> { - Box::pin(async move { Ok(0x1234567) }) - } + fn last_block_number( + &self, + ) -> Pin> + '_>> { + Box::pin(async move { Ok(0x1234567) }) + } - fn transaction_gas_price( - &self, - _: TxHash, - ) -> Pin> + '_>> { - Box::pin(async move { Ok(0x200) }) - } - } + fn transaction_gas_price( + &self, + _: TxHash, + ) -> Pin> + '_>> { + Box::pin(async move { Ok(0x200) }) + } + } - #[tokio::test] - async fn test_encoded_input_uint256() { - let raw_metadata = r#" + #[tokio::test] + async fn test_encoded_input_uint256() { + let raw_metadata = r#" [ { "inputs": [{"name": "value", "type": "uint256"}], @@ 
-1048,41 +1029,35 @@ mod tests { ] "#; - let parsed_abi: JsonAbi = serde_json::from_str(raw_metadata).unwrap(); - let selector = parsed_abi - .function("store") - .unwrap() - .first() - .unwrap() - .selector() - .0; + let parsed_abi: JsonAbi = serde_json::from_str(raw_metadata).unwrap(); + let selector = parsed_abi.function("store").unwrap().first().unwrap().selector().0; - let input = FunctionCallStep { - instance: ContractInstance::new("Contract"), - method: Method::FunctionName("store".to_owned()), - calldata: Calldata::new_compound(["42"]), - ..Default::default() - }; + let input = FunctionCallStep { + instance: ContractInstance::new("Contract"), + method: Method::FunctionName("store".to_owned()), + calldata: Calldata::new_compound(["42"]), + ..Default::default() + }; - let mut contracts = HashMap::new(); - contracts.insert( - ContractInstance::new("Contract"), - (ContractIdent::new("Contract"), Address::ZERO, parsed_abi), - ); + let mut contracts = HashMap::new(); + contracts.insert( + ContractInstance::new("Contract"), + (ContractIdent::new("Contract"), Address::ZERO, parsed_abi), + ); - let resolver = MockResolver; - let context = ResolutionContext::default().with_deployed_contracts(&contracts); - let encoded = input.encoded_input(&resolver, context).await.unwrap(); - assert!(encoded.0.starts_with(&selector)); + let resolver = MockResolver; + let context = ResolutionContext::default().with_deployed_contracts(&contracts); + let encoded = input.encoded_input(&resolver, context).await.unwrap(); + assert!(encoded.0.starts_with(&selector)); - type T = (u64,); - let decoded: T = T::abi_decode(&encoded.0[4..]).unwrap(); - assert_eq!(decoded.0, 42); - } + type T = (u64,); + let decoded: T = T::abi_decode(&encoded.0[4..]).unwrap(); + assert_eq!(decoded.0, 42); + } - #[tokio::test] - async fn test_encoded_input_address_with_signature() { - let raw_abi = r#"[ + #[tokio::test] + async fn test_encoded_input_address_with_signature() { + let raw_abi = r#"[ { "inputs": 
[{"name": "recipient", "type": "address"}], "name": "send", @@ -1092,44 +1067,35 @@ mod tests { } ]"#; - let parsed_abi: JsonAbi = serde_json::from_str(raw_abi).unwrap(); - let selector = parsed_abi - .function("send") - .unwrap() - .first() - .unwrap() - .selector() - .0; + let parsed_abi: JsonAbi = serde_json::from_str(raw_abi).unwrap(); + let selector = parsed_abi.function("send").unwrap().first().unwrap().selector().0; - let input: FunctionCallStep = FunctionCallStep { - instance: "Contract".to_owned().into(), - method: Method::FunctionName("send(address)".to_owned()), - calldata: Calldata::new_compound(["0x1000000000000000000000000000000000000001"]), - ..Default::default() - }; + let input: FunctionCallStep = FunctionCallStep { + instance: "Contract".to_owned().into(), + method: Method::FunctionName("send(address)".to_owned()), + calldata: Calldata::new_compound(["0x1000000000000000000000000000000000000001"]), + ..Default::default() + }; - let mut contracts = HashMap::new(); - contracts.insert( - ContractInstance::new("Contract"), - (ContractIdent::new("Contract"), Address::ZERO, parsed_abi), - ); + let mut contracts = HashMap::new(); + contracts.insert( + ContractInstance::new("Contract"), + (ContractIdent::new("Contract"), Address::ZERO, parsed_abi), + ); - let resolver = MockResolver; - let context = ResolutionContext::default().with_deployed_contracts(&contracts); - let encoded = input.encoded_input(&resolver, context).await.unwrap(); - assert!(encoded.0.starts_with(&selector)); + let resolver = MockResolver; + let context = ResolutionContext::default().with_deployed_contracts(&contracts); + let encoded = input.encoded_input(&resolver, context).await.unwrap(); + assert!(encoded.0.starts_with(&selector)); - type T = (alloy::primitives::Address,); - let decoded: T = T::abi_decode(&encoded.0[4..]).unwrap(); - assert_eq!( - decoded.0, - address!("0x1000000000000000000000000000000000000001") - ); - } + type T = (alloy::primitives::Address,); + let decoded: T = 
T::abi_decode(&encoded.0[4..]).unwrap(); + assert_eq!(decoded.0, address!("0x1000000000000000000000000000000000000001")); + } - #[tokio::test] - async fn test_encoded_input_address() { - let raw_abi = r#"[ + #[tokio::test] + async fn test_encoded_input_address() { + let raw_abi = r#"[ { "inputs": [{"name": "recipient", "type": "address"}], "name": "send", @@ -1139,294 +1105,255 @@ mod tests { } ]"#; - let parsed_abi: JsonAbi = serde_json::from_str(raw_abi).unwrap(); - let selector = parsed_abi - .function("send") - .unwrap() - .first() - .unwrap() - .selector() - .0; + let parsed_abi: JsonAbi = serde_json::from_str(raw_abi).unwrap(); + let selector = parsed_abi.function("send").unwrap().first().unwrap().selector().0; - let input: FunctionCallStep = FunctionCallStep { - instance: ContractInstance::new("Contract"), - method: Method::FunctionName("send".to_owned()), - calldata: Calldata::new_compound(["0x1000000000000000000000000000000000000001"]), - ..Default::default() - }; + let input: FunctionCallStep = FunctionCallStep { + instance: ContractInstance::new("Contract"), + method: Method::FunctionName("send".to_owned()), + calldata: Calldata::new_compound(["0x1000000000000000000000000000000000000001"]), + ..Default::default() + }; - let mut contracts = HashMap::new(); - contracts.insert( - ContractInstance::new("Contract"), - (ContractIdent::new("Contract"), Address::ZERO, parsed_abi), - ); + let mut contracts = HashMap::new(); + contracts.insert( + ContractInstance::new("Contract"), + (ContractIdent::new("Contract"), Address::ZERO, parsed_abi), + ); - let resolver = MockResolver; - let context = ResolutionContext::default().with_deployed_contracts(&contracts); - let encoded = input.encoded_input(&resolver, context).await.unwrap(); - assert!(encoded.0.starts_with(&selector)); + let resolver = MockResolver; + let context = ResolutionContext::default().with_deployed_contracts(&contracts); + let encoded = input.encoded_input(&resolver, context).await.unwrap(); + 
assert!(encoded.0.starts_with(&selector)); - type T = (alloy::primitives::Address,); - let decoded: T = T::abi_decode(&encoded.0[4..]).unwrap(); - assert_eq!( - decoded.0, - address!("0x1000000000000000000000000000000000000001") - ); - } + type T = (alloy::primitives::Address,); + let decoded: T = T::abi_decode(&encoded.0[4..]).unwrap(); + assert_eq!(decoded.0, address!("0x1000000000000000000000000000000000000001")); + } - async fn resolve_calldata_item( - input: &str, - deployed_contracts: &HashMap, - resolver: &(impl ResolverApi + ?Sized), - ) -> anyhow::Result { - let context = ResolutionContext::default().with_deployed_contracts(deployed_contracts); - CalldataItem::new(input).resolve(resolver, context).await - } + async fn resolve_calldata_item( + input: &str, + deployed_contracts: &HashMap, + resolver: &(impl ResolverApi + ?Sized), + ) -> anyhow::Result { + let context = ResolutionContext::default().with_deployed_contracts(deployed_contracts); + CalldataItem::new(input).resolve(resolver, context).await + } - #[tokio::test] - async fn resolver_can_resolve_chain_id_variable() { - // Arrange - let input = "$CHAIN_ID"; + #[tokio::test] + async fn resolver_can_resolve_chain_id_variable() { + // Arrange + let input = "$CHAIN_ID"; - // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; + // Act + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; - // Assert - let resolved = resolved.expect("Failed to resolve argument"); - assert_eq!(resolved, U256::from(MockResolver.chain_id().await.unwrap())) - } + // Assert + let resolved = resolved.expect("Failed to resolve argument"); + assert_eq!(resolved, U256::from(MockResolver.chain_id().await.unwrap())) + } - #[tokio::test] - async fn resolver_can_resolve_gas_limit_variable() { - // Arrange - let input = "$GAS_LIMIT"; + #[tokio::test] + async fn resolver_can_resolve_gas_limit_variable() { + // Arrange + let input = "$GAS_LIMIT"; - // Act - let 
resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; + // Act + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; - // Assert - let resolved = resolved.expect("Failed to resolve argument"); - assert_eq!( - resolved, - U256::from( - MockResolver - .block_gas_limit(Default::default()) - .await - .unwrap() - ) - ) - } + // Assert + let resolved = resolved.expect("Failed to resolve argument"); + assert_eq!( + resolved, + U256::from(MockResolver.block_gas_limit(Default::default()).await.unwrap()) + ) + } - #[tokio::test] - async fn resolver_can_resolve_coinbase_variable() { - // Arrange - let input = "$COINBASE"; + #[tokio::test] + async fn resolver_can_resolve_coinbase_variable() { + // Arrange + let input = "$COINBASE"; - // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; + // Act + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; - // Assert - let resolved = resolved.expect("Failed to resolve argument"); - assert_eq!( - resolved, - U256::from_be_slice( - MockResolver - .block_coinbase(Default::default()) - .await - .unwrap() - .as_ref() - ) - ) - } + // Assert + let resolved = resolved.expect("Failed to resolve argument"); + assert_eq!( + resolved, + U256::from_be_slice( + MockResolver.block_coinbase(Default::default()).await.unwrap().as_ref() + ) + ) + } - #[tokio::test] - async fn resolver_can_resolve_block_difficulty_variable() { - // Arrange - let input = "$DIFFICULTY"; + #[tokio::test] + async fn resolver_can_resolve_block_difficulty_variable() { + // Arrange + let input = "$DIFFICULTY"; - // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; + // Act + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; - // Assert - let resolved = resolved.expect("Failed to resolve argument"); - assert_eq!( - resolved, - MockResolver - 
.block_difficulty(Default::default()) - .await - .unwrap() - ) - } + // Assert + let resolved = resolved.expect("Failed to resolve argument"); + assert_eq!(resolved, MockResolver.block_difficulty(Default::default()).await.unwrap()) + } - #[tokio::test] - async fn resolver_can_resolve_block_base_fee_variable() { - // Arrange - let input = "$BASE_FEE"; + #[tokio::test] + async fn resolver_can_resolve_block_base_fee_variable() { + // Arrange + let input = "$BASE_FEE"; - // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; + // Act + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; - // Assert - let resolved = resolved.expect("Failed to resolve argument"); - assert_eq!( - resolved, - MockResolver - .block_base_fee(Default::default()) - .await - .map(U256::from) - .unwrap() - ) - } + // Assert + let resolved = resolved.expect("Failed to resolve argument"); + assert_eq!( + resolved, + MockResolver.block_base_fee(Default::default()).await.map(U256::from).unwrap() + ) + } - #[tokio::test] - async fn resolver_can_resolve_block_hash_variable() { - // Arrange - let input = "$BLOCK_HASH"; + #[tokio::test] + async fn resolver_can_resolve_block_hash_variable() { + // Arrange + let input = "$BLOCK_HASH"; - // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; + // Act + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; - // Assert - let resolved = resolved.expect("Failed to resolve argument"); - assert_eq!( - resolved, - U256::from_be_bytes(MockResolver.block_hash(Default::default()).await.unwrap().0) - ) - } + // Assert + let resolved = resolved.expect("Failed to resolve argument"); + assert_eq!( + resolved, + U256::from_be_bytes(MockResolver.block_hash(Default::default()).await.unwrap().0) + ) + } - #[tokio::test] - async fn resolver_can_resolve_block_number_variable() { - // Arrange - let input = "$BLOCK_NUMBER"; + 
#[tokio::test] + async fn resolver_can_resolve_block_number_variable() { + // Arrange + let input = "$BLOCK_NUMBER"; - // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; + // Act + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; - // Assert - let resolved = resolved.expect("Failed to resolve argument"); - assert_eq!( - resolved, - U256::from(MockResolver.last_block_number().await.unwrap()) - ) - } + // Assert + let resolved = resolved.expect("Failed to resolve argument"); + assert_eq!(resolved, U256::from(MockResolver.last_block_number().await.unwrap())) + } - #[tokio::test] - async fn resolver_can_resolve_block_timestamp_variable() { - // Arrange - let input = "$BLOCK_TIMESTAMP"; + #[tokio::test] + async fn resolver_can_resolve_block_timestamp_variable() { + // Arrange + let input = "$BLOCK_TIMESTAMP"; - // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; + // Act + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; - // Assert - let resolved = resolved.expect("Failed to resolve argument"); - assert_eq!( - resolved, - U256::from( - MockResolver - .block_timestamp(Default::default()) - .await - .unwrap() - ) - ) - } + // Assert + let resolved = resolved.expect("Failed to resolve argument"); + assert_eq!( + resolved, + U256::from(MockResolver.block_timestamp(Default::default()).await.unwrap()) + ) + } - #[tokio::test] - async fn simple_addition_can_be_resolved() { - // Arrange - let input = "2 4 +"; + #[tokio::test] + async fn simple_addition_can_be_resolved() { + // Arrange + let input = "2 4 +"; - // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; + // Act + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; - // Assert - let resolved = resolved.expect("Failed to resolve argument"); - assert_eq!(resolved, U256::from(6)); - 
} + // Assert + let resolved = resolved.expect("Failed to resolve argument"); + assert_eq!(resolved, U256::from(6)); + } - #[tokio::test] - async fn simple_subtraction_can_be_resolved() { - // Arrange - let input = "4 2 -"; + #[tokio::test] + async fn simple_subtraction_can_be_resolved() { + // Arrange + let input = "4 2 -"; - // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; + // Act + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; - // Assert - let resolved = resolved.expect("Failed to resolve argument"); - assert_eq!(resolved, U256::from(2)); - } + // Assert + let resolved = resolved.expect("Failed to resolve argument"); + assert_eq!(resolved, U256::from(2)); + } - #[tokio::test] - async fn simple_multiplication_can_be_resolved() { - // Arrange - let input = "4 2 *"; + #[tokio::test] + async fn simple_multiplication_can_be_resolved() { + // Arrange + let input = "4 2 *"; - // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; + // Act + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; - // Assert - let resolved = resolved.expect("Failed to resolve argument"); - assert_eq!(resolved, U256::from(8)); - } + // Assert + let resolved = resolved.expect("Failed to resolve argument"); + assert_eq!(resolved, U256::from(8)); + } - #[tokio::test] - async fn simple_division_can_be_resolved() { - // Arrange - let input = "4 2 /"; + #[tokio::test] + async fn simple_division_can_be_resolved() { + // Arrange + let input = "4 2 /"; - // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; + // Act + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; - // Assert - let resolved = resolved.expect("Failed to resolve argument"); - assert_eq!(resolved, U256::from(2)); - } + // Assert + let resolved = resolved.expect("Failed to resolve argument"); + 
assert_eq!(resolved, U256::from(2)); + } - #[tokio::test] - async fn arithmetic_errors_are_not_panics() { - // Arrange - let input = "4 0 /"; + #[tokio::test] + async fn arithmetic_errors_are_not_panics() { + // Arrange + let input = "4 0 /"; - // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; + // Act + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; - // Assert - assert!(resolved.is_err()) - } + // Assert + assert!(resolved.is_err()) + } - #[tokio::test] - async fn arithmetic_with_resolution_works() { - // Arrange - let input = "$BLOCK_NUMBER 10 +"; + #[tokio::test] + async fn arithmetic_with_resolution_works() { + // Arrange + let input = "$BLOCK_NUMBER 10 +"; - // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; + // Act + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; - // Assert - let resolved = resolved.expect("Failed to resolve argument"); - assert_eq!( - resolved, - U256::from(MockResolver.last_block_number().await.unwrap() + 10) - ); - } + // Assert + let resolved = resolved.expect("Failed to resolve argument"); + assert_eq!(resolved, U256::from(MockResolver.last_block_number().await.unwrap() + 10)); + } - #[tokio::test] - async fn incorrect_number_of_arguments_errors() { - // Arrange - let input = "$BLOCK_NUMBER 10 + +"; + #[tokio::test] + async fn incorrect_number_of_arguments_errors() { + // Arrange + let input = "$BLOCK_NUMBER 10 + +"; - // Act - let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; + // Act + let resolved = resolve_calldata_item(input, &Default::default(), &MockResolver).await; - // Assert - assert!(resolved.is_err()) - } + // Assert + assert!(resolved.is_err()) + } - #[test] - fn expected_json_can_be_deserialized1() { - // Arrange - let str = r#" + #[test] + fn expected_json_can_be_deserialized1() { + // Arrange + let str = r#" { 
"return_data": [ "1" @@ -1440,17 +1367,17 @@ mod tests { } "#; - // Act - let expected = serde_json::from_str::(str); + // Act + let expected = serde_json::from_str::(str); - // Assert - expected.expect("Failed to deserialize"); - } + // Assert + expected.expect("Failed to deserialize"); + } - #[test] - fn expected_json_can_be_deserialized2() { - // Arrange - let str = r#" + #[test] + fn expected_json_can_be_deserialized2() { + // Arrange + let str = r#" { "return_data": [ "1" @@ -1465,10 +1392,10 @@ mod tests { } "#; - // Act - let expected = serde_json::from_str::(str); + // Act + let expected = serde_json::from_str::(str); - // Assert - expected.expect("Failed to deserialize"); - } + // Assert + expected.expect("Failed to deserialize"); + } } diff --git a/crates/format/src/traits.rs b/crates/format/src/traits.rs index 5fabe77..6670969 100644 --- a/crates/format/src/traits.rs +++ b/crates/format/src/traits.rs @@ -1,9 +1,9 @@ use std::{collections::HashMap, pin::Pin}; use alloy::{ - eips::BlockNumberOrTag, - json_abi::JsonAbi, - primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, U256}, + eips::BlockNumberOrTag, + json_abi::JsonAbi, + primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, U256}, }; use anyhow::Result; @@ -12,165 +12,164 @@ use crate::metadata::{ContractIdent, ContractInstance}; /// A trait of the interface are required to implement to be used by the resolution logic that this /// crate implements to go from string calldata and into the bytes calldata. pub trait ResolverApi { - /// Returns the ID of the chain that the node is on. - fn chain_id(&self) -> Pin> + '_>>; + /// Returns the ID of the chain that the node is on. + fn chain_id(&self) -> Pin> + '_>>; - /// Returns the gas price for the specified transaction. - fn transaction_gas_price( - &self, - tx_hash: TxHash, - ) -> Pin> + '_>>; + /// Returns the gas price for the specified transaction. 
+ fn transaction_gas_price( + &self, + tx_hash: TxHash, + ) -> Pin> + '_>>; - // TODO: This is currently a u128 due to substrate needing more than 64 bits for its gas limit - // when we implement the changes to the gas we need to adjust this to be a u64. - /// Returns the gas limit of the specified block. - fn block_gas_limit( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>>; + // TODO: This is currently a u128 due to substrate needing more than 64 bits for its gas limit + // when we implement the changes to the gas we need to adjust this to be a u64. + /// Returns the gas limit of the specified block. + fn block_gas_limit( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>>; - /// Returns the coinbase of the specified block. - fn block_coinbase( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>>; + /// Returns the coinbase of the specified block. + fn block_coinbase( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>>; - /// Returns the difficulty of the specified block. - fn block_difficulty( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>>; + /// Returns the difficulty of the specified block. + fn block_difficulty( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>>; - /// Returns the base fee of the specified block. - fn block_base_fee( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>>; + /// Returns the base fee of the specified block. + fn block_base_fee( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>>; - /// Returns the hash of the specified block. - fn block_hash( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>>; + /// Returns the hash of the specified block. 
+ fn block_hash( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>>; - /// Returns the timestamp of the specified block, - fn block_timestamp( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>>; + /// Returns the timestamp of the specified block, + fn block_timestamp( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>>; - /// Returns the number of the last block. - fn last_block_number(&self) -> Pin> + '_>>; + /// Returns the number of the last block. + fn last_block_number(&self) -> Pin> + '_>>; } #[derive(Clone, Copy, Debug, Default)] /// Contextual information required by the code that's performing the resolution. pub struct ResolutionContext<'a> { - /// When provided the contracts provided here will be used for resolutions. - deployed_contracts: Option<&'a HashMap>, + /// When provided the contracts provided here will be used for resolutions. + deployed_contracts: Option<&'a HashMap>, - /// When provided the variables in here will be used for performing resolutions. - variables: Option<&'a HashMap>, + /// When provided the variables in here will be used for performing resolutions. + variables: Option<&'a HashMap>, - /// When provided this block number will be treated as the tip of the chain. - block_number: Option<&'a BlockNumber>, + /// When provided this block number will be treated as the tip of the chain. + block_number: Option<&'a BlockNumber>, - /// When provided the resolver will use this transaction hash for all of its resolutions. - transaction_hash: Option<&'a TxHash>, + /// When provided the resolver will use this transaction hash for all of its resolutions. 
+ transaction_hash: Option<&'a TxHash>, } impl<'a> ResolutionContext<'a> { - pub fn new() -> Self { - Default::default() - } + pub fn new() -> Self { + Default::default() + } - pub fn new_from_parts( - deployed_contracts: impl Into< - Option<&'a HashMap>, - >, - variables: impl Into>>, - block_number: impl Into>, - transaction_hash: impl Into>, - ) -> Self { - Self { - deployed_contracts: deployed_contracts.into(), - variables: variables.into(), - block_number: block_number.into(), - transaction_hash: transaction_hash.into(), - } - } + pub fn new_from_parts( + deployed_contracts: impl Into< + Option<&'a HashMap>, + >, + variables: impl Into>>, + block_number: impl Into>, + transaction_hash: impl Into>, + ) -> Self { + Self { + deployed_contracts: deployed_contracts.into(), + variables: variables.into(), + block_number: block_number.into(), + transaction_hash: transaction_hash.into(), + } + } - pub fn with_deployed_contracts( - mut self, - deployed_contracts: impl Into< - Option<&'a HashMap>, - >, - ) -> Self { - self.deployed_contracts = deployed_contracts.into(); - self - } + pub fn with_deployed_contracts( + mut self, + deployed_contracts: impl Into< + Option<&'a HashMap>, + >, + ) -> Self { + self.deployed_contracts = deployed_contracts.into(); + self + } - pub fn with_variables( - mut self, - variables: impl Into>>, - ) -> Self { - self.variables = variables.into(); - self - } + pub fn with_variables( + mut self, + variables: impl Into>>, + ) -> Self { + self.variables = variables.into(); + self + } - pub fn with_block_number(mut self, block_number: impl Into>) -> Self { - self.block_number = block_number.into(); - self - } + pub fn with_block_number(mut self, block_number: impl Into>) -> Self { + self.block_number = block_number.into(); + self + } - pub fn with_transaction_hash( - mut self, - transaction_hash: impl Into>, - ) -> Self { - self.transaction_hash = transaction_hash.into(); - self - } + pub fn with_transaction_hash( + mut self, + transaction_hash: 
impl Into>, + ) -> Self { + self.transaction_hash = transaction_hash.into(); + self + } - pub fn resolve_block_number(&self, number: BlockNumberOrTag) -> BlockNumberOrTag { - match self.block_number { - Some(block_number) => match number { - BlockNumberOrTag::Latest => BlockNumberOrTag::Number(*block_number), - n @ (BlockNumberOrTag::Finalized - | BlockNumberOrTag::Safe - | BlockNumberOrTag::Earliest - | BlockNumberOrTag::Pending - | BlockNumberOrTag::Number(_)) => n, - }, - None => number, - } - } + pub fn resolve_block_number(&self, number: BlockNumberOrTag) -> BlockNumberOrTag { + match self.block_number { + Some(block_number) => match number { + BlockNumberOrTag::Latest => BlockNumberOrTag::Number(*block_number), + n @ (BlockNumberOrTag::Finalized | + BlockNumberOrTag::Safe | + BlockNumberOrTag::Earliest | + BlockNumberOrTag::Pending | + BlockNumberOrTag::Number(_)) => n, + }, + None => number, + } + } - pub fn deployed_contract( - &self, - instance: &ContractInstance, - ) -> Option<&(ContractIdent, Address, JsonAbi)> { - self.deployed_contracts - .and_then(|deployed_contracts| deployed_contracts.get(instance)) - } + pub fn deployed_contract( + &self, + instance: &ContractInstance, + ) -> Option<&(ContractIdent, Address, JsonAbi)> { + self.deployed_contracts + .and_then(|deployed_contracts| deployed_contracts.get(instance)) + } - pub fn deployed_contract_address(&self, instance: &ContractInstance) -> Option<&Address> { - self.deployed_contract(instance).map(|(_, a, _)| a) - } + pub fn deployed_contract_address(&self, instance: &ContractInstance) -> Option<&Address> { + self.deployed_contract(instance).map(|(_, a, _)| a) + } - pub fn deployed_contract_abi(&self, instance: &ContractInstance) -> Option<&JsonAbi> { - self.deployed_contract(instance).map(|(_, _, a)| a) - } + pub fn deployed_contract_abi(&self, instance: &ContractInstance) -> Option<&JsonAbi> { + self.deployed_contract(instance).map(|(_, _, a)| a) + } - pub fn variable(&self, name: impl AsRef) -> 
Option<&U256> { - self.variables - .and_then(|variables| variables.get(name.as_ref())) - } + pub fn variable(&self, name: impl AsRef) -> Option<&U256> { + self.variables.and_then(|variables| variables.get(name.as_ref())) + } - pub fn tip_block_number(&self) -> Option<&'a BlockNumber> { - self.block_number - } + pub fn tip_block_number(&self) -> Option<&'a BlockNumber> { + self.block_number + } - pub fn transaction_hash(&self) -> Option<&'a TxHash> { - self.transaction_hash - } + pub fn transaction_hash(&self) -> Option<&'a TxHash> { + self.transaction_hash + } } diff --git a/crates/ml-test-runner/src/main.rs b/crates/ml-test-runner/src/main.rs index 923eaf7..cc02ded 100644 --- a/crates/ml-test-runner/src/main.rs +++ b/crates/ml-test-runner/src/main.rs @@ -3,22 +3,22 @@ use clap::Parser; use revive_dt_common::{iterators::FilesWithExtensionIterator, types::PrivateKeyAllocator}; use revive_dt_config::{TestExecutionContext, TestingPlatform}; use revive_dt_core::{ - CachedCompiler, Platform, - helpers::{TestDefinition, TestPlatformInformation}, + CachedCompiler, Platform, + helpers::{TestDefinition, TestPlatformInformation}, }; use revive_dt_format::{ - case::CaseIdx, - corpus::Corpus, - metadata::{Metadata, MetadataFile}, + case::CaseIdx, + corpus::Corpus, + metadata::{Metadata, MetadataFile}, }; use std::{ - borrow::Cow, - collections::{BTreeMap, HashSet}, - fs::File, - io::{BufRead, BufReader, BufWriter, Write}, - path::{Path, PathBuf}, - sync::Arc, - time::Instant, + borrow::Cow, + collections::{BTreeMap, HashSet}, + fs::File, + io::{BufRead, BufReader, BufWriter, Write}, + path::{Path, PathBuf}, + sync::Arc, + time::Instant, }; use temp_dir::TempDir; use tokio::sync::Mutex; @@ -29,501 +29,475 @@ use tracing_subscriber::{EnvFilter, FmtSubscriber}; #[derive(Debug, Parser)] #[command(name = "ml-test-runner")] struct MlTestRunnerArgs { - /// Path to test file (.sol), corpus file (.json), or folder containing .sol files - #[arg(value_name = "PATH")] - path: PathBuf, + 
/// Path to test file (.sol), corpus file (.json), or folder containing .sol files + #[arg(value_name = "PATH")] + path: PathBuf, - /// File to cache tests that have already passed - #[arg(long = "cached-passed")] - cached_passed: Option, + /// File to cache tests that have already passed + #[arg(long = "cached-passed")] + cached_passed: Option, - /// Stop after the first file failure - #[arg(long = "bail")] - bail: bool, + /// Stop after the first file failure + #[arg(long = "bail")] + bail: bool, - /// Platform to test against (geth or kitchensink) - #[arg(long = "platform", default_value = "geth")] - platform: TestingPlatform, + /// Platform to test against (geth or kitchensink) + #[arg(long = "platform", default_value = "geth")] + platform: TestingPlatform, - /// Start the platform and wait for RPC readiness - #[arg(long = "start-platform", default_value = "false")] - start_platform: bool, + /// Start the platform and wait for RPC readiness + #[arg(long = "start-platform", default_value = "false")] + start_platform: bool, } fn main() -> anyhow::Result<()> { - // Initialize tracing subscriber - let subscriber = FmtSubscriber::builder() - .with_env_filter(EnvFilter::from_default_env()) - .with_writer(std::io::stderr) - .finish(); - tracing::subscriber::set_global_default(subscriber).expect("Failed to set tracing subscriber"); + // Initialize tracing subscriber + let subscriber = FmtSubscriber::builder() + .with_env_filter(EnvFilter::from_default_env()) + .with_writer(std::io::stderr) + .finish(); + tracing::subscriber::set_global_default(subscriber).expect("Failed to set tracing subscriber"); - let args = MlTestRunnerArgs::parse(); + let args = MlTestRunnerArgs::parse(); - info!("ML test runner starting"); - info!("Platform: {:?}", args.platform); - info!("Start platform: {}", args.start_platform); + info!("ML test runner starting"); + info!("Platform: {:?}", args.platform); + info!("Start platform: {}", args.start_platform); - // Run the async body - 
tokio::runtime::Builder::new_multi_thread() - .enable_all() - .build() - .expect("Failed building the Runtime") - .block_on(run(args)) + // Run the async body + tokio::runtime::Builder::new_multi_thread() + .enable_all() + .build() + .expect("Failed building the Runtime") + .block_on(run(args)) } async fn run(args: MlTestRunnerArgs) -> anyhow::Result<()> { - let start_time = Instant::now(); + let start_time = Instant::now(); - // Discover test files - info!("Discovering test files from: {}", args.path.display()); - let test_files = discover_test_files(&args.path)?; - info!("Found {} test file(s)", test_files.len()); + // Discover test files + info!("Discovering test files from: {}", args.path.display()); + let test_files = discover_test_files(&args.path)?; + info!("Found {} test file(s)", test_files.len()); - // Load cached passed tests if provided - let cached_passed = if let Some(cache_file) = &args.cached_passed { - let cached = load_cached_passed(cache_file)?; - info!("Loaded {} cached passed test(s)", cached.len()); - cached - } else { - HashSet::new() - }; + // Load cached passed tests if provided + let cached_passed = if let Some(cache_file) = &args.cached_passed { + let cached = load_cached_passed(cache_file)?; + info!("Loaded {} cached passed test(s)", cached.len()); + cached + } else { + HashSet::new() + }; - let cached_passed = Arc::new(Mutex::new(cached_passed)); + let cached_passed = Arc::new(Mutex::new(cached_passed)); - // Statistics - let mut passed_files = 0; - let mut failed_files = 0; - let mut skipped_files = 0; - let mut failures = Vec::new(); + // Statistics + let mut passed_files = 0; + let mut failed_files = 0; + let mut skipped_files = 0; + let mut failures = Vec::new(); - const GREEN: &str = "\x1B[32m"; - const RED: &str = "\x1B[31m"; - const YELLOW: &str = "\x1B[33m"; - const COLOUR_RESET: &str = "\x1B[0m"; - const BOLD: &str = "\x1B[1m"; - const BOLD_RESET: &str = "\x1B[22m"; + const GREEN: &str = "\x1B[32m"; + const RED: &str = 
"\x1B[31m"; + const YELLOW: &str = "\x1B[33m"; + const COLOUR_RESET: &str = "\x1B[0m"; + const BOLD: &str = "\x1B[1m"; + const BOLD_RESET: &str = "\x1B[22m"; - // Process each file - for test_file in test_files { - let file_display = test_file.display().to_string(); + // Process each file + for test_file in test_files { + let file_display = test_file.display().to_string(); - // Check if already passed - { - let cache = cached_passed.lock().await; - if cache.contains(&file_display) { - println!("test {} ... {YELLOW}cached{COLOUR_RESET}", file_display); - skipped_files += 1; - continue; - } - } + // Check if already passed + { + let cache = cached_passed.lock().await; + if cache.contains(&file_display) { + println!("test {} ... {YELLOW}cached{COLOUR_RESET}", file_display); + skipped_files += 1; + continue; + } + } - // Load metadata from file - info!("Loading metadata from: {}", test_file.display()); - let metadata_file = match load_metadata_file(&test_file) { - Ok(mf) => { - info!("Loaded metadata with {} case(s)", mf.cases.len()); - mf - } - Err(e) => { - println!("test {} ... {RED}FAILED{COLOUR_RESET}", file_display); - println!(" Error loading metadata: {}", e); - failed_files += 1; - failures.push(( - file_display.clone(), - format!("Error loading metadata: {}", e), - )); - if args.bail { - break; - } - continue; - } - }; + // Load metadata from file + info!("Loading metadata from: {}", test_file.display()); + let metadata_file = match load_metadata_file(&test_file) { + Ok(mf) => { + info!("Loaded metadata with {} case(s)", mf.cases.len()); + mf + }, + Err(e) => { + println!("test {} ... 
{RED}FAILED{COLOUR_RESET}", file_display); + println!(" Error loading metadata: {}", e); + failed_files += 1; + failures.push((file_display.clone(), format!("Error loading metadata: {}", e))); + if args.bail { + break; + } + continue; + }, + }; - // Execute test cases for this file - info!("Executing test file: {}", file_display); - match execute_test_file(&args, &metadata_file).await { - Ok(_) => { - println!("test {} ... {GREEN}ok{COLOUR_RESET}", file_display); - info!("Test file passed: {}", file_display); - passed_files += 1; + // Execute test cases for this file + info!("Executing test file: {}", file_display); + match execute_test_file(&args, &metadata_file).await { + Ok(_) => { + println!("test {} ... {GREEN}ok{COLOUR_RESET}", file_display); + info!("Test file passed: {}", file_display); + passed_files += 1; - // Add to cache - { - let mut cache = cached_passed.lock().await; - cache.insert(file_display); - } - } - Err(e) => { - println!("test {} ... {RED}FAILED{COLOUR_RESET}", file_display); - info!("Test file failed: {}", file_display); - failed_files += 1; - failures.push((file_display, format!("{:?}", e))); + // Add to cache + { + let mut cache = cached_passed.lock().await; + cache.insert(file_display); + } + }, + Err(e) => { + println!("test {} ... 
{RED}FAILED{COLOUR_RESET}", file_display); + info!("Test file failed: {}", file_display); + failed_files += 1; + failures.push((file_display, format!("{:?}", e))); - if args.bail { - info!("Bailing after first failure"); - break; - } - } - } - } + if args.bail { + info!("Bailing after first failure"); + break; + } + }, + } + } - // Save cached passed tests - if let Some(cache_file) = &args.cached_passed { - let cache = cached_passed.lock().await; - info!("Saving {} cached passed test(s)", cache.len()); - save_cached_passed(cache_file, &cache)?; - } + // Save cached passed tests + if let Some(cache_file) = &args.cached_passed { + let cache = cached_passed.lock().await; + info!("Saving {} cached passed test(s)", cache.len()); + save_cached_passed(cache_file, &cache)?; + } - // Print summary - println!(); - if !failures.is_empty() { - println!("{BOLD}failures:{BOLD_RESET}"); - println!(); - for (file, error) in &failures { - println!("---- {} ----", file); - println!("{}", error); - println!(); - } - } + // Print summary + println!(); + if !failures.is_empty() { + println!("{BOLD}failures:{BOLD_RESET}"); + println!(); + for (file, error) in &failures { + println!("---- {} ----", file); + println!("{}", error); + println!(); + } + } - let elapsed = start_time.elapsed(); - println!( - "test result: {}. {} passed; {} failed; {} cached; finished in {:.2}s", - if failed_files == 0 { - format!("{GREEN}ok{COLOUR_RESET}") - } else { - format!("{RED}FAILED{COLOUR_RESET}") - }, - passed_files, - failed_files, - skipped_files, - elapsed.as_secs_f64() - ); + let elapsed = start_time.elapsed(); + println!( + "test result: {}. 
{} passed; {} failed; {} cached; finished in {:.2}s", + if failed_files == 0 { + format!("{GREEN}ok{COLOUR_RESET}") + } else { + format!("{RED}FAILED{COLOUR_RESET}") + }, + passed_files, + failed_files, + skipped_files, + elapsed.as_secs_f64() + ); - if failed_files > 0 { - std::process::exit(1); - } + if failed_files > 0 { + std::process::exit(1); + } - Ok(()) + Ok(()) } /// Discover test files from the given path fn discover_test_files(path: &Path) -> anyhow::Result> { - if !path.exists() { - anyhow::bail!("Path does not exist: {}", path.display()); - } + if !path.exists() { + anyhow::bail!("Path does not exist: {}", path.display()); + } - let mut files = Vec::new(); + let mut files = Vec::new(); - if path.is_file() { - let extension = path.extension().and_then(|s| s.to_str()).unwrap_or(""); + if path.is_file() { + let extension = path.extension().and_then(|s| s.to_str()).unwrap_or(""); - match extension { - "sol" => { - // Single .sol file - files.push(path.to_path_buf()); - } - "json" => { - // Corpus file - enumerate its tests - let corpus = Corpus::try_from_path(path)?; - let metadata_files = corpus.enumerate_tests(); - for metadata in metadata_files { - files.push(metadata.metadata_file_path); - } - } - _ => anyhow::bail!( - "Unsupported file extension: {}. 
Expected .sol or .json", - extension - ), - } - } else if path.is_dir() { - // Walk directory recursively for .sol files - for entry in FilesWithExtensionIterator::new(path) - .with_allowed_extension("sol") - .with_use_cached_fs(true) - { - files.push(entry); - } - } else { - anyhow::bail!("Path is neither a file nor a directory: {}", path.display()); - } + match extension { + "sol" => { + // Single .sol file + files.push(path.to_path_buf()); + }, + "json" => { + // Corpus file - enumerate its tests + let corpus = Corpus::try_from_path(path)?; + let metadata_files = corpus.enumerate_tests(); + for metadata in metadata_files { + files.push(metadata.metadata_file_path); + } + }, + _ => anyhow::bail!("Unsupported file extension: {}. Expected .sol or .json", extension), + } + } else if path.is_dir() { + // Walk directory recursively for .sol files + for entry in FilesWithExtensionIterator::new(path) + .with_allowed_extension("sol") + .with_use_cached_fs(true) + { + files.push(entry); + } + } else { + anyhow::bail!("Path is neither a file nor a directory: {}", path.display()); + } - Ok(files) + Ok(files) } /// Load metadata from a test file fn load_metadata_file(path: &Path) -> anyhow::Result { - let metadata = Metadata::try_from_file(path) - .ok_or_else(|| anyhow::anyhow!("Failed to load metadata from {}", path.display()))?; + let metadata = Metadata::try_from_file(path) + .ok_or_else(|| anyhow::anyhow!("Failed to load metadata from {}", path.display()))?; - Ok(MetadataFile { - metadata_file_path: path.to_path_buf(), - corpus_file_path: path.to_path_buf(), - content: metadata, - }) + Ok(MetadataFile { + metadata_file_path: path.to_path_buf(), + corpus_file_path: path.to_path_buf(), + content: metadata, + }) } /// Execute all test cases in a metadata file async fn execute_test_file( - args: &MlTestRunnerArgs, - metadata_file: &MetadataFile, + args: &MlTestRunnerArgs, + metadata_file: &MetadataFile, ) -> anyhow::Result<()> { - if metadata_file.cases.is_empty() { - 
anyhow::bail!("No test cases found in file"); - } + if metadata_file.cases.is_empty() { + anyhow::bail!("No test cases found in file"); + } - info!("Processing {} test case(s)", metadata_file.cases.len()); + info!("Processing {} test case(s)", metadata_file.cases.len()); - // Get the platform based on CLI args - let platform: &dyn Platform = match args.platform { - TestingPlatform::Geth => &revive_dt_core::GethEvmSolcPlatform, - TestingPlatform::Kitchensink => &revive_dt_core::KitchensinkPolkavmResolcPlatform, - TestingPlatform::Zombienet => &revive_dt_core::ZombienetPolkavmResolcPlatform, - }; + // Get the platform based on CLI args + let platform: &dyn Platform = match args.platform { + TestingPlatform::Geth => &revive_dt_core::GethEvmSolcPlatform, + TestingPlatform::Kitchensink => &revive_dt_core::KitchensinkPolkavmResolcPlatform, + TestingPlatform::Zombienet => &revive_dt_core::ZombienetPolkavmResolcPlatform, + }; - // Create temporary working directory - let temp_dir = TempDir::new()?; - info!("Created temporary directory: {}", temp_dir.path().display()); + // Create temporary working directory + let temp_dir = TempDir::new()?; + info!("Created temporary directory: {}", temp_dir.path().display()); - // Create a test execution context (with defaults) - let test_context = TestExecutionContext::default(); - let context = revive_dt_config::Context::Test(Box::new(test_context)); + // Create a test execution context (with defaults) + let test_context = TestExecutionContext::default(); + let context = revive_dt_config::Context::Test(Box::new(test_context)); - let node: &'static dyn revive_dt_node_interaction::EthereumNode = if args.start_platform { - info!("Starting blockchain node..."); - let node_handle = platform - .new_node(context.clone()) - .context("Failed to spawn node thread")?; + let node: &'static dyn revive_dt_node_interaction::EthereumNode = if args.start_platform { + info!("Starting blockchain node..."); + let node_handle = + 
platform.new_node(context.clone()).context("Failed to spawn node thread")?; - info!("Waiting for node to start..."); - let node = node_handle - .join() - .map_err(|e| anyhow::anyhow!("Node thread panicked: {:?}", e))? - .context("Failed to start node")?; + info!("Waiting for node to start..."); + let node = node_handle + .join() + .map_err(|e| anyhow::anyhow!("Node thread panicked: {:?}", e))? + .context("Failed to start node")?; - info!( - "Node started with ID: {}, connection: {}", - node.id(), - node.connection_string() - ); + info!("Node started with ID: {}, connection: {}", node.id(), node.connection_string()); - // Run pre-transactions on the node - let node = Box::leak(node); // Leak to get 'static lifetime for simplicity - info!("Running pre-transactions..."); - node.pre_transactions() - .await - .context("Failed to run pre-transactions")?; - info!("Pre-transactions completed"); + // Run pre-transactions on the node + let node = Box::leak(node); // Leak to get 'static lifetime for simplicity + info!("Running pre-transactions..."); + node.pre_transactions().await.context("Failed to run pre-transactions")?; + info!("Pre-transactions completed"); - node - } else { - info!("Using existing node"); - let existing_node: Box = match args.platform { - TestingPlatform::Geth => { - Box::new(revive_dt_node::node_implementations::geth::GethNode::new_existing()) - } - TestingPlatform::Kitchensink | TestingPlatform::Zombienet => Box::new( - revive_dt_node::node_implementations::substrate::SubstrateNode::new_existing(), - ), - }; - Box::leak(existing_node) - }; + node + } else { + info!("Using existing node"); + let existing_node: Box = match args.platform { + TestingPlatform::Geth => + Box::new(revive_dt_node::node_implementations::geth::GethNode::new_existing()), + TestingPlatform::Kitchensink | TestingPlatform::Zombienet => Box::new( + revive_dt_node::node_implementations::substrate::SubstrateNode::new_existing(), + ), + }; + Box::leak(existing_node) + }; - // Create a 
cached compiler for this file (wrapped in Arc like the main code does) - info!("Initializing cached compiler"); - let cached_compiler = CachedCompiler::new(temp_dir.path().join("compilation_cache"), false) - .await - .map(Arc::new) - .context("Failed to create cached compiler")?; + // Create a cached compiler for this file (wrapped in Arc like the main code does) + info!("Initializing cached compiler"); + let cached_compiler = CachedCompiler::new(temp_dir.path().join("compilation_cache"), false) + .await + .map(Arc::new) + .context("Failed to create cached compiler")?; - // Create a private key allocator - let private_key_allocator = Arc::new(Mutex::new(PrivateKeyAllocator::new( - alloy::primitives::U256::from(100), - ))); + // Create a private key allocator + let private_key_allocator = + Arc::new(Mutex::new(PrivateKeyAllocator::new(alloy::primitives::U256::from(100)))); - // Create reporter infrastructure (minimal, just for the Driver API) - // Note: We need to keep the report_task alive, otherwise the reporter channel closes - let (reporter, report_task) = - revive_dt_report::ReportAggregator::new(context.clone()).into_task(); + // Create reporter infrastructure (minimal, just for the Driver API) + // Note: We need to keep the report_task alive, otherwise the reporter channel closes + let (reporter, report_task) = + revive_dt_report::ReportAggregator::new(context.clone()).into_task(); - // Spawn the report task in the background to keep the channel open - tokio::spawn(report_task); + // Spawn the report task in the background to keep the channel open + tokio::spawn(report_task); - info!( - "Building test definitions for {} case(s)", - metadata_file.cases.len() - ); - // Build all test definitions upfront - let mut test_definitions = Vec::new(); - for (case_idx, case) in metadata_file.cases.iter().enumerate() { - info!("Building test definition for case {}", case_idx); - let test_def = build_test_definition( - metadata_file, - case, - case_idx, - platform, - 
node, - &context, - &reporter, - ) - .await?; + info!("Building test definitions for {} case(s)", metadata_file.cases.len()); + // Build all test definitions upfront + let mut test_definitions = Vec::new(); + for (case_idx, case) in metadata_file.cases.iter().enumerate() { + info!("Building test definition for case {}", case_idx); + let test_def = build_test_definition( + metadata_file, + case, + case_idx, + platform, + node, + &context, + &reporter, + ) + .await?; - if let Some(test_def) = test_def { - info!("Test definition for case {} created successfully", case_idx); - test_definitions.push(test_def); - } - } + if let Some(test_def) = test_def { + info!("Test definition for case {} created successfully", case_idx); + test_definitions.push(test_def); + } + } - // Execute each test case - info!("Executing {} test definition(s)", test_definitions.len()); - for (idx, test_definition) in test_definitions.iter().enumerate() { - info!("─────────────────────────────────────────────────────────────────"); - info!( - "Executing case {}/{}: case_idx={}, mode={}, steps={}", - idx + 1, - test_definitions.len(), - test_definition.case_idx, - test_definition.mode, - test_definition.case.steps.len() - ); + // Execute each test case + info!("Executing {} test definition(s)", test_definitions.len()); + for (idx, test_definition) in test_definitions.iter().enumerate() { + info!("─────────────────────────────────────────────────────────────────"); + info!( + "Executing case {}/{}: case_idx={}, mode={}, steps={}", + idx + 1, + test_definitions.len(), + test_definition.case_idx, + test_definition.mode, + test_definition.case.steps.len() + ); - info!("Creating driver for case {}", test_definition.case_idx); - let driver = revive_dt_core::differential_tests::Driver::new_root( - test_definition, - private_key_allocator.clone(), - &cached_compiler, - ) - .await - .context("Failed to create driver")?; + info!("Creating driver for case {}", test_definition.case_idx); + let driver = 
revive_dt_core::differential_tests::Driver::new_root( + test_definition, + private_key_allocator.clone(), + &cached_compiler, + ) + .await + .context("Failed to create driver")?; - info!( - "Running {} step(s) for case {}", - test_definition.case.steps.len(), - test_definition.case_idx - ); - let steps_executed = driver.execute_all().await.context(format!( - "Failed to execute case {}", - test_definition.case_idx - ))?; - info!( - "✓ Case {} completed successfully, executed {} step(s)", - test_definition.case_idx, steps_executed - ); - } - info!("─────────────────────────────────────────────────────────────────"); - info!( - "All {} test case(s) executed successfully", - test_definitions.len() - ); + info!( + "Running {} step(s) for case {}", + test_definition.case.steps.len(), + test_definition.case_idx + ); + let steps_executed = driver + .execute_all() + .await + .context(format!("Failed to execute case {}", test_definition.case_idx))?; + info!( + "✓ Case {} completed successfully, executed {} step(s)", + test_definition.case_idx, steps_executed + ); + } + info!("─────────────────────────────────────────────────────────────────"); + info!("All {} test case(s) executed successfully", test_definitions.len()); - Ok(()) + Ok(()) } /// Build a test definition for a single test case async fn build_test_definition<'a>( - metadata_file: &'a MetadataFile, - case: &'a revive_dt_format::case::Case, - case_idx: usize, - platform: &'a dyn Platform, - node: &'a dyn revive_dt_node_interaction::EthereumNode, - context: &revive_dt_config::Context, - reporter: &revive_dt_report::Reporter, + metadata_file: &'a MetadataFile, + case: &'a revive_dt_format::case::Case, + case_idx: usize, + platform: &'a dyn Platform, + node: &'a dyn revive_dt_node_interaction::EthereumNode, + context: &revive_dt_config::Context, + reporter: &revive_dt_report::Reporter, ) -> anyhow::Result>> { - // Determine mode - use case mode if specified, otherwise use default - let mode = case - .modes - .as_ref() 
- .or(metadata_file.modes.as_ref()) - .and_then(|modes| modes.first()) - .and_then(|parsed_mode| parsed_mode.to_modes().next()) - .map(Cow::Owned) - .or_else(|| revive_dt_compiler::Mode::all().next().map(Cow::Borrowed)) - .unwrap(); + // Determine mode - use case mode if specified, otherwise use default + let mode = case + .modes + .as_ref() + .or(metadata_file.modes.as_ref()) + .and_then(|modes| modes.first()) + .and_then(|parsed_mode| parsed_mode.to_modes().next()) + .map(Cow::Owned) + .or_else(|| revive_dt_compiler::Mode::all().next().map(Cow::Borrowed)) + .unwrap(); - // Create a compiler for this mode - let compiler = platform - .new_compiler(context.clone(), mode.version.clone().map(Into::into)) - .await - .context("Failed to create compiler")?; + // Create a compiler for this mode + let compiler = platform + .new_compiler(context.clone(), mode.version.clone().map(Into::into)) + .await + .context("Failed to create compiler")?; - // Create test-specific reporter - let test_reporter = - reporter.test_specific_reporter(Arc::new(revive_dt_report::TestSpecifier { - solc_mode: mode.as_ref().clone(), - metadata_file_path: metadata_file.metadata_file_path.clone(), - case_idx: CaseIdx::new(case_idx), - })); + // Create test-specific reporter + let test_reporter = + reporter.test_specific_reporter(Arc::new(revive_dt_report::TestSpecifier { + solc_mode: mode.as_ref().clone(), + metadata_file_path: metadata_file.metadata_file_path.clone(), + case_idx: CaseIdx::new(case_idx), + })); - // Create execution-specific reporter - let execution_reporter = - test_reporter.execution_specific_reporter(node.id(), platform.platform_identifier()); + // Create execution-specific reporter + let execution_reporter = + test_reporter.execution_specific_reporter(node.id(), platform.platform_identifier()); - // Build platform information - let mut platforms = BTreeMap::new(); - platforms.insert( - platform.platform_identifier(), - TestPlatformInformation { - platform, - node, - compiler, - 
reporter: execution_reporter, - }, - ); + // Build platform information + let mut platforms = BTreeMap::new(); + platforms.insert( + platform.platform_identifier(), + TestPlatformInformation { platform, node, compiler, reporter: execution_reporter }, + ); - // Build test definition - let test_definition = TestDefinition { - metadata: metadata_file, - metadata_file_path: &metadata_file.metadata_file_path, - mode, - case_idx: CaseIdx::new(case_idx), - case, - platforms, - reporter: test_reporter, - }; + // Build test definition + let test_definition = TestDefinition { + metadata: metadata_file, + metadata_file_path: &metadata_file.metadata_file_path, + mode, + case_idx: CaseIdx::new(case_idx), + case, + platforms, + reporter: test_reporter, + }; - // Check compatibility - if let Err((reason, _)) = test_definition.check_compatibility() { - println!(" Skipping case {}: {}", case_idx, reason); - return Ok(None); - } + // Check compatibility + if let Err((reason, _)) = test_definition.check_compatibility() { + println!(" Skipping case {}: {}", case_idx, reason); + return Ok(None); + } - Ok(Some(test_definition)) + Ok(Some(test_definition)) } /// Load cached passed tests from file fn load_cached_passed(path: &Path) -> anyhow::Result> { - if !path.exists() { - return Ok(HashSet::new()); - } + if !path.exists() { + return Ok(HashSet::new()); + } - let file = File::open(path).context("Failed to open cached-passed file")?; - let reader = BufReader::new(file); + let file = File::open(path).context("Failed to open cached-passed file")?; + let reader = BufReader::new(file); - let mut cache = HashSet::new(); - for line in reader.lines() { - let line = line?; - let trimmed = line.trim(); - if !trimmed.is_empty() { - cache.insert(trimmed.to_string()); - } - } + let mut cache = HashSet::new(); + for line in reader.lines() { + let line = line?; + let trimmed = line.trim(); + if !trimmed.is_empty() { + cache.insert(trimmed.to_string()); + } + } - Ok(cache) + Ok(cache) } /// Save 
cached passed tests to file fn save_cached_passed(path: &Path, cache: &HashSet) -> anyhow::Result<()> { - let file = File::create(path).context("Failed to create cached-passed file")?; - let mut writer = BufWriter::new(file); + let file = File::create(path).context("Failed to create cached-passed file")?; + let mut writer = BufWriter::new(file); - let mut entries: Vec<_> = cache.iter().collect(); - entries.sort(); + let mut entries: Vec<_> = cache.iter().collect(); + entries.sort(); - for entry in entries { - writeln!(writer, "{}", entry)?; - } + for entry in entries { + writeln!(writer, "{}", entry)?; + } - writer.flush()?; - Ok(()) + writer.flush()?; + Ok(()) } diff --git a/crates/node-interaction/src/lib.rs b/crates/node-interaction/src/lib.rs index bffc4c8..d434575 100644 --- a/crates/node-interaction/src/lib.rs +++ b/crates/node-interaction/src/lib.rs @@ -3,11 +3,11 @@ use std::{pin::Pin, sync::Arc}; use alloy::{ - primitives::{Address, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256}, - rpc::types::{ - EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest, - trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace}, - }, + primitives::{Address, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256}, + rpc::types::{ + EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest, + trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace}, + }, }; use anyhow::Result; @@ -18,85 +18,85 @@ use revive_dt_format::traits::ResolverApi; /// An interface for all interactions with Ethereum compatible nodes. #[allow(clippy::type_complexity)] pub trait EthereumNode { - /// A function to run post spawning the nodes and before any transactions are run on the node. - fn pre_transactions(&mut self) -> Pin> + '_>>; + /// A function to run post spawning the nodes and before any transactions are run on the node. 
+ fn pre_transactions(&mut self) -> Pin> + '_>>; - fn id(&self) -> usize; + fn id(&self) -> usize; - /// Returns the nodes connection string. - fn connection_string(&self) -> &str; + /// Returns the nodes connection string. + fn connection_string(&self) -> &str; - fn submit_transaction( - &self, - transaction: TransactionRequest, - ) -> Pin> + '_>>; + fn submit_transaction( + &self, + transaction: TransactionRequest, + ) -> Pin> + '_>>; - fn get_receipt( - &self, - tx_hash: TxHash, - ) -> Pin> + '_>>; + fn get_receipt( + &self, + tx_hash: TxHash, + ) -> Pin> + '_>>; - /// Execute the [TransactionRequest] and return a [TransactionReceipt]. - fn execute_transaction( - &self, - transaction: TransactionRequest, - ) -> Pin> + '_>>; + /// Execute the [TransactionRequest] and return a [TransactionReceipt]. + fn execute_transaction( + &self, + transaction: TransactionRequest, + ) -> Pin> + '_>>; - /// Trace the transaction in the [TransactionReceipt] and return a [GethTrace]. - fn trace_transaction( - &self, - tx_hash: TxHash, - trace_options: GethDebugTracingOptions, - ) -> Pin> + '_>>; + /// Trace the transaction in the [TransactionReceipt] and return a [GethTrace]. + fn trace_transaction( + &self, + tx_hash: TxHash, + trace_options: GethDebugTracingOptions, + ) -> Pin> + '_>>; - /// Returns the state diff of the transaction hash in the [TransactionReceipt]. - fn state_diff(&self, tx_hash: TxHash) -> Pin> + '_>>; + /// Returns the state diff of the transaction hash in the [TransactionReceipt]. + fn state_diff(&self, tx_hash: TxHash) -> Pin> + '_>>; - /// Returns the balance of the provided [`Address`] back. - fn balance_of(&self, address: Address) -> Pin> + '_>>; + /// Returns the balance of the provided [`Address`] back. 
+ fn balance_of(&self, address: Address) -> Pin> + '_>>; - /// Returns the latest storage proof of the provided [`Address`] - fn latest_state_proof( - &self, - address: Address, - keys: Vec, - ) -> Pin> + '_>>; + /// Returns the latest storage proof of the provided [`Address`] + fn latest_state_proof( + &self, + address: Address, + keys: Vec, + ) -> Pin> + '_>>; - /// Returns the resolver that is to use with this ethereum node. - fn resolver(&self) -> Pin>> + '_>>; + /// Returns the resolver that is to use with this ethereum node. + fn resolver(&self) -> Pin>> + '_>>; - /// Returns the EVM version of the node. - fn evm_version(&self) -> EVMVersion; + /// Returns the EVM version of the node. + fn evm_version(&self) -> EVMVersion; - /// Returns a stream of the blocks that were mined by the node. - fn subscribe_to_full_blocks_information( - &self, - ) -> Pin< - Box< - dyn Future>>>> - + '_, - >, - >; + /// Returns a stream of the blocks that were mined by the node. + fn subscribe_to_full_blocks_information( + &self, + ) -> Pin< + Box< + dyn Future>>>> + + '_, + >, + >; - /// Checks if the provided address is in the wallet. If it is, returns the address. - /// Otherwise, returns the default signer's address. - fn resolve_signer_or_default(&self, address: Address) -> Address; + /// Checks if the provided address is in the wallet. If it is, returns the address. + /// Otherwise, returns the default signer's address. + fn resolve_signer_or_default(&self, address: Address) -> Address; } #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct MinedBlockInformation { - /// The block number. - pub block_number: BlockNumber, + /// The block number. + pub block_number: BlockNumber, - /// The block timestamp. - pub block_timestamp: BlockTimestamp, + /// The block timestamp. + pub block_timestamp: BlockTimestamp, - /// The amount of gas mined in the block. - pub mined_gas: u128, + /// The amount of gas mined in the block. 
+ pub mined_gas: u128, - /// The gas limit of the block. - pub block_gas_limit: u128, + /// The gas limit of the block. + pub block_gas_limit: u128, - /// The hashes of the transactions that were mined as part of the block. - pub transaction_hashes: Vec, + /// The hashes of the transactions that were mined as part of the block. + pub transaction_hashes: Vec, } diff --git a/crates/node/src/helpers/process.rs b/crates/node/src/helpers/process.rs index 5ffa5f5..50344ee 100644 --- a/crates/node/src/helpers/process.rs +++ b/crates/node/src/helpers/process.rs @@ -1,9 +1,9 @@ use std::{ - fs::{File, OpenOptions}, - io::{BufRead, BufReader, Write}, - path::Path, - process::{Child, Command}, - time::{Duration, Instant}, + fs::{File, OpenOptions}, + io::{BufRead, BufReader, Write}, + path::Path, + process::{Child, Command}, + time::{Duration, Instant}, }; use anyhow::{Context, Result, bail}; @@ -12,180 +12,161 @@ use anyhow::{Context, Result, bail}; /// when the process is dropped. #[derive(Debug)] pub struct Process { - /// The handle of the child process. - child: Child, + /// The handle of the child process. + child: Child, - /// The file that stdout is being logged to. - stdout_logs_file: File, + /// The file that stdout is being logged to. + stdout_logs_file: File, - /// The file that stderr is being logged to. - stderr_logs_file: File, + /// The file that stderr is being logged to. 
+ stderr_logs_file: File, } impl Process { - pub fn new( - log_file_prefix: impl Into>, - logs_directory: impl AsRef, - binary_path: impl AsRef, - command_building_callback: impl FnOnce(&mut Command, File, File), - process_readiness_wait_behavior: ProcessReadinessWaitBehavior, - ) -> Result { - let log_file_prefix = log_file_prefix.into(); + pub fn new( + log_file_prefix: impl Into>, + logs_directory: impl AsRef, + binary_path: impl AsRef, + command_building_callback: impl FnOnce(&mut Command, File, File), + process_readiness_wait_behavior: ProcessReadinessWaitBehavior, + ) -> Result { + let log_file_prefix = log_file_prefix.into(); - let (stdout_file_name, stderr_file_name) = match log_file_prefix { - Some(prefix) => ( - format!("{prefix}_stdout.log"), - format!("{prefix}_stderr.log"), - ), - None => ("stdout.log".to_string(), "stderr.log".to_string()), - }; + let (stdout_file_name, stderr_file_name) = match log_file_prefix { + Some(prefix) => (format!("{prefix}_stdout.log"), format!("{prefix}_stderr.log")), + None => ("stdout.log".to_string(), "stderr.log".to_string()), + }; - let stdout_logs_file_path = logs_directory.as_ref().join(stdout_file_name); - let stderr_logs_file_path = logs_directory.as_ref().join(stderr_file_name); + let stdout_logs_file_path = logs_directory.as_ref().join(stdout_file_name); + let stderr_logs_file_path = logs_directory.as_ref().join(stderr_file_name); - let stdout_logs_file = OpenOptions::new() - .write(true) - .truncate(true) - .create(true) - .open(stdout_logs_file_path.as_path()) - .context("Failed to open the stdout logs file")?; - let stderr_logs_file = OpenOptions::new() - .write(true) - .truncate(true) - .create(true) - .open(stderr_logs_file_path.as_path()) - .context("Failed to open the stderr logs file")?; + let stdout_logs_file = OpenOptions::new() + .write(true) + .truncate(true) + .create(true) + .open(stdout_logs_file_path.as_path()) + .context("Failed to open the stdout logs file")?; + let stderr_logs_file = 
OpenOptions::new() + .write(true) + .truncate(true) + .create(true) + .open(stderr_logs_file_path.as_path()) + .context("Failed to open the stderr logs file")?; - let mut command = { - let stdout_logs_file = stdout_logs_file - .try_clone() - .context("Failed to clone the stdout logs file")?; - let stderr_logs_file = stderr_logs_file - .try_clone() - .context("Failed to clone the stderr logs file")?; + let mut command = { + let stdout_logs_file = + stdout_logs_file.try_clone().context("Failed to clone the stdout logs file")?; + let stderr_logs_file = + stderr_logs_file.try_clone().context("Failed to clone the stderr logs file")?; - let mut command = Command::new(binary_path.as_ref()); - command_building_callback(&mut command, stdout_logs_file, stderr_logs_file); - command - }; - let mut child = command - .spawn() - .context("Failed to spawn the built command")?; + let mut command = Command::new(binary_path.as_ref()); + command_building_callback(&mut command, stdout_logs_file, stderr_logs_file); + command + }; + let mut child = command.spawn().context("Failed to spawn the built command")?; - match process_readiness_wait_behavior { - ProcessReadinessWaitBehavior::NoStartupWait => {} - ProcessReadinessWaitBehavior::WaitDuration(duration) => std::thread::sleep(duration), - ProcessReadinessWaitBehavior::TimeBoundedWaitFunction { - max_wait_duration, - mut check_function, - } => { - let spawn_time = Instant::now(); + match process_readiness_wait_behavior { + ProcessReadinessWaitBehavior::NoStartupWait => {}, + ProcessReadinessWaitBehavior::WaitDuration(duration) => std::thread::sleep(duration), + ProcessReadinessWaitBehavior::TimeBoundedWaitFunction { + max_wait_duration, + mut check_function, + } => { + let spawn_time = Instant::now(); - let stdout_logs_file = OpenOptions::new() - .read(true) - .open(stdout_logs_file_path) - .context("Failed to open the stdout logs file")?; - let stderr_logs_file = OpenOptions::new() - .read(true) - .open(stderr_logs_file_path) - 
.context("Failed to open the stderr logs file")?; + let stdout_logs_file = OpenOptions::new() + .read(true) + .open(stdout_logs_file_path) + .context("Failed to open the stdout logs file")?; + let stderr_logs_file = OpenOptions::new() + .read(true) + .open(stderr_logs_file_path) + .context("Failed to open the stderr logs file")?; - let mut stdout_lines = BufReader::new(stdout_logs_file).lines(); - let mut stderr_lines = BufReader::new(stderr_logs_file).lines(); + let mut stdout_lines = BufReader::new(stdout_logs_file).lines(); + let mut stderr_lines = BufReader::new(stderr_logs_file).lines(); - let mut stdout = String::new(); - let mut stderr = String::new(); + let mut stdout = String::new(); + let mut stderr = String::new(); - loop { - let stdout_line = stdout_lines.next().and_then(Result::ok); - let stderr_line = stderr_lines.next().and_then(Result::ok); + loop { + let stdout_line = stdout_lines.next().and_then(Result::ok); + let stderr_line = stderr_lines.next().and_then(Result::ok); - if let Some(stdout_line) = stdout_line.as_ref() { - stdout.push_str(stdout_line); - stdout.push('\n'); - } - if let Some(stderr_line) = stderr_line.as_ref() { - stderr.push_str(stderr_line); - stderr.push('\n'); - } + if let Some(stdout_line) = stdout_line.as_ref() { + stdout.push_str(stdout_line); + stdout.push('\n'); + } + if let Some(stderr_line) = stderr_line.as_ref() { + stderr.push_str(stderr_line); + stderr.push('\n'); + } - let check_result = - check_function(stdout_line.as_deref(), stderr_line.as_deref()).context( - format!( - "Failed to wait for the process to be ready - {stdout} - {stderr}" - ), - )?; + let check_result = + check_function(stdout_line.as_deref(), stderr_line.as_deref()).context( + format!( + "Failed to wait for the process to be ready - {stdout} - {stderr}" + ), + )?; - if check_result { - break; - } + if check_result { + break; + } - if Instant::now().duration_since(spawn_time) > max_wait_duration { - bail!( - "Waited for the process to start but it 
failed to start in time. stderr {stderr} - stdout {stdout}" - ) - } - } - } - ProcessReadinessWaitBehavior::WaitForCommandToExit => { - if !child - .wait() - .context("Failed waiting for process to finish")? - .success() - { - anyhow::bail!("Failed to spawn command"); - } - } - } + if Instant::now().duration_since(spawn_time) > max_wait_duration { + bail!( + "Waited for the process to start but it failed to start in time. stderr {stderr} - stdout {stdout}" + ) + } + } + }, + ProcessReadinessWaitBehavior::WaitForCommandToExit => { + if !child.wait().context("Failed waiting for process to finish")?.success() { + anyhow::bail!("Failed to spawn command"); + } + }, + } - Ok(Self { - child, - stdout_logs_file, - stderr_logs_file, - }) - } + Ok(Self { child, stdout_logs_file, stderr_logs_file }) + } } impl Drop for Process { - fn drop(&mut self) { - self.child.kill().expect("Failed to kill the process"); - self.stdout_logs_file - .flush() - .expect("Failed to flush the stdout logs file"); - self.stderr_logs_file - .flush() - .expect("Failed to flush the stderr logs file"); - } + fn drop(&mut self) { + self.child.kill().expect("Failed to kill the process"); + self.stdout_logs_file.flush().expect("Failed to flush the stdout logs file"); + self.stderr_logs_file.flush().expect("Failed to flush the stderr logs file"); + } } pub enum ProcessReadinessWaitBehavior { - /// The process does not require any kind of wait after it's been spawned and can be used - /// straight away. - NoStartupWait, + /// The process does not require any kind of wait after it's been spawned and can be used + /// straight away. + NoStartupWait, - /// Waits for the command to exit. - WaitForCommandToExit, + /// Waits for the command to exit. + WaitForCommandToExit, - /// The process does require some amount of wait duration after it's been started. - WaitDuration(Duration), + /// The process does require some amount of wait duration after it's been started. 
+ WaitDuration(Duration), - /// The process requires a time bounded wait function which is a function of the lines that - /// appear in the log files. - TimeBoundedWaitFunction { - /// The maximum amount of time to wait for the check function to return true. - max_wait_duration: Duration, + /// The process requires a time bounded wait function which is a function of the lines that + /// appear in the log files. + TimeBoundedWaitFunction { + /// The maximum amount of time to wait for the check function to return true. + max_wait_duration: Duration, - /// The function to use to check if the process spawned is ready to use or not. This - /// function should return the following in the following cases: - /// - /// - `Ok(true)`: Returned when the condition the process is waiting for has been fulfilled - /// and the wait is completed. - /// - `Ok(false)`: The process is not ready yet but it might be ready in the future. - /// - `Err`: The process is not ready yet and will not be ready in the future as it appears - /// that it has encountered an error when it was being spawned. - /// - /// The first argument is a line from stdout and the second argument is a line from stderr. - #[allow(clippy::type_complexity)] - check_function: Box, Option<&str>) -> anyhow::Result>, - }, + /// The function to use to check if the process spawned is ready to use or not. This + /// function should return the following in the following cases: + /// + /// - `Ok(true)`: Returned when the condition the process is waiting for has been fulfilled + /// and the wait is completed. + /// - `Ok(false)`: The process is not ready yet but it might be ready in the future. + /// - `Err`: The process is not ready yet and will not be ready in the future as it appears + /// that it has encountered an error when it was being spawned. + /// + /// The first argument is a line from stdout and the second argument is a line from stderr. 
+ #[allow(clippy::type_complexity)] + check_function: Box, Option<&str>) -> anyhow::Result>, + }, } diff --git a/crates/node/src/lib.rs b/crates/node/src/lib.rs index 8607dcc..271c1ec 100644 --- a/crates/node/src/lib.rs +++ b/crates/node/src/lib.rs @@ -10,16 +10,16 @@ pub mod provider_utils; /// An abstract interface for testing nodes. pub trait Node: EthereumNode { - /// Spawns a node configured according to the genesis json. - /// - /// Blocking until it's ready to accept transactions. - fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()>; + /// Spawns a node configured according to the genesis json. + /// + /// Blocking until it's ready to accept transactions. + fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()>; - /// Prune the node instance and related data. - /// - /// Blocking until it's completely stopped. - fn shutdown(&mut self) -> anyhow::Result<()>; + /// Prune the node instance and related data. + /// + /// Blocking until it's completely stopped. + fn shutdown(&mut self) -> anyhow::Result<()>; - /// Returns the node version. - fn version(&self) -> anyhow::Result; + /// Returns the node version. + fn version(&self) -> anyhow::Result; } diff --git a/crates/node/src/node_implementations/geth.rs b/crates/node/src/node_implementations/geth.rs index 6c5e72c..eacce5f 100644 --- a/crates/node/src/node_implementations/geth.rs +++ b/crates/node/src/node_implementations/geth.rs @@ -1,37 +1,37 @@ //! The go-ethereum node implementation. 
use std::{ - fs::{File, create_dir_all, remove_dir_all}, - io::Read, - ops::ControlFlow, - path::PathBuf, - pin::Pin, - process::{Command, Stdio}, - sync::{ - Arc, - atomic::{AtomicU32, Ordering}, - }, - time::Duration, + fs::{File, create_dir_all, remove_dir_all}, + io::Read, + ops::ControlFlow, + path::PathBuf, + pin::Pin, + process::{Command, Stdio}, + sync::{ + Arc, + atomic::{AtomicU32, Ordering}, + }, + time::Duration, }; use alloy::{ - eips::BlockNumberOrTag, - genesis::{Genesis, GenesisAccount}, - network::{Ethereum, EthereumWallet, NetworkWallet}, - primitives::{ - Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, StorageKey, TxHash, U256, - }, - providers::{ - Provider, - ext::DebugApi, - fillers::{CachedNonceManager, ChainIdFiller, NonceFiller}, - }, - rpc::types::{ - EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest, - trace::geth::{ - DiffMode, GethDebugTracingOptions, GethTrace, PreStateConfig, PreStateFrame, - }, - }, + eips::BlockNumberOrTag, + genesis::{Genesis, GenesisAccount}, + network::{Ethereum, EthereumWallet, NetworkWallet}, + primitives::{ + Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, StorageKey, TxHash, U256, + }, + providers::{ + Provider, + ext::DebugApi, + fillers::{CachedNonceManager, ChainIdFiller, NonceFiller}, + }, + rpc::types::{ + EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest, + trace::geth::{ + DiffMode, GethDebugTracingOptions, GethTrace, PreStateConfig, PreStateFrame, + }, + }, }; use anyhow::Context as _; use futures::{Stream, StreamExt}; @@ -40,18 +40,18 @@ use tokio::sync::OnceCell; use tracing::{Instrument, error, instrument}; use revive_dt_common::{ - fs::clear_directory, - futures::{PollingWaitBehavior, poll}, + fs::clear_directory, + futures::{PollingWaitBehavior, poll}, }; use revive_dt_config::*; use revive_dt_format::traits::ResolverApi; use revive_dt_node_interaction::{EthereumNode, MinedBlockInformation}; use crate::{ - Node, - constants::{CHAIN_ID, 
INITIAL_BALANCE}, - helpers::{Process, ProcessReadinessWaitBehavior}, - provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider}, + Node, + constants::{CHAIN_ID, INITIAL_BALANCE}, + helpers::{Process, ProcessReadinessWaitBehavior}, + provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider}, }; static NODE_COUNT: AtomicU32 = AtomicU32::new(0); @@ -66,867 +66,827 @@ static NODE_COUNT: AtomicU32 = AtomicU32::new(0); #[derive(Debug)] #[allow(clippy::type_complexity)] pub struct GethNode { - connection_string: String, - base_directory: PathBuf, - data_directory: PathBuf, - logs_directory: PathBuf, - geth: PathBuf, - id: u32, - handle: Option, - start_timeout: Duration, - wallet: Arc, - nonce_manager: CachedNonceManager, - provider: OnceCell>>, - chain_id: ChainId, + connection_string: String, + base_directory: PathBuf, + data_directory: PathBuf, + logs_directory: PathBuf, + geth: PathBuf, + id: u32, + handle: Option, + start_timeout: Duration, + wallet: Arc, + nonce_manager: CachedNonceManager, + provider: OnceCell>>, + chain_id: ChainId, } impl GethNode { - const BASE_DIRECTORY: &str = "geth"; - const DATA_DIRECTORY: &str = "data"; - const LOGS_DIRECTORY: &str = "logs"; + const BASE_DIRECTORY: &str = "geth"; + const DATA_DIRECTORY: &str = "data"; + const LOGS_DIRECTORY: &str = "logs"; - const IPC_FILE: &str = "geth.ipc"; - const GENESIS_JSON_FILE: &str = "genesis.json"; + const IPC_FILE: &str = "geth.ipc"; + const GENESIS_JSON_FILE: &str = "genesis.json"; - const READY_MARKER: &str = "IPC endpoint opened"; - const ERROR_MARKER: &str = "Fatal:"; + const READY_MARKER: &str = "IPC endpoint opened"; + const ERROR_MARKER: &str = "Fatal:"; - const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress"; - const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet"; + const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress"; + const 
TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet"; - const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(5 * 60); - const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60); + const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(5 * 60); + const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60); - pub fn new( - context: impl AsRef - + AsRef - + AsRef - + Clone, - ) -> Self { - let working_directory_configuration = - AsRef::::as_ref(&context); - let wallet_configuration = AsRef::::as_ref(&context); - let geth_configuration = AsRef::::as_ref(&context); + pub fn new( + context: impl AsRef + + AsRef + + AsRef + + Clone, + ) -> Self { + let working_directory_configuration = + AsRef::::as_ref(&context); + let wallet_configuration = AsRef::::as_ref(&context); + let geth_configuration = AsRef::::as_ref(&context); - let geth_directory = working_directory_configuration - .as_path() - .join(Self::BASE_DIRECTORY); - let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst); - let base_directory = geth_directory.join(id.to_string()); + let geth_directory = working_directory_configuration.as_path().join(Self::BASE_DIRECTORY); + let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst); + let base_directory = geth_directory.join(id.to_string()); - let wallet = wallet_configuration.wallet(); + let wallet = wallet_configuration.wallet(); - Self { - connection_string: base_directory.join(Self::IPC_FILE).display().to_string(), - data_directory: base_directory.join(Self::DATA_DIRECTORY), - logs_directory: base_directory.join(Self::LOGS_DIRECTORY), - base_directory, - geth: geth_configuration.path.clone(), - id, - handle: None, - start_timeout: geth_configuration.start_timeout_ms, - wallet: wallet.clone(), - nonce_manager: Default::default(), - provider: Default::default(), - chain_id: CHAIN_ID, - } - } + Self { + connection_string: base_directory.join(Self::IPC_FILE).display().to_string(), + data_directory: 
base_directory.join(Self::DATA_DIRECTORY), + logs_directory: base_directory.join(Self::LOGS_DIRECTORY), + base_directory, + geth: geth_configuration.path.clone(), + id, + handle: None, + start_timeout: geth_configuration.start_timeout_ms, + wallet: wallet.clone(), + nonce_manager: Default::default(), + provider: Default::default(), + chain_id: CHAIN_ID, + } + } - pub fn new_existing() -> Self { - let wallet_config = revive_dt_config::WalletConfiguration::default(); - Self { - connection_string: "http://localhost:8545".to_string(), - base_directory: PathBuf::new(), - data_directory: PathBuf::new(), - logs_directory: PathBuf::new(), - geth: PathBuf::new(), - id: 0, - chain_id: 1337, - handle: None, - start_timeout: Duration::from_secs(0), - wallet: wallet_config.wallet(), - nonce_manager: Default::default(), - provider: Default::default(), - } - } + pub fn new_existing() -> Self { + let wallet_config = revive_dt_config::WalletConfiguration::default(); + Self { + connection_string: "http://localhost:8545".to_string(), + base_directory: PathBuf::new(), + data_directory: PathBuf::new(), + logs_directory: PathBuf::new(), + geth: PathBuf::new(), + id: 0, + chain_id: 1337, + handle: None, + start_timeout: Duration::from_secs(0), + wallet: wallet_config.wallet(), + nonce_manager: Default::default(), + provider: Default::default(), + } + } - /// Create the node directory and call `geth init` to configure the genesis. - #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> { - let _ = clear_directory(&self.base_directory); - let _ = clear_directory(&self.logs_directory); + /// Create the node directory and call `geth init` to configure the genesis. 
+ #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> { + let _ = clear_directory(&self.base_directory); + let _ = clear_directory(&self.logs_directory); - create_dir_all(&self.base_directory) - .context("Failed to create base directory for geth node")?; - create_dir_all(&self.logs_directory) - .context("Failed to create logs directory for geth node")?; + create_dir_all(&self.base_directory) + .context("Failed to create base directory for geth node")?; + create_dir_all(&self.logs_directory) + .context("Failed to create logs directory for geth node")?; - for signer_address in - >::signer_addresses(&self.wallet) - { - // Note, the use of the entry API here means that we only modify the entries for any - // account that is not in the `alloc` field of the genesis state. - genesis - .alloc - .entry(signer_address) - .or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE))); - } - let genesis_path = self.base_directory.join(Self::GENESIS_JSON_FILE); - serde_json::to_writer( - File::create(&genesis_path).context("Failed to create geth genesis file")?, - &genesis, - ) - .context("Failed to serialize geth genesis JSON to file")?; + for signer_address in + >::signer_addresses(&self.wallet) + { + // Note, the use of the entry API here means that we only modify the entries for any + // account that is not in the `alloc` field of the genesis state. 
+ genesis + .alloc + .entry(signer_address) + .or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE))); + } + let genesis_path = self.base_directory.join(Self::GENESIS_JSON_FILE); + serde_json::to_writer( + File::create(&genesis_path).context("Failed to create geth genesis file")?, + &genesis, + ) + .context("Failed to serialize geth genesis JSON to file")?; - let mut child = Command::new(&self.geth) - .arg("--state.scheme") - .arg("hash") - .arg("init") - .arg("--datadir") - .arg(&self.data_directory) - .arg(genesis_path) - .stderr(Stdio::piped()) - .stdout(Stdio::null()) - .spawn() - .context("Failed to spawn geth --init process")?; + let mut child = Command::new(&self.geth) + .arg("--state.scheme") + .arg("hash") + .arg("init") + .arg("--datadir") + .arg(&self.data_directory) + .arg(genesis_path) + .stderr(Stdio::piped()) + .stdout(Stdio::null()) + .spawn() + .context("Failed to spawn geth --init process")?; - let mut stderr = String::new(); - child - .stderr - .take() - .expect("should be piped") - .read_to_string(&mut stderr) - .context("Failed to read geth --init stderr")?; + let mut stderr = String::new(); + child + .stderr + .take() + .expect("should be piped") + .read_to_string(&mut stderr) + .context("Failed to read geth --init stderr")?; - if !child - .wait() - .context("Failed waiting for geth --init process to finish")? - .success() - { - anyhow::bail!("failed to initialize geth node #{:?}: {stderr}", &self.id); - } + if !child + .wait() + .context("Failed waiting for geth --init process to finish")? + .success() + { + anyhow::bail!("failed to initialize geth node #{:?}: {stderr}", &self.id); + } - Ok(self) - } + Ok(self) + } - /// Spawn the go-ethereum node child process. - /// - /// [Instance::init] must be called prior. 
- #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - fn spawn_process(&mut self) -> anyhow::Result<&mut Self> { - let process = Process::new( - None, - self.logs_directory.as_path(), - self.geth.as_path(), - |command, stdout_file, stderr_file| { - command - .arg("--dev") - .arg("--datadir") - .arg(&self.data_directory) - .arg("--ipcpath") - .arg(&self.connection_string) - .arg("--nodiscover") - .arg("--maxpeers") - .arg("0") - .arg("--txlookuplimit") - .arg("0") - .arg("--cache.blocklogs") - .arg("512") - .arg("--state.scheme") - .arg("hash") - .arg("--syncmode") - .arg("full") - .arg("--gcmode") - .arg("archive") - .stderr(stderr_file) - .stdout(stdout_file); - }, - ProcessReadinessWaitBehavior::TimeBoundedWaitFunction { - max_wait_duration: self.start_timeout, - check_function: Box::new(|_, stderr_line| match stderr_line { - Some(line) => { - if line.contains(Self::ERROR_MARKER) { - anyhow::bail!("Failed to start geth {line}"); - } else if line.contains(Self::READY_MARKER) { - Ok(true) - } else { - Ok(false) - } - } - None => Ok(false), - }), - }, - ); + /// Spawn the go-ethereum node child process. + /// + /// [Instance::init] must be called prior. 
+ #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn spawn_process(&mut self) -> anyhow::Result<&mut Self> { + let process = Process::new( + None, + self.logs_directory.as_path(), + self.geth.as_path(), + |command, stdout_file, stderr_file| { + command + .arg("--dev") + .arg("--datadir") + .arg(&self.data_directory) + .arg("--ipcpath") + .arg(&self.connection_string) + .arg("--nodiscover") + .arg("--maxpeers") + .arg("0") + .arg("--txlookuplimit") + .arg("0") + .arg("--cache.blocklogs") + .arg("512") + .arg("--state.scheme") + .arg("hash") + .arg("--syncmode") + .arg("full") + .arg("--gcmode") + .arg("archive") + .stderr(stderr_file) + .stdout(stdout_file); + }, + ProcessReadinessWaitBehavior::TimeBoundedWaitFunction { + max_wait_duration: self.start_timeout, + check_function: Box::new(|_, stderr_line| match stderr_line { + Some(line) => + if line.contains(Self::ERROR_MARKER) { + anyhow::bail!("Failed to start geth {line}"); + } else if line.contains(Self::READY_MARKER) { + Ok(true) + } else { + Ok(false) + }, + None => Ok(false), + }), + }, + ); - match process { - Ok(process) => self.handle = Some(process), - Err(err) => { - error!(?err, "Failed to start geth, shutting down gracefully"); - self.shutdown() - .context("Failed to gracefully shutdown after geth start error")?; - return Err(err); - } - } + match process { + Ok(process) => self.handle = Some(process), + Err(err) => { + error!(?err, "Failed to start geth, shutting down gracefully"); + self.shutdown() + .context("Failed to gracefully shutdown after geth start error")?; + return Err(err); + }, + } - Ok(self) - } + Ok(self) + } - async fn provider(&self) -> anyhow::Result>> { - self.provider - .get_or_try_init(|| async move { - construct_concurrency_limited_provider::( - self.connection_string.as_str(), - FallbackGasFiller::default(), - ChainIdFiller::new(Some(self.chain_id)), - NonceFiller::new(self.nonce_manager.clone()), - self.wallet.clone(), - ) - .await - .context("Failed to 
construct the provider") - }) - .await - .cloned() - } + async fn provider(&self) -> anyhow::Result>> { + self.provider + .get_or_try_init(|| async move { + construct_concurrency_limited_provider::( + self.connection_string.as_str(), + FallbackGasFiller::default(), + ChainIdFiller::new(Some(self.chain_id)), + NonceFiller::new(self.nonce_manager.clone()), + self.wallet.clone(), + ) + .await + .context("Failed to construct the provider") + }) + .await + .cloned() + } } impl EthereumNode for GethNode { - fn pre_transactions(&mut self) -> Pin> + '_>> { - Box::pin(async move { Ok(()) }) - } + fn pre_transactions(&mut self) -> Pin> + '_>> { + Box::pin(async move { Ok(()) }) + } - fn id(&self) -> usize { - self.id as _ - } + fn id(&self) -> usize { + self.id as _ + } - fn connection_string(&self) -> &str { - &self.connection_string - } + fn connection_string(&self) -> &str { + &self.connection_string + } - #[instrument( + #[instrument( level = "info", skip_all, fields(geth_node_id = self.id, connection_string = self.connection_string), err, )] - fn submit_transaction( - &self, - transaction: TransactionRequest, - ) -> Pin> + '_>> { - Box::pin(async move { - let provider = self - .provider() - .await - .context("Failed to create the provider for transaction submission")?; - let pending_transaction = provider - .send_transaction(transaction) - .await - .context("Failed to submit the transaction through the provider")?; - Ok(*pending_transaction.tx_hash()) - }) - } + fn submit_transaction( + &self, + transaction: TransactionRequest, + ) -> Pin> + '_>> { + Box::pin(async move { + let provider = self + .provider() + .await + .context("Failed to create the provider for transaction submission")?; + let pending_transaction = provider + .send_transaction(transaction) + .await + .context("Failed to submit the transaction through the provider")?; + Ok(*pending_transaction.tx_hash()) + }) + } - #[instrument( + #[instrument( level = "info", skip_all, fields(geth_node_id = self.id, 
connection_string = self.connection_string), err, )] - fn get_receipt( - &self, - tx_hash: TxHash, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider() - .await - .context("Failed to create provider for getting the receipt")? - .get_transaction_receipt(tx_hash) - .await - .context("Failed to get the receipt of the transaction")? - .context("Failed to get the receipt of the transaction") - }) - } + fn get_receipt( + &self, + tx_hash: TxHash, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider() + .await + .context("Failed to create provider for getting the receipt")? + .get_transaction_receipt(tx_hash) + .await + .context("Failed to get the receipt of the transaction")? + .context("Failed to get the receipt of the transaction") + }) + } - #[instrument( + #[instrument( level = "info", skip_all, fields(geth_node_id = self.id, connection_string = self.connection_string), err, )] - fn execute_transaction( - &self, - transaction: TransactionRequest, - ) -> Pin> + '_>> { - Box::pin(async move { - let provider = self - .provider() - .await - .context("Failed to create provider for transaction submission")?; + fn execute_transaction( + &self, + transaction: TransactionRequest, + ) -> Pin> + '_>> { + Box::pin(async move { + let provider = self + .provider() + .await + .context("Failed to create provider for transaction submission")?; - let pending_transaction = provider - .send_transaction(transaction) - .await - .inspect_err( - |err| error!(%err, "Encountered an error when submitting the transaction"), - ) - .context("Failed to submit transaction to geth node")?; - let transaction_hash = *pending_transaction.tx_hash(); + let pending_transaction = provider + .send_transaction(transaction) + .await + .inspect_err( + |err| error!(%err, "Encountered an error when submitting the transaction"), + ) + .context("Failed to submit transaction to geth node")?; + let transaction_hash = *pending_transaction.tx_hash(); - // The following is a fix for the "transaction 
indexing is in progress" error that we used - // to get. You can find more information on this in the following GH issue in geth - // https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on, - // before we can get the receipt of the transaction it needs to have been indexed by the - // node's indexer. Just because the transaction has been confirmed it doesn't mean that it - // has been indexed. When we call alloy's `get_receipt` it checks if the transaction was - // confirmed. If it has been, then it will call `eth_getTransactionReceipt` method which - // _might_ return the above error if the tx has not yet been indexed yet. So, we need to - // implement a retry mechanism for the receipt to keep retrying to get it until it - // eventually works, but we only do that if the error we get back is the "transaction - // indexing is in progress" error or if the receipt is None. - // - // Getting the transaction indexed and taking a receipt can take a long time especially when - // a lot of transactions are being submitted to the node. Thus, while initially we only - // allowed for 60 seconds of waiting with a 1 second delay in polling, we need to allow for - // a larger wait time. Therefore, in here we allow for 5 minutes of waiting with exponential - // backoff each time we attempt to get the receipt and find that it's not available. 
- poll( - Self::RECEIPT_POLLING_DURATION, - PollingWaitBehavior::Constant(Duration::from_millis(200)), - move || { - let provider = provider.clone(); - async move { - match provider.get_transaction_receipt(transaction_hash).await { - Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)), - Ok(None) => Ok(ControlFlow::Continue(())), - Err(error) => { - let error_string = error.to_string(); - match error_string.contains(Self::TRANSACTION_INDEXING_ERROR) { - true => Ok(ControlFlow::Continue(())), - false => Err(error.into()), - } - } - } - } - }, - ) - .instrument(tracing::info_span!( - "Awaiting transaction receipt", - ?transaction_hash - )) - .await - }) - } + // The following is a fix for the "transaction indexing is in progress" error that we + // used to get. You can find more information on this in the following GH issue in + // geth https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on, + // before we can get the receipt of the transaction it needs to have been indexed by the + // node's indexer. Just because the transaction has been confirmed it doesn't mean that + // it has been indexed. When we call alloy's `get_receipt` it checks if the + // transaction was confirmed. If it has been, then it will call + // `eth_getTransactionReceipt` method which _might_ return the above error if the tx + // has not yet been indexed yet. So, we need to implement a retry mechanism for the + // receipt to keep retrying to get it until it eventually works, but we only do that + // if the error we get back is the "transaction indexing is in progress" error or if + // the receipt is None. + // + // Getting the transaction indexed and taking a receipt can take a long time especially + // when a lot of transactions are being submitted to the node. Thus, while initially + // we only allowed for 60 seconds of waiting with a 1 second delay in polling, we + // need to allow for a larger wait time. 
Therefore, in here we allow for 5 minutes of + // waiting with exponential backoff each time we attempt to get the receipt and find + // that it's not available. + poll( + Self::RECEIPT_POLLING_DURATION, + PollingWaitBehavior::Constant(Duration::from_millis(200)), + move || { + let provider = provider.clone(); + async move { + match provider.get_transaction_receipt(transaction_hash).await { + Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)), + Ok(None) => Ok(ControlFlow::Continue(())), + Err(error) => { + let error_string = error.to_string(); + match error_string.contains(Self::TRANSACTION_INDEXING_ERROR) { + true => Ok(ControlFlow::Continue(())), + false => Err(error.into()), + } + }, + } + } + }, + ) + .instrument(tracing::info_span!("Awaiting transaction receipt", ?transaction_hash)) + .await + }) + } - #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - fn trace_transaction( - &self, - tx_hash: TxHash, - trace_options: GethDebugTracingOptions, - ) -> Pin> + '_>> { - Box::pin(async move { - let provider = self - .provider() - .await - .context("Failed to create provider for tracing")?; - poll( - Self::TRACE_POLLING_DURATION, - PollingWaitBehavior::Constant(Duration::from_millis(200)), - move || { - let provider = provider.clone(); - let trace_options = trace_options.clone(); - async move { - match provider - .debug_trace_transaction(tx_hash, trace_options) - .await - { - Ok(trace) => Ok(ControlFlow::Break(trace)), - Err(error) => { - let error_string = error.to_string(); - match error_string.contains(Self::TRANSACTION_TRACING_ERROR) { - true => Ok(ControlFlow::Continue(())), - false => Err(error.into()), - } - } - } - } - }, - ) - .await - }) - } + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn trace_transaction( + &self, + tx_hash: TxHash, + trace_options: GethDebugTracingOptions, + ) -> Pin> + '_>> { + Box::pin(async move { + let provider = + self.provider().await.context("Failed to create provider for 
tracing")?; + poll( + Self::TRACE_POLLING_DURATION, + PollingWaitBehavior::Constant(Duration::from_millis(200)), + move || { + let provider = provider.clone(); + let trace_options = trace_options.clone(); + async move { + match provider.debug_trace_transaction(tx_hash, trace_options).await { + Ok(trace) => Ok(ControlFlow::Break(trace)), + Err(error) => { + let error_string = error.to_string(); + match error_string.contains(Self::TRANSACTION_TRACING_ERROR) { + true => Ok(ControlFlow::Continue(())), + false => Err(error.into()), + } + }, + } + } + }, + ) + .await + }) + } - #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - fn state_diff( - &self, - tx_hash: TxHash, - ) -> Pin> + '_>> { - Box::pin(async move { - let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig { - diff_mode: Some(true), - disable_code: None, - disable_storage: None, - }); - match self - .trace_transaction(tx_hash, trace_options) - .await - .context("Failed to trace transaction for prestate diff")? - .try_into_pre_state_frame() - .context("Failed to convert trace into pre-state frame")? - { - PreStateFrame::Diff(diff) => Ok(diff), - _ => anyhow::bail!("expected a diff mode trace"), - } - }) - } + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn state_diff( + &self, + tx_hash: TxHash, + ) -> Pin> + '_>> { + Box::pin(async move { + let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig { + diff_mode: Some(true), + disable_code: None, + disable_storage: None, + }); + match self + .trace_transaction(tx_hash, trace_options) + .await + .context("Failed to trace transaction for prestate diff")? + .try_into_pre_state_frame() + .context("Failed to convert trace into pre-state frame")? 
+ { + PreStateFrame::Diff(diff) => Ok(diff), + _ => anyhow::bail!("expected a diff mode trace"), + } + }) + } - #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - fn balance_of( - &self, - address: Address, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider() - .await - .context("Failed to get the Geth provider")? - .get_balance(address) - .await - .map_err(Into::into) - }) - } + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn balance_of( + &self, + address: Address, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider() + .await + .context("Failed to get the Geth provider")? + .get_balance(address) + .await + .map_err(Into::into) + }) + } - #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - fn latest_state_proof( - &self, - address: Address, - keys: Vec, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider() - .await - .context("Failed to get the Geth provider")? - .get_proof(address, keys) - .latest() - .await - .map_err(Into::into) - }) - } + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn latest_state_proof( + &self, + address: Address, + keys: Vec, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider() + .await + .context("Failed to get the Geth provider")? 
+ .get_proof(address, keys) + .latest() + .await + .map_err(Into::into) + }) + } - // #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - fn resolver( - &self, - ) -> Pin>> + '_>> { - Box::pin(async move { - let id = self.id; - let provider = self.provider().await?; - Ok(Arc::new(GethNodeResolver { id, provider }) as Arc) - }) - } + // #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn resolver( + &self, + ) -> Pin>> + '_>> { + Box::pin(async move { + let id = self.id; + let provider = self.provider().await?; + Ok(Arc::new(GethNodeResolver { id, provider }) as Arc) + }) + } - fn evm_version(&self) -> EVMVersion { - EVMVersion::Cancun - } + fn evm_version(&self) -> EVMVersion { + EVMVersion::Cancun + } - fn subscribe_to_full_blocks_information( - &self, - ) -> Pin< - Box< - dyn Future>>>> - + '_, - >, - > { - Box::pin(async move { - let provider = self - .provider() - .await - .context("Failed to create the provider for block subscription")?; - let block_subscription = provider.subscribe_full_blocks(); - let block_stream = block_subscription - .into_stream() - .await - .context("Failed to create the block stream")?; + fn subscribe_to_full_blocks_information( + &self, + ) -> Pin< + Box< + dyn Future>>>> + + '_, + >, + > { + Box::pin(async move { + let provider = self + .provider() + .await + .context("Failed to create the provider for block subscription")?; + let block_subscription = provider.subscribe_full_blocks(); + let block_stream = block_subscription + .into_stream() + .await + .context("Failed to create the block stream")?; - let mined_block_information_stream = block_stream.filter_map(|block| async { - let block = block.ok()?; - Some(MinedBlockInformation { - block_number: block.number(), - block_timestamp: block.header.timestamp, - mined_gas: block.header.gas_used as _, - block_gas_limit: block.header.gas_limit as _, - transaction_hashes: block - .transactions - .into_hashes() - .as_hashes() - .expect("Must be 
hashes") - .to_vec(), - }) - }); + let mined_block_information_stream = block_stream.filter_map(|block| async { + let block = block.ok()?; + Some(MinedBlockInformation { + block_number: block.number(), + block_timestamp: block.header.timestamp, + mined_gas: block.header.gas_used as _, + block_gas_limit: block.header.gas_limit as _, + transaction_hashes: block + .transactions + .into_hashes() + .as_hashes() + .expect("Must be hashes") + .to_vec(), + }) + }); - Ok(Box::pin(mined_block_information_stream) - as Pin>>) - }) - } + Ok(Box::pin(mined_block_information_stream) + as Pin>>) + }) + } - fn resolve_signer_or_default(&self, address: Address) -> Address { - let signer_addresses: Vec<_> = - >::signer_addresses(&self.wallet).collect(); - if signer_addresses.contains(&address) { - address - } else { - self.wallet.default_signer().address() - } - } + fn resolve_signer_or_default(&self, address: Address) -> Address { + let signer_addresses: Vec<_> = + >::signer_addresses(&self.wallet).collect(); + if signer_addresses.contains(&address) { + address + } else { + self.wallet.default_signer().address() + } + } } pub struct GethNodeResolver { - id: u32, - provider: ConcreteProvider>, + id: u32, + provider: ConcreteProvider>, } impl ResolverApi for GethNodeResolver { - #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - fn chain_id( - &self, - ) -> Pin> + '_>> { - Box::pin(async move { self.provider.get_chain_id().await.map_err(Into::into) }) - } + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn chain_id( + &self, + ) -> Pin> + '_>> { + Box::pin(async move { self.provider.get_chain_id().await.map_err(Into::into) }) + } - #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - fn transaction_gas_price( - &self, - tx_hash: TxHash, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_transaction_receipt(tx_hash) - .await? 
- .context("Failed to get the transaction receipt") - .map(|receipt| receipt.effective_gas_price) - }) - } + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn transaction_gas_price( + &self, + tx_hash: TxHash, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_transaction_receipt(tx_hash) + .await? + .context("Failed to get the transaction receipt") + .map(|receipt| receipt.effective_gas_price) + }) + } - #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - fn block_gas_limit( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_block_by_number(number) - .await - .context("Failed to get the geth block")? - .context("Failed to get the Geth block, perhaps there are no blocks?") - .map(|block| block.header.gas_limit as _) - }) - } + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn block_gas_limit( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the geth block")? + .context("Failed to get the Geth block, perhaps there are no blocks?") + .map(|block| block.header.gas_limit as _) + }) + } - #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - fn block_coinbase( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_block_by_number(number) - .await - .context("Failed to get the geth block")? - .context("Failed to get the Geth block, perhaps there are no blocks?") - .map(|block| block.header.beneficiary) - }) - } + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn block_coinbase( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the geth block")? 
+ .context("Failed to get the Geth block, perhaps there are no blocks?") + .map(|block| block.header.beneficiary) + }) + } - #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - fn block_difficulty( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_block_by_number(number) - .await - .context("Failed to get the geth block")? - .context("Failed to get the Geth block, perhaps there are no blocks?") - .map(|block| U256::from_be_bytes(block.header.mix_hash.0)) - }) - } + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn block_difficulty( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the geth block")? + .context("Failed to get the Geth block, perhaps there are no blocks?") + .map(|block| U256::from_be_bytes(block.header.mix_hash.0)) + }) + } - #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - fn block_base_fee( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_block_by_number(number) - .await - .context("Failed to get the geth block")? - .context("Failed to get the Geth block, perhaps there are no blocks?") - .and_then(|block| { - block - .header - .base_fee_per_gas - .context("Failed to get the base fee per gas") - }) - }) - } + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn block_base_fee( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the geth block")? 
+ .context("Failed to get the Geth block, perhaps there are no blocks?") + .and_then(|block| { + block.header.base_fee_per_gas.context("Failed to get the base fee per gas") + }) + }) + } - #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - fn block_hash( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_block_by_number(number) - .await - .context("Failed to get the geth block")? - .context("Failed to get the Geth block, perhaps there are no blocks?") - .map(|block| block.header.hash) - }) - } + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn block_hash( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the geth block")? + .context("Failed to get the Geth block, perhaps there are no blocks?") + .map(|block| block.header.hash) + }) + } - #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - fn block_timestamp( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_block_by_number(number) - .await - .context("Failed to get the geth block")? - .context("Failed to get the Geth block, perhaps there are no blocks?") - .map(|block| block.header.timestamp) - }) - } + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn block_timestamp( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the geth block")? 
+ .context("Failed to get the Geth block, perhaps there are no blocks?") + .map(|block| block.header.timestamp) + }) + } - #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - fn last_block_number(&self) -> Pin> + '_>> { - Box::pin(async move { self.provider.get_block_number().await.map_err(Into::into) }) - } + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn last_block_number(&self) -> Pin> + '_>> { + Box::pin(async move { self.provider.get_block_number().await.map_err(Into::into) }) + } } impl Node for GethNode { - #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - fn shutdown(&mut self) -> anyhow::Result<()> { - drop(self.handle.take()); + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn shutdown(&mut self) -> anyhow::Result<()> { + drop(self.handle.take()); - // Remove the node's database so that subsequent runs do not run on the same database. We - // ignore the error just in case the directory didn't exist in the first place and therefore - // there's nothing to be deleted. - let _ = remove_dir_all(self.base_directory.join(Self::DATA_DIRECTORY)); + // Remove the node's database so that subsequent runs do not run on the same database. We + // ignore the error just in case the directory didn't exist in the first place and therefore + // there's nothing to be deleted. 
+ let _ = remove_dir_all(self.base_directory.join(Self::DATA_DIRECTORY)); - Ok(()) - } + Ok(()) + } - #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> { - self.init(genesis)?.spawn_process()?; - Ok(()) - } + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> { + self.init(genesis)?.spawn_process()?; + Ok(()) + } - #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - fn version(&self) -> anyhow::Result { - let output = Command::new(&self.geth) - .arg("--version") - .stdin(Stdio::null()) - .stdout(Stdio::piped()) - .stderr(Stdio::null()) - .spawn() - .context("Failed to spawn geth --version process")? - .wait_with_output() - .context("Failed to wait for geth --version output")? - .stdout; - Ok(String::from_utf8_lossy(&output).into()) - } + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn version(&self) -> anyhow::Result { + let output = Command::new(&self.geth) + .arg("--version") + .stdin(Stdio::null()) + .stdout(Stdio::piped()) + .stderr(Stdio::null()) + .spawn() + .context("Failed to spawn geth --version process")? + .wait_with_output() + .context("Failed to wait for geth --version output")? 
+ .stdout; + Ok(String::from_utf8_lossy(&output).into()) + } } impl Drop for GethNode { - #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] - fn drop(&mut self) { - self.shutdown().expect("Failed to shutdown") - } + #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] + fn drop(&mut self) { + self.shutdown().expect("Failed to shutdown") + } } #[cfg(test)] mod tests { - use std::sync::LazyLock; + use std::sync::LazyLock; - use super::*; + use super::*; - fn test_config() -> TestExecutionContext { - TestExecutionContext::default() - } + fn test_config() -> TestExecutionContext { + TestExecutionContext::default() + } - fn new_node() -> (TestExecutionContext, GethNode) { - let context = test_config(); - let mut node = GethNode::new(&context); - node.init(context.genesis_configuration.genesis().unwrap().clone()) - .expect("Failed to initialize the node") - .spawn_process() - .expect("Failed to spawn the node process"); - (context, node) - } + fn new_node() -> (TestExecutionContext, GethNode) { + let context = test_config(); + let mut node = GethNode::new(&context); + node.init(context.genesis_configuration.genesis().unwrap().clone()) + .expect("Failed to initialize the node") + .spawn_process() + .expect("Failed to spawn the node process"); + (context, node) + } - fn shared_state() -> &'static (TestExecutionContext, GethNode) { - static STATE: LazyLock<(TestExecutionContext, GethNode)> = LazyLock::new(new_node); - &STATE - } + fn shared_state() -> &'static (TestExecutionContext, GethNode) { + static STATE: LazyLock<(TestExecutionContext, GethNode)> = LazyLock::new(new_node); + &STATE + } - fn shared_node() -> &'static GethNode { - &shared_state().1 - } + fn shared_node() -> &'static GethNode { + &shared_state().1 + } - #[tokio::test] - async fn node_mines_simple_transfer_transaction_and_returns_receipt() { - // Arrange - let (context, node) = shared_state(); + #[tokio::test] + async fn 
node_mines_simple_transfer_transaction_and_returns_receipt() { + // Arrange + let (context, node) = shared_state(); - let account_address = context - .wallet_configuration - .wallet() - .default_signer() - .address(); - let transaction = TransactionRequest::default() - .to(account_address) - .value(U256::from(100_000_000_000_000u128)); + let account_address = context.wallet_configuration.wallet().default_signer().address(); + let transaction = TransactionRequest::default() + .to(account_address) + .value(U256::from(100_000_000_000_000u128)); - // Act - let receipt = node.execute_transaction(transaction).await; + // Act + let receipt = node.execute_transaction(transaction).await; - // Assert - let _ = receipt.expect("Failed to get the receipt for the transfer"); - } + // Assert + let _ = receipt.expect("Failed to get the receipt for the transfer"); + } - #[test] - #[ignore = "Ignored since they take a long time to run"] - fn version_works() { - // Arrange - let node = shared_node(); + #[test] + #[ignore = "Ignored since they take a long time to run"] + fn version_works() { + // Arrange + let node = shared_node(); - // Act - let version = node.version(); + // Act + let version = node.version(); - // Assert - let version = version.expect("Failed to get the version"); - assert!( - version.starts_with("geth version"), - "expected version string, got: '{version}'" - ); - } + // Assert + let version = version.expect("Failed to get the version"); + assert!(version.starts_with("geth version"), "expected version string, got: '{version}'"); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_chain_id_from_node() { - // Arrange - let node = shared_node(); + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_chain_id_from_node() { + // Arrange + let node = shared_node(); - // Act - let chain_id = node.resolver().await.unwrap().chain_id().await; + // Act + let chain_id = 
node.resolver().await.unwrap().chain_id().await; - // Assert - let chain_id = chain_id.expect("Failed to get the chain id"); - assert_eq!(chain_id, 420_420_420); - } + // Assert + let chain_id = chain_id.expect("Failed to get the chain id"); + assert_eq!(chain_id, 420_420_420); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_gas_limit_from_node() { - // Arrange - let node = shared_node(); + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_gas_limit_from_node() { + // Arrange + let node = shared_node(); - // Act - let gas_limit = node - .resolver() - .await - .unwrap() - .block_gas_limit(BlockNumberOrTag::Latest) - .await; + // Act + let gas_limit = + node.resolver().await.unwrap().block_gas_limit(BlockNumberOrTag::Latest).await; - // Assert - let _ = gas_limit.expect("Failed to get the gas limit"); - } + // Assert + let _ = gas_limit.expect("Failed to get the gas limit"); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_coinbase_from_node() { - // Arrange - let node = shared_node(); + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_coinbase_from_node() { + // Arrange + let node = shared_node(); - // Act - let coinbase = node - .resolver() - .await - .unwrap() - .block_coinbase(BlockNumberOrTag::Latest) - .await; + // Act + let coinbase = + node.resolver().await.unwrap().block_coinbase(BlockNumberOrTag::Latest).await; - // Assert - let _ = coinbase.expect("Failed to get the coinbase"); - } + // Assert + let _ = coinbase.expect("Failed to get the coinbase"); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_block_difficulty_from_node() { - // Arrange - let node = shared_node(); + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_block_difficulty_from_node() { + // Arrange + let node = 
shared_node(); - // Act - let block_difficulty = node - .resolver() - .await - .unwrap() - .block_difficulty(BlockNumberOrTag::Latest) - .await; + // Act + let block_difficulty = + node.resolver().await.unwrap().block_difficulty(BlockNumberOrTag::Latest).await; - // Assert - let _ = block_difficulty.expect("Failed to get the block difficulty"); - } + // Assert + let _ = block_difficulty.expect("Failed to get the block difficulty"); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_block_hash_from_node() { - // Arrange - let node = shared_node(); + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_block_hash_from_node() { + // Arrange + let node = shared_node(); - // Act - let block_hash = node - .resolver() - .await - .unwrap() - .block_hash(BlockNumberOrTag::Latest) - .await; + // Act + let block_hash = node.resolver().await.unwrap().block_hash(BlockNumberOrTag::Latest).await; - // Assert - let _ = block_hash.expect("Failed to get the block hash"); - } + // Assert + let _ = block_hash.expect("Failed to get the block hash"); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_block_timestamp_from_node() { - // Arrange - let node = shared_node(); + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_block_timestamp_from_node() { + // Arrange + let node = shared_node(); - // Act - let block_timestamp = node - .resolver() - .await - .unwrap() - .block_timestamp(BlockNumberOrTag::Latest) - .await; + // Act + let block_timestamp = + node.resolver().await.unwrap().block_timestamp(BlockNumberOrTag::Latest).await; - // Assert - let _ = block_timestamp.expect("Failed to get the block timestamp"); - } + // Assert + let _ = block_timestamp.expect("Failed to get the block timestamp"); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn 
can_get_block_number_from_node() { - // Arrange - let node = shared_node(); + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_block_number_from_node() { + // Arrange + let node = shared_node(); - // Act - let block_number = node.resolver().await.unwrap().last_block_number().await; + // Act + let block_number = node.resolver().await.unwrap().last_block_number().await; - // Assert - let _ = block_number.expect("Failed to get the block number"); - } + // Assert + let _ = block_number.expect("Failed to get the block number"); + } } diff --git a/crates/node/src/node_implementations/lighthouse_geth.rs b/crates/node/src/node_implementations/lighthouse_geth.rs index e517484..1f3f7dd 100644 --- a/crates/node/src/node_implementations/lighthouse_geth.rs +++ b/crates/node/src/node_implementations/lighthouse_geth.rs @@ -9,38 +9,38 @@ //! that the tool has. use std::{ - collections::{BTreeMap, HashSet}, - fs::{File, create_dir_all}, - io::Read, - ops::ControlFlow, - path::PathBuf, - pin::Pin, - process::{Command, Stdio}, - sync::{ - Arc, - atomic::{AtomicU32, Ordering}, - }, - time::{Duration, SystemTime, UNIX_EPOCH}, + collections::{BTreeMap, HashSet}, + fs::{File, create_dir_all}, + io::Read, + ops::ControlFlow, + path::PathBuf, + pin::Pin, + process::{Command, Stdio}, + sync::{ + Arc, + atomic::{AtomicU32, Ordering}, + }, + time::{Duration, SystemTime, UNIX_EPOCH}, }; use alloy::{ - eips::BlockNumberOrTag, - genesis::{Genesis, GenesisAccount}, - network::{Ethereum, EthereumWallet, NetworkWallet}, - primitives::{ - Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256, address, - }, - providers::{ - Provider, - ext::DebugApi, - fillers::{CachedNonceManager, ChainIdFiller, FillProvider, NonceFiller, TxFiller}, - }, - rpc::types::{ - EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest, - trace::geth::{ - DiffMode, GethDebugTracingOptions, GethTrace, PreStateConfig, PreStateFrame, - }, - }, + 
eips::BlockNumberOrTag, + genesis::{Genesis, GenesisAccount}, + network::{Ethereum, EthereumWallet, NetworkWallet}, + primitives::{ + Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256, address, + }, + providers::{ + Provider, + ext::DebugApi, + fillers::{CachedNonceManager, ChainIdFiller, FillProvider, NonceFiller, TxFiller}, + }, + rpc::types::{ + EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest, + trace::geth::{ + DiffMode, GethDebugTracingOptions, GethTrace, PreStateConfig, PreStateFrame, + }, + }, }; use anyhow::Context as _; use futures::{Stream, StreamExt}; @@ -51,18 +51,18 @@ use tokio::sync::OnceCell; use tracing::{Instrument, info, instrument}; use revive_dt_common::{ - fs::clear_directory, - futures::{PollingWaitBehavior, poll}, + fs::clear_directory, + futures::{PollingWaitBehavior, poll}, }; use revive_dt_config::*; use revive_dt_format::traits::ResolverApi; use revive_dt_node_interaction::{EthereumNode, MinedBlockInformation}; use crate::{ - Node, - constants::{CHAIN_ID, INITIAL_BALANCE}, - helpers::{Process, ProcessReadinessWaitBehavior}, - provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider}, + Node, + constants::{CHAIN_ID, INITIAL_BALANCE}, + helpers::{Process, ProcessReadinessWaitBehavior}, + provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider}, }; static NODE_COUNT: AtomicU32 = AtomicU32::new(0); @@ -77,1222 +77,1174 @@ static NODE_COUNT: AtomicU32 = AtomicU32::new(0); #[derive(Debug)] #[allow(clippy::type_complexity)] pub struct LighthouseGethNode { - /* Node Identifier */ - id: u32, - ws_connection_string: String, - http_connection_string: String, - enclave_name: String, + /* Node Identifier */ + id: u32, + ws_connection_string: String, + http_connection_string: String, + enclave_name: String, - /* Directory Paths */ - base_directory: PathBuf, - logs_directory: PathBuf, + /* Directory Paths */ + base_directory: PathBuf, + 
logs_directory: PathBuf, - /* File Paths */ - config_file_path: PathBuf, + /* File Paths */ + config_file_path: PathBuf, - /* Binary Paths & Timeouts */ - kurtosis_binary_path: PathBuf, + /* Binary Paths & Timeouts */ + kurtosis_binary_path: PathBuf, - /* Spawned Processes */ - process: Option, + /* Spawned Processes */ + process: Option, - /* Prefunded Account Information */ - prefunded_account_address: Address, + /* Prefunded Account Information */ + prefunded_account_address: Address, - /* Provider Related Fields */ - wallet: Arc, - nonce_manager: CachedNonceManager, + /* Provider Related Fields */ + wallet: Arc, + nonce_manager: CachedNonceManager, - persistent_http_provider: OnceCell>>, - persistent_ws_provider: OnceCell>>, + persistent_http_provider: OnceCell>>, + persistent_ws_provider: OnceCell>>, } impl LighthouseGethNode { - const BASE_DIRECTORY: &str = "lighthouse"; - const LOGS_DIRECTORY: &str = "logs"; + const BASE_DIRECTORY: &str = "lighthouse"; + const LOGS_DIRECTORY: &str = "logs"; - const CONFIG_FILE_NAME: &str = "config.yaml"; + const CONFIG_FILE_NAME: &str = "config.yaml"; - const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress"; - const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet"; + const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress"; + const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet"; - const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(5 * 60); - const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60); + const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(5 * 60); + const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60); - const VALIDATOR_MNEMONIC: &str = "giant issue aisle success illegal bike spike question tent bar rely arctic volcano long crawl hungry vocal artwork sniff fantasy very lucky have athlete"; + const VALIDATOR_MNEMONIC: &str = "giant issue aisle success 
illegal bike spike question tent bar rely arctic volcano long crawl hungry vocal artwork sniff fantasy very lucky have athlete"; - pub fn new( - context: impl AsRef - + AsRef - + AsRef - + Clone, - ) -> Self { - let working_directory_configuration = - AsRef::::as_ref(&context); - let wallet_configuration = AsRef::::as_ref(&context); - let kurtosis_configuration = AsRef::::as_ref(&context); + pub fn new( + context: impl AsRef + + AsRef + + AsRef + + Clone, + ) -> Self { + let working_directory_configuration = + AsRef::::as_ref(&context); + let wallet_configuration = AsRef::::as_ref(&context); + let kurtosis_configuration = AsRef::::as_ref(&context); - let geth_directory = working_directory_configuration - .as_path() - .join(Self::BASE_DIRECTORY); - let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst); - let base_directory = geth_directory.join(id.to_string()); + let geth_directory = working_directory_configuration.as_path().join(Self::BASE_DIRECTORY); + let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst); + let base_directory = geth_directory.join(id.to_string()); - let wallet = wallet_configuration.wallet(); + let wallet = wallet_configuration.wallet(); - Self { - /* Node Identifier */ - id, - ws_connection_string: String::default(), - http_connection_string: String::default(), - enclave_name: format!( - "enclave-{}-{}", - SystemTime::now() - .duration_since(UNIX_EPOCH) - .expect("Must not fail") - .as_nanos(), - id - ), + Self { + /* Node Identifier */ + id, + ws_connection_string: String::default(), + http_connection_string: String::default(), + enclave_name: format!( + "enclave-{}-{}", + SystemTime::now().duration_since(UNIX_EPOCH).expect("Must not fail").as_nanos(), + id + ), - /* File Paths */ - config_file_path: base_directory.join(Self::CONFIG_FILE_NAME), + /* File Paths */ + config_file_path: base_directory.join(Self::CONFIG_FILE_NAME), - /* Directory Paths */ - logs_directory: base_directory.join(Self::LOGS_DIRECTORY), - base_directory, + /* Directory Paths 
*/ + logs_directory: base_directory.join(Self::LOGS_DIRECTORY), + base_directory, - /* Binary Paths & Timeouts */ - kurtosis_binary_path: kurtosis_configuration.path.clone(), + /* Binary Paths & Timeouts */ + kurtosis_binary_path: kurtosis_configuration.path.clone(), - /* Spawned Processes */ - process: None, + /* Spawned Processes */ + process: None, - /* Prefunded Account Information */ - prefunded_account_address: wallet.default_signer().address(), + /* Prefunded Account Information */ + prefunded_account_address: wallet.default_signer().address(), - /* Provider Related Fields */ - wallet: wallet.clone(), - nonce_manager: Default::default(), - persistent_http_provider: OnceCell::const_new(), - persistent_ws_provider: OnceCell::const_new(), - } - } + /* Provider Related Fields */ + wallet: wallet.clone(), + nonce_manager: Default::default(), + persistent_http_provider: OnceCell::const_new(), + persistent_ws_provider: OnceCell::const_new(), + } + } - /// Create the node directory and call `geth init` to configure the genesis. - #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] - fn init(&mut self, _: Genesis) -> anyhow::Result<&mut Self> { - self.init_directories() - .context("Failed to initialize the directories of the Lighthouse Geth node.")?; - self.init_kurtosis_config_file() - .context("Failed to write the config file to the FS")?; + /// Create the node directory and call `geth init` to configure the genesis. 
+ #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] + fn init(&mut self, _: Genesis) -> anyhow::Result<&mut Self> { + self.init_directories() + .context("Failed to initialize the directories of the Lighthouse Geth node.")?; + self.init_kurtosis_config_file() + .context("Failed to write the config file to the FS")?; - Ok(self) - } + Ok(self) + } - fn init_directories(&self) -> anyhow::Result<()> { - let _ = clear_directory(&self.base_directory); - let _ = clear_directory(&self.logs_directory); + fn init_directories(&self) -> anyhow::Result<()> { + let _ = clear_directory(&self.base_directory); + let _ = clear_directory(&self.logs_directory); - create_dir_all(&self.base_directory) - .context("Failed to create base directory for geth node")?; - create_dir_all(&self.logs_directory) - .context("Failed to create logs directory for geth node")?; + create_dir_all(&self.base_directory) + .context("Failed to create base directory for geth node")?; + create_dir_all(&self.logs_directory) + .context("Failed to create logs directory for geth node")?; - Ok(()) - } + Ok(()) + } - fn init_kurtosis_config_file(&self) -> anyhow::Result<()> { - let config = KurtosisNetworkConfig { - participants: vec![ParticipantParameters { - execution_layer_type: ExecutionLayerType::Geth, - consensus_layer_type: ConsensusLayerType::Lighthouse, - execution_layer_extra_parameters: vec![ - "--nodiscover".to_string(), - "--cache=4096".to_string(), - "--txlookuplimit=0".to_string(), - "--gcmode=archive".to_string(), - "--txpool.globalslots=500000".to_string(), - "--txpool.globalqueue=500000".to_string(), - "--txpool.accountslots=32768".to_string(), - "--txpool.accountqueue=32768".to_string(), - "--http.api=admin,engine,net,eth,web3,debug,txpool".to_string(), - "--http.addr=0.0.0.0".to_string(), - "--ws".to_string(), - "--ws.addr=0.0.0.0".to_string(), - "--ws.port=8546".to_string(), - "--ws.api=eth,net,web3,txpool,engine".to_string(), - "--ws.origins=*".to_string(), - ], - 
consensus_layer_extra_parameters: vec![ - "--disable-quic".to_string(), - "--disable-deposit-contract-sync".to_string(), - ], - }], - network_parameters: NetworkParameters { - preset: NetworkPreset::Mainnet, - seconds_per_slot: 12, - network_id: CHAIN_ID, - deposit_contract_address: address!("0x00000000219ab540356cBB839Cbe05303d7705Fa"), - altair_fork_epoch: 0, - bellatrix_fork_epoch: 0, - capella_fork_epoch: 0, - deneb_fork_epoch: 0, - electra_fork_epoch: 0, - preregistered_validator_keys_mnemonic: Self::VALIDATOR_MNEMONIC.to_string(), - num_validator_keys_per_node: 64, - genesis_delay: 10, - prefunded_accounts: { - let map = std::iter::once(self.prefunded_account_address) - .map(|address| (address, GenesisAccount::default().with_balance(U256::MAX))) - .collect::>(); - serde_json::to_string(&map).unwrap() - }, - }, - wait_for_finalization: false, - port_publisher: Some(PortPublisherParameters { - execution_layer_port_publisher_parameters: Some( - PortPublisherSingleItemParameters { - enabled: Some(true), - public_port_start: Some(32000 + self.id as u16 * 1000), - }, - ), - consensus_layer_port_publisher_parameters: Some( - PortPublisherSingleItemParameters { - enabled: Some(true), - public_port_start: Some(59010 + self.id as u16 * 50), - }, - ), - }), - }; + fn init_kurtosis_config_file(&self) -> anyhow::Result<()> { + let config = KurtosisNetworkConfig { + participants: vec![ParticipantParameters { + execution_layer_type: ExecutionLayerType::Geth, + consensus_layer_type: ConsensusLayerType::Lighthouse, + execution_layer_extra_parameters: vec![ + "--nodiscover".to_string(), + "--cache=4096".to_string(), + "--txlookuplimit=0".to_string(), + "--gcmode=archive".to_string(), + "--txpool.globalslots=500000".to_string(), + "--txpool.globalqueue=500000".to_string(), + "--txpool.accountslots=32768".to_string(), + "--txpool.accountqueue=32768".to_string(), + "--http.api=admin,engine,net,eth,web3,debug,txpool".to_string(), + "--http.addr=0.0.0.0".to_string(), + 
"--ws".to_string(), + "--ws.addr=0.0.0.0".to_string(), + "--ws.port=8546".to_string(), + "--ws.api=eth,net,web3,txpool,engine".to_string(), + "--ws.origins=*".to_string(), + ], + consensus_layer_extra_parameters: vec![ + "--disable-quic".to_string(), + "--disable-deposit-contract-sync".to_string(), + ], + }], + network_parameters: NetworkParameters { + preset: NetworkPreset::Mainnet, + seconds_per_slot: 12, + network_id: CHAIN_ID, + deposit_contract_address: address!("0x00000000219ab540356cBB839Cbe05303d7705Fa"), + altair_fork_epoch: 0, + bellatrix_fork_epoch: 0, + capella_fork_epoch: 0, + deneb_fork_epoch: 0, + electra_fork_epoch: 0, + preregistered_validator_keys_mnemonic: Self::VALIDATOR_MNEMONIC.to_string(), + num_validator_keys_per_node: 64, + genesis_delay: 10, + prefunded_accounts: { + let map = std::iter::once(self.prefunded_account_address) + .map(|address| (address, GenesisAccount::default().with_balance(U256::MAX))) + .collect::>(); + serde_json::to_string(&map).unwrap() + }, + }, + wait_for_finalization: false, + port_publisher: Some(PortPublisherParameters { + execution_layer_port_publisher_parameters: Some( + PortPublisherSingleItemParameters { + enabled: Some(true), + public_port_start: Some(32000 + self.id as u16 * 1000), + }, + ), + consensus_layer_port_publisher_parameters: Some( + PortPublisherSingleItemParameters { + enabled: Some(true), + public_port_start: Some(59010 + self.id as u16 * 50), + }, + ), + }), + }; - let file = File::create(self.config_file_path.as_path()) - .context("Failed to open the config yaml file")?; - serde_yaml_ng::to_writer(file, &config) - .context("Failed to write the config to the yaml file")?; + let file = File::create(self.config_file_path.as_path()) + .context("Failed to open the config yaml file")?; + serde_yaml_ng::to_writer(file, &config) + .context("Failed to write the config to the yaml file")?; - Ok(()) - } + Ok(()) + } - /// Spawn the go-ethereum node child process. 
- #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] - fn spawn_process(&mut self) -> anyhow::Result<&mut Self> { - let process = Process::new( - None, - self.logs_directory.as_path(), - self.kurtosis_binary_path.as_path(), - |command, stdout, stderr| { - command - .arg("run") - .arg("--enclave") - .arg(self.enclave_name.as_str()) - .arg("github.com/ethpandaops/ethereum-package") - .arg("--args-file") - .arg(self.config_file_path.as_path()) - .stdout(stdout) - .stderr(stderr); - }, - ProcessReadinessWaitBehavior::TimeBoundedWaitFunction { - max_wait_duration: Duration::from_secs(15 * 60), - check_function: Box::new(|stdout, stderr| { - for line in [stdout, stderr].iter().flatten() { - if line.to_lowercase().contains("error encountered") { - anyhow::bail!("Encountered an error when starting Kurtosis") - } else if line.contains("RUNNING") { - return Ok(true); - } - } - Ok(false) - }), - }, - ) - .context("Failed to spawn the kurtosis enclave") - .inspect_err(|err| { - tracing::error!(?err, "Failed to spawn Kurtosis"); - self.shutdown().expect("Failed to shutdown kurtosis"); - })?; - self.process = Some(process); + /// Spawn the go-ethereum node child process. 
+ #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] + fn spawn_process(&mut self) -> anyhow::Result<&mut Self> { + let process = Process::new( + None, + self.logs_directory.as_path(), + self.kurtosis_binary_path.as_path(), + |command, stdout, stderr| { + command + .arg("run") + .arg("--enclave") + .arg(self.enclave_name.as_str()) + .arg("github.com/ethpandaops/ethereum-package") + .arg("--args-file") + .arg(self.config_file_path.as_path()) + .stdout(stdout) + .stderr(stderr); + }, + ProcessReadinessWaitBehavior::TimeBoundedWaitFunction { + max_wait_duration: Duration::from_secs(15 * 60), + check_function: Box::new(|stdout, stderr| { + for line in [stdout, stderr].iter().flatten() { + if line.to_lowercase().contains("error encountered") { + anyhow::bail!("Encountered an error when starting Kurtosis") + } else if line.contains("RUNNING") { + return Ok(true); + } + } + Ok(false) + }), + }, + ) + .context("Failed to spawn the kurtosis enclave") + .inspect_err(|err| { + tracing::error!(?err, "Failed to spawn Kurtosis"); + self.shutdown().expect("Failed to shutdown kurtosis"); + })?; + self.process = Some(process); - let child = Command::new(self.kurtosis_binary_path.as_path()) - .arg("enclave") - .arg("inspect") - .arg(self.enclave_name.as_str()) - .stdout(Stdio::piped()) - .spawn() - .context("Failed to spawn the kurtosis enclave inspect process")?; + let child = Command::new(self.kurtosis_binary_path.as_path()) + .arg("enclave") + .arg("inspect") + .arg(self.enclave_name.as_str()) + .stdout(Stdio::piped()) + .spawn() + .context("Failed to spawn the kurtosis enclave inspect process")?; - let stdout = { - let mut stdout = String::default(); - child - .stdout - .expect("Should be piped") - .read_to_string(&mut stdout) - .context("Failed to read stdout of kurtosis inspect to string")?; - stdout - }; + let stdout = { + let mut stdout = String::default(); + child + .stdout + .expect("Should be piped") + .read_to_string(&mut stdout) + 
.context("Failed to read stdout of kurtosis inspect to string")?; + stdout + }; - self.http_connection_string = stdout - .split("el-1-geth-lighthouse") - .nth(1) - .and_then(|str| str.split(" rpc").nth(1)) - .and_then(|str| str.split("->").nth(1)) - .and_then(|str| str.split("\n").next()) - .and_then(|str| str.trim().split(" ").next()) - .map(|str| format!("http://{}", str.trim())) - .context("Failed to find the HTTP connection string of Kurtosis")?; - self.ws_connection_string = stdout - .split("el-1-geth-lighthouse") - .nth(1) - .and_then(|str| str.split("ws").nth(1)) - .and_then(|str| str.split("->").nth(1)) - .and_then(|str| str.split("\n").next()) - .and_then(|str| str.trim().split(" ").next()) - .map(|str| format!("ws://{}", str.trim())) - .context("Failed to find the WS connection string of Kurtosis")?; + self.http_connection_string = stdout + .split("el-1-geth-lighthouse") + .nth(1) + .and_then(|str| str.split(" rpc").nth(1)) + .and_then(|str| str.split("->").nth(1)) + .and_then(|str| str.split("\n").next()) + .and_then(|str| str.trim().split(" ").next()) + .map(|str| format!("http://{}", str.trim())) + .context("Failed to find the HTTP connection string of Kurtosis")?; + self.ws_connection_string = stdout + .split("el-1-geth-lighthouse") + .nth(1) + .and_then(|str| str.split("ws").nth(1)) + .and_then(|str| str.split("->").nth(1)) + .and_then(|str| str.split("\n").next()) + .and_then(|str| str.trim().split(" ").next()) + .map(|str| format!("ws://{}", str.trim())) + .context("Failed to find the WS connection string of Kurtosis")?; - info!( - http_connection_string = self.http_connection_string, - ws_connection_string = self.ws_connection_string, - "Discovered the connection strings for the node" - ); + info!( + http_connection_string = self.http_connection_string, + ws_connection_string = self.ws_connection_string, + "Discovered the connection strings for the node" + ); - Ok(self) - } + Ok(self) + } - #[instrument( + #[instrument( level = "info", skip_all, 
fields(lighthouse_node_id = self.id, connection_string = self.ws_connection_string), err(Debug), )] - #[allow(clippy::type_complexity)] - async fn ws_provider(&self) -> anyhow::Result>> { - self.persistent_ws_provider - .get_or_try_init(|| async move { - construct_concurrency_limited_provider::( - self.ws_connection_string.as_str(), - FallbackGasFiller::default(), - ChainIdFiller::new(Some(CHAIN_ID)), - NonceFiller::new(self.nonce_manager.clone()), - self.wallet.clone(), - ) - .await - .context("Failed to construct the provider") - }) - .await - .cloned() - } + #[allow(clippy::type_complexity)] + async fn ws_provider(&self) -> anyhow::Result>> { + self.persistent_ws_provider + .get_or_try_init(|| async move { + construct_concurrency_limited_provider::( + self.ws_connection_string.as_str(), + FallbackGasFiller::default(), + ChainIdFiller::new(Some(CHAIN_ID)), + NonceFiller::new(self.nonce_manager.clone()), + self.wallet.clone(), + ) + .await + .context("Failed to construct the provider") + }) + .await + .cloned() + } - #[instrument( + #[instrument( level = "info", skip_all, fields(lighthouse_node_id = self.id, connection_string = self.ws_connection_string), err(Debug), )] - #[allow(clippy::type_complexity)] - async fn http_provider( - &self, - ) -> anyhow::Result>> { - self.persistent_http_provider - .get_or_try_init(|| async move { - construct_concurrency_limited_provider::( - self.http_connection_string.as_str(), - FallbackGasFiller::default(), - ChainIdFiller::new(Some(CHAIN_ID)), - NonceFiller::new(self.nonce_manager.clone()), - self.wallet.clone(), - ) - .await - .context("Failed to construct the provider") - }) - .await - .cloned() - } + #[allow(clippy::type_complexity)] + async fn http_provider( + &self, + ) -> anyhow::Result>> { + self.persistent_http_provider + .get_or_try_init(|| async move { + construct_concurrency_limited_provider::( + self.http_connection_string.as_str(), + FallbackGasFiller::default(), + ChainIdFiller::new(Some(CHAIN_ID)), + 
NonceFiller::new(self.nonce_manager.clone()), + self.wallet.clone(), + ) + .await + .context("Failed to construct the provider") + }) + .await + .cloned() + } - /// Funds all of the accounts in the Ethereum wallet from the initially funded account. - #[instrument( + /// Funds all of the accounts in the Ethereum wallet from the initially funded account. + #[instrument( level = "info", skip_all, fields(lighthouse_node_id = self.id, connection_string = self.ws_connection_string), err(Debug), )] - async fn fund_all_accounts(&self) -> anyhow::Result<()> { - let mut full_block_subscriber = self - .ws_provider() - .await - .context("Failed to create the WS provider")? - .subscribe_full_blocks() - .into_stream() - .await - .context("Full block subscriber")?; + async fn fund_all_accounts(&self) -> anyhow::Result<()> { + let mut full_block_subscriber = self + .ws_provider() + .await + .context("Failed to create the WS provider")? + .subscribe_full_blocks() + .into_stream() + .await + .context("Full block subscriber")?; - let mut tx_hashes = futures::future::try_join_all( - NetworkWallet::::signer_addresses(self.wallet.as_ref()) - .enumerate() - .map(|(nonce, address)| async move { - let mut transaction = TransactionRequest::default() - .from(self.prefunded_account_address) - .to(address) - .nonce(nonce as _) - .value(INITIAL_BALANCE.try_into().unwrap()); - transaction.chain_id = Some(CHAIN_ID); - self.submit_transaction(transaction).await - }), - ) - .await - .context("Failed to submit all transactions")? 
- .into_iter() - .collect::>(); + let mut tx_hashes = futures::future::try_join_all( + NetworkWallet::::signer_addresses(self.wallet.as_ref()) + .enumerate() + .map(|(nonce, address)| async move { + let mut transaction = TransactionRequest::default() + .from(self.prefunded_account_address) + .to(address) + .nonce(nonce as _) + .value(INITIAL_BALANCE.try_into().unwrap()); + transaction.chain_id = Some(CHAIN_ID); + self.submit_transaction(transaction).await + }), + ) + .await + .context("Failed to submit all transactions")? + .into_iter() + .collect::>(); - while let Some(block) = full_block_subscriber.next().await { - let Ok(block) = block else { - continue; - }; + while let Some(block) = full_block_subscriber.next().await { + let Ok(block) = block else { + continue; + }; - let block_number = block.number(); - let block_timestamp = block.header.timestamp; - let block_transaction_count = block.transactions.len(); + let block_number = block.number(); + let block_timestamp = block.header.timestamp; + let block_transaction_count = block.transactions.len(); - for hash in block.transactions.into_hashes().as_hashes().unwrap() { - tx_hashes.remove(hash); - } + for hash in block.transactions.into_hashes().as_hashes().unwrap() { + tx_hashes.remove(hash); + } - info!( - block.number = block_number, - block.timestamp = block_timestamp, - block.transaction_count = block_transaction_count, - remaining_transactions = tx_hashes.len(), - "Discovered new block when funding accounts" - ); + info!( + block.number = block_number, + block.timestamp = block_timestamp, + block.transaction_count = block_transaction_count, + remaining_transactions = tx_hashes.len(), + "Discovered new block when funding accounts" + ); - if tx_hashes.is_empty() { - break; - } - } + if tx_hashes.is_empty() { + break; + } + } - Ok(()) - } + Ok(()) + } - fn internal_execute_transaction<'a>( - transaction: TransactionRequest, - provider: FillProvider< - impl TxFiller + 'a, - impl Provider + Clone + 'a, - Ethereum, 
- >, - ) -> Pin> + 'a>> { - Box::pin(async move { - let pending_transaction = provider - .send_transaction(transaction) - .await - .inspect_err(|err| { - tracing::error!( - %err, - "Encountered an error when submitting the transaction" - ) - }) - .context("Failed to submit transaction to geth node")?; - let transaction_hash = *pending_transaction.tx_hash(); + fn internal_execute_transaction<'a>( + transaction: TransactionRequest, + provider: FillProvider< + impl TxFiller + 'a, + impl Provider + Clone + 'a, + Ethereum, + >, + ) -> Pin> + 'a>> { + Box::pin(async move { + let pending_transaction = provider + .send_transaction(transaction) + .await + .inspect_err(|err| { + tracing::error!( + %err, + "Encountered an error when submitting the transaction" + ) + }) + .context("Failed to submit transaction to geth node")?; + let transaction_hash = *pending_transaction.tx_hash(); - // The following is a fix for the "transaction indexing is in progress" error that we - // used to get. You can find more information on this in the following GH issue in geth - // https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on, - // before we can get the receipt of the transaction it needs to have been indexed by the - // node's indexer. Just because the transaction has been confirmed it doesn't mean that - // it has been indexed. When we call alloy's `get_receipt` it checks if the transaction - // was confirmed. If it has been, then it will call `eth_getTransactionReceipt` method - // which _might_ return the above error if the tx has not yet been indexed yet. So, we - // need to implement a retry mechanism for the receipt to keep retrying to get it until - // it eventually works, but we only do that if the error we get back is the "transaction - // indexing is in progress" error or if the receipt is None. 
- // - // Getting the transaction indexed and taking a receipt can take a long time especially - // when a lot of transactions are being submitted to the node. Thus, while initially we - // only allowed for 60 seconds of waiting with a 1 second delay in polling, we need to - // allow for a larger wait time. Therefore, in here we allow for 5 minutes of waiting - // with exponential backoff each time we attempt to get the receipt and find that it's - // not available. - poll( - Self::RECEIPT_POLLING_DURATION, - PollingWaitBehavior::Constant(Duration::from_millis(500)), - move || { - let provider = provider.clone(); - async move { - match provider.get_transaction_receipt(transaction_hash).await { - Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)), - Ok(None) => Ok(ControlFlow::Continue(())), - Err(error) => { - let error_string = error.to_string(); - match error_string.contains(Self::TRANSACTION_INDEXING_ERROR) { - true => Ok(ControlFlow::Continue(())), - false => Err(error.into()), - } - } - } - } - }, - ) - .instrument(tracing::info_span!( - "Awaiting transaction receipt", - ?transaction_hash - )) - .await - }) - } + // The following is a fix for the "transaction indexing is in progress" error that we + // used to get. You can find more information on this in the following GH issue in geth + // https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on, + // before we can get the receipt of the transaction it needs to have been indexed by the + // node's indexer. Just because the transaction has been confirmed it doesn't mean that + // it has been indexed. When we call alloy's `get_receipt` it checks if the transaction + // was confirmed. If it has been, then it will call `eth_getTransactionReceipt` method + // which _might_ return the above error if the tx has not yet been indexed yet. 
So, we + // need to implement a retry mechanism for the receipt to keep retrying to get it until + // it eventually works, but we only do that if the error we get back is the "transaction + // indexing is in progress" error or if the receipt is None. + // + // Getting the transaction indexed and taking a receipt can take a long time especially + // when a lot of transactions are being submitted to the node. Thus, while initially we + // only allowed for 60 seconds of waiting with a 1 second delay in polling, we need to + // allow for a larger wait time. Therefore, in here we allow for 5 minutes of waiting + // with exponential backoff each time we attempt to get the receipt and find that it's + // not available. + poll( + Self::RECEIPT_POLLING_DURATION, + PollingWaitBehavior::Constant(Duration::from_millis(500)), + move || { + let provider = provider.clone(); + async move { + match provider.get_transaction_receipt(transaction_hash).await { + Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)), + Ok(None) => Ok(ControlFlow::Continue(())), + Err(error) => { + let error_string = error.to_string(); + match error_string.contains(Self::TRANSACTION_INDEXING_ERROR) { + true => Ok(ControlFlow::Continue(())), + false => Err(error.into()), + } + }, + } + } + }, + ) + .instrument(tracing::info_span!("Awaiting transaction receipt", ?transaction_hash)) + .await + }) + } } impl EthereumNode for LighthouseGethNode { - fn pre_transactions(&mut self) -> Pin> + '_>> { - Box::pin(async move { self.fund_all_accounts().await }) - } + fn pre_transactions(&mut self) -> Pin> + '_>> { + Box::pin(async move { self.fund_all_accounts().await }) + } - fn id(&self) -> usize { - self.id as _ - } + fn id(&self) -> usize { + self.id as _ + } - fn connection_string(&self) -> &str { - &self.ws_connection_string - } + fn connection_string(&self) -> &str { + &self.ws_connection_string + } - #[instrument( + #[instrument( level = "info", skip_all, fields(lighthouse_node_id = self.id, connection_string = 
self.ws_connection_string), err, )] - fn submit_transaction( - &self, - transaction: TransactionRequest, - ) -> Pin> + '_>> { - Box::pin(async move { - let provider = self - .http_provider() - .await - .context("Failed to create the provider for transaction submission")?; - let pending_transaction = provider - .send_transaction(transaction) - .await - .context("Failed to submit the transaction through the provider")?; - Ok(*pending_transaction.tx_hash()) - }) - } + fn submit_transaction( + &self, + transaction: TransactionRequest, + ) -> Pin> + '_>> { + Box::pin(async move { + let provider = self + .http_provider() + .await + .context("Failed to create the provider for transaction submission")?; + let pending_transaction = provider + .send_transaction(transaction) + .await + .context("Failed to submit the transaction through the provider")?; + Ok(*pending_transaction.tx_hash()) + }) + } - #[instrument( + #[instrument( level = "info", skip_all, fields(lighthouse_node_id = self.id, connection_string = self.ws_connection_string), )] - fn get_receipt( - &self, - tx_hash: TxHash, - ) -> Pin> + '_>> { - Box::pin(async move { - self.ws_provider() - .await - .context("Failed to create provider for getting the receipt")? - .get_transaction_receipt(tx_hash) - .await - .context("Failed to get the receipt of the transaction")? - .context("Failed to get the receipt of the transaction") - }) - } + fn get_receipt( + &self, + tx_hash: TxHash, + ) -> Pin> + '_>> { + Box::pin(async move { + self.ws_provider() + .await + .context("Failed to create provider for getting the receipt")? + .get_transaction_receipt(tx_hash) + .await + .context("Failed to get the receipt of the transaction")? 
+ .context("Failed to get the receipt of the transaction") + }) + } - #[instrument( + #[instrument( level = "info", skip_all, fields(lighthouse_node_id = self.id, connection_string = self.ws_connection_string), err, )] - fn execute_transaction( - &self, - transaction: TransactionRequest, - ) -> Pin> + '_>> { - Box::pin(async move { - let provider = self - .http_provider() - .await - .context("Failed to create provider for transaction execution")?; - Self::internal_execute_transaction(transaction, provider).await - }) - } + fn execute_transaction( + &self, + transaction: TransactionRequest, + ) -> Pin> + '_>> { + Box::pin(async move { + let provider = self + .http_provider() + .await + .context("Failed to create provider for transaction execution")?; + Self::internal_execute_transaction(transaction, provider).await + }) + } - #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] - fn trace_transaction( - &self, - tx_hash: TxHash, - trace_options: GethDebugTracingOptions, - ) -> Pin> + '_>> { - Box::pin(async move { - let provider = Arc::new( - self.http_provider() - .await - .context("Failed to create provider for tracing")?, - ); - poll( - Self::TRACE_POLLING_DURATION, - PollingWaitBehavior::Constant(Duration::from_millis(200)), - move || { - let provider = provider.clone(); - let trace_options = trace_options.clone(); - async move { - match provider - .debug_trace_transaction(tx_hash, trace_options) - .await - { - Ok(trace) => Ok(ControlFlow::Break(trace)), - Err(error) => { - let error_string = error.to_string(); - match error_string.contains(Self::TRANSACTION_TRACING_ERROR) { - true => Ok(ControlFlow::Continue(())), - false => Err(error.into()), - } - } - } - } - }, - ) - .await - }) - } + #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] + fn trace_transaction( + &self, + tx_hash: TxHash, + trace_options: GethDebugTracingOptions, + ) -> Pin> + '_>> { + Box::pin(async move { + let provider = Arc::new( + 
self.http_provider().await.context("Failed to create provider for tracing")?, + ); + poll( + Self::TRACE_POLLING_DURATION, + PollingWaitBehavior::Constant(Duration::from_millis(200)), + move || { + let provider = provider.clone(); + let trace_options = trace_options.clone(); + async move { + match provider.debug_trace_transaction(tx_hash, trace_options).await { + Ok(trace) => Ok(ControlFlow::Break(trace)), + Err(error) => { + let error_string = error.to_string(); + match error_string.contains(Self::TRANSACTION_TRACING_ERROR) { + true => Ok(ControlFlow::Continue(())), + false => Err(error.into()), + } + }, + } + } + }, + ) + .await + }) + } - #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] - fn state_diff( - &self, - tx_hash: TxHash, - ) -> Pin> + '_>> { - Box::pin(async move { - let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig { - diff_mode: Some(true), - disable_code: None, - disable_storage: None, - }); - match self - .trace_transaction(tx_hash, trace_options) - .await - .context("Failed to trace transaction for prestate diff")? - .try_into_pre_state_frame() - .context("Failed to convert trace into pre-state frame")? - { - PreStateFrame::Diff(diff) => Ok(diff), - _ => anyhow::bail!("expected a diff mode trace"), - } - }) - } + #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] + fn state_diff( + &self, + tx_hash: TxHash, + ) -> Pin> + '_>> { + Box::pin(async move { + let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig { + diff_mode: Some(true), + disable_code: None, + disable_storage: None, + }); + match self + .trace_transaction(tx_hash, trace_options) + .await + .context("Failed to trace transaction for prestate diff")? + .try_into_pre_state_frame() + .context("Failed to convert trace into pre-state frame")? 
+ { + PreStateFrame::Diff(diff) => Ok(diff), + _ => anyhow::bail!("expected a diff mode trace"), + } + }) + } - #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] - fn balance_of( - &self, - address: Address, - ) -> Pin> + '_>> { - Box::pin(async move { - self.ws_provider() - .await - .context("Failed to get the Geth provider")? - .get_balance(address) - .await - .map_err(Into::into) - }) - } + #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] + fn balance_of( + &self, + address: Address, + ) -> Pin> + '_>> { + Box::pin(async move { + self.ws_provider() + .await + .context("Failed to get the Geth provider")? + .get_balance(address) + .await + .map_err(Into::into) + }) + } - #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] - fn latest_state_proof( - &self, - address: Address, - keys: Vec, - ) -> Pin> + '_>> { - Box::pin(async move { - self.ws_provider() - .await - .context("Failed to get the Geth provider")? - .get_proof(address, keys) - .latest() - .await - .map_err(Into::into) - }) - } + #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] + fn latest_state_proof( + &self, + address: Address, + keys: Vec, + ) -> Pin> + '_>> { + Box::pin(async move { + self.ws_provider() + .await + .context("Failed to get the Geth provider")? 
+ .get_proof(address, keys) + .latest() + .await + .map_err(Into::into) + }) + } - #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] - fn resolver( - &self, - ) -> Pin>> + '_>> { - Box::pin(async move { - let id = self.id; - let provider = self.ws_provider().await?; - Ok(Arc::new(LighthouseGethNodeResolver { id, provider }) as Arc) - }) - } + #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] + fn resolver( + &self, + ) -> Pin>> + '_>> { + Box::pin(async move { + let id = self.id; + let provider = self.ws_provider().await?; + Ok(Arc::new(LighthouseGethNodeResolver { id, provider }) as Arc) + }) + } - fn evm_version(&self) -> EVMVersion { - EVMVersion::Cancun - } + fn evm_version(&self) -> EVMVersion { + EVMVersion::Cancun + } - fn subscribe_to_full_blocks_information( - &self, - ) -> Pin< - Box< - dyn Future>>>> - + '_, - >, - > { - Box::pin(async move { - let provider = self.ws_provider().await?; - let block_subscription = provider.subscribe_full_blocks().channel_size(1024); - let block_stream = block_subscription - .into_stream() - .await - .context("Failed to create the block stream")?; + fn subscribe_to_full_blocks_information( + &self, + ) -> Pin< + Box< + dyn Future>>>> + + '_, + >, + > { + Box::pin(async move { + let provider = self.ws_provider().await?; + let block_subscription = provider.subscribe_full_blocks().channel_size(1024); + let block_stream = block_subscription + .into_stream() + .await + .context("Failed to create the block stream")?; - let mined_block_information_stream = block_stream.filter_map(|block| async { - let block = block.ok()?; - Some(MinedBlockInformation { - block_number: block.number(), - block_timestamp: block.header.timestamp, - mined_gas: block.header.gas_used as _, - block_gas_limit: block.header.gas_limit as _, - transaction_hashes: block - .transactions - .into_hashes() - .as_hashes() - .expect("Must be hashes") - .to_vec(), - }) - }); + let mined_block_information_stream 
= block_stream.filter_map(|block| async { + let block = block.ok()?; + Some(MinedBlockInformation { + block_number: block.number(), + block_timestamp: block.header.timestamp, + mined_gas: block.header.gas_used as _, + block_gas_limit: block.header.gas_limit as _, + transaction_hashes: block + .transactions + .into_hashes() + .as_hashes() + .expect("Must be hashes") + .to_vec(), + }) + }); - Ok(Box::pin(mined_block_information_stream) - as Pin>>) - }) - } + Ok(Box::pin(mined_block_information_stream) + as Pin>>) + }) + } - fn resolve_signer_or_default(&self, address: Address) -> Address { - let signer_addresses: Vec<_> = - >::signer_addresses(&self.wallet).collect(); - if signer_addresses.contains(&address) { - address - } else { - self.wallet.default_signer().address() - } - } + fn resolve_signer_or_default(&self, address: Address) -> Address { + let signer_addresses: Vec<_> = + >::signer_addresses(&self.wallet).collect(); + if signer_addresses.contains(&address) { + address + } else { + self.wallet.default_signer().address() + } + } } pub struct LighthouseGethNodeResolver, P: Provider> { - id: u32, - provider: FillProvider, + id: u32, + provider: FillProvider, } impl, P: Provider> ResolverApi - for LighthouseGethNodeResolver + for LighthouseGethNodeResolver { - #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] - fn chain_id( - &self, - ) -> Pin> + '_>> { - Box::pin(async move { self.provider.get_chain_id().await.map_err(Into::into) }) - } + #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] + fn chain_id( + &self, + ) -> Pin> + '_>> { + Box::pin(async move { self.provider.get_chain_id().await.map_err(Into::into) }) + } - #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] - fn transaction_gas_price( - &self, - tx_hash: TxHash, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_transaction_receipt(tx_hash) - .await? 
- .context("Failed to get the transaction receipt") - .map(|receipt| receipt.effective_gas_price) - }) - } + #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] + fn transaction_gas_price( + &self, + tx_hash: TxHash, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_transaction_receipt(tx_hash) + .await? + .context("Failed to get the transaction receipt") + .map(|receipt| receipt.effective_gas_price) + }) + } - #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] - fn block_gas_limit( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_block_by_number(number) - .await - .context("Failed to get the geth block")? - .context("Failed to get the Geth block, perhaps there are no blocks?") - .map(|block| block.header.gas_limit as _) - }) - } + #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] + fn block_gas_limit( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the geth block")? + .context("Failed to get the Geth block, perhaps there are no blocks?") + .map(|block| block.header.gas_limit as _) + }) + } - #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] - fn block_coinbase( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_block_by_number(number) - .await - .context("Failed to get the geth block")? - .context("Failed to get the Geth block, perhaps there are no blocks?") - .map(|block| block.header.beneficiary) - }) - } + #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] + fn block_coinbase( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the geth block")? 
+ .context("Failed to get the Geth block, perhaps there are no blocks?") + .map(|block| block.header.beneficiary) + }) + } - #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] - fn block_difficulty( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_block_by_number(number) - .await - .context("Failed to get the geth block")? - .context("Failed to get the Geth block, perhaps there are no blocks?") - .map(|block| U256::from_be_bytes(block.header.mix_hash.0)) - }) - } + #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] + fn block_difficulty( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the geth block")? + .context("Failed to get the Geth block, perhaps there are no blocks?") + .map(|block| U256::from_be_bytes(block.header.mix_hash.0)) + }) + } - #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] - fn block_base_fee( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_block_by_number(number) - .await - .context("Failed to get the geth block")? - .context("Failed to get the Geth block, perhaps there are no blocks?") - .and_then(|block| { - block - .header - .base_fee_per_gas - .context("Failed to get the base fee per gas") - }) - }) - } + #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] + fn block_base_fee( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the geth block")? 
+ .context("Failed to get the Geth block, perhaps there are no blocks?") + .and_then(|block| { + block.header.base_fee_per_gas.context("Failed to get the base fee per gas") + }) + }) + } - #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] - fn block_hash( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_block_by_number(number) - .await - .context("Failed to get the geth block")? - .context("Failed to get the Geth block, perhaps there are no blocks?") - .map(|block| block.header.hash) - }) - } + #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] + fn block_hash( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the geth block")? + .context("Failed to get the Geth block, perhaps there are no blocks?") + .map(|block| block.header.hash) + }) + } - #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] - fn block_timestamp( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_block_by_number(number) - .await - .context("Failed to get the geth block")? - .context("Failed to get the Geth block, perhaps there are no blocks?") - .map(|block| block.header.timestamp) - }) - } + #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] + fn block_timestamp( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the geth block")? 
+ .context("Failed to get the Geth block, perhaps there are no blocks?") + .map(|block| block.header.timestamp) + }) + } - #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] - fn last_block_number(&self) -> Pin> + '_>> { - Box::pin(async move { self.provider.get_block_number().await.map_err(Into::into) }) - } + #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] + fn last_block_number(&self) -> Pin> + '_>> { + Box::pin(async move { self.provider.get_block_number().await.map_err(Into::into) }) + } } impl Node for LighthouseGethNode { - #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] - fn shutdown(&mut self) -> anyhow::Result<()> { - let mut child = Command::new(self.kurtosis_binary_path.as_path()) - .arg("enclave") - .arg("rm") - .arg("-f") - .arg(self.enclave_name.as_str()) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()) - .spawn() - .expect("Failed to spawn the enclave kill command"); + #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] + fn shutdown(&mut self) -> anyhow::Result<()> { + let mut child = Command::new(self.kurtosis_binary_path.as_path()) + .arg("enclave") + .arg("rm") + .arg("-f") + .arg(self.enclave_name.as_str()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()) + .spawn() + .expect("Failed to spawn the enclave kill command"); - if !child - .wait() - .expect("Failed to wait for the enclave kill command") - .success() - { - let stdout = { - let mut stdout = String::default(); - child - .stdout - .take() - .expect("Should be piped") - .read_to_string(&mut stdout) - .context("Failed to read stdout of kurtosis inspect to string")?; - stdout - }; - let stderr = { - let mut stderr = String::default(); - child - .stderr - .take() - .expect("Should be piped") - .read_to_string(&mut stderr) - .context("Failed to read stderr of kurtosis inspect to string")?; - stderr - }; + if !child.wait().expect("Failed to wait for the enclave kill 
command").success() { + let stdout = { + let mut stdout = String::default(); + child + .stdout + .take() + .expect("Should be piped") + .read_to_string(&mut stdout) + .context("Failed to read stdout of kurtosis inspect to string")?; + stdout + }; + let stderr = { + let mut stderr = String::default(); + child + .stderr + .take() + .expect("Should be piped") + .read_to_string(&mut stderr) + .context("Failed to read stderr of kurtosis inspect to string")?; + stderr + }; - panic!( - "Failed to shut down the enclave {} - stdout: {stdout}, stderr: {stderr}", - self.enclave_name - ) - } + panic!( + "Failed to shut down the enclave {} - stdout: {stdout}, stderr: {stderr}", + self.enclave_name + ) + } - drop(self.process.take()); + drop(self.process.take()); - Ok(()) - } + Ok(()) + } - #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] - fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> { - self.init(genesis)?.spawn_process()?; - Ok(()) - } + #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] + fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> { + self.init(genesis)?.spawn_process()?; + Ok(()) + } - #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] - fn version(&self) -> anyhow::Result { - let output = Command::new(&self.kurtosis_binary_path) - .arg("version") - .stdin(Stdio::null()) - .stdout(Stdio::piped()) - .stderr(Stdio::null()) - .spawn() - .context("Failed to spawn geth --version process")? - .wait_with_output() - .context("Failed to wait for geth --version output")? - .stdout; - Ok(String::from_utf8_lossy(&output).into()) - } + #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] + fn version(&self) -> anyhow::Result { + let output = Command::new(&self.kurtosis_binary_path) + .arg("version") + .stdin(Stdio::null()) + .stdout(Stdio::piped()) + .stderr(Stdio::null()) + .spawn() + .context("Failed to spawn geth --version process")? 
+ .wait_with_output() + .context("Failed to wait for geth --version output")? + .stdout; + Ok(String::from_utf8_lossy(&output).into()) + } } impl Drop for LighthouseGethNode { - #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] - fn drop(&mut self) { - self.shutdown().expect("Failed to shutdown") - } + #[instrument(level = "info", skip_all, fields(lighthouse_node_id = self.id))] + fn drop(&mut self) { + self.shutdown().expect("Failed to shutdown") + } } #[derive(Clone, Debug, Serialize, Deserialize)] struct KurtosisNetworkConfig { - pub participants: Vec, + pub participants: Vec, - #[serde(rename = "network_params")] - pub network_parameters: NetworkParameters, + #[serde(rename = "network_params")] + pub network_parameters: NetworkParameters, - pub wait_for_finalization: bool, + pub wait_for_finalization: bool, - #[serde(skip_serializing_if = "Option::is_none")] - pub port_publisher: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub port_publisher: Option, } #[derive(Clone, Debug, Serialize, Deserialize)] struct ParticipantParameters { - #[serde(rename = "el_type")] - pub execution_layer_type: ExecutionLayerType, + #[serde(rename = "el_type")] + pub execution_layer_type: ExecutionLayerType, - #[serde(rename = "el_extra_params")] - pub execution_layer_extra_parameters: Vec, + #[serde(rename = "el_extra_params")] + pub execution_layer_extra_parameters: Vec, - #[serde(rename = "cl_type")] - pub consensus_layer_type: ConsensusLayerType, + #[serde(rename = "cl_type")] + pub consensus_layer_type: ConsensusLayerType, - #[serde(rename = "cl_extra_params")] - pub consensus_layer_extra_parameters: Vec, + #[serde(rename = "cl_extra_params")] + pub consensus_layer_extra_parameters: Vec, } #[derive(Clone, Copy, Debug, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] enum ExecutionLayerType { - Geth, + Geth, } #[derive(Clone, Copy, Debug, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] enum ConsensusLayerType { - 
Lighthouse, + Lighthouse, } #[serde_as] #[derive(Clone, Debug, Serialize, Deserialize)] struct NetworkParameters { - pub preset: NetworkPreset, + pub preset: NetworkPreset, - pub seconds_per_slot: u64, + pub seconds_per_slot: u64, - #[serde_as(as = "serde_with::DisplayFromStr")] - pub network_id: u64, + #[serde_as(as = "serde_with::DisplayFromStr")] + pub network_id: u64, - pub deposit_contract_address: Address, + pub deposit_contract_address: Address, - pub altair_fork_epoch: u64, - pub bellatrix_fork_epoch: u64, - pub capella_fork_epoch: u64, - pub deneb_fork_epoch: u64, - pub electra_fork_epoch: u64, + pub altair_fork_epoch: u64, + pub bellatrix_fork_epoch: u64, + pub capella_fork_epoch: u64, + pub deneb_fork_epoch: u64, + pub electra_fork_epoch: u64, - pub preregistered_validator_keys_mnemonic: String, + pub preregistered_validator_keys_mnemonic: String, - pub num_validator_keys_per_node: u64, + pub num_validator_keys_per_node: u64, - pub genesis_delay: u64, + pub genesis_delay: u64, - pub prefunded_accounts: String, + pub prefunded_accounts: String, } #[derive(Clone, Copy, Debug, Serialize, Deserialize)] #[serde(rename_all = "lowercase")] enum NetworkPreset { - Mainnet, + Mainnet, } #[derive(Clone, Debug, Serialize, Deserialize)] struct PortPublisherParameters { - #[serde(rename = "el", skip_serializing_if = "Option::is_none")] - pub execution_layer_port_publisher_parameters: Option, + #[serde(rename = "el", skip_serializing_if = "Option::is_none")] + pub execution_layer_port_publisher_parameters: Option, - #[serde(rename = "cl", skip_serializing_if = "Option::is_none")] - pub consensus_layer_port_publisher_parameters: Option, + #[serde(rename = "cl", skip_serializing_if = "Option::is_none")] + pub consensus_layer_port_publisher_parameters: Option, } #[derive(Clone, Copy, Debug, Serialize, Deserialize)] struct PortPublisherSingleItemParameters { - #[serde(skip_serializing_if = "Option::is_none")] - pub enabled: Option, + #[serde(skip_serializing_if = 
"Option::is_none")] + pub enabled: Option, - #[serde(skip_serializing_if = "Option::is_none")] - pub public_port_start: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub public_port_start: Option, } /// Custom serializer/deserializer for u128 values as 0x-prefixed hex strings pub struct HexPrefixedU128; impl serde_with::SerializeAs for HexPrefixedU128 { - fn serialize_as(source: &u128, serializer: S) -> Result - where - S: Serializer, - { - let hex_string = format!("0x{source:x}"); - serializer.serialize_str(&hex_string) - } + fn serialize_as(source: &u128, serializer: S) -> Result + where + S: Serializer, + { + let hex_string = format!("0x{source:x}"); + serializer.serialize_str(&hex_string) + } } impl<'de> serde_with::DeserializeAs<'de, u128> for HexPrefixedU128 { - fn deserialize_as(deserializer: D) -> Result - where - D: Deserializer<'de>, - { - let hex_string = String::deserialize(deserializer)?; - if let Some(hex_part) = hex_string.strip_prefix("0x") { - u128::from_str_radix(hex_part, 16).map_err(serde::de::Error::custom) - } else { - Err(serde::de::Error::custom("Expected 0x-prefixed hex string")) - } - } + fn deserialize_as(deserializer: D) -> Result + where + D: Deserializer<'de>, + { + let hex_string = String::deserialize(deserializer)?; + if let Some(hex_part) = hex_string.strip_prefix("0x") { + u128::from_str_radix(hex_part, 16).map_err(serde::de::Error::custom) + } else { + Err(serde::de::Error::custom("Expected 0x-prefixed hex string")) + } + } } #[cfg(test)] mod tests { - use std::sync::Mutex; + use std::sync::Mutex; - use super::*; + use super::*; - fn test_config() -> TestExecutionContext { - let mut config = TestExecutionContext::default(); - config.wallet_configuration.additional_keys = 100; - config - } + fn test_config() -> TestExecutionContext { + let mut config = TestExecutionContext::default(); + config.wallet_configuration.additional_keys = 100; + config + } - fn new_node() -> (TestExecutionContext, LighthouseGethNode) { - 
// Note: When we run the tests in the CI we found that if they're all - // run in parallel then the CI is unable to start all of the nodes in - // time and their start up times-out. Therefore, we want all of the - // nodes to be started in series and not in parallel. To do this, we use - // a dummy mutex here such that there can only be a single node being - // started up at any point of time. This will make our tests run slower - // but it will allow the node startup to not timeout. - // - // Note: an alternative to starting all of the nodes in series and not - // in parallel would be for us to reuse the same node between tests - // which is not the best thing to do in my opinion as it removes all - // of the isolation between tests and makes them depend on what other - // tests do. For example, if one test checks what the block number is - // and another test submits a transaction then the tx test would have - // side effects that affect the block number test. - static NODE_START_MUTEX: Mutex<()> = Mutex::new(()); - let _guard = NODE_START_MUTEX.lock().unwrap(); + fn new_node() -> (TestExecutionContext, LighthouseGethNode) { + // Note: When we run the tests in the CI we found that if they're all + // run in parallel then the CI is unable to start all of the nodes in + // time and their start up times-out. Therefore, we want all of the + // nodes to be started in series and not in parallel. To do this, we use + // a dummy mutex here such that there can only be a single node being + // started up at any point of time. This will make our tests run slower + // but it will allow the node startup to not timeout. + // + // Note: an alternative to starting all of the nodes in series and not + // in parallel would be for us to reuse the same node between tests + // which is not the best thing to do in my opinion as it removes all + // of the isolation between tests and makes them depend on what other + // tests do. 
For example, if one test checks what the block number is + // and another test submits a transaction then the tx test would have + // side effects that affect the block number test. + static NODE_START_MUTEX: Mutex<()> = Mutex::new(()); + let _guard = NODE_START_MUTEX.lock().unwrap(); - let context = test_config(); - let mut node = LighthouseGethNode::new(&context); - node.init(context.genesis_configuration.genesis().unwrap().clone()) - .expect("Failed to initialize the node") - .spawn_process() - .expect("Failed to spawn the node process"); - (context, node) - } + let context = test_config(); + let mut node = LighthouseGethNode::new(&context); + node.init(context.genesis_configuration.genesis().unwrap().clone()) + .expect("Failed to initialize the node") + .spawn_process() + .expect("Failed to spawn the node process"); + (context, node) + } - #[tokio::test] - async fn node_mines_simple_transfer_transaction_and_returns_receipt() { - // Arrange - let (context, node) = new_node(); - node.fund_all_accounts().await.expect("Failed"); + #[tokio::test] + async fn node_mines_simple_transfer_transaction_and_returns_receipt() { + // Arrange + let (context, node) = new_node(); + node.fund_all_accounts().await.expect("Failed"); - let account_address = context - .wallet_configuration - .wallet() - .default_signer() - .address(); - let transaction = TransactionRequest::default() - .to(account_address) - .value(U256::from(100_000_000_000_000u128)); + let account_address = context.wallet_configuration.wallet().default_signer().address(); + let transaction = TransactionRequest::default() + .to(account_address) + .value(U256::from(100_000_000_000_000u128)); - // Act - let receipt = node.execute_transaction(transaction).await; + // Act + let receipt = node.execute_transaction(transaction).await; - // Assert - let _ = receipt.expect("Failed to send the transfer transaction"); - } + // Assert + let _ = receipt.expect("Failed to send the transfer transaction"); + } - #[test] - #[ignore 
= "Ignored since they take a long time to run"] - fn version_works() { - // Arrange - let (_context, node) = new_node(); + #[test] + #[ignore = "Ignored since they take a long time to run"] + fn version_works() { + // Arrange + let (_context, node) = new_node(); - // Act - let version = node.version(); + // Act + let version = node.version(); - // Assert - let version = version.expect("Failed to get the version"); - assert!( - version.starts_with("CLI Version"), - "expected version string, got: '{version}'" - ); - } + // Assert + let version = version.expect("Failed to get the version"); + assert!(version.starts_with("CLI Version"), "expected version string, got: '{version}'"); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_chain_id_from_node() { - // Arrange - let (_context, node) = new_node(); + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_chain_id_from_node() { + // Arrange + let (_context, node) = new_node(); - // Act - let chain_id = node.resolver().await.unwrap().chain_id().await; + // Act + let chain_id = node.resolver().await.unwrap().chain_id().await; - // Assert - let chain_id = chain_id.expect("Failed to get the chain id"); - assert_eq!(chain_id, 420_420_420); - } + // Assert + let chain_id = chain_id.expect("Failed to get the chain id"); + assert_eq!(chain_id, 420_420_420); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_gas_limit_from_node() { - // Arrange - let (_context, node) = new_node(); + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_gas_limit_from_node() { + // Arrange + let (_context, node) = new_node(); - // Act - let gas_limit = node - .resolver() - .await - .unwrap() - .block_gas_limit(BlockNumberOrTag::Latest) - .await; + // Act + let gas_limit = + node.resolver().await.unwrap().block_gas_limit(BlockNumberOrTag::Latest).await; - // Assert - let _ 
= gas_limit.expect("Failed to get the gas limit"); - } + // Assert + let _ = gas_limit.expect("Failed to get the gas limit"); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_coinbase_from_node() { - // Arrange - let (_context, node) = new_node(); + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_coinbase_from_node() { + // Arrange + let (_context, node) = new_node(); - // Act - let coinbase = node - .resolver() - .await - .unwrap() - .block_coinbase(BlockNumberOrTag::Latest) - .await; + // Act + let coinbase = + node.resolver().await.unwrap().block_coinbase(BlockNumberOrTag::Latest).await; - // Assert - let _ = coinbase.expect("Failed to get the coinbase"); - } + // Assert + let _ = coinbase.expect("Failed to get the coinbase"); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_block_difficulty_from_node() { - // Arrange - let (_context, node) = new_node(); + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_block_difficulty_from_node() { + // Arrange + let (_context, node) = new_node(); - // Act - let block_difficulty = node - .resolver() - .await - .unwrap() - .block_difficulty(BlockNumberOrTag::Latest) - .await; + // Act + let block_difficulty = + node.resolver().await.unwrap().block_difficulty(BlockNumberOrTag::Latest).await; - // Assert - let _ = block_difficulty.expect("Failed to get the block difficulty"); - } + // Assert + let _ = block_difficulty.expect("Failed to get the block difficulty"); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_block_hash_from_node() { - // Arrange - let (_context, node) = new_node(); + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_block_hash_from_node() { + // Arrange + let (_context, node) = new_node(); - // Act - let block_hash = node - 
.resolver() - .await - .unwrap() - .block_hash(BlockNumberOrTag::Latest) - .await; + // Act + let block_hash = node.resolver().await.unwrap().block_hash(BlockNumberOrTag::Latest).await; - // Assert - let _ = block_hash.expect("Failed to get the block hash"); - } + // Assert + let _ = block_hash.expect("Failed to get the block hash"); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_block_timestamp_from_node() { - // Arrange - let (_context, node) = new_node(); + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_block_timestamp_from_node() { + // Arrange + let (_context, node) = new_node(); - // Act - let block_timestamp = node - .resolver() - .await - .unwrap() - .block_timestamp(BlockNumberOrTag::Latest) - .await; + // Act + let block_timestamp = + node.resolver().await.unwrap().block_timestamp(BlockNumberOrTag::Latest).await; - // Assert - let _ = block_timestamp.expect("Failed to get the block timestamp"); - } + // Assert + let _ = block_timestamp.expect("Failed to get the block timestamp"); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_block_number_from_node() { - // Arrange - let (_context, node) = new_node(); + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_block_number_from_node() { + // Arrange + let (_context, node) = new_node(); - // Act - let block_number = node.resolver().await.unwrap().last_block_number().await; + // Act + let block_number = node.resolver().await.unwrap().last_block_number().await; - // Assert - let _ = block_number.expect("Failed to get the block number"); - } + // Assert + let _ = block_number.expect("Failed to get the block number"); + } } diff --git a/crates/node/src/node_implementations/substrate.rs b/crates/node/src/node_implementations/substrate.rs index 9ef8fa2..97377b3 100644 --- a/crates/node/src/node_implementations/substrate.rs +++ 
b/crates/node/src/node_implementations/substrate.rs @@ -1,39 +1,39 @@ use std::{ - fs::{create_dir_all, remove_dir_all}, - path::PathBuf, - pin::Pin, - process::{Command, Stdio}, - sync::{ - Arc, - atomic::{AtomicU32, Ordering}, - }, - time::Duration, + fs::{create_dir_all, remove_dir_all}, + path::PathBuf, + pin::Pin, + process::{Command, Stdio}, + sync::{ + Arc, + atomic::{AtomicU32, Ordering}, + }, + time::Duration, }; use alloy::{ - consensus::{BlockHeader, TxEnvelope}, - eips::BlockNumberOrTag, - genesis::{Genesis, GenesisAccount}, - network::{ - Ethereum, EthereumWallet, Network, NetworkWallet, TransactionBuilder, - TransactionBuilderError, UnbuiltTransactionError, - }, - primitives::{ - Address, B64, B256, BlockHash, BlockNumber, BlockTimestamp, Bloom, Bytes, StorageKey, - TxHash, U256, - }, - providers::{ - Provider, - ext::DebugApi, - fillers::{CachedNonceManager, ChainIdFiller, NonceFiller}, - }, - rpc::types::{ - EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest, - eth::{Block, Header, Transaction}, - trace::geth::{ - DiffMode, GethDebugTracingOptions, GethTrace, PreStateConfig, PreStateFrame, - }, - }, + consensus::{BlockHeader, TxEnvelope}, + eips::BlockNumberOrTag, + genesis::{Genesis, GenesisAccount}, + network::{ + Ethereum, EthereumWallet, Network, NetworkWallet, TransactionBuilder, + TransactionBuilderError, UnbuiltTransactionError, + }, + primitives::{ + Address, B64, B256, BlockHash, BlockNumber, BlockTimestamp, Bloom, Bytes, StorageKey, + TxHash, U256, + }, + providers::{ + Provider, + ext::DebugApi, + fillers::{CachedNonceManager, ChainIdFiller, NonceFiller}, + }, + rpc::types::{ + EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest, + eth::{Block, Header, Transaction}, + trace::geth::{ + DiffMode, GethDebugTracingOptions, GethTrace, PreStateConfig, PreStateFrame, + }, + }, }; use anyhow::Context as _; use futures::{Stream, StreamExt}; @@ -51,13 +51,13 @@ use tokio::sync::OnceCell; use tracing::instrument; use 
crate::{ - Node, - constants::{CHAIN_ID, INITIAL_BALANCE}, - helpers::{Process, ProcessReadinessWaitBehavior}, - provider_utils::{ - ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider, - execute_transaction, - }, + Node, + constants::{CHAIN_ID, INITIAL_BALANCE}, + helpers::{Process, ProcessReadinessWaitBehavior}, + provider_utils::{ + ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider, + execute_transaction, + }, }; static NODE_COUNT: AtomicU32 = AtomicU32::new(0); @@ -68,1185 +68,1164 @@ static NODE_COUNT: AtomicU32 = AtomicU32::new(0); #[derive(Debug)] pub struct SubstrateNode { - id: u32, - node_binary: PathBuf, - eth_proxy_binary: PathBuf, - export_chainspec_command: String, - rpc_url: String, - base_directory: PathBuf, - logs_directory: PathBuf, - substrate_process: Option, - eth_proxy_process: Option, - wallet: Arc, - nonce_manager: CachedNonceManager, - provider: OnceCell>>, + id: u32, + node_binary: PathBuf, + eth_proxy_binary: PathBuf, + export_chainspec_command: String, + rpc_url: String, + base_directory: PathBuf, + logs_directory: PathBuf, + substrate_process: Option, + eth_proxy_process: Option, + wallet: Arc, + nonce_manager: CachedNonceManager, + provider: OnceCell>>, } impl SubstrateNode { - const BASE_DIRECTORY: &str = "substrate"; - const LOGS_DIRECTORY: &str = "logs"; - const DATA_DIRECTORY: &str = "chains"; + const BASE_DIRECTORY: &str = "substrate"; + const LOGS_DIRECTORY: &str = "logs"; + const DATA_DIRECTORY: &str = "chains"; - const SUBSTRATE_READY_MARKER: &str = "Running JSON-RPC server"; - const ETH_PROXY_READY_MARKER: &str = "Running JSON-RPC server"; - const CHAIN_SPEC_JSON_FILE: &str = "template_chainspec.json"; - const BASE_SUBSTRATE_RPC_PORT: u16 = 9944; - const BASE_PROXY_RPC_PORT: u16 = 8545; + const SUBSTRATE_READY_MARKER: &str = "Running JSON-RPC server"; + const ETH_PROXY_READY_MARKER: &str = "Running JSON-RPC server"; + const CHAIN_SPEC_JSON_FILE: &str = 
"template_chainspec.json"; + const BASE_SUBSTRATE_RPC_PORT: u16 = 9944; + const BASE_PROXY_RPC_PORT: u16 = 8545; - const SUBSTRATE_LOG_ENV: &str = "error,evm=debug,sc_rpc_server=info,runtime::revive=debug"; - const PROXY_LOG_ENV: &str = "info,eth-rpc=debug"; + const SUBSTRATE_LOG_ENV: &str = "error,evm=debug,sc_rpc_server=info,runtime::revive=debug"; + const PROXY_LOG_ENV: &str = "info,eth-rpc=debug"; - pub const KITCHENSINK_EXPORT_CHAINSPEC_COMMAND: &str = "export-chain-spec"; - pub const REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND: &str = "build-spec"; + pub const KITCHENSINK_EXPORT_CHAINSPEC_COMMAND: &str = "export-chain-spec"; + pub const REVIVE_DEV_NODE_EXPORT_CHAINSPEC_COMMAND: &str = "build-spec"; - pub fn new( - node_path: PathBuf, - export_chainspec_command: &str, - context: impl AsRef - + AsRef - + AsRef, - ) -> Self { - let working_directory_path = - AsRef::::as_ref(&context).as_path(); - let eth_rpc_path = AsRef::::as_ref(&context) - .path - .as_path(); - let wallet = AsRef::::as_ref(&context).wallet(); + pub fn new( + node_path: PathBuf, + export_chainspec_command: &str, + context: impl AsRef + + AsRef + + AsRef, + ) -> Self { + let working_directory_path = + AsRef::::as_ref(&context).as_path(); + let eth_rpc_path = AsRef::::as_ref(&context).path.as_path(); + let wallet = AsRef::::as_ref(&context).wallet(); - let substrate_directory = working_directory_path.join(Self::BASE_DIRECTORY); - let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst); - let base_directory = substrate_directory.join(id.to_string()); - let logs_directory = base_directory.join(Self::LOGS_DIRECTORY); + let substrate_directory = working_directory_path.join(Self::BASE_DIRECTORY); + let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst); + let base_directory = substrate_directory.join(id.to_string()); + let logs_directory = base_directory.join(Self::LOGS_DIRECTORY); - Self { - id, - node_binary: node_path, - eth_proxy_binary: eth_rpc_path.to_path_buf(), - export_chainspec_command: 
export_chainspec_command.to_string(), - rpc_url: String::new(), - base_directory, - logs_directory, - substrate_process: None, - eth_proxy_process: None, - wallet: wallet.clone(), - nonce_manager: Default::default(), - provider: Default::default(), - } - } + Self { + id, + node_binary: node_path, + eth_proxy_binary: eth_rpc_path.to_path_buf(), + export_chainspec_command: export_chainspec_command.to_string(), + rpc_url: String::new(), + base_directory, + logs_directory, + substrate_process: None, + eth_proxy_process: None, + wallet: wallet.clone(), + nonce_manager: Default::default(), + provider: Default::default(), + } + } - pub fn new_existing() -> Self { - let wallet_config = revive_dt_config::WalletConfiguration::default(); - Self { - id: 0, - node_binary: PathBuf::new(), - eth_proxy_binary: PathBuf::new(), - export_chainspec_command: String::new(), - rpc_url: "http://localhost:8545".to_string(), - base_directory: PathBuf::new(), - logs_directory: PathBuf::new(), - substrate_process: None, - eth_proxy_process: None, - wallet: wallet_config.wallet(), - nonce_manager: Default::default(), - provider: Default::default(), - } - } + pub fn new_existing() -> Self { + let wallet_config = revive_dt_config::WalletConfiguration::default(); + Self { + id: 0, + node_binary: PathBuf::new(), + eth_proxy_binary: PathBuf::new(), + export_chainspec_command: String::new(), + rpc_url: "http://localhost:8545".to_string(), + base_directory: PathBuf::new(), + logs_directory: PathBuf::new(), + substrate_process: None, + eth_proxy_process: None, + wallet: wallet_config.wallet(), + nonce_manager: Default::default(), + provider: Default::default(), + } + } - fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> { - let _ = remove_dir_all(self.base_directory.as_path()); - let _ = clear_directory(&self.base_directory); - let _ = clear_directory(&self.logs_directory); + fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> { + let _ = 
remove_dir_all(self.base_directory.as_path()); + let _ = clear_directory(&self.base_directory); + let _ = clear_directory(&self.logs_directory); - create_dir_all(&self.base_directory) - .context("Failed to create base directory for substrate node")?; - create_dir_all(&self.logs_directory) - .context("Failed to create logs directory for substrate node")?; + create_dir_all(&self.base_directory) + .context("Failed to create base directory for substrate node")?; + create_dir_all(&self.logs_directory) + .context("Failed to create logs directory for substrate node")?; - let template_chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE); + let template_chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE); - // Note: we do not pipe the logs of this process to a separate file since this is just a - // once-off export of the default chain spec and not part of the long-running node process. - let output = Command::new(&self.node_binary) - .arg(self.export_chainspec_command.as_str()) - .arg("--chain") - .arg("dev") - .env_remove("RUST_LOG") - .output() - .context("Failed to export the chain-spec")?; + // Note: we do not pipe the logs of this process to a separate file since this is just a + // once-off export of the default chain spec and not part of the long-running node process. 
+ let output = Command::new(&self.node_binary) + .arg(self.export_chainspec_command.as_str()) + .arg("--chain") + .arg("dev") + .env_remove("RUST_LOG") + .output() + .context("Failed to export the chain-spec")?; - if !output.status.success() { - anyhow::bail!( - "Substrate-node export-chain-spec failed: {}", - String::from_utf8_lossy(&output.stderr) - ); - } + if !output.status.success() { + anyhow::bail!( + "Substrate-node export-chain-spec failed: {}", + String::from_utf8_lossy(&output.stderr) + ); + } - let content = String::from_utf8(output.stdout) - .context("Failed to decode Substrate export-chain-spec output as UTF-8")?; - let mut chainspec_json: JsonValue = - serde_json::from_str(&content).context("Failed to parse Substrate chain spec JSON")?; + let content = String::from_utf8(output.stdout) + .context("Failed to decode Substrate export-chain-spec output as UTF-8")?; + let mut chainspec_json: JsonValue = + serde_json::from_str(&content).context("Failed to parse Substrate chain spec JSON")?; - let existing_chainspec_balances = - chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"] - .as_array() - .cloned() - .unwrap_or_default(); + let existing_chainspec_balances = + chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"] + .as_array() + .cloned() + .unwrap_or_default(); - let mut merged_balances: Vec<(String, u128)> = existing_chainspec_balances - .into_iter() - .filter_map(|val| { - if let Some(arr) = val.as_array() { - if arr.len() == 2 { - let account = arr[0].as_str()?.to_string(); - let balance = arr[1].as_f64()? as u128; - return Some((account, balance)); - } - } - None - }) - .collect(); - let mut eth_balances = { - for signer_address in - >::signer_addresses(&self.wallet) - { - // Note, the use of the entry API here means that we only modify the entries for any - // account that is not in the `alloc` field of the genesis state. 
- genesis - .alloc - .entry(signer_address) - .or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE))); - } - self.extract_balance_from_genesis_file(&genesis) - .context("Failed to extract balances from EVM genesis JSON")? - }; - merged_balances.append(&mut eth_balances); + let mut merged_balances: Vec<(String, u128)> = existing_chainspec_balances + .into_iter() + .filter_map(|val| { + if let Some(arr) = val.as_array() { + if arr.len() == 2 { + let account = arr[0].as_str()?.to_string(); + let balance = arr[1].as_f64()? as u128; + return Some((account, balance)); + } + } + None + }) + .collect(); + let mut eth_balances = { + for signer_address in + >::signer_addresses(&self.wallet) + { + // Note, the use of the entry API here means that we only modify the entries for any + // account that is not in the `alloc` field of the genesis state. + genesis + .alloc + .entry(signer_address) + .or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE))); + } + self.extract_balance_from_genesis_file(&genesis) + .context("Failed to extract balances from EVM genesis JSON")? 
+ }; + merged_balances.append(&mut eth_balances); - chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"] = - json!(merged_balances); + chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"] = + json!(merged_balances); - serde_json::to_writer_pretty( - std::fs::File::create(&template_chainspec_path) - .context("Failed to create substrate template chainspec file")?, - &chainspec_json, - ) - .context("Failed to write substrate template chainspec JSON")?; - Ok(self) - } + serde_json::to_writer_pretty( + std::fs::File::create(&template_chainspec_path) + .context("Failed to create substrate template chainspec file")?, + &chainspec_json, + ) + .context("Failed to write substrate template chainspec JSON")?; + Ok(self) + } - fn spawn_process(&mut self) -> anyhow::Result<()> { - let substrate_rpc_port = Self::BASE_SUBSTRATE_RPC_PORT + self.id as u16; - let proxy_rpc_port = Self::BASE_PROXY_RPC_PORT + self.id as u16; + fn spawn_process(&mut self) -> anyhow::Result<()> { + let substrate_rpc_port = Self::BASE_SUBSTRATE_RPC_PORT + self.id as u16; + let proxy_rpc_port = Self::BASE_PROXY_RPC_PORT + self.id as u16; - let chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE); + let chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE); - self.rpc_url = format!("http://127.0.0.1:{proxy_rpc_port}"); + self.rpc_url = format!("http://127.0.0.1:{proxy_rpc_port}"); - let substrate_process = Process::new( - "node", - self.logs_directory.as_path(), - self.node_binary.as_path(), - |command, stdout_file, stderr_file| { - command - .arg("--dev") - .arg("--chain") - .arg(chainspec_path) - .arg("--base-path") - .arg(&self.base_directory) - .arg("--rpc-port") - .arg(substrate_rpc_port.to_string()) - .arg("--name") - .arg(format!("revive-substrate-{}", self.id)) - .arg("--force-authoring") - .arg("--rpc-methods") - .arg("Unsafe") - .arg("--rpc-cors") - .arg("all") - .arg("--rpc-max-connections") - 
.arg(u32::MAX.to_string()) - .env("RUST_LOG", Self::SUBSTRATE_LOG_ENV) - .stdout(stdout_file) - .stderr(stderr_file); - }, - ProcessReadinessWaitBehavior::TimeBoundedWaitFunction { - max_wait_duration: Duration::from_secs(30), - check_function: Box::new(|_, stderr_line| match stderr_line { - Some(line) => Ok(line.contains(Self::SUBSTRATE_READY_MARKER)), - None => Ok(false), - }), - }, - ); - match substrate_process { - Ok(process) => self.substrate_process = Some(process), - Err(err) => { - tracing::error!(?err, "Failed to start substrate, shutting down gracefully"); - self.shutdown() - .context("Failed to gracefully shutdown after substrate start error")?; - return Err(err); - } - } + let substrate_process = Process::new( + "node", + self.logs_directory.as_path(), + self.node_binary.as_path(), + |command, stdout_file, stderr_file| { + command + .arg("--dev") + .arg("--chain") + .arg(chainspec_path) + .arg("--base-path") + .arg(&self.base_directory) + .arg("--rpc-port") + .arg(substrate_rpc_port.to_string()) + .arg("--name") + .arg(format!("revive-substrate-{}", self.id)) + .arg("--force-authoring") + .arg("--rpc-methods") + .arg("Unsafe") + .arg("--rpc-cors") + .arg("all") + .arg("--rpc-max-connections") + .arg(u32::MAX.to_string()) + .env("RUST_LOG", Self::SUBSTRATE_LOG_ENV) + .stdout(stdout_file) + .stderr(stderr_file); + }, + ProcessReadinessWaitBehavior::TimeBoundedWaitFunction { + max_wait_duration: Duration::from_secs(30), + check_function: Box::new(|_, stderr_line| match stderr_line { + Some(line) => Ok(line.contains(Self::SUBSTRATE_READY_MARKER)), + None => Ok(false), + }), + }, + ); + match substrate_process { + Ok(process) => self.substrate_process = Some(process), + Err(err) => { + tracing::error!(?err, "Failed to start substrate, shutting down gracefully"); + self.shutdown() + .context("Failed to gracefully shutdown after substrate start error")?; + return Err(err); + }, + } - let eth_proxy_process = Process::new( - "proxy", - 
self.logs_directory.as_path(), - self.eth_proxy_binary.as_path(), - |command, stdout_file, stderr_file| { - command - .arg("--dev") - .arg("--rpc-port") - .arg(proxy_rpc_port.to_string()) - .arg("--node-rpc-url") - .arg(format!("ws://127.0.0.1:{substrate_rpc_port}")) - .arg("--rpc-max-connections") - .arg(u32::MAX.to_string()) - .env("RUST_LOG", Self::PROXY_LOG_ENV) - .stdout(stdout_file) - .stderr(stderr_file); - }, - ProcessReadinessWaitBehavior::TimeBoundedWaitFunction { - max_wait_duration: Duration::from_secs(30), - check_function: Box::new(|_, stderr_line| match stderr_line { - Some(line) => Ok(line.contains(Self::ETH_PROXY_READY_MARKER)), - None => Ok(false), - }), - }, - ); - match eth_proxy_process { - Ok(process) => self.eth_proxy_process = Some(process), - Err(err) => { - tracing::error!(?err, "Failed to start eth proxy, shutting down gracefully"); - self.shutdown() - .context("Failed to gracefully shutdown after eth proxy start error")?; - return Err(err); - } - } + let eth_proxy_process = Process::new( + "proxy", + self.logs_directory.as_path(), + self.eth_proxy_binary.as_path(), + |command, stdout_file, stderr_file| { + command + .arg("--dev") + .arg("--rpc-port") + .arg(proxy_rpc_port.to_string()) + .arg("--node-rpc-url") + .arg(format!("ws://127.0.0.1:{substrate_rpc_port}")) + .arg("--rpc-max-connections") + .arg(u32::MAX.to_string()) + .env("RUST_LOG", Self::PROXY_LOG_ENV) + .stdout(stdout_file) + .stderr(stderr_file); + }, + ProcessReadinessWaitBehavior::TimeBoundedWaitFunction { + max_wait_duration: Duration::from_secs(30), + check_function: Box::new(|_, stderr_line| match stderr_line { + Some(line) => Ok(line.contains(Self::ETH_PROXY_READY_MARKER)), + None => Ok(false), + }), + }, + ); + match eth_proxy_process { + Ok(process) => self.eth_proxy_process = Some(process), + Err(err) => { + tracing::error!(?err, "Failed to start eth proxy, shutting down gracefully"); + self.shutdown() + .context("Failed to gracefully shutdown after eth proxy start 
error")?; + return Err(err); + }, + } - Ok(()) - } + Ok(()) + } - fn extract_balance_from_genesis_file( - &self, - genesis: &Genesis, - ) -> anyhow::Result> { - genesis - .alloc - .iter() - .try_fold(Vec::new(), |mut vec, (address, acc)| { - let substrate_address = Self::eth_to_substrate_address(address); - let balance = acc.balance.try_into()?; - vec.push((substrate_address, balance)); - Ok(vec) - }) - } + fn extract_balance_from_genesis_file( + &self, + genesis: &Genesis, + ) -> anyhow::Result> { + genesis.alloc.iter().try_fold(Vec::new(), |mut vec, (address, acc)| { + let substrate_address = Self::eth_to_substrate_address(address); + let balance = acc.balance.try_into()?; + vec.push((substrate_address, balance)); + Ok(vec) + }) + } - fn eth_to_substrate_address(address: &Address) -> String { - let eth_bytes = address.0.0; + fn eth_to_substrate_address(address: &Address) -> String { + let eth_bytes = address.0.0; - let mut padded = [0xEEu8; 32]; - padded[..20].copy_from_slice(ð_bytes); + let mut padded = [0xEEu8; 32]; + padded[..20].copy_from_slice(ð_bytes); - let account_id = AccountId32::from(padded); - account_id.to_ss58check() - } + let account_id = AccountId32::from(padded); + account_id.to_ss58check() + } - pub fn eth_rpc_version(&self) -> anyhow::Result { - let output = Command::new(&self.eth_proxy_binary) - .arg("--version") - .stdin(Stdio::null()) - .stdout(Stdio::piped()) - .stderr(Stdio::null()) - .spawn()? - .wait_with_output()? - .stdout; - Ok(String::from_utf8_lossy(&output).trim().to_string()) - } + pub fn eth_rpc_version(&self) -> anyhow::Result { + let output = Command::new(&self.eth_proxy_binary) + .arg("--version") + .stdin(Stdio::null()) + .stdout(Stdio::piped()) + .stderr(Stdio::null()) + .spawn()? + .wait_with_output()? 
+ .stdout; + Ok(String::from_utf8_lossy(&output).trim().to_string()) + } - async fn provider( - &self, - ) -> anyhow::Result>> { - self.provider - .get_or_try_init(|| async move { - construct_concurrency_limited_provider::( - self.rpc_url.as_str(), - FallbackGasFiller::new(u64::MAX, 5_000_000_000, 1_000_000_000), - ChainIdFiller::new(Some(CHAIN_ID)), - NonceFiller::new(self.nonce_manager.clone()), - self.wallet.clone(), - ) - .await - .context("Failed to construct the provider") - }) - .await - .cloned() - } + async fn provider( + &self, + ) -> anyhow::Result>> { + self.provider + .get_or_try_init(|| async move { + construct_concurrency_limited_provider::( + self.rpc_url.as_str(), + FallbackGasFiller::new(u64::MAX, 5_000_000_000, 1_000_000_000), + ChainIdFiller::new(Some(CHAIN_ID)), + NonceFiller::new(self.nonce_manager.clone()), + self.wallet.clone(), + ) + .await + .context("Failed to construct the provider") + }) + .await + .cloned() + } } impl EthereumNode for SubstrateNode { - fn pre_transactions(&mut self) -> Pin> + '_>> { - Box::pin(async move { Ok(()) }) - } + fn pre_transactions(&mut self) -> Pin> + '_>> { + Box::pin(async move { Ok(()) }) + } - fn id(&self) -> usize { - self.id as _ - } + fn id(&self) -> usize { + self.id as _ + } - fn connection_string(&self) -> &str { - &self.rpc_url - } + fn connection_string(&self) -> &str { + &self.rpc_url + } - fn submit_transaction( - &self, - transaction: TransactionRequest, - ) -> Pin> + '_>> { - Box::pin(async move { - let provider = self - .provider() - .await - .context("Failed to create the provider for transaction submission")?; - let pending_transaction = provider - .send_transaction(transaction) - .await - .context("Failed to submit the transaction through the provider")?; - Ok(*pending_transaction.tx_hash()) - }) - } + fn submit_transaction( + &self, + transaction: TransactionRequest, + ) -> Pin> + '_>> { + Box::pin(async move { + let provider = self + .provider() + .await + .context("Failed to create the 
provider for transaction submission")?; + let pending_transaction = provider + .send_transaction(transaction) + .await + .context("Failed to submit the transaction through the provider")?; + Ok(*pending_transaction.tx_hash()) + }) + } - fn get_receipt( - &self, - tx_hash: TxHash, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider() - .await - .context("Failed to create provider for getting the receipt")? - .get_transaction_receipt(tx_hash) - .await - .context("Failed to get the receipt of the transaction")? - .context("Failed to get the receipt of the transaction") - }) - } + fn get_receipt( + &self, + tx_hash: TxHash, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider() + .await + .context("Failed to create provider for getting the receipt")? + .get_transaction_receipt(tx_hash) + .await + .context("Failed to get the receipt of the transaction")? + .context("Failed to get the receipt of the transaction") + }) + } - fn execute_transaction( - &self, - transaction: TransactionRequest, - ) -> Pin> + '_>> { - Box::pin(async move { - let provider = self - .provider() - .await - .context("Failed to create the provider")?; - execute_transaction(provider, transaction).await - }) - } + fn execute_transaction( + &self, + transaction: TransactionRequest, + ) -> Pin> + '_>> { + Box::pin(async move { + let provider = self.provider().await.context("Failed to create the provider")?; + execute_transaction(provider, transaction).await + }) + } - fn trace_transaction( - &self, - tx_hash: TxHash, - trace_options: GethDebugTracingOptions, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider() - .await - .context("Failed to create provider for debug tracing")? 
- .debug_trace_transaction(tx_hash, trace_options) - .await - .context("Failed to obtain debug trace from substrate proxy") - }) - } + fn trace_transaction( + &self, + tx_hash: TxHash, + trace_options: GethDebugTracingOptions, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider() + .await + .context("Failed to create provider for debug tracing")? + .debug_trace_transaction(tx_hash, trace_options) + .await + .context("Failed to obtain debug trace from substrate proxy") + }) + } - fn state_diff( - &self, - tx_hash: TxHash, - ) -> Pin> + '_>> { - Box::pin(async move { - let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig { - diff_mode: Some(true), - disable_code: None, - disable_storage: None, - }); - match self - .trace_transaction(tx_hash, trace_options) - .await? - .try_into_pre_state_frame()? - { - PreStateFrame::Diff(diff) => Ok(diff), - _ => anyhow::bail!("expected a diff mode trace"), - } - }) - } + fn state_diff( + &self, + tx_hash: TxHash, + ) -> Pin> + '_>> { + Box::pin(async move { + let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig { + diff_mode: Some(true), + disable_code: None, + disable_storage: None, + }); + match self + .trace_transaction(tx_hash, trace_options) + .await? + .try_into_pre_state_frame()? + { + PreStateFrame::Diff(diff) => Ok(diff), + _ => anyhow::bail!("expected a diff mode trace"), + } + }) + } - fn balance_of( - &self, - address: Address, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider() - .await - .context("Failed to get the substrate provider")? - .get_balance(address) - .await - .map_err(Into::into) - }) - } + fn balance_of( + &self, + address: Address, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider() + .await + .context("Failed to get the substrate provider")? 
+ .get_balance(address) + .await + .map_err(Into::into) + }) + } - fn latest_state_proof( - &self, - address: Address, - keys: Vec, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider() - .await - .context("Failed to get the substrate provider")? - .get_proof(address, keys) - .latest() - .await - .map_err(Into::into) - }) - } + fn latest_state_proof( + &self, + address: Address, + keys: Vec, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider() + .await + .context("Failed to get the substrate provider")? + .get_proof(address, keys) + .latest() + .await + .map_err(Into::into) + }) + } - fn resolver( - &self, - ) -> Pin>> + '_>> { - Box::pin(async move { - let id = self.id; - let provider = self.provider().await?; - Ok(Arc::new(SubstrateNodeResolver { id, provider }) as Arc) - }) - } + fn resolver( + &self, + ) -> Pin>> + '_>> { + Box::pin(async move { + let id = self.id; + let provider = self.provider().await?; + Ok(Arc::new(SubstrateNodeResolver { id, provider }) as Arc) + }) + } - fn evm_version(&self) -> EVMVersion { - EVMVersion::Cancun - } + fn evm_version(&self) -> EVMVersion { + EVMVersion::Cancun + } - fn subscribe_to_full_blocks_information( - &self, - ) -> Pin< - Box< - dyn Future>>>> - + '_, - >, - > { - Box::pin(async move { - let provider = self - .provider() - .await - .context("Failed to create the provider for block subscription")?; - let mut block_subscription = provider - .watch_full_blocks() - .await - .context("Failed to create the blocks stream")?; - block_subscription.set_channel_size(0xFFFF); - block_subscription.set_poll_interval(Duration::from_secs(1)); - let block_stream = block_subscription.into_stream(); + fn subscribe_to_full_blocks_information( + &self, + ) -> Pin< + Box< + dyn Future>>>> + + '_, + >, + > { + Box::pin(async move { + let provider = self + .provider() + .await + .context("Failed to create the provider for block subscription")?; + let mut block_subscription = provider + .watch_full_blocks() + .await + 
.context("Failed to create the blocks stream")?; + block_subscription.set_channel_size(0xFFFF); + block_subscription.set_poll_interval(Duration::from_secs(1)); + let block_stream = block_subscription.into_stream(); - let mined_block_information_stream = block_stream.filter_map(|block| async { - let block = block.ok()?; - Some(MinedBlockInformation { - block_number: block.number(), - block_timestamp: block.header.timestamp, - mined_gas: block.header.gas_used as _, - block_gas_limit: block.header.gas_limit, - transaction_hashes: block - .transactions - .into_hashes() - .as_hashes() - .expect("Must be hashes") - .to_vec(), - }) - }); + let mined_block_information_stream = block_stream.filter_map(|block| async { + let block = block.ok()?; + Some(MinedBlockInformation { + block_number: block.number(), + block_timestamp: block.header.timestamp, + mined_gas: block.header.gas_used as _, + block_gas_limit: block.header.gas_limit, + transaction_hashes: block + .transactions + .into_hashes() + .as_hashes() + .expect("Must be hashes") + .to_vec(), + }) + }); - Ok(Box::pin(mined_block_information_stream) - as Pin>>) - }) - } + Ok(Box::pin(mined_block_information_stream) + as Pin>>) + }) + } - fn resolve_signer_or_default(&self, address: Address) -> Address { - let signer_addresses: Vec<_> = - >::signer_addresses(&self.wallet).collect(); - if signer_addresses.contains(&address) { - address - } else { - self.wallet.default_signer().address() - } - } + fn resolve_signer_or_default(&self, address: Address) -> Address { + let signer_addresses: Vec<_> = + >::signer_addresses(&self.wallet).collect(); + if signer_addresses.contains(&address) { + address + } else { + self.wallet.default_signer().address() + } + } } pub struct SubstrateNodeResolver { - id: u32, - provider: ConcreteProvider>, + id: u32, + provider: ConcreteProvider>, } impl ResolverApi for SubstrateNodeResolver { - #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] - fn chain_id( - &self, - ) -> 
Pin> + '_>> { - Box::pin(async move { self.provider.get_chain_id().await.map_err(Into::into) }) - } + #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] + fn chain_id( + &self, + ) -> Pin> + '_>> { + Box::pin(async move { self.provider.get_chain_id().await.map_err(Into::into) }) + } - #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] - fn transaction_gas_price( - &self, - tx_hash: TxHash, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_transaction_receipt(tx_hash) - .await? - .context("Failed to get the transaction receipt") - .map(|receipt| receipt.effective_gas_price) - }) - } + #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] + fn transaction_gas_price( + &self, + tx_hash: TxHash, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_transaction_receipt(tx_hash) + .await? + .context("Failed to get the transaction receipt") + .map(|receipt| receipt.effective_gas_price) + }) + } - #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] - fn block_gas_limit( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_block_by_number(number) - .await - .context("Failed to get the substrate block")? - .context("Failed to get the substrate block, perhaps the chain has no blocks?") - .map(|block| block.header.gas_limit as _) - }) - } + #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] + fn block_gas_limit( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the substrate block")? 
+ .context("Failed to get the substrate block, perhaps the chain has no blocks?") + .map(|block| block.header.gas_limit as _) + }) + } - #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] - fn block_coinbase( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_block_by_number(number) - .await - .context("Failed to get the substrate block")? - .context("Failed to get the substrate block, perhaps the chain has no blocks?") - .map(|block| block.header.beneficiary) - }) - } + #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] + fn block_coinbase( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the substrate block")? + .context("Failed to get the substrate block, perhaps the chain has no blocks?") + .map(|block| block.header.beneficiary) + }) + } - #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] - fn block_difficulty( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_block_by_number(number) - .await - .context("Failed to get the substrate block")? - .context("Failed to get the substrate block, perhaps the chain has no blocks?") - .map(|block| U256::from_be_bytes(block.header.mix_hash.0)) - }) - } + #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] + fn block_difficulty( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the substrate block")? 
+ .context("Failed to get the substrate block, perhaps the chain has no blocks?") + .map(|block| U256::from_be_bytes(block.header.mix_hash.0)) + }) + } - #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] - fn block_base_fee( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_block_by_number(number) - .await - .context("Failed to get the substrate block")? - .context("Failed to get the substrate block, perhaps the chain has no blocks?") - .and_then(|block| { - block - .header - .base_fee_per_gas - .context("Failed to get the base fee per gas") - }) - }) - } + #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] + fn block_base_fee( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the substrate block")? + .context("Failed to get the substrate block, perhaps the chain has no blocks?") + .and_then(|block| { + block.header.base_fee_per_gas.context("Failed to get the base fee per gas") + }) + }) + } - #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] - fn block_hash( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_block_by_number(number) - .await - .context("Failed to get the substrate block")? - .context("Failed to get the substrate block, perhaps the chain has no blocks?") - .map(|block| block.header.hash) - }) - } + #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] + fn block_hash( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the substrate block")? 
+ .context("Failed to get the substrate block, perhaps the chain has no blocks?") + .map(|block| block.header.hash) + }) + } - #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] - fn block_timestamp( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_block_by_number(number) - .await - .context("Failed to get the substrate block")? - .context("Failed to get the substrate block, perhaps the chain has no blocks?") - .map(|block| block.header.timestamp) - }) - } + #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] + fn block_timestamp( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the substrate block")? + .context("Failed to get the substrate block, perhaps the chain has no blocks?") + .map(|block| block.header.timestamp) + }) + } - #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] - fn last_block_number(&self) -> Pin> + '_>> { - Box::pin(async move { self.provider.get_block_number().await.map_err(Into::into) }) - } + #[instrument(level = "info", skip_all, fields(substrate_node_id = self.id))] + fn last_block_number(&self) -> Pin> + '_>> { + Box::pin(async move { self.provider.get_block_number().await.map_err(Into::into) }) + } } impl Node for SubstrateNode { - fn shutdown(&mut self) -> anyhow::Result<()> { - drop(self.substrate_process.take()); - drop(self.eth_proxy_process.take()); + fn shutdown(&mut self) -> anyhow::Result<()> { + drop(self.substrate_process.take()); + drop(self.eth_proxy_process.take()); - // Remove the node's database so that subsequent runs do not run on the same database. We - // ignore the error just in case the directory didn't exist in the first place and therefore - // there's nothing to be deleted. 
- let _ = remove_dir_all(self.base_directory.join(Self::DATA_DIRECTORY)); + // Remove the node's database so that subsequent runs do not run on the same database. We + // ignore the error just in case the directory didn't exist in the first place and therefore + // there's nothing to be deleted. + let _ = remove_dir_all(self.base_directory.join(Self::DATA_DIRECTORY)); - Ok(()) - } + Ok(()) + } - fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> { - self.init(genesis)?.spawn_process() - } + fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> { + self.init(genesis)?.spawn_process() + } - fn version(&self) -> anyhow::Result { - let output = Command::new(&self.node_binary) - .arg("--version") - .stdin(Stdio::null()) - .stdout(Stdio::piped()) - .stderr(Stdio::null()) - .spawn() - .context("Failed to spawn substrate --version")? - .wait_with_output() - .context("Failed to wait for substrate --version")? - .stdout; - Ok(String::from_utf8_lossy(&output).into()) - } + fn version(&self) -> anyhow::Result { + let output = Command::new(&self.node_binary) + .arg("--version") + .stdin(Stdio::null()) + .stdout(Stdio::piped()) + .stderr(Stdio::null()) + .spawn() + .context("Failed to spawn substrate --version")? + .wait_with_output() + .context("Failed to wait for substrate --version")? 
+ .stdout; + Ok(String::from_utf8_lossy(&output).into()) + } } impl Drop for SubstrateNode { - fn drop(&mut self) { - self.shutdown().expect("Failed to shutdown") - } + fn drop(&mut self) { + self.shutdown().expect("Failed to shutdown") + } } #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] pub struct ReviveNetwork; impl Network for ReviveNetwork { - type TxType = ::TxType; + type TxType = ::TxType; - type TxEnvelope = ::TxEnvelope; + type TxEnvelope = ::TxEnvelope; - type UnsignedTx = ::UnsignedTx; + type UnsignedTx = ::UnsignedTx; - type ReceiptEnvelope = ::ReceiptEnvelope; + type ReceiptEnvelope = ::ReceiptEnvelope; - type Header = ReviveHeader; + type Header = ReviveHeader; - type TransactionRequest = ::TransactionRequest; + type TransactionRequest = ::TransactionRequest; - type TransactionResponse = ::TransactionResponse; + type TransactionResponse = ::TransactionResponse; - type ReceiptResponse = ::ReceiptResponse; + type ReceiptResponse = ::ReceiptResponse; - type HeaderResponse = Header; + type HeaderResponse = Header; - type BlockResponse = Block, Header>; + type BlockResponse = Block, Header>; } impl TransactionBuilder for ::TransactionRequest { - fn chain_id(&self) -> Option { - <::TransactionRequest as TransactionBuilder>::chain_id(self) - } + fn chain_id(&self) -> Option { + <::TransactionRequest as TransactionBuilder>::chain_id(self) + } - fn set_chain_id(&mut self, chain_id: alloy::primitives::ChainId) { - <::TransactionRequest as TransactionBuilder>::set_chain_id( - self, chain_id, - ) - } + fn set_chain_id(&mut self, chain_id: alloy::primitives::ChainId) { + <::TransactionRequest as TransactionBuilder>::set_chain_id( + self, chain_id, + ) + } - fn nonce(&self) -> Option { - <::TransactionRequest as TransactionBuilder>::nonce(self) - } + fn nonce(&self) -> Option { + <::TransactionRequest as TransactionBuilder>::nonce(self) + } - fn set_nonce(&mut self, nonce: u64) { - <::TransactionRequest as TransactionBuilder>::set_nonce( - 
self, nonce, - ) - } + fn set_nonce(&mut self, nonce: u64) { + <::TransactionRequest as TransactionBuilder>::set_nonce( + self, nonce, + ) + } - fn take_nonce(&mut self) -> Option { - <::TransactionRequest as TransactionBuilder>::take_nonce( - self, - ) - } + fn take_nonce(&mut self) -> Option { + <::TransactionRequest as TransactionBuilder>::take_nonce( + self, + ) + } - fn input(&self) -> Option<&alloy::primitives::Bytes> { - <::TransactionRequest as TransactionBuilder>::input(self) - } + fn input(&self) -> Option<&alloy::primitives::Bytes> { + <::TransactionRequest as TransactionBuilder>::input(self) + } - fn set_input>(&mut self, input: T) { - <::TransactionRequest as TransactionBuilder>::set_input( - self, input, - ) - } + fn set_input>(&mut self, input: T) { + <::TransactionRequest as TransactionBuilder>::set_input( + self, input, + ) + } - fn from(&self) -> Option
{ - <::TransactionRequest as TransactionBuilder>::from(self) - } + fn from(&self) -> Option
{ + <::TransactionRequest as TransactionBuilder>::from(self) + } - fn set_from(&mut self, from: Address) { - <::TransactionRequest as TransactionBuilder>::set_from( - self, from, - ) - } + fn set_from(&mut self, from: Address) { + <::TransactionRequest as TransactionBuilder>::set_from( + self, from, + ) + } - fn kind(&self) -> Option { - <::TransactionRequest as TransactionBuilder>::kind(self) - } + fn kind(&self) -> Option { + <::TransactionRequest as TransactionBuilder>::kind(self) + } - fn clear_kind(&mut self) { - <::TransactionRequest as TransactionBuilder>::clear_kind( - self, - ) - } + fn clear_kind(&mut self) { + <::TransactionRequest as TransactionBuilder>::clear_kind( + self, + ) + } - fn set_kind(&mut self, kind: alloy::primitives::TxKind) { - <::TransactionRequest as TransactionBuilder>::set_kind( - self, kind, - ) - } + fn set_kind(&mut self, kind: alloy::primitives::TxKind) { + <::TransactionRequest as TransactionBuilder>::set_kind( + self, kind, + ) + } - fn value(&self) -> Option { - <::TransactionRequest as TransactionBuilder>::value(self) - } + fn value(&self) -> Option { + <::TransactionRequest as TransactionBuilder>::value(self) + } - fn set_value(&mut self, value: alloy::primitives::U256) { - <::TransactionRequest as TransactionBuilder>::set_value( - self, value, - ) - } + fn set_value(&mut self, value: alloy::primitives::U256) { + <::TransactionRequest as TransactionBuilder>::set_value( + self, value, + ) + } - fn gas_price(&self) -> Option { - <::TransactionRequest as TransactionBuilder>::gas_price(self) - } + fn gas_price(&self) -> Option { + <::TransactionRequest as TransactionBuilder>::gas_price(self) + } - fn set_gas_price(&mut self, gas_price: u128) { - <::TransactionRequest as TransactionBuilder>::set_gas_price( - self, gas_price, - ) - } + fn set_gas_price(&mut self, gas_price: u128) { + <::TransactionRequest as TransactionBuilder>::set_gas_price( + self, gas_price, + ) + } - fn max_fee_per_gas(&self) -> Option { - 
<::TransactionRequest as TransactionBuilder>::max_fee_per_gas( - self, - ) - } + fn max_fee_per_gas(&self) -> Option { + <::TransactionRequest as TransactionBuilder>::max_fee_per_gas( + self, + ) + } - fn set_max_fee_per_gas(&mut self, max_fee_per_gas: u128) { - <::TransactionRequest as TransactionBuilder>::set_max_fee_per_gas( + fn set_max_fee_per_gas(&mut self, max_fee_per_gas: u128) { + <::TransactionRequest as TransactionBuilder>::set_max_fee_per_gas( self, max_fee_per_gas ) - } + } - fn max_priority_fee_per_gas(&self) -> Option { - <::TransactionRequest as TransactionBuilder>::max_priority_fee_per_gas( + fn max_priority_fee_per_gas(&self) -> Option { + <::TransactionRequest as TransactionBuilder>::max_priority_fee_per_gas( self, ) - } + } - fn set_max_priority_fee_per_gas(&mut self, max_priority_fee_per_gas: u128) { - <::TransactionRequest as TransactionBuilder>::set_max_priority_fee_per_gas( + fn set_max_priority_fee_per_gas(&mut self, max_priority_fee_per_gas: u128) { + <::TransactionRequest as TransactionBuilder>::set_max_priority_fee_per_gas( self, max_priority_fee_per_gas ) - } + } - fn gas_limit(&self) -> Option { - <::TransactionRequest as TransactionBuilder>::gas_limit(self) - } + fn gas_limit(&self) -> Option { + <::TransactionRequest as TransactionBuilder>::gas_limit(self) + } - fn set_gas_limit(&mut self, gas_limit: u64) { - <::TransactionRequest as TransactionBuilder>::set_gas_limit( - self, gas_limit, - ) - } + fn set_gas_limit(&mut self, gas_limit: u64) { + <::TransactionRequest as TransactionBuilder>::set_gas_limit( + self, gas_limit, + ) + } - fn access_list(&self) -> Option<&alloy::rpc::types::AccessList> { - <::TransactionRequest as TransactionBuilder>::access_list( + fn access_list(&self) -> Option<&alloy::rpc::types::AccessList> { + <::TransactionRequest as TransactionBuilder>::access_list( + self, + ) + } + + fn set_access_list(&mut self, access_list: alloy::rpc::types::AccessList) { + <::TransactionRequest as 
TransactionBuilder>::set_access_list( + self, + access_list, + ) + } + + fn complete_type( + &self, + ty: ::TxType, + ) -> Result<(), Vec<&'static str>> { + <::TransactionRequest as TransactionBuilder>::complete_type( + self, ty, + ) + } + + fn can_submit(&self) -> bool { + <::TransactionRequest as TransactionBuilder>::can_submit( + self, + ) + } + + fn can_build(&self) -> bool { + <::TransactionRequest as TransactionBuilder>::can_build(self) + } + + fn output_tx_type(&self) -> ::TxType { + <::TransactionRequest as TransactionBuilder>::output_tx_type( + self, + ) + } + + fn output_tx_type_checked(&self) -> Option<::TxType> { + <::TransactionRequest as TransactionBuilder>::output_tx_type_checked( self, ) - } + } - fn set_access_list(&mut self, access_list: alloy::rpc::types::AccessList) { - <::TransactionRequest as TransactionBuilder>::set_access_list( - self, - access_list, - ) - } - - fn complete_type( - &self, - ty: ::TxType, - ) -> Result<(), Vec<&'static str>> { - <::TransactionRequest as TransactionBuilder>::complete_type( - self, ty, - ) - } - - fn can_submit(&self) -> bool { - <::TransactionRequest as TransactionBuilder>::can_submit( + fn prep_for_submission(&mut self) { + <::TransactionRequest as TransactionBuilder>::prep_for_submission( self, ) - } + } - fn can_build(&self) -> bool { - <::TransactionRequest as TransactionBuilder>::can_build(self) - } - - fn output_tx_type(&self) -> ::TxType { - <::TransactionRequest as TransactionBuilder>::output_tx_type( - self, - ) - } - - fn output_tx_type_checked(&self) -> Option<::TxType> { - <::TransactionRequest as TransactionBuilder>::output_tx_type_checked( - self, - ) - } - - fn prep_for_submission(&mut self) { - <::TransactionRequest as TransactionBuilder>::prep_for_submission( - self, - ) - } - - fn build_unsigned( - self, - ) -> alloy::network::BuildResult<::UnsignedTx, ReviveNetwork> { - let result = <::TransactionRequest as TransactionBuilder>::build_unsigned( + fn build_unsigned( + self, + ) -> 
alloy::network::BuildResult<::UnsignedTx, ReviveNetwork> { + let result = <::TransactionRequest as TransactionBuilder>::build_unsigned( self, ); - match result { - Ok(unsigned_tx) => Ok(unsigned_tx), - Err(UnbuiltTransactionError { request, error }) => { - Err(UnbuiltTransactionError:: { - request, - error: match error { - TransactionBuilderError::InvalidTransactionRequest(tx_type, items) => { - TransactionBuilderError::InvalidTransactionRequest(tx_type, items) - } - TransactionBuilderError::UnsupportedSignatureType => { - TransactionBuilderError::UnsupportedSignatureType - } - TransactionBuilderError::Signer(error) => { - TransactionBuilderError::Signer(error) - } - TransactionBuilderError::Custom(error) => { - TransactionBuilderError::Custom(error) - } - }, - }) - } - } - } + match result { + Ok(unsigned_tx) => Ok(unsigned_tx), + Err(UnbuiltTransactionError { request, error }) => + Err(UnbuiltTransactionError:: { + request, + error: match error { + TransactionBuilderError::InvalidTransactionRequest(tx_type, items) => + TransactionBuilderError::InvalidTransactionRequest(tx_type, items), + TransactionBuilderError::UnsupportedSignatureType => + TransactionBuilderError::UnsupportedSignatureType, + TransactionBuilderError::Signer(error) => + TransactionBuilderError::Signer(error), + TransactionBuilderError::Custom(error) => + TransactionBuilderError::Custom(error), + }, + }), + } + } - async fn build>( - self, - wallet: &W, - ) -> Result<::TxEnvelope, TransactionBuilderError> - { - Ok(wallet.sign_request(self).await?) - } + async fn build>( + self, + wallet: &W, + ) -> Result<::TxEnvelope, TransactionBuilderError> { + Ok(wallet.sign_request(self).await?) + } } #[derive(Clone, Debug, PartialEq, Eq, Hash, Serialize, Deserialize)] #[serde(rename_all = "camelCase")] pub struct ReviveHeader { - /// The Keccak 256-bit hash of the parent - /// block’s header, in its entirety; formally Hp. 
- pub parent_hash: B256, - /// The Keccak 256-bit hash of the ommers list portion of this block; formally Ho. - #[serde(rename = "sha3Uncles", alias = "ommersHash")] - pub ommers_hash: B256, - /// The 160-bit address to which all fees collected from the successful mining of this block - /// be transferred; formally Hc. - #[serde(rename = "miner", alias = "beneficiary")] - pub beneficiary: Address, - /// The Keccak 256-bit hash of the root node of the state trie, after all transactions are - /// executed and finalisations applied; formally Hr. - pub state_root: B256, - /// The Keccak 256-bit hash of the root node of the trie structure populated with each - /// transaction in the transactions list portion of the block; formally Ht. - pub transactions_root: B256, - /// The Keccak 256-bit hash of the root node of the trie structure populated with the receipts - /// of each transaction in the transactions list portion of the block; formally He. - pub receipts_root: B256, - /// The Bloom filter composed from indexable information (logger address and log topics) - /// contained in each log entry from the receipt of each transaction in the transactions list; - /// formally Hb. - pub logs_bloom: Bloom, - /// A scalar value corresponding to the difficulty level of this block. This can be calculated - /// from the previous block’s difficulty level and the timestamp; formally Hd. - pub difficulty: U256, - /// A scalar value equal to the number of ancestor blocks. The genesis block has a number of - /// zero; formally Hi. - #[serde(with = "alloy::serde::quantity")] - pub number: BlockNumber, - /// A scalar value equal to the current limit of gas expenditure per block; formally Hl. - // This is the main difference over the Ethereum network implementation. We use u128 here and - // not u64. - #[serde(with = "alloy::serde::quantity")] - pub gas_limit: u128, - /// A scalar value equal to the total gas used in transactions in this block; formally Hg. 
- #[serde(with = "alloy::serde::quantity")] - pub gas_used: u64, - /// A scalar value equal to the reasonable output of Unix’s time() at this block’s inception; - /// formally Hs. - #[serde(with = "alloy::serde::quantity")] - pub timestamp: u64, - /// An arbitrary byte array containing data relevant to this block. This must be 32 bytes or - /// fewer; formally Hx. - pub extra_data: Bytes, - /// A 256-bit hash which, combined with the - /// nonce, proves that a sufficient amount of computation has been carried out on this block; - /// formally Hm. - pub mix_hash: B256, - /// A 64-bit value which, combined with the mixhash, proves that a sufficient amount of - /// computation has been carried out on this block; formally Hn. - pub nonce: B64, - /// A scalar representing EIP1559 base fee which can move up or down each block according - /// to a formula which is a function of gas used in parent block and gas target - /// (block gas limit divided by elasticity multiplier) of parent block. - /// The algorithm results in the base fee per gas increasing when blocks are - /// above the gas target, and decreasing when blocks are below the gas target. The base fee per - /// gas is burned. - #[serde( - default, - with = "alloy::serde::quantity::opt", - skip_serializing_if = "Option::is_none" - )] - pub base_fee_per_gas: Option, - /// The Keccak 256-bit hash of the withdrawals list portion of this block. - /// - #[serde(default, skip_serializing_if = "Option::is_none")] - pub withdrawals_root: Option, - /// The total amount of blob gas consumed by the transactions within the block, added in - /// EIP-4844. - #[serde( - default, - with = "alloy::serde::quantity::opt", - skip_serializing_if = "Option::is_none" - )] - pub blob_gas_used: Option, - /// A running total of blob gas consumed in excess of the target, prior to the block. Blocks - /// with above-target blob gas consumption increase this value, blocks with below-target blob - /// gas consumption decrease it (bounded at 0). 
This was added in EIP-4844. - #[serde( - default, - with = "alloy::serde::quantity::opt", - skip_serializing_if = "Option::is_none" - )] - pub excess_blob_gas: Option, - /// The hash of the parent beacon block's root is included in execution blocks, as proposed by - /// EIP-4788. - /// - /// This enables trust-minimized access to consensus state, supporting staking pools, bridges, - /// and more. - /// - /// The beacon roots contract handles root storage, enhancing Ethereum's functionalities. - #[serde(default, skip_serializing_if = "Option::is_none")] - pub parent_beacon_block_root: Option, - /// The Keccak 256-bit hash of the an RLP encoded list with each - /// [EIP-7685] request in the block body. - /// - /// [EIP-7685]: https://eips.ethereum.org/EIPS/eip-7685 - #[serde(default, skip_serializing_if = "Option::is_none")] - pub requests_hash: Option, + /// The Keccak 256-bit hash of the parent + /// block’s header, in its entirety; formally Hp. + pub parent_hash: B256, + /// The Keccak 256-bit hash of the ommers list portion of this block; formally Ho. + #[serde(rename = "sha3Uncles", alias = "ommersHash")] + pub ommers_hash: B256, + /// The 160-bit address to which all fees collected from the successful mining of this block + /// be transferred; formally Hc. + #[serde(rename = "miner", alias = "beneficiary")] + pub beneficiary: Address, + /// The Keccak 256-bit hash of the root node of the state trie, after all transactions are + /// executed and finalisations applied; formally Hr. + pub state_root: B256, + /// The Keccak 256-bit hash of the root node of the trie structure populated with each + /// transaction in the transactions list portion of the block; formally Ht. + pub transactions_root: B256, + /// The Keccak 256-bit hash of the root node of the trie structure populated with the receipts + /// of each transaction in the transactions list portion of the block; formally He. 
+ pub receipts_root: B256, + /// The Bloom filter composed from indexable information (logger address and log topics) + /// contained in each log entry from the receipt of each transaction in the transactions list; + /// formally Hb. + pub logs_bloom: Bloom, + /// A scalar value corresponding to the difficulty level of this block. This can be calculated + /// from the previous block’s difficulty level and the timestamp; formally Hd. + pub difficulty: U256, + /// A scalar value equal to the number of ancestor blocks. The genesis block has a number of + /// zero; formally Hi. + #[serde(with = "alloy::serde::quantity")] + pub number: BlockNumber, + /// A scalar value equal to the current limit of gas expenditure per block; formally Hl. + // This is the main difference over the Ethereum network implementation. We use u128 here and + // not u64. + #[serde(with = "alloy::serde::quantity")] + pub gas_limit: u128, + /// A scalar value equal to the total gas used in transactions in this block; formally Hg. + #[serde(with = "alloy::serde::quantity")] + pub gas_used: u64, + /// A scalar value equal to the reasonable output of Unix’s time() at this block’s inception; + /// formally Hs. + #[serde(with = "alloy::serde::quantity")] + pub timestamp: u64, + /// An arbitrary byte array containing data relevant to this block. This must be 32 bytes or + /// fewer; formally Hx. + pub extra_data: Bytes, + /// A 256-bit hash which, combined with the + /// nonce, proves that a sufficient amount of computation has been carried out on this block; + /// formally Hm. + pub mix_hash: B256, + /// A 64-bit value which, combined with the mixhash, proves that a sufficient amount of + /// computation has been carried out on this block; formally Hn. 
+ pub nonce: B64, + /// A scalar representing EIP1559 base fee which can move up or down each block according + /// to a formula which is a function of gas used in parent block and gas target + /// (block gas limit divided by elasticity multiplier) of parent block. + /// The algorithm results in the base fee per gas increasing when blocks are + /// above the gas target, and decreasing when blocks are below the gas target. The base fee per + /// gas is burned. + #[serde( + default, + with = "alloy::serde::quantity::opt", + skip_serializing_if = "Option::is_none" + )] + pub base_fee_per_gas: Option, + /// The Keccak 256-bit hash of the withdrawals list portion of this block. + /// + #[serde(default, skip_serializing_if = "Option::is_none")] + pub withdrawals_root: Option, + /// The total amount of blob gas consumed by the transactions within the block, added in + /// EIP-4844. + #[serde( + default, + with = "alloy::serde::quantity::opt", + skip_serializing_if = "Option::is_none" + )] + pub blob_gas_used: Option, + /// A running total of blob gas consumed in excess of the target, prior to the block. Blocks + /// with above-target blob gas consumption increase this value, blocks with below-target blob + /// gas consumption decrease it (bounded at 0). This was added in EIP-4844. + #[serde( + default, + with = "alloy::serde::quantity::opt", + skip_serializing_if = "Option::is_none" + )] + pub excess_blob_gas: Option, + /// The hash of the parent beacon block's root is included in execution blocks, as proposed by + /// EIP-4788. + /// + /// This enables trust-minimized access to consensus state, supporting staking pools, bridges, + /// and more. + /// + /// The beacon roots contract handles root storage, enhancing Ethereum's functionalities. + #[serde(default, skip_serializing_if = "Option::is_none")] + pub parent_beacon_block_root: Option, + /// The Keccak 256-bit hash of the an RLP encoded list with each + /// [EIP-7685] request in the block body. 
+ /// + /// [EIP-7685]: https://eips.ethereum.org/EIPS/eip-7685 + #[serde(default, skip_serializing_if = "Option::is_none")] + pub requests_hash: Option, } impl BlockHeader for ReviveHeader { - fn parent_hash(&self) -> B256 { - self.parent_hash - } + fn parent_hash(&self) -> B256 { + self.parent_hash + } - fn ommers_hash(&self) -> B256 { - self.ommers_hash - } + fn ommers_hash(&self) -> B256 { + self.ommers_hash + } - fn beneficiary(&self) -> Address { - self.beneficiary - } + fn beneficiary(&self) -> Address { + self.beneficiary + } - fn state_root(&self) -> B256 { - self.state_root - } + fn state_root(&self) -> B256 { + self.state_root + } - fn transactions_root(&self) -> B256 { - self.transactions_root - } + fn transactions_root(&self) -> B256 { + self.transactions_root + } - fn receipts_root(&self) -> B256 { - self.receipts_root - } + fn receipts_root(&self) -> B256 { + self.receipts_root + } - fn withdrawals_root(&self) -> Option { - self.withdrawals_root - } + fn withdrawals_root(&self) -> Option { + self.withdrawals_root + } - fn logs_bloom(&self) -> Bloom { - self.logs_bloom - } + fn logs_bloom(&self) -> Bloom { + self.logs_bloom + } - fn difficulty(&self) -> U256 { - self.difficulty - } + fn difficulty(&self) -> U256 { + self.difficulty + } - fn number(&self) -> BlockNumber { - self.number - } + fn number(&self) -> BlockNumber { + self.number + } - // There's sadly nothing that we can do about this. We're required to implement this trait on - // any type that represents a header and the gas limit type used here is a u64. - fn gas_limit(&self) -> u64 { - self.gas_limit.try_into().unwrap_or(u64::MAX) - } + // There's sadly nothing that we can do about this. We're required to implement this trait on + // any type that represents a header and the gas limit type used here is a u64. 
+ fn gas_limit(&self) -> u64 { + self.gas_limit.try_into().unwrap_or(u64::MAX) + } - fn gas_used(&self) -> u64 { - self.gas_used - } + fn gas_used(&self) -> u64 { + self.gas_used + } - fn timestamp(&self) -> u64 { - self.timestamp - } + fn timestamp(&self) -> u64 { + self.timestamp + } - fn mix_hash(&self) -> Option { - Some(self.mix_hash) - } + fn mix_hash(&self) -> Option { + Some(self.mix_hash) + } - fn nonce(&self) -> Option { - Some(self.nonce) - } + fn nonce(&self) -> Option { + Some(self.nonce) + } - fn base_fee_per_gas(&self) -> Option { - self.base_fee_per_gas - } + fn base_fee_per_gas(&self) -> Option { + self.base_fee_per_gas + } - fn blob_gas_used(&self) -> Option { - self.blob_gas_used - } + fn blob_gas_used(&self) -> Option { + self.blob_gas_used + } - fn excess_blob_gas(&self) -> Option { - self.excess_blob_gas - } + fn excess_blob_gas(&self) -> Option { + self.excess_blob_gas + } - fn parent_beacon_block_root(&self) -> Option { - self.parent_beacon_block_root - } + fn parent_beacon_block_root(&self) -> Option { + self.parent_beacon_block_root + } - fn requests_hash(&self) -> Option { - self.requests_hash - } + fn requests_hash(&self) -> Option { + self.requests_hash + } - fn extra_data(&self) -> &Bytes { - &self.extra_data - } + fn extra_data(&self) -> &Bytes { + &self.extra_data + } } #[cfg(test)] mod tests { - use alloy::rpc::types::TransactionRequest; - use std::sync::{LazyLock, Mutex}; + use alloy::rpc::types::TransactionRequest; + use std::sync::{LazyLock, Mutex}; - use std::fs; + use std::fs; - use super::*; - use crate::Node; + use super::*; + use crate::Node; - fn test_config() -> TestExecutionContext { - TestExecutionContext::default() - } + fn test_config() -> TestExecutionContext { + TestExecutionContext::default() + } - fn new_node() -> (TestExecutionContext, SubstrateNode) { - // Note: When we run the tests in the CI we found that if they're all - // run in parallel then the CI is unable to start all of the nodes in - // time and their 
start up times-out. Therefore, we want all of the - // nodes to be started in series and not in parallel. To do this, we use - // a dummy mutex here such that there can only be a single node being - // started up at any point of time. This will make our tests run slower - // but it will allow the node startup to not timeout. - // - // Note: an alternative to starting all of the nodes in series and not - // in parallel would be for us to reuse the same node between tests - // which is not the best thing to do in my opinion as it removes all - // of the isolation between tests and makes them depend on what other - // tests do. For example, if one test checks what the block number is - // and another test submits a transaction then the tx test would have - // side effects that affect the block number test. - static NODE_START_MUTEX: Mutex<()> = Mutex::new(()); - let _guard = NODE_START_MUTEX.lock().unwrap(); + fn new_node() -> (TestExecutionContext, SubstrateNode) { + // Note: When we run the tests in the CI we found that if they're all + // run in parallel then the CI is unable to start all of the nodes in + // time and their start up times-out. Therefore, we want all of the + // nodes to be started in series and not in parallel. To do this, we use + // a dummy mutex here such that there can only be a single node being + // started up at any point of time. This will make our tests run slower + // but it will allow the node startup to not timeout. + // + // Note: an alternative to starting all of the nodes in series and not + // in parallel would be for us to reuse the same node between tests + // which is not the best thing to do in my opinion as it removes all + // of the isolation between tests and makes them depend on what other + // tests do. For example, if one test checks what the block number is + // and another test submits a transaction then the tx test would have + // side effects that affect the block number test. 
+ static NODE_START_MUTEX: Mutex<()> = Mutex::new(()); + let _guard = NODE_START_MUTEX.lock().unwrap(); - let context = test_config(); - let mut node = SubstrateNode::new( - context.kitchensink_configuration.path.clone(), - SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND, - &context, - ); - node.init(context.genesis_configuration.genesis().unwrap().clone()) - .expect("Failed to initialize the node") - .spawn_process() - .expect("Failed to spawn the node process"); - (context, node) - } + let context = test_config(); + let mut node = SubstrateNode::new( + context.kitchensink_configuration.path.clone(), + SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND, + &context, + ); + node.init(context.genesis_configuration.genesis().unwrap().clone()) + .expect("Failed to initialize the node") + .spawn_process() + .expect("Failed to spawn the node process"); + (context, node) + } - fn shared_state() -> &'static (TestExecutionContext, SubstrateNode) { - static STATE: LazyLock<(TestExecutionContext, SubstrateNode)> = LazyLock::new(new_node); - &STATE - } + fn shared_state() -> &'static (TestExecutionContext, SubstrateNode) { + static STATE: LazyLock<(TestExecutionContext, SubstrateNode)> = LazyLock::new(new_node); + &STATE + } - fn shared_node() -> &'static SubstrateNode { - &shared_state().1 - } + fn shared_node() -> &'static SubstrateNode { + &shared_state().1 + } - #[tokio::test] - async fn node_mines_simple_transfer_transaction_and_returns_receipt() { - // Arrange - let (context, node) = shared_state(); + #[tokio::test] + async fn node_mines_simple_transfer_transaction_and_returns_receipt() { + // Arrange + let (context, node) = shared_state(); - let provider = node.provider().await.expect("Failed to create provider"); + let provider = node.provider().await.expect("Failed to create provider"); - let account_address = context - .wallet_configuration - .wallet() - .default_signer() - .address(); - let transaction = TransactionRequest::default() - .to(account_address) - 
.value(U256::from(100_000_000_000_000u128)); + let account_address = context.wallet_configuration.wallet().default_signer().address(); + let transaction = TransactionRequest::default() + .to(account_address) + .value(U256::from(100_000_000_000_000u128)); - // Act - let receipt = provider.send_transaction(transaction).await; + // Act + let receipt = provider.send_transaction(transaction).await; - // Assert - let _ = receipt - .expect("Failed to send the transfer transaction") - .get_receipt() - .await - .expect("Failed to get the receipt for the transfer"); - } + // Assert + let _ = receipt + .expect("Failed to send the transfer transaction") + .get_receipt() + .await + .expect("Failed to get the receipt for the transfer"); + } - #[test] - #[ignore = "Ignored since they take a long time to run"] - fn test_init_generates_chainspec_with_balances() { - let genesis_content = r#" + #[test] + #[ignore = "Ignored since they take a long time to run"] + fn test_init_generates_chainspec_with_balances() { + let genesis_content = r#" { "alloc": { "90F8bf6A479f320ead074411a4B0e7944Ea8c9C1": { @@ -1259,49 +1238,48 @@ mod tests { } "#; - let context = test_config(); - let mut dummy_node = SubstrateNode::new( - context.kitchensink_configuration.path.clone(), - SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND, - &context, - ); + let context = test_config(); + let mut dummy_node = SubstrateNode::new( + context.kitchensink_configuration.path.clone(), + SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND, + &context, + ); - // Call `init()` - dummy_node - .init(serde_json::from_str(genesis_content).unwrap()) - .expect("init failed"); + // Call `init()` + dummy_node + .init(serde_json::from_str(genesis_content).unwrap()) + .expect("init failed"); - // Check that the patched chainspec file was generated - let final_chainspec_path = dummy_node - .base_directory - .join(SubstrateNode::CHAIN_SPEC_JSON_FILE); - assert!(final_chainspec_path.exists(), "Chainspec file should exist"); + // 
Check that the patched chainspec file was generated + let final_chainspec_path = + dummy_node.base_directory.join(SubstrateNode::CHAIN_SPEC_JSON_FILE); + assert!(final_chainspec_path.exists(), "Chainspec file should exist"); - let contents = fs::read_to_string(&final_chainspec_path).expect("Failed to read chainspec"); + let contents = fs::read_to_string(&final_chainspec_path).expect("Failed to read chainspec"); - // Validate that the Substrate addresses derived from the Ethereum addresses are in the file - let first_eth_addr = SubstrateNode::eth_to_substrate_address( - &"90F8bf6A479f320ead074411a4B0e7944Ea8c9C1".parse().unwrap(), - ); - let second_eth_addr = SubstrateNode::eth_to_substrate_address( - &"Ab8483F64d9C6d1EcF9b849Ae677dD3315835cb2".parse().unwrap(), - ); + // Validate that the Substrate addresses derived from the Ethereum addresses are in the file + let first_eth_addr = SubstrateNode::eth_to_substrate_address( + &"90F8bf6A479f320ead074411a4B0e7944Ea8c9C1".parse().unwrap(), + ); + let second_eth_addr = SubstrateNode::eth_to_substrate_address( + &"Ab8483F64d9C6d1EcF9b849Ae677dD3315835cb2".parse().unwrap(), + ); - assert!( - contents.contains(&first_eth_addr), - "Chainspec should contain Substrate address for first Ethereum account" - ); - assert!( - contents.contains(&second_eth_addr), - "Chainspec should contain Substrate address for second Ethereum account" - ); - } + assert!( + contents.contains(&first_eth_addr), + "Chainspec should contain Substrate address for first Ethereum account" + ); + assert!( + contents.contains(&second_eth_addr), + "Chainspec should contain Substrate address for second Ethereum account" + ); + } - #[test] - #[ignore = "Ignored since they take a long time to run"] - fn test_parse_genesis_alloc() { - // Create test genesis file - let genesis_json = r#" + #[test] + #[ignore = "Ignored since they take a long time to run"] + fn test_parse_genesis_alloc() { + // Create test genesis file + let genesis_json = r#" { "alloc": { 
"0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1": { "balance": "1000000000000000000" }, @@ -1311,222 +1289,198 @@ mod tests { } "#; - let context = test_config(); - let node = SubstrateNode::new( - context.kitchensink_configuration.path.clone(), - SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND, - &context, - ); + let context = test_config(); + let node = SubstrateNode::new( + context.kitchensink_configuration.path.clone(), + SubstrateNode::KITCHENSINK_EXPORT_CHAINSPEC_COMMAND, + &context, + ); - let result = node - .extract_balance_from_genesis_file(&serde_json::from_str(genesis_json).unwrap()) - .unwrap(); + let result = node + .extract_balance_from_genesis_file(&serde_json::from_str(genesis_json).unwrap()) + .unwrap(); - let result_map: std::collections::HashMap<_, _> = result.into_iter().collect(); + let result_map: std::collections::HashMap<_, _> = result.into_iter().collect(); - assert_eq!( - result_map.get("5FLneRcWAfk3X3tg6PuGyLNGAquPAZez5gpqvyuf3yUK8VaV"), - Some(&1_000_000_000_000_000_000u128) - ); + assert_eq!( + result_map.get("5FLneRcWAfk3X3tg6PuGyLNGAquPAZez5gpqvyuf3yUK8VaV"), + Some(&1_000_000_000_000_000_000u128) + ); - assert_eq!( - result_map.get("5C4hrfjw9DjXZTzV3MwzrrAr9P1MLDHajjSidz9bR544LEq1"), - Some(&1_000_000_000_000_000_000u128) - ); + assert_eq!( + result_map.get("5C4hrfjw9DjXZTzV3MwzrrAr9P1MLDHajjSidz9bR544LEq1"), + Some(&1_000_000_000_000_000_000u128) + ); - assert_eq!( - result_map.get("5HrN7fHLXWcFiXPwwtq2EkSGns9eMmoUQnbVKweNz3VVr6N4"), - Some(&123_456_789u128) - ); - } + assert_eq!( + result_map.get("5HrN7fHLXWcFiXPwwtq2EkSGns9eMmoUQnbVKweNz3VVr6N4"), + Some(&123_456_789u128) + ); + } - #[test] - #[ignore = "Ignored since they take a long time to run"] - fn print_eth_to_substrate_mappings() { - let eth_addresses = vec![ - "0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1", - "0xffffffffffffffffffffffffffffffffffffffff", - "90F8bf6A479f320ead074411a4B0e7944Ea8c9C1", - ]; + #[test] + #[ignore = "Ignored since they take a long time to run"] + 
fn print_eth_to_substrate_mappings() { + let eth_addresses = vec![ + "0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1", + "0xffffffffffffffffffffffffffffffffffffffff", + "90F8bf6A479f320ead074411a4B0e7944Ea8c9C1", + ]; - for eth_addr in eth_addresses { - let ss58 = SubstrateNode::eth_to_substrate_address(ð_addr.parse().unwrap()); + for eth_addr in eth_addresses { + let ss58 = SubstrateNode::eth_to_substrate_address(ð_addr.parse().unwrap()); - println!("Ethereum: {eth_addr} -> Substrate SS58: {ss58}"); - } - } + println!("Ethereum: {eth_addr} -> Substrate SS58: {ss58}"); + } + } - #[test] - #[ignore = "Ignored since they take a long time to run"] - fn test_eth_to_substrate_address() { - let cases = vec![ - ( - "0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1", - "5FLneRcWAfk3X3tg6PuGyLNGAquPAZez5gpqvyuf3yUK8VaV", - ), - ( - "90F8bf6A479f320ead074411a4B0e7944Ea8c9C1", - "5FLneRcWAfk3X3tg6PuGyLNGAquPAZez5gpqvyuf3yUK8VaV", - ), - ( - "0x0000000000000000000000000000000000000000", - "5C4hrfjw9DjXZTzV3MwzrrAr9P1MLDHajjSidz9bR544LEq1", - ), - ( - "0xffffffffffffffffffffffffffffffffffffffff", - "5HrN7fHLXWcFiXPwwtq2EkSGns9eMmoUQnbVKweNz3VVr6N4", - ), - ]; + #[test] + #[ignore = "Ignored since they take a long time to run"] + fn test_eth_to_substrate_address() { + let cases = vec![ + ( + "0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1", + "5FLneRcWAfk3X3tg6PuGyLNGAquPAZez5gpqvyuf3yUK8VaV", + ), + ( + "90F8bf6A479f320ead074411a4B0e7944Ea8c9C1", + "5FLneRcWAfk3X3tg6PuGyLNGAquPAZez5gpqvyuf3yUK8VaV", + ), + ( + "0x0000000000000000000000000000000000000000", + "5C4hrfjw9DjXZTzV3MwzrrAr9P1MLDHajjSidz9bR544LEq1", + ), + ( + "0xffffffffffffffffffffffffffffffffffffffff", + "5HrN7fHLXWcFiXPwwtq2EkSGns9eMmoUQnbVKweNz3VVr6N4", + ), + ]; - for (eth_addr, expected_ss58) in cases { - let result = SubstrateNode::eth_to_substrate_address(ð_addr.parse().unwrap()); - assert_eq!( - result, expected_ss58, - "Mismatch for Ethereum address {eth_addr}" - ); - } - } + for (eth_addr, expected_ss58) in cases { + let 
result = SubstrateNode::eth_to_substrate_address(ð_addr.parse().unwrap()); + assert_eq!(result, expected_ss58, "Mismatch for Ethereum address {eth_addr}"); + } + } - #[test] - #[ignore = "Ignored since they take a long time to run"] - fn version_works() { - let node = shared_node(); + #[test] + #[ignore = "Ignored since they take a long time to run"] + fn version_works() { + let node = shared_node(); - let version = node.version().unwrap(); + let version = node.version().unwrap(); - assert!( - version.starts_with("substrate-node"), - "Expected Substrate-node version string, got: {version}" - ); - } + assert!( + version.starts_with("substrate-node"), + "Expected Substrate-node version string, got: {version}" + ); + } - #[test] - #[ignore = "Ignored since they take a long time to run"] - fn eth_rpc_version_works() { - let node = shared_node(); + #[test] + #[ignore = "Ignored since they take a long time to run"] + fn eth_rpc_version_works() { + let node = shared_node(); - let version = node.eth_rpc_version().unwrap(); + let version = node.eth_rpc_version().unwrap(); - assert!( - version.starts_with("pallet-revive-eth-rpc"), - "Expected eth-rpc version string, got: {version}" - ); - } + assert!( + version.starts_with("pallet-revive-eth-rpc"), + "Expected eth-rpc version string, got: {version}" + ); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_chain_id_from_node() { - // Arrange - let node = shared_node(); + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_chain_id_from_node() { + // Arrange + let node = shared_node(); - // Act - let chain_id = node.resolver().await.unwrap().chain_id().await; + // Act + let chain_id = node.resolver().await.unwrap().chain_id().await; - // Assert - let chain_id = chain_id.expect("Failed to get the chain id"); - assert_eq!(chain_id, 420_420_420); - } + // Assert + let chain_id = chain_id.expect("Failed to get the chain id"); + 
assert_eq!(chain_id, 420_420_420); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_gas_limit_from_node() { - // Arrange - let node = shared_node(); + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_gas_limit_from_node() { + // Arrange + let node = shared_node(); - // Act - let gas_limit = node - .resolver() - .await - .unwrap() - .block_gas_limit(BlockNumberOrTag::Latest) - .await; + // Act + let gas_limit = + node.resolver().await.unwrap().block_gas_limit(BlockNumberOrTag::Latest).await; - // Assert - let _ = gas_limit.expect("Failed to get the gas limit"); - } + // Assert + let _ = gas_limit.expect("Failed to get the gas limit"); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_coinbase_from_node() { - // Arrange - let node = shared_node(); + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_coinbase_from_node() { + // Arrange + let node = shared_node(); - // Act - let coinbase = node - .resolver() - .await - .unwrap() - .block_coinbase(BlockNumberOrTag::Latest) - .await; + // Act + let coinbase = + node.resolver().await.unwrap().block_coinbase(BlockNumberOrTag::Latest).await; - // Assert - let _ = coinbase.expect("Failed to get the coinbase"); - } + // Assert + let _ = coinbase.expect("Failed to get the coinbase"); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_block_difficulty_from_node() { - // Arrange - let node = shared_node(); + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_block_difficulty_from_node() { + // Arrange + let node = shared_node(); - // Act - let block_difficulty = node - .resolver() - .await - .unwrap() - .block_difficulty(BlockNumberOrTag::Latest) - .await; + // Act + let block_difficulty = + 
node.resolver().await.unwrap().block_difficulty(BlockNumberOrTag::Latest).await; - // Assert - let _ = block_difficulty.expect("Failed to get the block difficulty"); - } + // Assert + let _ = block_difficulty.expect("Failed to get the block difficulty"); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_block_hash_from_node() { - // Arrange - let node = shared_node(); + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_block_hash_from_node() { + // Arrange + let node = shared_node(); - // Act - let block_hash = node - .resolver() - .await - .unwrap() - .block_hash(BlockNumberOrTag::Latest) - .await; + // Act + let block_hash = node.resolver().await.unwrap().block_hash(BlockNumberOrTag::Latest).await; - // Assert - let _ = block_hash.expect("Failed to get the block hash"); - } + // Assert + let _ = block_hash.expect("Failed to get the block hash"); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_block_timestamp_from_node() { - // Arrange - let node = shared_node(); + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_block_timestamp_from_node() { + // Arrange + let node = shared_node(); - // Act - let block_timestamp = node - .resolver() - .await - .unwrap() - .block_timestamp(BlockNumberOrTag::Latest) - .await; + // Act + let block_timestamp = + node.resolver().await.unwrap().block_timestamp(BlockNumberOrTag::Latest).await; - // Assert - let _ = block_timestamp.expect("Failed to get the block timestamp"); - } + // Assert + let _ = block_timestamp.expect("Failed to get the block timestamp"); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_block_number_from_node() { - // Arrange - let node = shared_node(); + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_block_number_from_node() { + // 
Arrange + let node = shared_node(); - // Act - let block_number = node.resolver().await.unwrap().last_block_number().await; + // Act + let block_number = node.resolver().await.unwrap().last_block_number().await; - // Assert - let _ = block_number.expect("Failed to get the block number"); - } + // Assert + let _ = block_number.expect("Failed to get the block number"); + } } diff --git a/crates/node/src/node_implementations/zombienet.rs b/crates/node/src/node_implementations/zombienet.rs index 092d0f2..82a3e71 100644 --- a/crates/node/src/node_implementations/zombienet.rs +++ b/crates/node/src/node_implementations/zombienet.rs @@ -3,55 +3,47 @@ //! ## Required Binaries //! This module requires the following binaries to be compiled and available in your PATH: //! -//! 1. **polkadot-parachain**: -//! ```bash -//! git clone https://github.com/paritytech/polkadot-sdk.git -//! cd polkadot-sdk -//! cargo build --release --locked -p polkadot-parachain-bin --bin polkadot-parachain -//! ``` +//! 1. **polkadot-parachain**: ```bash git clone https://github.com/paritytech/polkadot-sdk.git cd +//! polkadot-sdk cargo build --release --locked -p polkadot-parachain-bin --bin +//! polkadot-parachain ``` //! -//! 2. **eth-rpc** (Revive EVM RPC server): -//! ```bash -//! git clone https://github.com/paritytech/polkadot-sdk.git -//! cd polkadot-sdk -//! cargo build --locked --profile production -p pallet-revive-eth-rpc --bin eth-rpc -//! ``` +//! 2. **eth-rpc** (Revive EVM RPC server): ```bash git clone https://github.com/paritytech/polkadot-sdk.git +//! cd polkadot-sdk cargo build --locked --profile production -p pallet-revive-eth-rpc --bin +//! eth-rpc ``` //! -//! 3. **polkadot** (for the relay chain): -//! ```bash -//! # In polkadot-sdk directory -//! cargo build --locked --profile testnet --features fast-runtime --bin polkadot --bin polkadot-prepare-worker --bin polkadot-execute-worker -//! ``` +//! 3. 
**polkadot** (for the relay chain): ```bash # In polkadot-sdk directory cargo build --locked +//! --profile testnet --features fast-runtime --bin polkadot --bin polkadot-prepare-worker --bin +//! polkadot-execute-worker ``` //! //! Make sure to add the build output directories to your PATH or provide //! the full paths in your configuration. use std::{ - fs::{create_dir_all, remove_dir_all}, - path::PathBuf, - pin::Pin, - process::{Command, Stdio}, - sync::{ - Arc, - atomic::{AtomicU32, Ordering}, - }, - time::Duration, + fs::{create_dir_all, remove_dir_all}, + path::PathBuf, + pin::Pin, + process::{Command, Stdio}, + sync::{ + Arc, + atomic::{AtomicU32, Ordering}, + }, + time::Duration, }; use alloy::{ - eips::BlockNumberOrTag, - genesis::{Genesis, GenesisAccount}, - network::{Ethereum, EthereumWallet, NetworkWallet}, - primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256}, - providers::{ - Provider, - ext::DebugApi, - fillers::{CachedNonceManager, ChainIdFiller, FillProvider, NonceFiller, TxFiller}, - }, - rpc::types::{ - EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest, - trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame}, - }, + eips::BlockNumberOrTag, + genesis::{Genesis, GenesisAccount}, + network::{Ethereum, EthereumWallet, NetworkWallet}, + primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256}, + providers::{ + Provider, + ext::DebugApi, + fillers::{CachedNonceManager, ChainIdFiller, FillProvider, NonceFiller, TxFiller}, + }, + rpc::types::{ + EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest, + trace::geth::{DiffMode, GethDebugTracingOptions, PreStateConfig, PreStateFrame}, + }, }; use anyhow::Context as _; @@ -69,11 +61,11 @@ use tracing::instrument; use zombienet_sdk::{LocalFileSystem, NetworkConfigBuilder, NetworkConfigExt}; use crate::{ - Node, - constants::INITIAL_BALANCE, - helpers::{Process, 
ProcessReadinessWaitBehavior}, - node_implementations::substrate::ReviveNetwork, - provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider}, + Node, + constants::INITIAL_BALANCE, + helpers::{Process, ProcessReadinessWaitBehavior}, + node_implementations::substrate::ReviveNetwork, + provider_utils::{ConcreteProvider, FallbackGasFiller, construct_concurrency_limited_provider}, }; static NODE_COUNT: AtomicU32 = AtomicU32::new(0); @@ -83,775 +75,758 @@ static NODE_COUNT: AtomicU32 = AtomicU32::new(0); /// an interface to interact with the parachain's Ethereum RPC. #[derive(Debug, Default)] pub struct ZombieNode { - /* Node Identifier */ - id: u32, - connection_string: String, - node_rpc_port: Option, + /* Node Identifier */ + id: u32, + connection_string: String, + node_rpc_port: Option, - /* Directory Paths */ - base_directory: PathBuf, - logs_directory: PathBuf, + /* Directory Paths */ + base_directory: PathBuf, + logs_directory: PathBuf, - /* Binary Paths & Timeouts */ - eth_proxy_binary: PathBuf, - polkadot_parachain_path: PathBuf, + /* Binary Paths & Timeouts */ + eth_proxy_binary: PathBuf, + polkadot_parachain_path: PathBuf, - /* Spawned Processes */ - eth_rpc_process: Option, + /* Spawned Processes */ + eth_rpc_process: Option, - /* Zombienet Network */ - network_config: Option, - network: Option>, + /* Zombienet Network */ + network_config: Option, + network: Option>, - /* Provider Related Fields */ - wallet: Arc, - nonce_manager: CachedNonceManager, + /* Provider Related Fields */ + wallet: Arc, + nonce_manager: CachedNonceManager, - provider: OnceCell>>, + provider: OnceCell>>, } impl ZombieNode { - const BASE_DIRECTORY: &str = "zombienet"; - const DATA_DIRECTORY: &str = "data"; - const LOGS_DIRECTORY: &str = "logs"; + const BASE_DIRECTORY: &str = "zombienet"; + const DATA_DIRECTORY: &str = "data"; + const LOGS_DIRECTORY: &str = "logs"; - const NODE_BASE_RPC_PORT: u16 = 9946; - const PARACHAIN_ID: u32 = 100; - const 
ETH_RPC_BASE_PORT: u16 = 8545; + const NODE_BASE_RPC_PORT: u16 = 9946; + const PARACHAIN_ID: u32 = 100; + const ETH_RPC_BASE_PORT: u16 = 8545; - const ETH_RPC_READY_MARKER: &str = "Running JSON-RPC server"; + const ETH_RPC_READY_MARKER: &str = "Running JSON-RPC server"; - const EXPORT_CHAINSPEC_COMMAND: &str = "build-spec"; - const CHAIN_SPEC_JSON_FILE: &str = "template_chainspec.json"; + const EXPORT_CHAINSPEC_COMMAND: &str = "build-spec"; + const CHAIN_SPEC_JSON_FILE: &str = "template_chainspec.json"; - pub fn new( - polkadot_parachain_path: PathBuf, - context: impl AsRef - + AsRef - + AsRef, - ) -> Self { - let eth_proxy_binary = AsRef::::as_ref(&context) - .path - .to_owned(); - let working_directory_path = AsRef::::as_ref(&context); - let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst); - let base_directory = working_directory_path - .join(Self::BASE_DIRECTORY) - .join(id.to_string()); - let logs_directory = base_directory.join(Self::LOGS_DIRECTORY); - let wallet = AsRef::::as_ref(&context).wallet(); + pub fn new( + polkadot_parachain_path: PathBuf, + context: impl AsRef + + AsRef + + AsRef, + ) -> Self { + let eth_proxy_binary = AsRef::::as_ref(&context).path.to_owned(); + let working_directory_path = AsRef::::as_ref(&context); + let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst); + let base_directory = working_directory_path.join(Self::BASE_DIRECTORY).join(id.to_string()); + let logs_directory = base_directory.join(Self::LOGS_DIRECTORY); + let wallet = AsRef::::as_ref(&context).wallet(); - Self { - id, - base_directory, - logs_directory, - wallet, - polkadot_parachain_path, - eth_proxy_binary, - nonce_manager: CachedNonceManager::default(), - network_config: None, - network: None, - eth_rpc_process: None, - connection_string: String::new(), - node_rpc_port: None, - provider: Default::default(), - } - } + Self { + id, + base_directory, + logs_directory, + wallet, + polkadot_parachain_path, + eth_proxy_binary, + nonce_manager: CachedNonceManager::default(), + 
network_config: None, + network: None, + eth_rpc_process: None, + connection_string: String::new(), + node_rpc_port: None, + provider: Default::default(), + } + } - fn init(&mut self, genesis: Genesis) -> anyhow::Result<&mut Self> { - let _ = clear_directory(&self.base_directory); - let _ = clear_directory(&self.logs_directory); + fn init(&mut self, genesis: Genesis) -> anyhow::Result<&mut Self> { + let _ = clear_directory(&self.base_directory); + let _ = clear_directory(&self.logs_directory); - create_dir_all(&self.base_directory) - .context("Failed to create base directory for zombie node")?; - create_dir_all(&self.logs_directory) - .context("Failed to create logs directory for zombie node")?; + create_dir_all(&self.base_directory) + .context("Failed to create base directory for zombie node")?; + create_dir_all(&self.logs_directory) + .context("Failed to create logs directory for zombie node")?; - let template_chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE); - self.prepare_chainspec(template_chainspec_path.clone(), genesis)?; - let polkadot_parachain_path = self - .polkadot_parachain_path - .to_str() - .context("Invalid polkadot parachain path")?; + let template_chainspec_path = self.base_directory.join(Self::CHAIN_SPEC_JSON_FILE); + self.prepare_chainspec(template_chainspec_path.clone(), genesis)?; + let polkadot_parachain_path = self + .polkadot_parachain_path + .to_str() + .context("Invalid polkadot parachain path")?; - let node_rpc_port = Self::NODE_BASE_RPC_PORT + self.id as u16; + let node_rpc_port = Self::NODE_BASE_RPC_PORT + self.id as u16; - let network_config = NetworkConfigBuilder::new() - .with_relaychain(|r| { - r.with_chain("westend-local") - .with_default_command("polkadot") - .with_node(|node| node.with_name("alice")) - .with_node(|node| node.with_name("bob")) - }) - .with_global_settings(|g| g.with_base_dir(&self.base_directory)) - .with_parachain(|p| { - p.with_id(Self::PARACHAIN_ID) - 
.with_chain_spec_path(template_chainspec_path.to_str().unwrap()) - .with_chain("asset-hub-westend-local") - .with_collator(|n| { - n.with_name("Collator") - .with_command(polkadot_parachain_path) - .with_rpc_port(node_rpc_port) - }) - }) - .build() - .map_err(|e| anyhow::anyhow!("Failed to build zombienet network config: {e:?}"))?; + let network_config = NetworkConfigBuilder::new() + .with_relaychain(|r| { + r.with_chain("westend-local") + .with_default_command("polkadot") + .with_node(|node| node.with_name("alice")) + .with_node(|node| node.with_name("bob")) + }) + .with_global_settings(|g| g.with_base_dir(&self.base_directory)) + .with_parachain(|p| { + p.with_id(Self::PARACHAIN_ID) + .with_chain_spec_path(template_chainspec_path.to_str().unwrap()) + .with_chain("asset-hub-westend-local") + .with_collator(|n| { + n.with_name("Collator") + .with_command(polkadot_parachain_path) + .with_rpc_port(node_rpc_port) + }) + }) + .build() + .map_err(|e| anyhow::anyhow!("Failed to build zombienet network config: {e:?}"))?; - self.node_rpc_port = Some(node_rpc_port); - self.network_config = Some(network_config); + self.node_rpc_port = Some(node_rpc_port); + self.network_config = Some(network_config); - Ok(self) - } + Ok(self) + } - fn spawn_process(&mut self) -> anyhow::Result<()> { - let network_config = self - .network_config - .clone() - .context("Node not initialized, call init() first")?; + fn spawn_process(&mut self) -> anyhow::Result<()> { + let network_config = + self.network_config.clone().context("Node not initialized, call init() first")?; - let rt = tokio::runtime::Runtime::new().unwrap(); - let network = rt.block_on(async { - network_config - .spawn_native() - .await - .map_err(|e| anyhow::anyhow!("Failed to spawn zombienet network: {e:?}")) - })?; + let rt = tokio::runtime::Runtime::new().unwrap(); + let network = rt.block_on(async { + network_config + .spawn_native() + .await + .map_err(|e| anyhow::anyhow!("Failed to spawn zombienet network: {e:?}")) + })?; - 
tracing::debug!("Zombienet network is up"); + tracing::debug!("Zombienet network is up"); - let node_url = format!("ws://localhost:{}", self.node_rpc_port.unwrap()); - let eth_rpc_port = Self::ETH_RPC_BASE_PORT + self.id as u16; + let node_url = format!("ws://localhost:{}", self.node_rpc_port.unwrap()); + let eth_rpc_port = Self::ETH_RPC_BASE_PORT + self.id as u16; - let eth_rpc_process = Process::new( - "proxy", - self.logs_directory.as_path(), - self.eth_proxy_binary.as_path(), - |command, stdout_file, stderr_file| { - command - .arg("--node-rpc-url") - .arg(node_url) - .arg("--rpc-cors") - .arg("all") - .arg("--rpc-max-connections") - .arg(u32::MAX.to_string()) - .arg("--rpc-port") - .arg(eth_rpc_port.to_string()) - .stdout(stdout_file) - .stderr(stderr_file); - }, - ProcessReadinessWaitBehavior::TimeBoundedWaitFunction { - max_wait_duration: Duration::from_secs(30), - check_function: Box::new(|_, stderr_line| match stderr_line { - Some(line) => Ok(line.contains(Self::ETH_RPC_READY_MARKER)), - None => Ok(false), - }), - }, - ); + let eth_rpc_process = Process::new( + "proxy", + self.logs_directory.as_path(), + self.eth_proxy_binary.as_path(), + |command, stdout_file, stderr_file| { + command + .arg("--node-rpc-url") + .arg(node_url) + .arg("--rpc-cors") + .arg("all") + .arg("--rpc-max-connections") + .arg(u32::MAX.to_string()) + .arg("--rpc-port") + .arg(eth_rpc_port.to_string()) + .stdout(stdout_file) + .stderr(stderr_file); + }, + ProcessReadinessWaitBehavior::TimeBoundedWaitFunction { + max_wait_duration: Duration::from_secs(30), + check_function: Box::new(|_, stderr_line| match stderr_line { + Some(line) => Ok(line.contains(Self::ETH_RPC_READY_MARKER)), + None => Ok(false), + }), + }, + ); - match eth_rpc_process { - Ok(process) => self.eth_rpc_process = Some(process), - Err(err) => { - tracing::error!(?err, "Failed to start eth proxy, shutting down gracefully"); - self.shutdown() - .context("Failed to gracefully shutdown after eth proxy start error")?; - 
return Err(err); - } - } + match eth_rpc_process { + Ok(process) => self.eth_rpc_process = Some(process), + Err(err) => { + tracing::error!(?err, "Failed to start eth proxy, shutting down gracefully"); + self.shutdown() + .context("Failed to gracefully shutdown after eth proxy start error")?; + return Err(err); + }, + } - tracing::debug!("eth-rpc is up"); + tracing::debug!("eth-rpc is up"); - self.connection_string = format!("http://localhost:{}", eth_rpc_port); - self.network = Some(network); + self.connection_string = format!("http://localhost:{}", eth_rpc_port); + self.network = Some(network); - Ok(()) - } + Ok(()) + } - fn prepare_chainspec( - &mut self, - template_chainspec_path: PathBuf, - mut genesis: Genesis, - ) -> anyhow::Result<()> { - let mut cmd: Command = std::process::Command::new(&self.polkadot_parachain_path); - cmd.arg(Self::EXPORT_CHAINSPEC_COMMAND) - .arg("--chain") - .arg("asset-hub-westend-local"); + fn prepare_chainspec( + &mut self, + template_chainspec_path: PathBuf, + mut genesis: Genesis, + ) -> anyhow::Result<()> { + let mut cmd: Command = std::process::Command::new(&self.polkadot_parachain_path); + cmd.arg(Self::EXPORT_CHAINSPEC_COMMAND) + .arg("--chain") + .arg("asset-hub-westend-local"); - let output = cmd.output().context("Failed to export the chain-spec")?; + let output = cmd.output().context("Failed to export the chain-spec")?; - if !output.status.success() { - anyhow::bail!( - "Build chain-spec failed: {}", - String::from_utf8_lossy(&output.stderr) - ); - } + if !output.status.success() { + anyhow::bail!("Build chain-spec failed: {}", String::from_utf8_lossy(&output.stderr)); + } - let content = String::from_utf8(output.stdout) - .context("Failed to decode collators chain-spec output as UTF-8")?; - let mut chainspec_json: JsonValue = - serde_json::from_str(&content).context("Failed to parse collators chain spec JSON")?; + let content = String::from_utf8(output.stdout) + .context("Failed to decode collators chain-spec output as 
UTF-8")?; + let mut chainspec_json: JsonValue = + serde_json::from_str(&content).context("Failed to parse collators chain spec JSON")?; - let existing_chainspec_balances = - chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"] - .as_array() - .cloned() - .unwrap_or_default(); + let existing_chainspec_balances = + chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"] + .as_array() + .cloned() + .unwrap_or_default(); - let mut merged_balances: Vec<(String, u128)> = existing_chainspec_balances - .into_iter() - .filter_map(|val| { - if let Some(arr) = val.as_array() { - if arr.len() == 2 { - let account = arr[0].as_str()?.to_string(); - let balance = arr[1].as_f64()? as u128; - return Some((account, balance)); - } - } - None - }) - .collect(); + let mut merged_balances: Vec<(String, u128)> = existing_chainspec_balances + .into_iter() + .filter_map(|val| { + if let Some(arr) = val.as_array() { + if arr.len() == 2 { + let account = arr[0].as_str()?.to_string(); + let balance = arr[1].as_f64()? as u128; + return Some((account, balance)); + } + } + None + }) + .collect(); - let mut eth_balances = { - for signer_address in - >::signer_addresses(&self.wallet) - { - // Note, the use of the entry API here means that we only modify the entries for any - // account that is not in the `alloc` field of the genesis state. - genesis - .alloc - .entry(signer_address) - .or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE))); - } - self.extract_balance_from_genesis_file(&genesis) - .context("Failed to extract balances from EVM genesis JSON")? - }; + let mut eth_balances = { + for signer_address in + >::signer_addresses(&self.wallet) + { + // Note, the use of the entry API here means that we only modify the entries for any + // account that is not in the `alloc` field of the genesis state. 
+ genesis + .alloc + .entry(signer_address) + .or_insert(GenesisAccount::default().with_balance(U256::from(INITIAL_BALANCE))); + } + self.extract_balance_from_genesis_file(&genesis) + .context("Failed to extract balances from EVM genesis JSON")? + }; - merged_balances.append(&mut eth_balances); + merged_balances.append(&mut eth_balances); - chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"] = - json!(merged_balances); + chainspec_json["genesis"]["runtimeGenesis"]["patch"]["balances"]["balances"] = + json!(merged_balances); - let writer = std::fs::File::create(&template_chainspec_path) - .context("Failed to create template chainspec file")?; + let writer = std::fs::File::create(&template_chainspec_path) + .context("Failed to create template chainspec file")?; - serde_json::to_writer_pretty(writer, &chainspec_json) - .context("Failed to write template chainspec JSON")?; + serde_json::to_writer_pretty(writer, &chainspec_json) + .context("Failed to write template chainspec JSON")?; - Ok(()) - } + Ok(()) + } - fn extract_balance_from_genesis_file( - &self, - genesis: &Genesis, - ) -> anyhow::Result> { - genesis - .alloc - .iter() - .try_fold(Vec::new(), |mut vec, (address, acc)| { - let polkadot_address = Self::eth_to_polkadot_address(address); - let balance = acc.balance.try_into()?; - vec.push((polkadot_address, balance)); - Ok(vec) - }) - } + fn extract_balance_from_genesis_file( + &self, + genesis: &Genesis, + ) -> anyhow::Result> { + genesis.alloc.iter().try_fold(Vec::new(), |mut vec, (address, acc)| { + let polkadot_address = Self::eth_to_polkadot_address(address); + let balance = acc.balance.try_into()?; + vec.push((polkadot_address, balance)); + Ok(vec) + }) + } - fn eth_to_polkadot_address(address: &Address) -> String { - let eth_bytes = address.0.0; + fn eth_to_polkadot_address(address: &Address) -> String { + let eth_bytes = address.0.0; - let mut padded = [0xEEu8; 32]; - padded[..20].copy_from_slice(ð_bytes); + let mut padded = 
[0xEEu8; 32]; + padded[..20].copy_from_slice(ð_bytes); - let account_id = AccountId32::from(padded); - account_id.to_ss58check() - } + let account_id = AccountId32::from(padded); + account_id.to_ss58check() + } - pub fn eth_rpc_version(&self) -> anyhow::Result { - let output = Command::new(&self.eth_proxy_binary) - .arg("--version") - .stdin(Stdio::null()) - .stdout(Stdio::piped()) - .stderr(Stdio::null()) - .spawn()? - .wait_with_output()? - .stdout; + pub fn eth_rpc_version(&self) -> anyhow::Result { + let output = Command::new(&self.eth_proxy_binary) + .arg("--version") + .stdin(Stdio::null()) + .stdout(Stdio::piped()) + .stderr(Stdio::null()) + .spawn()? + .wait_with_output()? + .stdout; - Ok(String::from_utf8_lossy(&output).trim().to_string()) - } + Ok(String::from_utf8_lossy(&output).trim().to_string()) + } - async fn provider( - &self, - ) -> anyhow::Result>> { - self.provider - .get_or_try_init(|| async move { - construct_concurrency_limited_provider::( - self.connection_string.as_str(), - FallbackGasFiller::new(250_000_000, 5_000_000_000, 1_000_000_000), - ChainIdFiller::default(), // TODO: use CHAIN_ID constant - NonceFiller::new(self.nonce_manager.clone()), - self.wallet.clone(), - ) - .await - .context("Failed to construct the provider") - }) - .await - .cloned() - } + async fn provider( + &self, + ) -> anyhow::Result>> { + self.provider + .get_or_try_init(|| async move { + construct_concurrency_limited_provider::( + self.connection_string.as_str(), + FallbackGasFiller::new(250_000_000, 5_000_000_000, 1_000_000_000), + ChainIdFiller::default(), // TODO: use CHAIN_ID constant + NonceFiller::new(self.nonce_manager.clone()), + self.wallet.clone(), + ) + .await + .context("Failed to construct the provider") + }) + .await + .cloned() + } } impl EthereumNode for ZombieNode { - fn pre_transactions(&mut self) -> Pin> + '_>> { - Box::pin(async move { Ok(()) }) - } + fn pre_transactions(&mut self) -> Pin> + '_>> { + Box::pin(async move { Ok(()) }) + } - fn 
id(&self) -> usize { - self.id as _ - } + fn id(&self) -> usize { + self.id as _ + } - fn connection_string(&self) -> &str { - &self.connection_string - } + fn connection_string(&self) -> &str { + &self.connection_string + } - fn submit_transaction( - &self, - transaction: TransactionRequest, - ) -> Pin> + '_>> { - Box::pin(async move { - let provider = self - .provider() - .await - .context("Failed to create the provider for transaction submission")?; - let pending_transaction = provider - .send_transaction(transaction) - .await - .context("Failed to submit the transaction through the provider")?; - Ok(*pending_transaction.tx_hash()) - }) - } + fn submit_transaction( + &self, + transaction: TransactionRequest, + ) -> Pin> + '_>> { + Box::pin(async move { + let provider = self + .provider() + .await + .context("Failed to create the provider for transaction submission")?; + let pending_transaction = provider + .send_transaction(transaction) + .await + .context("Failed to submit the transaction through the provider")?; + Ok(*pending_transaction.tx_hash()) + }) + } - fn get_receipt( - &self, - tx_hash: TxHash, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider() - .await - .context("Failed to create provider for getting the receipt")? - .get_transaction_receipt(tx_hash) - .await - .context("Failed to get the receipt of the transaction")? - .context("Failed to get the receipt of the transaction") - }) - } + fn get_receipt( + &self, + tx_hash: TxHash, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider() + .await + .context("Failed to create provider for getting the receipt")? + .get_transaction_receipt(tx_hash) + .await + .context("Failed to get the receipt of the transaction")? 
+ .context("Failed to get the receipt of the transaction") + }) + } - fn execute_transaction( - &self, - transaction: alloy::rpc::types::TransactionRequest, - ) -> Pin> + '_>> { - Box::pin(async move { - let receipt = self - .provider() - .await - .context("Failed to create provider for transaction submission")? - .send_transaction(transaction) - .await - .context("Failed to submit transaction to proxy")? - .get_receipt() - .await - .context("Failed to fetch transaction receipt from proxy")?; - Ok(receipt) - }) - } + fn execute_transaction( + &self, + transaction: alloy::rpc::types::TransactionRequest, + ) -> Pin> + '_>> { + Box::pin(async move { + let receipt = self + .provider() + .await + .context("Failed to create provider for transaction submission")? + .send_transaction(transaction) + .await + .context("Failed to submit transaction to proxy")? + .get_receipt() + .await + .context("Failed to fetch transaction receipt from proxy")?; + Ok(receipt) + }) + } - fn trace_transaction( - &self, - tx_hash: TxHash, - trace_options: GethDebugTracingOptions, - ) -> Pin> + '_>> - { - Box::pin(async move { - self.provider() - .await - .context("Failed to create provider for debug tracing")? - .debug_trace_transaction(tx_hash, trace_options) - .await - .context("Failed to obtain debug trace from proxy") - }) - } + fn trace_transaction( + &self, + tx_hash: TxHash, + trace_options: GethDebugTracingOptions, + ) -> Pin> + '_>> + { + Box::pin(async move { + self.provider() + .await + .context("Failed to create provider for debug tracing")? + .debug_trace_transaction(tx_hash, trace_options) + .await + .context("Failed to obtain debug trace from proxy") + }) + } - fn state_diff( - &self, - tx_hash: TxHash, - ) -> Pin> + '_>> { - Box::pin(async move { - let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig { - diff_mode: Some(true), - disable_code: None, - disable_storage: None, - }); - match self - .trace_transaction(tx_hash, trace_options) - .await? 
- .try_into_pre_state_frame()? - { - PreStateFrame::Diff(diff) => Ok(diff), - _ => anyhow::bail!("expected a diff mode trace"), - } - }) - } + fn state_diff( + &self, + tx_hash: TxHash, + ) -> Pin> + '_>> { + Box::pin(async move { + let trace_options = GethDebugTracingOptions::prestate_tracer(PreStateConfig { + diff_mode: Some(true), + disable_code: None, + disable_storage: None, + }); + match self + .trace_transaction(tx_hash, trace_options) + .await? + .try_into_pre_state_frame()? + { + PreStateFrame::Diff(diff) => Ok(diff), + _ => anyhow::bail!("expected a diff mode trace"), + } + }) + } - fn balance_of( - &self, - address: Address, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider() - .await - .context("Failed to get the zombie provider")? - .get_balance(address) - .await - .map_err(Into::into) - }) - } + fn balance_of( + &self, + address: Address, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider() + .await + .context("Failed to get the zombie provider")? + .get_balance(address) + .await + .map_err(Into::into) + }) + } - fn latest_state_proof( - &self, - address: Address, - keys: Vec, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider() - .await - .context("Failed to get the zombie provider")? - .get_proof(address, keys) - .latest() - .await - .map_err(Into::into) - }) - } + fn latest_state_proof( + &self, + address: Address, + keys: Vec, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider() + .await + .context("Failed to get the zombie provider")? 
+ .get_proof(address, keys) + .latest() + .await + .map_err(Into::into) + }) + } - fn resolver( - &self, - ) -> Pin>> + '_>> { - Box::pin(async move { - let id = self.id; - let provider = self.provider().await?; + fn resolver( + &self, + ) -> Pin>> + '_>> { + Box::pin(async move { + let id = self.id; + let provider = self.provider().await?; - Ok(Arc::new(ZombieNodeResolver { id, provider }) as Arc) - }) - } + Ok(Arc::new(ZombieNodeResolver { id, provider }) as Arc) + }) + } - fn evm_version(&self) -> EVMVersion { - EVMVersion::Cancun - } + fn evm_version(&self) -> EVMVersion { + EVMVersion::Cancun + } - fn subscribe_to_full_blocks_information( - &self, - ) -> Pin< - Box< - dyn Future>>>> - + '_, - >, - > { - Box::pin(async move { - let provider = self - .provider() - .await - .context("Failed to create the provider for block subscription")?; - let mut block_subscription = provider - .watch_full_blocks() - .await - .context("Failed to create the blocks stream")?; - block_subscription.set_channel_size(0xFFFF); - block_subscription.set_poll_interval(Duration::from_secs(1)); - let block_stream = block_subscription.into_stream(); + fn subscribe_to_full_blocks_information( + &self, + ) -> Pin< + Box< + dyn Future>>>> + + '_, + >, + > { + Box::pin(async move { + let provider = self + .provider() + .await + .context("Failed to create the provider for block subscription")?; + let mut block_subscription = provider + .watch_full_blocks() + .await + .context("Failed to create the blocks stream")?; + block_subscription.set_channel_size(0xFFFF); + block_subscription.set_poll_interval(Duration::from_secs(1)); + let block_stream = block_subscription.into_stream(); - let mined_block_information_stream = block_stream.filter_map(|block| async { - let block = block.ok()?; - Some(MinedBlockInformation { - block_number: block.number(), - block_timestamp: block.header.timestamp, - mined_gas: block.header.gas_used as _, - block_gas_limit: block.header.gas_limit, - transaction_hashes: 
block - .transactions - .into_hashes() - .as_hashes() - .expect("Must be hashes") - .to_vec(), - }) - }); + let mined_block_information_stream = block_stream.filter_map(|block| async { + let block = block.ok()?; + Some(MinedBlockInformation { + block_number: block.number(), + block_timestamp: block.header.timestamp, + mined_gas: block.header.gas_used as _, + block_gas_limit: block.header.gas_limit, + transaction_hashes: block + .transactions + .into_hashes() + .as_hashes() + .expect("Must be hashes") + .to_vec(), + }) + }); - Ok(Box::pin(mined_block_information_stream) - as Pin>>) - }) - } + Ok(Box::pin(mined_block_information_stream) + as Pin>>) + }) + } - fn resolve_signer_or_default(&self, address: Address) -> Address { - let signer_addresses: Vec<_> = - >::signer_addresses(&self.wallet).collect(); - if signer_addresses.contains(&address) { - address - } else { - self.wallet.default_signer().address() - } - } + fn resolve_signer_or_default(&self, address: Address) -> Address { + let signer_addresses: Vec<_> = + >::signer_addresses(&self.wallet).collect(); + if signer_addresses.contains(&address) { + address + } else { + self.wallet.default_signer().address() + } + } } pub struct ZombieNodeResolver, P: Provider> { - id: u32, - provider: FillProvider, + id: u32, + provider: FillProvider, } impl, P: Provider> ResolverApi - for ZombieNodeResolver + for ZombieNodeResolver { - #[instrument(level = "info", skip_all, fields(zombie_node_id = self.id))] - fn chain_id( - &self, - ) -> Pin> + '_>> { - Box::pin(async move { self.provider.get_chain_id().await.map_err(Into::into) }) - } + #[instrument(level = "info", skip_all, fields(zombie_node_id = self.id))] + fn chain_id( + &self, + ) -> Pin> + '_>> { + Box::pin(async move { self.provider.get_chain_id().await.map_err(Into::into) }) + } - #[instrument(level = "info", skip_all, fields(zombie_node_id = self.id))] - fn transaction_gas_price( - &self, - tx_hash: TxHash, - ) -> Pin> + '_>> { - Box::pin(async move { - 
self.provider - .get_transaction_receipt(tx_hash) - .await? - .context("Failed to get the transaction receipt") - .map(|receipt| receipt.effective_gas_price) - }) - } + #[instrument(level = "info", skip_all, fields(zombie_node_id = self.id))] + fn transaction_gas_price( + &self, + tx_hash: TxHash, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_transaction_receipt(tx_hash) + .await? + .context("Failed to get the transaction receipt") + .map(|receipt| receipt.effective_gas_price) + }) + } - #[instrument(level = "info", skip_all, fields(zombie_node_id = self.id))] - fn block_gas_limit( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_block_by_number(number) - .await - .context("Failed to get the block")? - .context("Failed to get the block, perhaps the chain has no blocks?") - .map(|block| block.header.gas_limit as _) - }) - } + #[instrument(level = "info", skip_all, fields(zombie_node_id = self.id))] + fn block_gas_limit( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the block")? + .context("Failed to get the block, perhaps the chain has no blocks?") + .map(|block| block.header.gas_limit as _) + }) + } - #[instrument(level = "info", skip_all, fields(zombie_node_id = self.id))] - fn block_coinbase( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_block_by_number(number) - .await - .context("Failed to get the zombie block")? 
- .context("Failed to get the zombie block, perhaps the chain has no blocks?") - .map(|block| block.header.beneficiary) - }) - } + #[instrument(level = "info", skip_all, fields(zombie_node_id = self.id))] + fn block_coinbase( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the zombie block")? + .context("Failed to get the zombie block, perhaps the chain has no blocks?") + .map(|block| block.header.beneficiary) + }) + } - #[instrument(level = "info", skip_all, fields(zombie_node_id = self.id))] - fn block_difficulty( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_block_by_number(number) - .await - .context("Failed to get the zombie block")? - .context("Failed to get the zombie block, perhaps the chain has no blocks?") - .map(|block| U256::from_be_bytes(block.header.mix_hash.0)) - }) - } + #[instrument(level = "info", skip_all, fields(zombie_node_id = self.id))] + fn block_difficulty( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the zombie block")? + .context("Failed to get the zombie block, perhaps the chain has no blocks?") + .map(|block| U256::from_be_bytes(block.header.mix_hash.0)) + }) + } - #[instrument(level = "info", skip_all, fields(zombie_node_id = self.id))] - fn block_base_fee( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_block_by_number(number) - .await - .context("Failed to get the zombie block")? 
- .context("Failed to get the zombie block, perhaps the chain has no blocks?") - .and_then(|block| { - block - .header - .base_fee_per_gas - .context("Failed to get the base fee per gas") - }) - }) - } + #[instrument(level = "info", skip_all, fields(zombie_node_id = self.id))] + fn block_base_fee( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the zombie block")? + .context("Failed to get the zombie block, perhaps the chain has no blocks?") + .and_then(|block| { + block.header.base_fee_per_gas.context("Failed to get the base fee per gas") + }) + }) + } - #[instrument(level = "info", skip_all, fields(zombie_node_id = self.id))] - fn block_hash( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_block_by_number(number) - .await - .context("Failed to get the zombie block")? - .context("Failed to get the zombie block, perhaps the chain has no blocks?") - .map(|block| block.header.hash) - }) - } + #[instrument(level = "info", skip_all, fields(zombie_node_id = self.id))] + fn block_hash( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the zombie block")? + .context("Failed to get the zombie block, perhaps the chain has no blocks?") + .map(|block| block.header.hash) + }) + } - #[instrument(level = "info", skip_all, fields(zombie_node_id = self.id))] - fn block_timestamp( - &self, - number: BlockNumberOrTag, - ) -> Pin> + '_>> { - Box::pin(async move { - self.provider - .get_block_by_number(number) - .await - .context("Failed to get the zombie block")? 
- .context("Failed to get the zombie block, perhaps the chain has no blocks?") - .map(|block| block.header.timestamp) - }) - } + #[instrument(level = "info", skip_all, fields(zombie_node_id = self.id))] + fn block_timestamp( + &self, + number: BlockNumberOrTag, + ) -> Pin> + '_>> { + Box::pin(async move { + self.provider + .get_block_by_number(number) + .await + .context("Failed to get the zombie block")? + .context("Failed to get the zombie block, perhaps the chain has no blocks?") + .map(|block| block.header.timestamp) + }) + } - #[instrument(level = "info", skip_all, fields(zombie_node_id = self.id))] - fn last_block_number(&self) -> Pin> + '_>> { - Box::pin(async move { self.provider.get_block_number().await.map_err(Into::into) }) - } + #[instrument(level = "info", skip_all, fields(zombie_node_id = self.id))] + fn last_block_number(&self) -> Pin> + '_>> { + Box::pin(async move { self.provider.get_block_number().await.map_err(Into::into) }) + } } impl Node for ZombieNode { - fn shutdown(&mut self) -> anyhow::Result<()> { - // Kill the eth_rpc process - drop(self.eth_rpc_process.take()); + fn shutdown(&mut self) -> anyhow::Result<()> { + // Kill the eth_rpc process + drop(self.eth_rpc_process.take()); - // Destroy the network - if let Some(network) = self.network.take() { - // Handle network cleanup here - tokio::task::spawn_blocking(move || { - let rt = tokio::runtime::Runtime::new().unwrap(); - rt.block_on(async { - if let Err(e) = network.destroy().await { - tracing::warn!("Failed to destroy zombienet network: {e:?}"); - } - }) - }); - } + // Destroy the network + if let Some(network) = self.network.take() { + // Handle network cleanup here + tokio::task::spawn_blocking(move || { + let rt = tokio::runtime::Runtime::new().unwrap(); + rt.block_on(async { + if let Err(e) = network.destroy().await { + tracing::warn!("Failed to destroy zombienet network: {e:?}"); + } + }) + }); + } - // Remove the database directory - if let Err(e) = 
remove_dir_all(self.base_directory.join(Self::DATA_DIRECTORY)) { - tracing::warn!("Failed to remove database directory: {e:?}"); - } + // Remove the database directory + if let Err(e) = remove_dir_all(self.base_directory.join(Self::DATA_DIRECTORY)) { + tracing::warn!("Failed to remove database directory: {e:?}"); + } - Ok(()) - } + Ok(()) + } - fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> { - self.init(genesis)?.spawn_process() - } + fn spawn(&mut self, genesis: Genesis) -> anyhow::Result<()> { + self.init(genesis)?.spawn_process() + } - fn version(&self) -> anyhow::Result { - let output = Command::new(&self.polkadot_parachain_path) - .arg("--version") - .stdin(Stdio::null()) - .stdout(Stdio::piped()) - .stderr(Stdio::null()) - .spawn() - .context("Failed execute --version")? - .wait_with_output() - .context("Failed to wait --version")? - .stdout; - Ok(String::from_utf8_lossy(&output).into()) - } + fn version(&self) -> anyhow::Result { + let output = Command::new(&self.polkadot_parachain_path) + .arg("--version") + .stdin(Stdio::null()) + .stdout(Stdio::piped()) + .stderr(Stdio::null()) + .spawn() + .context("Failed execute --version")? + .wait_with_output() + .context("Failed to wait --version")? 
+ .stdout; + Ok(String::from_utf8_lossy(&output).into()) + } } impl Drop for ZombieNode { - fn drop(&mut self) { - let _ = self.shutdown(); - } + fn drop(&mut self) { + let _ = self.shutdown(); + } } #[cfg(test)] mod tests { - use alloy::rpc::types::TransactionRequest; + use alloy::rpc::types::TransactionRequest; - use crate::node_implementations::zombienet::tests::utils::shared_node; + use crate::node_implementations::zombienet::tests::utils::shared_node; - use super::*; + use super::*; - mod utils { - use super::*; + mod utils { + use super::*; - use std::sync::Arc; - use tokio::sync::OnceCell; + use std::sync::Arc; + use tokio::sync::OnceCell; - pub fn test_config() -> TestExecutionContext { - TestExecutionContext::default() - } + pub fn test_config() -> TestExecutionContext { + TestExecutionContext::default() + } - pub async fn new_node() -> (TestExecutionContext, ZombieNode) { - let context = test_config(); - let mut node = ZombieNode::new( - context.polkadot_parachain_configuration.path.clone(), - &context, - ); - let genesis = context.genesis_configuration.genesis().unwrap().clone(); - node.init(genesis).unwrap(); + pub async fn new_node() -> (TestExecutionContext, ZombieNode) { + let context = test_config(); + let mut node = + ZombieNode::new(context.polkadot_parachain_configuration.path.clone(), &context); + let genesis = context.genesis_configuration.genesis().unwrap().clone(); + node.init(genesis).unwrap(); - // Run spawn_process in a blocking thread - let node = tokio::task::spawn_blocking(move || { - node.spawn_process().unwrap(); - node - }) - .await - .expect("Failed to spawn process"); + // Run spawn_process in a blocking thread + let node = tokio::task::spawn_blocking(move || { + node.spawn_process().unwrap(); + node + }) + .await + .expect("Failed to spawn process"); - (context, node) - } + (context, node) + } - pub async fn shared_state() -> &'static (TestExecutionContext, Arc) { - static NODE: OnceCell<(TestExecutionContext, Arc)> = 
OnceCell::const_new(); + pub async fn shared_state() -> &'static (TestExecutionContext, Arc) { + static NODE: OnceCell<(TestExecutionContext, Arc)> = OnceCell::const_new(); - NODE.get_or_init(|| async { - let (context, node) = new_node().await; - (context, Arc::new(node)) - }) - .await - } + NODE.get_or_init(|| async { + let (context, node) = new_node().await; + (context, Arc::new(node)) + }) + .await + } - pub async fn shared_node() -> &'static Arc { - &shared_state().await.1 - } - } - use utils::{new_node, test_config}; + pub async fn shared_node() -> &'static Arc { + &shared_state().await.1 + } + } + use utils::{new_node, test_config}; - #[tokio::test] - async fn test_transfer_transaction_should_return_receipt() { - let (ctx, node) = new_node().await; + #[tokio::test] + async fn test_transfer_transaction_should_return_receipt() { + let (ctx, node) = new_node().await; - let provider = node.provider().await.expect("Failed to create provider"); - let account_address = ctx.wallet_configuration.wallet().default_signer().address(); - let transaction = TransactionRequest::default() - .to(account_address) - .value(U256::from(100_000_000_000_000u128)); + let provider = node.provider().await.expect("Failed to create provider"); + let account_address = ctx.wallet_configuration.wallet().default_signer().address(); + let transaction = TransactionRequest::default() + .to(account_address) + .value(U256::from(100_000_000_000_000u128)); - let receipt = provider.send_transaction(transaction).await; - let _ = receipt - .expect("Failed to send the transfer transaction") - .get_receipt() - .await - .expect("Failed to get the receipt for the transfer"); - } + let receipt = provider.send_transaction(transaction).await; + let _ = receipt + .expect("Failed to send the transfer transaction") + .get_receipt() + .await + .expect("Failed to get the receipt for the transfer"); + } - #[tokio::test] - async fn test_init_generates_chainspec_with_balances() { - let genesis_content = r#" + 
#[tokio::test] + async fn test_init_generates_chainspec_with_balances() { + let genesis_content = r#" { "alloc": { "90F8bf6A479f320ead074411a4B0e7944Ea8c9C1": { @@ -864,45 +839,42 @@ mod tests { } "#; - let context = test_config(); - let mut node = ZombieNode::new( - context.polkadot_parachain_configuration.path.clone(), - &context, - ); + let context = test_config(); + let mut node = + ZombieNode::new(context.polkadot_parachain_configuration.path.clone(), &context); - // Call `init()` - node.init(serde_json::from_str(genesis_content).unwrap()) - .expect("init failed"); + // Call `init()` + node.init(serde_json::from_str(genesis_content).unwrap()).expect("init failed"); - // Check that the patched chainspec file was generated - let final_chainspec_path = node.base_directory.join(ZombieNode::CHAIN_SPEC_JSON_FILE); - assert!(final_chainspec_path.exists(), "Chainspec file should exist"); + // Check that the patched chainspec file was generated + let final_chainspec_path = node.base_directory.join(ZombieNode::CHAIN_SPEC_JSON_FILE); + assert!(final_chainspec_path.exists(), "Chainspec file should exist"); - let contents = - std::fs::read_to_string(&final_chainspec_path).expect("Failed to read chainspec"); + let contents = + std::fs::read_to_string(&final_chainspec_path).expect("Failed to read chainspec"); - // Validate that the Polkadot addresses derived from the Ethereum addresses are in the file - let first_eth_addr = ZombieNode::eth_to_polkadot_address( - &"90F8bf6A479f320ead074411a4B0e7944Ea8c9C1".parse().unwrap(), - ); - let second_eth_addr = ZombieNode::eth_to_polkadot_address( - &"Ab8483F64d9C6d1EcF9b849Ae677dD3315835cb2".parse().unwrap(), - ); + // Validate that the Polkadot addresses derived from the Ethereum addresses are in the file + let first_eth_addr = ZombieNode::eth_to_polkadot_address( + &"90F8bf6A479f320ead074411a4B0e7944Ea8c9C1".parse().unwrap(), + ); + let second_eth_addr = ZombieNode::eth_to_polkadot_address( + 
&"Ab8483F64d9C6d1EcF9b849Ae677dD3315835cb2".parse().unwrap(), + ); - assert!( - contents.contains(&first_eth_addr), - "Chainspec should contain Polkadot address for first Ethereum account" - ); - assert!( - contents.contains(&second_eth_addr), - "Chainspec should contain Polkadot address for second Ethereum account" - ); - } + assert!( + contents.contains(&first_eth_addr), + "Chainspec should contain Polkadot address for first Ethereum account" + ); + assert!( + contents.contains(&second_eth_addr), + "Chainspec should contain Polkadot address for second Ethereum account" + ); + } - #[tokio::test] - async fn test_parse_genesis_alloc() { - // Create test genesis file - let genesis_json = r#" + #[tokio::test] + async fn test_parse_genesis_alloc() { + // Create test genesis file + let genesis_json = r#" { "alloc": { "0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1": { "balance": "1000000000000000000" }, @@ -912,236 +884,203 @@ mod tests { } "#; - let context = test_config(); - let node = ZombieNode::new( - context.polkadot_parachain_configuration.path.clone(), - &context, - ); + let context = test_config(); + let node = ZombieNode::new(context.polkadot_parachain_configuration.path.clone(), &context); - let result = node - .extract_balance_from_genesis_file(&serde_json::from_str(genesis_json).unwrap()) - .unwrap(); + let result = node + .extract_balance_from_genesis_file(&serde_json::from_str(genesis_json).unwrap()) + .unwrap(); - let result_map: std::collections::HashMap<_, _> = result.into_iter().collect(); + let result_map: std::collections::HashMap<_, _> = result.into_iter().collect(); - assert_eq!( - result_map.get("5FLneRcWAfk3X3tg6PuGyLNGAquPAZez5gpqvyuf3yUK8VaV"), - Some(&1_000_000_000_000_000_000u128) - ); + assert_eq!( + result_map.get("5FLneRcWAfk3X3tg6PuGyLNGAquPAZez5gpqvyuf3yUK8VaV"), + Some(&1_000_000_000_000_000_000u128) + ); - assert_eq!( - result_map.get("5C4hrfjw9DjXZTzV3MwzrrAr9P1MLDHajjSidz9bR544LEq1"), - Some(&1_000_000_000_000_000_000u128) - ); + 
assert_eq!( + result_map.get("5C4hrfjw9DjXZTzV3MwzrrAr9P1MLDHajjSidz9bR544LEq1"), + Some(&1_000_000_000_000_000_000u128) + ); - assert_eq!( - result_map.get("5HrN7fHLXWcFiXPwwtq2EkSGns9eMmoUQnbVKweNz3VVr6N4"), - Some(&123_456_789u128) - ); - } + assert_eq!( + result_map.get("5HrN7fHLXWcFiXPwwtq2EkSGns9eMmoUQnbVKweNz3VVr6N4"), + Some(&123_456_789u128) + ); + } - #[test] - fn print_eth_to_polkadot_mappings() { - let eth_addresses = vec![ - "0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1", - "0xffffffffffffffffffffffffffffffffffffffff", - "90F8bf6A479f320ead074411a4B0e7944Ea8c9C1", - ]; + #[test] + fn print_eth_to_polkadot_mappings() { + let eth_addresses = vec![ + "0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1", + "0xffffffffffffffffffffffffffffffffffffffff", + "90F8bf6A479f320ead074411a4B0e7944Ea8c9C1", + ]; - for eth_addr in eth_addresses { - let ss58 = ZombieNode::eth_to_polkadot_address(ð_addr.parse().unwrap()); + for eth_addr in eth_addresses { + let ss58 = ZombieNode::eth_to_polkadot_address(ð_addr.parse().unwrap()); - println!("Ethereum: {eth_addr} -> Polkadot SS58: {ss58}"); - } - } + println!("Ethereum: {eth_addr} -> Polkadot SS58: {ss58}"); + } + } - #[test] - fn test_eth_to_polkadot_address() { - let cases = vec![ - ( - "0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1", - "5FLneRcWAfk3X3tg6PuGyLNGAquPAZez5gpqvyuf3yUK8VaV", - ), - ( - "90F8bf6A479f320ead074411a4B0e7944Ea8c9C1", - "5FLneRcWAfk3X3tg6PuGyLNGAquPAZez5gpqvyuf3yUK8VaV", - ), - ( - "0x0000000000000000000000000000000000000000", - "5C4hrfjw9DjXZTzV3MwzrrAr9P1MLDHajjSidz9bR544LEq1", - ), - ( - "0xffffffffffffffffffffffffffffffffffffffff", - "5HrN7fHLXWcFiXPwwtq2EkSGns9eMmoUQnbVKweNz3VVr6N4", - ), - ]; + #[test] + fn test_eth_to_polkadot_address() { + let cases = vec![ + ( + "0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1", + "5FLneRcWAfk3X3tg6PuGyLNGAquPAZez5gpqvyuf3yUK8VaV", + ), + ( + "90F8bf6A479f320ead074411a4B0e7944Ea8c9C1", + "5FLneRcWAfk3X3tg6PuGyLNGAquPAZez5gpqvyuf3yUK8VaV", + ), + ( + 
"0x0000000000000000000000000000000000000000", + "5C4hrfjw9DjXZTzV3MwzrrAr9P1MLDHajjSidz9bR544LEq1", + ), + ( + "0xffffffffffffffffffffffffffffffffffffffff", + "5HrN7fHLXWcFiXPwwtq2EkSGns9eMmoUQnbVKweNz3VVr6N4", + ), + ]; - for (eth_addr, expected_ss58) in cases { - let result = ZombieNode::eth_to_polkadot_address(ð_addr.parse().unwrap()); - assert_eq!( - result, expected_ss58, - "Mismatch for Ethereum address {eth_addr}" - ); - } - } + for (eth_addr, expected_ss58) in cases { + let result = ZombieNode::eth_to_polkadot_address(ð_addr.parse().unwrap()); + assert_eq!(result, expected_ss58, "Mismatch for Ethereum address {eth_addr}"); + } + } - #[test] - fn eth_rpc_version_works() { - // Arrange - let context = test_config(); - let node = ZombieNode::new( - context.polkadot_parachain_configuration.path.clone(), - &context, - ); + #[test] + fn eth_rpc_version_works() { + // Arrange + let context = test_config(); + let node = ZombieNode::new(context.polkadot_parachain_configuration.path.clone(), &context); - // Act - let version = node.eth_rpc_version().unwrap(); + // Act + let version = node.eth_rpc_version().unwrap(); - // Assert - assert!( - version.starts_with("pallet-revive-eth-rpc"), - "Expected eth-rpc version string, got: {version}" - ); - } + // Assert + assert!( + version.starts_with("pallet-revive-eth-rpc"), + "Expected eth-rpc version string, got: {version}" + ); + } - #[test] - fn version_works() { - // Arrange - let context = test_config(); - let node = ZombieNode::new( - context.polkadot_parachain_configuration.path.clone(), - &context, - ); + #[test] + fn version_works() { + // Arrange + let context = test_config(); + let node = ZombieNode::new(context.polkadot_parachain_configuration.path.clone(), &context); - // Act - let version = node.version().unwrap(); + // Act + let version = node.version().unwrap(); - // Assert - assert!( - version.starts_with("polkadot-parachain"), - "Expected Polkadot-parachain version string, got: {version}" - ); - } + // 
Assert + assert!( + version.starts_with("polkadot-parachain"), + "Expected Polkadot-parachain version string, got: {version}" + ); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn get_chain_id_from_node_should_succeed() { - // Arrange - let node = shared_node().await; + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn get_chain_id_from_node_should_succeed() { + // Arrange + let node = shared_node().await; - // Act - let chain_id = node - .resolver() - .await - .expect("Failed to create resolver") - .chain_id() - .await - .expect("Failed to get chain id"); + // Act + let chain_id = node + .resolver() + .await + .expect("Failed to create resolver") + .chain_id() + .await + .expect("Failed to get chain id"); - // Assert - assert!(chain_id > 0, "Chain ID should be greater than zero"); - } + // Assert + assert!(chain_id > 0, "Chain ID should be greater than zero"); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_gas_limit_from_node() { - // Arrange - let node = shared_node().await; + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_gas_limit_from_node() { + // Arrange + let node = shared_node().await; - // Act - let gas_limit = node - .resolver() - .await - .unwrap() - .block_gas_limit(BlockNumberOrTag::Latest) - .await; + // Act + let gas_limit = + node.resolver().await.unwrap().block_gas_limit(BlockNumberOrTag::Latest).await; - // Assert - let _ = gas_limit.expect("Failed to get the gas limit"); - } + // Assert + let _ = gas_limit.expect("Failed to get the gas limit"); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_coinbase_from_node() { - // Arrange - let node = shared_node().await; + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_coinbase_from_node() { + // Arrange + let node = shared_node().await; - 
// Act - let coinbase = node - .resolver() - .await - .unwrap() - .block_coinbase(BlockNumberOrTag::Latest) - .await; + // Act + let coinbase = + node.resolver().await.unwrap().block_coinbase(BlockNumberOrTag::Latest).await; - // Assert - let _ = coinbase.expect("Failed to get the coinbase"); - } + // Assert + let _ = coinbase.expect("Failed to get the coinbase"); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_block_difficulty_from_node() { - // Arrange - let node = shared_node().await; + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_block_difficulty_from_node() { + // Arrange + let node = shared_node().await; - // Act - let block_difficulty = node - .resolver() - .await - .unwrap() - .block_difficulty(BlockNumberOrTag::Latest) - .await; + // Act + let block_difficulty = + node.resolver().await.unwrap().block_difficulty(BlockNumberOrTag::Latest).await; - // Assert - let _ = block_difficulty.expect("Failed to get the block difficulty"); - } + // Assert + let _ = block_difficulty.expect("Failed to get the block difficulty"); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_block_hash_from_node() { - // Arrange - let node = shared_node().await; + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_block_hash_from_node() { + // Arrange + let node = shared_node().await; - // Act - let block_hash = node - .resolver() - .await - .unwrap() - .block_hash(BlockNumberOrTag::Latest) - .await; + // Act + let block_hash = node.resolver().await.unwrap().block_hash(BlockNumberOrTag::Latest).await; - // Assert - let _ = block_hash.expect("Failed to get the block hash"); - } + // Assert + let _ = block_hash.expect("Failed to get the block hash"); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_block_timestamp_from_node() { - // Arrange - let node 
= shared_node().await; + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_block_timestamp_from_node() { + // Arrange + let node = shared_node().await; - // Act - let block_timestamp = node - .resolver() - .await - .unwrap() - .block_timestamp(BlockNumberOrTag::Latest) - .await; + // Act + let block_timestamp = + node.resolver().await.unwrap().block_timestamp(BlockNumberOrTag::Latest).await; - // Assert - let _ = block_timestamp.expect("Failed to get the block timestamp"); - } + // Assert + let _ = block_timestamp.expect("Failed to get the block timestamp"); + } - #[tokio::test] - #[ignore = "Ignored since they take a long time to run"] - async fn can_get_block_number_from_node() { - // Arrange - let node = shared_node().await; + #[tokio::test] + #[ignore = "Ignored since they take a long time to run"] + async fn can_get_block_number_from_node() { + // Arrange + let node = shared_node().await; - // Act - let block_number = node.resolver().await.unwrap().last_block_number().await; + // Act + let block_number = node.resolver().await.unwrap().last_block_number().await; - // Assert - let _ = block_number.expect("Failed to get the block number"); - } + // Assert + let _ = block_number.expect("Failed to get the block number"); + } } diff --git a/crates/node/src/provider_utils/concurrency_limiter.rs b/crates/node/src/provider_utils/concurrency_limiter.rs index 73878b9..7bc48e4 100644 --- a/crates/node/src/provider_utils/concurrency_limiter.rs +++ b/crates/node/src/provider_utils/concurrency_limiter.rs @@ -6,64 +6,56 @@ use tower::{Layer, Service}; #[derive(Clone, Debug)] pub struct ConcurrencyLimiterLayer { - semaphore: Arc, + semaphore: Arc, } impl ConcurrencyLimiterLayer { - pub fn new(permit_count: usize) -> Self { - Self { - semaphore: Arc::new(Semaphore::new(permit_count)), - } - } + pub fn new(permit_count: usize) -> Self { + Self { semaphore: Arc::new(Semaphore::new(permit_count)) } + } } impl Layer for 
ConcurrencyLimiterLayer { - type Service = ConcurrencyLimiterService; + type Service = ConcurrencyLimiterService; - fn layer(&self, inner: S) -> Self::Service { - ConcurrencyLimiterService { - service: inner, - semaphore: self.semaphore.clone(), - } - } + fn layer(&self, inner: S) -> Self::Service { + ConcurrencyLimiterService { service: inner, semaphore: self.semaphore.clone() } + } } #[derive(Clone)] pub struct ConcurrencyLimiterService { - service: S, - semaphore: Arc, + service: S, + semaphore: Arc, } impl Service for ConcurrencyLimiterService where - S: Service + Send, - S::Future: Send + 'static, + S: Service + Send, + S::Future: Send + 'static, { - type Response = S::Response; - type Error = S::Error; - type Future = BoxFuture<'static, Result>; + type Response = S::Response; + type Error = S::Error; + type Future = BoxFuture<'static, Result>; - fn poll_ready( - &mut self, - cx: &mut std::task::Context<'_>, - ) -> std::task::Poll> { - self.service.poll_ready(cx) - } + fn poll_ready( + &mut self, + cx: &mut std::task::Context<'_>, + ) -> std::task::Poll> { + self.service.poll_ready(cx) + } - fn call(&mut self, req: Request) -> Self::Future { - let semaphore = self.semaphore.clone(); - let future = self.service.call(req); + fn call(&mut self, req: Request) -> Self::Future { + let semaphore = self.semaphore.clone(); + let future = self.service.call(req); - Box::pin(async move { - let _permit = semaphore - .acquire() - .await - .expect("Semaphore has been closed"); - tracing::debug!( - available_permits = semaphore.available_permits(), - "Acquired Semaphore Permit" - ); - future.await - }) - } + Box::pin(async move { + let _permit = semaphore.acquire().await.expect("Semaphore has been closed"); + tracing::debug!( + available_permits = semaphore.available_permits(), + "Acquired Semaphore Permit" + ); + future.await + }) + } } diff --git a/crates/node/src/provider_utils/fallback_gas_provider.rs b/crates/node/src/provider_utils/fallback_gas_provider.rs index 
ff74ea2..cb42bcd 100644 --- a/crates/node/src/provider_utils/fallback_gas_provider.rs +++ b/crates/node/src/provider_utils/fallback_gas_provider.rs @@ -1,84 +1,79 @@ use alloy::{ - network::{Network, TransactionBuilder}, - providers::{ - Provider, SendableTx, - fillers::{GasFiller, TxFiller}, - }, - transports::TransportResult, + network::{Network, TransactionBuilder}, + providers::{ + Provider, SendableTx, + fillers::{GasFiller, TxFiller}, + }, + transports::TransportResult, }; #[derive(Clone, Debug)] pub struct FallbackGasFiller { - inner: GasFiller, - default_gas_limit: u64, - default_max_fee_per_gas: u128, - default_priority_fee: u128, + inner: GasFiller, + default_gas_limit: u64, + default_max_fee_per_gas: u128, + default_priority_fee: u128, } impl FallbackGasFiller { - pub fn new( - default_gas_limit: u64, - default_max_fee_per_gas: u128, - default_priority_fee: u128, - ) -> Self { - Self { - inner: GasFiller, - default_gas_limit, - default_max_fee_per_gas, - default_priority_fee, - } - } + pub fn new( + default_gas_limit: u64, + default_max_fee_per_gas: u128, + default_priority_fee: u128, + ) -> Self { + Self { inner: GasFiller, default_gas_limit, default_max_fee_per_gas, default_priority_fee } + } } impl Default for FallbackGasFiller { - fn default() -> Self { - FallbackGasFiller::new(25_000_000, 1_000_000_000, 1_000_000_000) - } + fn default() -> Self { + FallbackGasFiller::new(25_000_000, 1_000_000_000, 1_000_000_000) + } } impl TxFiller for FallbackGasFiller where - N: Network, + N: Network, { - type Fillable = Option<>::Fillable>; + type Fillable = Option<>::Fillable>; - fn status( - &self, - tx: &::TransactionRequest, - ) -> alloy::providers::fillers::FillerControlFlow { - >::status(&self.inner, tx) - } + fn status( + &self, + tx: &::TransactionRequest, + ) -> alloy::providers::fillers::FillerControlFlow { + >::status(&self.inner, tx) + } - fn fill_sync(&self, _: &mut alloy::providers::SendableTx) {} + fn fill_sync(&self, _: &mut 
alloy::providers::SendableTx) {} - async fn prepare>( - &self, - provider: &P, - tx: &::TransactionRequest, - ) -> TransportResult { - // Try to fetch GasFiller’s “fillable” (gas_price, base_fee, estimate_gas, …) - // If it errors (i.e. tx would revert under eth_estimateGas), swallow it. - match self.inner.prepare(provider, tx).await { - Ok(fill) => Ok(Some(fill)), - Err(_) => Ok(None), - } - } + async fn prepare>( + &self, + provider: &P, + tx: &::TransactionRequest, + ) -> TransportResult { + // Try to fetch GasFiller’s “fillable” (gas_price, base_fee, estimate_gas, …) + // If it errors (i.e. tx would revert under eth_estimateGas), swallow it. + match self.inner.prepare(provider, tx).await { + Ok(fill) => Ok(Some(fill)), + Err(_) => Ok(None), + } + } - async fn fill( - &self, - fillable: Self::Fillable, - mut tx: alloy::providers::SendableTx, - ) -> TransportResult> { - if let Some(fill) = fillable { - // our inner GasFiller succeeded — use it - self.inner.fill(fill, tx).await - } else { - if let Some(builder) = tx.as_mut_builder() { - builder.set_gas_limit(self.default_gas_limit); - builder.set_max_fee_per_gas(self.default_max_fee_per_gas); - builder.set_max_priority_fee_per_gas(self.default_priority_fee); - } - Ok(tx) - } - } + async fn fill( + &self, + fillable: Self::Fillable, + mut tx: alloy::providers::SendableTx, + ) -> TransportResult> { + if let Some(fill) = fillable { + // our inner GasFiller succeeded — use it + self.inner.fill(fill, tx).await + } else { + if let Some(builder) = tx.as_mut_builder() { + builder.set_gas_limit(self.default_gas_limit); + builder.set_max_fee_per_gas(self.default_max_fee_per_gas); + builder.set_max_priority_fee_per_gas(self.default_priority_fee); + } + Ok(tx) + } + } } diff --git a/crates/node/src/provider_utils/provider.rs b/crates/node/src/provider_utils/provider.rs index 6862537..8d70955 100644 --- a/crates/node/src/provider_utils/provider.rs +++ b/crates/node/src/provider_utils/provider.rs @@ -1,12 +1,12 @@ use 
std::{ops::ControlFlow, sync::LazyLock, time::Duration}; use alloy::{ - network::{Ethereum, Network, NetworkWallet, TransactionBuilder4844}, - providers::{ - Identity, PendingTransactionBuilder, Provider, ProviderBuilder, RootProvider, - fillers::{ChainIdFiller, FillProvider, JoinFill, NonceFiller, TxFiller, WalletFiller}, - }, - rpc::client::ClientBuilder, + network::{Ethereum, Network, NetworkWallet, TransactionBuilder4844}, + providers::{ + Identity, PendingTransactionBuilder, Provider, ProviderBuilder, RootProvider, + fillers::{ChainIdFiller, FillProvider, JoinFill, NonceFiller, TxFiller, WalletFiller}, + }, + rpc::client::ClientBuilder, }; use anyhow::{Context, Result}; use revive_dt_common::futures::{PollingWaitBehavior, poll}; @@ -15,114 +15,109 @@ use tracing::debug; use crate::provider_utils::{ConcurrencyLimiterLayer, FallbackGasFiller}; pub type ConcreteProvider = FillProvider< - JoinFill< - JoinFill, ChainIdFiller>, NonceFiller>, - WalletFiller, - >, - RootProvider, - N, + JoinFill< + JoinFill, ChainIdFiller>, NonceFiller>, + WalletFiller, + >, + RootProvider, + N, >; pub async fn construct_concurrency_limited_provider( - rpc_url: &str, - fallback_gas_filler: FallbackGasFiller, - chain_id_filler: ChainIdFiller, - nonce_filler: NonceFiller, - wallet: W, + rpc_url: &str, + fallback_gas_filler: FallbackGasFiller, + chain_id_filler: ChainIdFiller, + nonce_filler: NonceFiller, + wallet: W, ) -> Result> where - N: Network, - W: NetworkWallet, - Identity: TxFiller, - FallbackGasFiller: TxFiller, - ChainIdFiller: TxFiller, - NonceFiller: TxFiller, - WalletFiller: TxFiller, + N: Network, + W: NetworkWallet, + Identity: TxFiller, + FallbackGasFiller: TxFiller, + ChainIdFiller: TxFiller, + NonceFiller: TxFiller, + WalletFiller: TxFiller, { - // This is a global limit on the RPC concurrency that applies to all of the providers created - // by the framework. 
With this limit, it means that we can have a maximum of N concurrent - // requests at any point of time and no more than that. This is done in an effort to stabilize - // the framework from some of the interment issues that we've been seeing related to RPC calls. - static GLOBAL_CONCURRENCY_LIMITER_LAYER: LazyLock = - LazyLock::new(|| ConcurrencyLimiterLayer::new(10)); + // This is a global limit on the RPC concurrency that applies to all of the providers created + // by the framework. With this limit, it means that we can have a maximum of N concurrent + // requests at any point of time and no more than that. This is done in an effort to stabilize + // the framework from some of the interment issues that we've been seeing related to RPC calls. + static GLOBAL_CONCURRENCY_LIMITER_LAYER: LazyLock = + LazyLock::new(|| ConcurrencyLimiterLayer::new(10)); - let client = ClientBuilder::default() - .layer(GLOBAL_CONCURRENCY_LIMITER_LAYER.clone()) - .connect(rpc_url) - .await - .context("Failed to construct the RPC client")?; + let client = ClientBuilder::default() + .layer(GLOBAL_CONCURRENCY_LIMITER_LAYER.clone()) + .connect(rpc_url) + .await + .context("Failed to construct the RPC client")?; - let provider = ProviderBuilder::new() - .disable_recommended_fillers() - .network::() - .filler(fallback_gas_filler) - .filler(chain_id_filler) - .filler(nonce_filler) - .wallet(wallet) - .connect_client(client); + let provider = ProviderBuilder::new() + .disable_recommended_fillers() + .network::() + .filler(fallback_gas_filler) + .filler(chain_id_filler) + .filler(nonce_filler) + .wallet(wallet) + .connect_client(client); - Ok(provider) + Ok(provider) } pub async fn execute_transaction( - provider: ConcreteProvider, - transaction: N::TransactionRequest, + provider: ConcreteProvider, + transaction: N::TransactionRequest, ) -> Result where - N: Network< - TransactionRequest: TransactionBuilder4844, - TxEnvelope = ::TxEnvelope, - >, - W: NetworkWallet, - Identity: TxFiller, - 
FallbackGasFiller: TxFiller, - ChainIdFiller: TxFiller, - NonceFiller: TxFiller, - WalletFiller: TxFiller, + N: Network< + TransactionRequest: TransactionBuilder4844, + TxEnvelope = ::TxEnvelope, + >, + W: NetworkWallet, + Identity: TxFiller, + FallbackGasFiller: TxFiller, + ChainIdFiller: TxFiller, + NonceFiller: TxFiller, + WalletFiller: TxFiller, { - let sendable_transaction = provider - .fill(transaction) - .await - .context("Failed to fill transaction")?; + let sendable_transaction = + provider.fill(transaction).await.context("Failed to fill transaction")?; - let transaction_envelope = sendable_transaction - .try_into_envelope() - .context("Failed to convert transaction into an envelope")?; - let tx_hash = *transaction_envelope.tx_hash(); + let transaction_envelope = sendable_transaction + .try_into_envelope() + .context("Failed to convert transaction into an envelope")?; + let tx_hash = *transaction_envelope.tx_hash(); - let mut pending_transaction = match provider.send_tx_envelope(transaction_envelope).await { - Ok(pending_transaction) => pending_transaction, - Err(error) => { - let error_string = error.to_string(); + let mut pending_transaction = match provider.send_tx_envelope(transaction_envelope).await { + Ok(pending_transaction) => pending_transaction, + Err(error) => { + let error_string = error.to_string(); - if error_string.contains("Transaction Already Imported") { - PendingTransactionBuilder::::new(provider.root().clone(), tx_hash) - } else { - return Err(error).context(format!("Failed to submit transaction {tx_hash}")); - } - } - }; - debug!(%tx_hash, "Submitted Transaction"); + if error_string.contains("Transaction Already Imported") { + PendingTransactionBuilder::::new(provider.root().clone(), tx_hash) + } else { + return Err(error).context(format!("Failed to submit transaction {tx_hash}")); + } + }, + }; + debug!(%tx_hash, "Submitted Transaction"); - pending_transaction.set_timeout(Some(Duration::from_secs(120))); - let tx_hash = 
pending_transaction.watch().await.context(format!( - "Transaction inclusion watching timeout for {tx_hash}" - ))?; + pending_transaction.set_timeout(Some(Duration::from_secs(120))); + let tx_hash = pending_transaction + .watch() + .await + .context(format!("Transaction inclusion watching timeout for {tx_hash}"))?; - poll( - Duration::from_secs(60), - PollingWaitBehavior::Constant(Duration::from_secs(3)), - || { - let provider = provider.clone(); + poll(Duration::from_secs(60), PollingWaitBehavior::Constant(Duration::from_secs(3)), || { + let provider = provider.clone(); - async move { - match provider.get_transaction_receipt(tx_hash).await { - Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)), - _ => Ok(ControlFlow::Continue(())), - } - } - }, - ) - .await - .context(format!("Polling for receipt failed for {tx_hash}")) + async move { + match provider.get_transaction_receipt(tx_hash).await { + Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)), + _ => Ok(ControlFlow::Continue(())), + } + } + }) + .await + .context(format!("Polling for receipt failed for {tx_hash}")) } diff --git a/crates/report/src/aggregator.rs b/crates/report/src/aggregator.rs index bb235eb..25bc2c1 100644 --- a/crates/report/src/aggregator.rs +++ b/crates/report/src/aggregator.rs @@ -2,10 +2,10 @@ //! reporters and combines them into a single unified report. 
use std::{ - collections::{BTreeMap, BTreeSet, HashMap, HashSet}, - fs::OpenOptions, - path::PathBuf, - time::{SystemTime, UNIX_EPOCH}, + collections::{BTreeMap, BTreeSet, HashMap, HashSet}, + fs::OpenOptions, + path::PathBuf, + time::{SystemTime, UNIX_EPOCH}, }; use alloy::primitives::Address; @@ -19,434 +19,385 @@ use semver::Version; use serde::Serialize; use serde_with::{DisplayFromStr, serde_as}; use tokio::sync::{ - broadcast::{Sender, channel}, - mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel}, + broadcast::{Sender, channel}, + mpsc::{UnboundedReceiver, UnboundedSender, unbounded_channel}, }; use tracing::debug; use crate::*; pub struct ReportAggregator { - /* Internal Report State */ - report: Report, - remaining_cases: HashMap>>, - /* Channels */ - runner_tx: Option>, - runner_rx: UnboundedReceiver, - listener_tx: Sender, + /* Internal Report State */ + report: Report, + remaining_cases: HashMap>>, + /* Channels */ + runner_tx: Option>, + runner_rx: UnboundedReceiver, + listener_tx: Sender, } impl ReportAggregator { - pub fn new(context: Context) -> Self { - let (runner_tx, runner_rx) = unbounded_channel::(); - let (listener_tx, _) = channel::(1024); - Self { - report: Report::new(context), - remaining_cases: Default::default(), - runner_tx: Some(runner_tx), - runner_rx, - listener_tx, - } - } + pub fn new(context: Context) -> Self { + let (runner_tx, runner_rx) = unbounded_channel::(); + let (listener_tx, _) = channel::(1024); + Self { + report: Report::new(context), + remaining_cases: Default::default(), + runner_tx: Some(runner_tx), + runner_rx, + listener_tx, + } + } - pub fn into_task(mut self) -> (Reporter, impl Future>) { - let reporter = self - .runner_tx - .take() - .map(Into::into) - .expect("Can't fail since this can only be called once"); - (reporter, async move { self.aggregate().await }) - } + pub fn into_task(mut self) -> (Reporter, impl Future>) { + let reporter = self + .runner_tx + .take() + .map(Into::into) + .expect("Can't 
fail since this can only be called once"); + (reporter, async move { self.aggregate().await }) + } - async fn aggregate(mut self) -> Result<()> { - debug!("Starting to aggregate report"); + async fn aggregate(mut self) -> Result<()> { + debug!("Starting to aggregate report"); - while let Some(event) = self.runner_rx.recv().await { - debug!(?event, "Received Event"); - match event { - RunnerEvent::SubscribeToEvents(event) => { - self.handle_subscribe_to_events_event(*event); - } - RunnerEvent::CorpusFileDiscovery(event) => { - self.handle_corpus_file_discovered_event(*event) - } - RunnerEvent::MetadataFileDiscovery(event) => { - self.handle_metadata_file_discovery_event(*event); - } - RunnerEvent::TestCaseDiscovery(event) => { - self.handle_test_case_discovery(*event); - } - RunnerEvent::TestSucceeded(event) => { - self.handle_test_succeeded_event(*event); - } - RunnerEvent::TestFailed(event) => { - self.handle_test_failed_event(*event); - } - RunnerEvent::TestIgnored(event) => { - self.handle_test_ignored_event(*event); - } - RunnerEvent::NodeAssigned(event) => { - self.handle_node_assigned_event(*event); - } - RunnerEvent::PreLinkContractsCompilationSucceeded(event) => { - self.handle_pre_link_contracts_compilation_succeeded_event(*event) - } - RunnerEvent::PostLinkContractsCompilationSucceeded(event) => { - self.handle_post_link_contracts_compilation_succeeded_event(*event) - } - RunnerEvent::PreLinkContractsCompilationFailed(event) => { - self.handle_pre_link_contracts_compilation_failed_event(*event) - } - RunnerEvent::PostLinkContractsCompilationFailed(event) => { - self.handle_post_link_contracts_compilation_failed_event(*event) - } - RunnerEvent::LibrariesDeployed(event) => { - self.handle_libraries_deployed_event(*event); - } - RunnerEvent::ContractDeployed(event) => { - self.handle_contract_deployed_event(*event); - } - RunnerEvent::Completion(event) => { - self.handle_completion(*event); - break; - } - } - } - debug!("Report aggregation completed"); + 
while let Some(event) = self.runner_rx.recv().await { + debug!(?event, "Received Event"); + match event { + RunnerEvent::SubscribeToEvents(event) => { + self.handle_subscribe_to_events_event(*event); + }, + RunnerEvent::CorpusFileDiscovery(event) => + self.handle_corpus_file_discovered_event(*event), + RunnerEvent::MetadataFileDiscovery(event) => { + self.handle_metadata_file_discovery_event(*event); + }, + RunnerEvent::TestCaseDiscovery(event) => { + self.handle_test_case_discovery(*event); + }, + RunnerEvent::TestSucceeded(event) => { + self.handle_test_succeeded_event(*event); + }, + RunnerEvent::TestFailed(event) => { + self.handle_test_failed_event(*event); + }, + RunnerEvent::TestIgnored(event) => { + self.handle_test_ignored_event(*event); + }, + RunnerEvent::NodeAssigned(event) => { + self.handle_node_assigned_event(*event); + }, + RunnerEvent::PreLinkContractsCompilationSucceeded(event) => + self.handle_pre_link_contracts_compilation_succeeded_event(*event), + RunnerEvent::PostLinkContractsCompilationSucceeded(event) => + self.handle_post_link_contracts_compilation_succeeded_event(*event), + RunnerEvent::PreLinkContractsCompilationFailed(event) => + self.handle_pre_link_contracts_compilation_failed_event(*event), + RunnerEvent::PostLinkContractsCompilationFailed(event) => + self.handle_post_link_contracts_compilation_failed_event(*event), + RunnerEvent::LibrariesDeployed(event) => { + self.handle_libraries_deployed_event(*event); + }, + RunnerEvent::ContractDeployed(event) => { + self.handle_contract_deployed_event(*event); + }, + RunnerEvent::Completion(event) => { + self.handle_completion(*event); + break; + }, + } + } + debug!("Report aggregation completed"); - let file_name = { - let current_timestamp = SystemTime::now() - .duration_since(UNIX_EPOCH) - .context("System clock is before UNIX_EPOCH; cannot compute report timestamp")? 
- .as_secs(); - let mut file_name = current_timestamp.to_string(); - file_name.push_str(".json"); - file_name - }; - let file_path = self - .report - .context - .working_directory_configuration() - .as_path() - .join(file_name); - let file = OpenOptions::new() - .create(true) - .write(true) - .truncate(true) - .read(false) - .open(&file_path) - .with_context(|| { - format!( - "Failed to open report file for writing: {}", - file_path.display() - ) - })?; - serde_json::to_writer_pretty(&file, &self.report).with_context(|| { - format!("Failed to serialize report JSON to {}", file_path.display()) - })?; + let file_name = { + let current_timestamp = SystemTime::now() + .duration_since(UNIX_EPOCH) + .context("System clock is before UNIX_EPOCH; cannot compute report timestamp")? + .as_secs(); + let mut file_name = current_timestamp.to_string(); + file_name.push_str(".json"); + file_name + }; + let file_path = + self.report.context.working_directory_configuration().as_path().join(file_name); + let file = OpenOptions::new() + .create(true) + .write(true) + .truncate(true) + .read(false) + .open(&file_path) + .with_context(|| { + format!("Failed to open report file for writing: {}", file_path.display()) + })?; + serde_json::to_writer_pretty(&file, &self.report).with_context(|| { + format!("Failed to serialize report JSON to {}", file_path.display()) + })?; - Ok(()) - } + Ok(()) + } - fn handle_subscribe_to_events_event(&self, event: SubscribeToEventsEvent) { - let _ = event.tx.send(self.listener_tx.subscribe()); - } + fn handle_subscribe_to_events_event(&self, event: SubscribeToEventsEvent) { + let _ = event.tx.send(self.listener_tx.subscribe()); + } - fn handle_corpus_file_discovered_event(&mut self, event: CorpusFileDiscoveryEvent) { - self.report.corpora.push(event.corpus); - } + fn handle_corpus_file_discovered_event(&mut self, event: CorpusFileDiscoveryEvent) { + self.report.corpora.push(event.corpus); + } - fn handle_metadata_file_discovery_event(&mut self, event: 
MetadataFileDiscoveryEvent) { - self.report.metadata_files.insert(event.path.clone()); - } + fn handle_metadata_file_discovery_event(&mut self, event: MetadataFileDiscoveryEvent) { + self.report.metadata_files.insert(event.path.clone()); + } - fn handle_test_case_discovery(&mut self, event: TestCaseDiscoveryEvent) { - self.remaining_cases - .entry(event.test_specifier.metadata_file_path.clone().into()) - .or_default() - .entry(event.test_specifier.solc_mode.clone()) - .or_default() - .insert(event.test_specifier.case_idx); - } + fn handle_test_case_discovery(&mut self, event: TestCaseDiscoveryEvent) { + self.remaining_cases + .entry(event.test_specifier.metadata_file_path.clone().into()) + .or_default() + .entry(event.test_specifier.solc_mode.clone()) + .or_default() + .insert(event.test_specifier.case_idx); + } - fn handle_test_succeeded_event(&mut self, event: TestSucceededEvent) { - // Remove this from the set of cases we're tracking since it has completed. - self.remaining_cases - .entry(event.test_specifier.metadata_file_path.clone().into()) - .or_default() - .entry(event.test_specifier.solc_mode.clone()) - .or_default() - .remove(&event.test_specifier.case_idx); + fn handle_test_succeeded_event(&mut self, event: TestSucceededEvent) { + // Remove this from the set of cases we're tracking since it has completed. + self.remaining_cases + .entry(event.test_specifier.metadata_file_path.clone().into()) + .or_default() + .entry(event.test_specifier.solc_mode.clone()) + .or_default() + .remove(&event.test_specifier.case_idx); - // Add information on the fact that the case was ignored to the report. - let test_case_report = self.test_case_report(&event.test_specifier); - test_case_report.status = Some(TestCaseStatus::Succeeded { - steps_executed: event.steps_executed, - }); - self.handle_post_test_case_status_update(&event.test_specifier); - } + // Add information on the fact that the case was ignored to the report. 
+ let test_case_report = self.test_case_report(&event.test_specifier); + test_case_report.status = + Some(TestCaseStatus::Succeeded { steps_executed: event.steps_executed }); + self.handle_post_test_case_status_update(&event.test_specifier); + } - fn handle_test_failed_event(&mut self, event: TestFailedEvent) { - // Remove this from the set of cases we're tracking since it has completed. - self.remaining_cases - .entry(event.test_specifier.metadata_file_path.clone().into()) - .or_default() - .entry(event.test_specifier.solc_mode.clone()) - .or_default() - .remove(&event.test_specifier.case_idx); + fn handle_test_failed_event(&mut self, event: TestFailedEvent) { + // Remove this from the set of cases we're tracking since it has completed. + self.remaining_cases + .entry(event.test_specifier.metadata_file_path.clone().into()) + .or_default() + .entry(event.test_specifier.solc_mode.clone()) + .or_default() + .remove(&event.test_specifier.case_idx); - // Add information on the fact that the case was ignored to the report. - let test_case_report = self.test_case_report(&event.test_specifier); - test_case_report.status = Some(TestCaseStatus::Failed { - reason: event.reason, - }); - self.handle_post_test_case_status_update(&event.test_specifier); - } + // Add information on the fact that the case was ignored to the report. + let test_case_report = self.test_case_report(&event.test_specifier); + test_case_report.status = Some(TestCaseStatus::Failed { reason: event.reason }); + self.handle_post_test_case_status_update(&event.test_specifier); + } - fn handle_test_ignored_event(&mut self, event: TestIgnoredEvent) { - // Remove this from the set of cases we're tracking since it has completed. 
- self.remaining_cases - .entry(event.test_specifier.metadata_file_path.clone().into()) - .or_default() - .entry(event.test_specifier.solc_mode.clone()) - .or_default() - .remove(&event.test_specifier.case_idx); + fn handle_test_ignored_event(&mut self, event: TestIgnoredEvent) { + // Remove this from the set of cases we're tracking since it has completed. + self.remaining_cases + .entry(event.test_specifier.metadata_file_path.clone().into()) + .or_default() + .entry(event.test_specifier.solc_mode.clone()) + .or_default() + .remove(&event.test_specifier.case_idx); - // Add information on the fact that the case was ignored to the report. - let test_case_report = self.test_case_report(&event.test_specifier); - test_case_report.status = Some(TestCaseStatus::Ignored { - reason: event.reason, - additional_fields: event.additional_fields, - }); - self.handle_post_test_case_status_update(&event.test_specifier); - } + // Add information on the fact that the case was ignored to the report. + let test_case_report = self.test_case_report(&event.test_specifier); + test_case_report.status = Some(TestCaseStatus::Ignored { + reason: event.reason, + additional_fields: event.additional_fields, + }); + self.handle_post_test_case_status_update(&event.test_specifier); + } - fn handle_post_test_case_status_update(&mut self, specifier: &TestSpecifier) { - let remaining_cases = self - .remaining_cases - .entry(specifier.metadata_file_path.clone().into()) - .or_default() - .entry(specifier.solc_mode.clone()) - .or_default(); - if !remaining_cases.is_empty() { - return; - } + fn handle_post_test_case_status_update(&mut self, specifier: &TestSpecifier) { + let remaining_cases = self + .remaining_cases + .entry(specifier.metadata_file_path.clone().into()) + .or_default() + .entry(specifier.solc_mode.clone()) + .or_default(); + if !remaining_cases.is_empty() { + return; + } - let case_status = self - .report - .test_case_information - .entry(specifier.metadata_file_path.clone().into()) - 
.or_default() - .entry(specifier.solc_mode.clone()) - .or_default() - .iter() - .map(|(case_idx, case_report)| { - ( - *case_idx, - case_report.status.clone().expect("Can't be uninitialized"), - ) - }) - .collect::>(); - let event = ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted { - metadata_file_path: specifier.metadata_file_path.clone().into(), - mode: specifier.solc_mode.clone(), - case_status, - }; + let case_status = self + .report + .test_case_information + .entry(specifier.metadata_file_path.clone().into()) + .or_default() + .entry(specifier.solc_mode.clone()) + .or_default() + .iter() + .map(|(case_idx, case_report)| { + (*case_idx, case_report.status.clone().expect("Can't be uninitialized")) + }) + .collect::>(); + let event = ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted { + metadata_file_path: specifier.metadata_file_path.clone().into(), + mode: specifier.solc_mode.clone(), + case_status, + }; - // According to the documentation on send, the sending fails if there are no more receiver - // handles. Therefore, this isn't an error that we want to bubble up or anything. If we fail - // to send then we ignore the error. - let _ = self.listener_tx.send(event); - } + // According to the documentation on send, the sending fails if there are no more receiver + // handles. Therefore, this isn't an error that we want to bubble up or anything. If we fail + // to send then we ignore the error. 
+ let _ = self.listener_tx.send(event); + } - fn handle_node_assigned_event(&mut self, event: NodeAssignedEvent) { - let execution_information = self.execution_information(&ExecutionSpecifier { - test_specifier: event.test_specifier, - node_id: event.id, - platform_identifier: event.platform_identifier, - }); - execution_information.node = Some(TestCaseNodeInformation { - id: event.id, - platform_identifier: event.platform_identifier, - connection_string: event.connection_string, - }); - } + fn handle_node_assigned_event(&mut self, event: NodeAssignedEvent) { + let execution_information = self.execution_information(&ExecutionSpecifier { + test_specifier: event.test_specifier, + node_id: event.id, + platform_identifier: event.platform_identifier, + }); + execution_information.node = Some(TestCaseNodeInformation { + id: event.id, + platform_identifier: event.platform_identifier, + connection_string: event.connection_string, + }); + } - fn handle_pre_link_contracts_compilation_succeeded_event( - &mut self, - event: PreLinkContractsCompilationSucceededEvent, - ) { - let include_input = self - .report - .context - .report_configuration() - .include_compiler_input; - let include_output = self - .report - .context - .report_configuration() - .include_compiler_output; + fn handle_pre_link_contracts_compilation_succeeded_event( + &mut self, + event: PreLinkContractsCompilationSucceededEvent, + ) { + let include_input = self.report.context.report_configuration().include_compiler_input; + let include_output = self.report.context.report_configuration().include_compiler_output; - let execution_information = self.execution_information(&event.execution_specifier); + let execution_information = self.execution_information(&event.execution_specifier); - let compiler_input = if include_input { - event.compiler_input - } else { - None - }; - let compiler_output = if include_output { - Some(event.compiler_output) - } else { - None - }; + let compiler_input = if include_input { 
event.compiler_input } else { None }; + let compiler_output = if include_output { Some(event.compiler_output) } else { None }; - execution_information.pre_link_compilation_status = Some(CompilationStatus::Success { - is_cached: event.is_cached, - compiler_version: event.compiler_version, - compiler_path: event.compiler_path, - compiler_input, - compiler_output, - }); - } + execution_information.pre_link_compilation_status = Some(CompilationStatus::Success { + is_cached: event.is_cached, + compiler_version: event.compiler_version, + compiler_path: event.compiler_path, + compiler_input, + compiler_output, + }); + } - fn handle_post_link_contracts_compilation_succeeded_event( - &mut self, - event: PostLinkContractsCompilationSucceededEvent, - ) { - let include_input = self - .report - .context - .report_configuration() - .include_compiler_input; - let include_output = self - .report - .context - .report_configuration() - .include_compiler_output; + fn handle_post_link_contracts_compilation_succeeded_event( + &mut self, + event: PostLinkContractsCompilationSucceededEvent, + ) { + let include_input = self.report.context.report_configuration().include_compiler_input; + let include_output = self.report.context.report_configuration().include_compiler_output; - let execution_information = self.execution_information(&event.execution_specifier); + let execution_information = self.execution_information(&event.execution_specifier); - let compiler_input = if include_input { - event.compiler_input - } else { - None - }; - let compiler_output = if include_output { - Some(event.compiler_output) - } else { - None - }; + let compiler_input = if include_input { event.compiler_input } else { None }; + let compiler_output = if include_output { Some(event.compiler_output) } else { None }; - execution_information.post_link_compilation_status = Some(CompilationStatus::Success { - is_cached: event.is_cached, - compiler_version: event.compiler_version, - compiler_path: event.compiler_path, - 
compiler_input, - compiler_output, - }); - } + execution_information.post_link_compilation_status = Some(CompilationStatus::Success { + is_cached: event.is_cached, + compiler_version: event.compiler_version, + compiler_path: event.compiler_path, + compiler_input, + compiler_output, + }); + } - fn handle_pre_link_contracts_compilation_failed_event( - &mut self, - event: PreLinkContractsCompilationFailedEvent, - ) { - let execution_information = self.execution_information(&event.execution_specifier); + fn handle_pre_link_contracts_compilation_failed_event( + &mut self, + event: PreLinkContractsCompilationFailedEvent, + ) { + let execution_information = self.execution_information(&event.execution_specifier); - execution_information.pre_link_compilation_status = Some(CompilationStatus::Failure { - reason: event.reason, - compiler_version: event.compiler_version, - compiler_path: event.compiler_path, - compiler_input: event.compiler_input, - }); - } + execution_information.pre_link_compilation_status = Some(CompilationStatus::Failure { + reason: event.reason, + compiler_version: event.compiler_version, + compiler_path: event.compiler_path, + compiler_input: event.compiler_input, + }); + } - fn handle_post_link_contracts_compilation_failed_event( - &mut self, - event: PostLinkContractsCompilationFailedEvent, - ) { - let execution_information = self.execution_information(&event.execution_specifier); + fn handle_post_link_contracts_compilation_failed_event( + &mut self, + event: PostLinkContractsCompilationFailedEvent, + ) { + let execution_information = self.execution_information(&event.execution_specifier); - execution_information.post_link_compilation_status = Some(CompilationStatus::Failure { - reason: event.reason, - compiler_version: event.compiler_version, - compiler_path: event.compiler_path, - compiler_input: event.compiler_input, - }); - } + execution_information.post_link_compilation_status = Some(CompilationStatus::Failure { + reason: event.reason, + 
compiler_version: event.compiler_version, + compiler_path: event.compiler_path, + compiler_input: event.compiler_input, + }); + } - fn handle_libraries_deployed_event(&mut self, event: LibrariesDeployedEvent) { - self.execution_information(&event.execution_specifier) - .deployed_libraries = Some(event.libraries); - } + fn handle_libraries_deployed_event(&mut self, event: LibrariesDeployedEvent) { + self.execution_information(&event.execution_specifier).deployed_libraries = + Some(event.libraries); + } - fn handle_contract_deployed_event(&mut self, event: ContractDeployedEvent) { - self.execution_information(&event.execution_specifier) - .deployed_contracts - .get_or_insert_default() - .insert(event.contract_instance, event.address); - } + fn handle_contract_deployed_event(&mut self, event: ContractDeployedEvent) { + self.execution_information(&event.execution_specifier) + .deployed_contracts + .get_or_insert_default() + .insert(event.contract_instance, event.address); + } - fn handle_completion(&mut self, _: CompletionEvent) { - self.runner_rx.close(); - } + fn handle_completion(&mut self, _: CompletionEvent) { + self.runner_rx.close(); + } - fn test_case_report(&mut self, specifier: &TestSpecifier) -> &mut TestCaseReport { - self.report - .test_case_information - .entry(specifier.metadata_file_path.clone().into()) - .or_default() - .entry(specifier.solc_mode.clone()) - .or_default() - .entry(specifier.case_idx) - .or_default() - } + fn test_case_report(&mut self, specifier: &TestSpecifier) -> &mut TestCaseReport { + self.report + .test_case_information + .entry(specifier.metadata_file_path.clone().into()) + .or_default() + .entry(specifier.solc_mode.clone()) + .or_default() + .entry(specifier.case_idx) + .or_default() + } - fn execution_information( - &mut self, - specifier: &ExecutionSpecifier, - ) -> &mut ExecutionInformation { - let test_case_report = self.test_case_report(&specifier.test_specifier); - test_case_report - .platform_execution - 
.entry(specifier.platform_identifier) - .or_default() - .get_or_insert_default() - } + fn execution_information( + &mut self, + specifier: &ExecutionSpecifier, + ) -> &mut ExecutionInformation { + let test_case_report = self.test_case_report(&specifier.test_specifier); + test_case_report + .platform_execution + .entry(specifier.platform_identifier) + .or_default() + .get_or_insert_default() + } } #[serde_as] #[derive(Clone, Debug, Serialize)] pub struct Report { - /// The context that the tool was started up with. - pub context: Context, - /// The list of corpus files that the tool found. - pub corpora: Vec, - /// The list of metadata files that were found by the tool. - pub metadata_files: BTreeSet, - /// Information relating to each test case. - #[serde_as(as = "BTreeMap<_, HashMap>>")] - pub test_case_information: - BTreeMap>>, + /// The context that the tool was started up with. + pub context: Context, + /// The list of corpus files that the tool found. + pub corpora: Vec, + /// The list of metadata files that were found by the tool. + pub metadata_files: BTreeSet, + /// Information relating to each test case. + #[serde_as(as = "BTreeMap<_, HashMap>>")] + pub test_case_information: + BTreeMap>>, } impl Report { - pub fn new(context: Context) -> Self { - Self { - context, - corpora: Default::default(), - metadata_files: Default::default(), - test_case_information: Default::default(), - } - } + pub fn new(context: Context) -> Self { + Self { + context, + corpora: Default::default(), + metadata_files: Default::default(), + test_case_information: Default::default(), + } + } } #[derive(Clone, Debug, Serialize, Default)] pub struct TestCaseReport { - /// Information on the status of the test case and whether it succeeded, failed, or was ignored. - #[serde(skip_serializing_if = "Option::is_none")] - pub status: Option, - /// Information related to the execution on one of the platforms. 
- pub platform_execution: BTreeMap>, + /// Information on the status of the test case and whether it succeeded, failed, or was + /// ignored. + #[serde(skip_serializing_if = "Option::is_none")] + pub status: Option, + /// Information related to the execution on one of the platforms. + pub platform_execution: BTreeMap>, } /// Information related to the status of the test. Could be that the test succeeded, failed, or that @@ -454,93 +405,93 @@ pub struct TestCaseReport { #[derive(Clone, Debug, Serialize)] #[serde(tag = "status")] pub enum TestCaseStatus { - /// The test case succeeded. - Succeeded { - /// The number of steps of the case that were executed. - steps_executed: usize, - }, - /// The test case failed. - Failed { - /// The reason for the failure of the test case. - reason: String, - }, - /// The test case was ignored. This variant carries information related to why it was ignored. - Ignored { - /// The reason behind the test case being ignored. - reason: String, - /// Additional fields that describe more information on why the test case is ignored. - #[serde(flatten)] - additional_fields: IndexMap, - }, + /// The test case succeeded. + Succeeded { + /// The number of steps of the case that were executed. + steps_executed: usize, + }, + /// The test case failed. + Failed { + /// The reason for the failure of the test case. + reason: String, + }, + /// The test case was ignored. This variant carries information related to why it was ignored. + Ignored { + /// The reason behind the test case being ignored. + reason: String, + /// Additional fields that describe more information on why the test case is ignored. + #[serde(flatten)] + additional_fields: IndexMap, + }, } /// Information related to the platform node that's being used to execute the step. #[derive(Clone, Debug, Serialize)] pub struct TestCaseNodeInformation { - /// The ID of the node that this case is being executed on. - pub id: usize, - /// The platform of the node. 
- pub platform_identifier: PlatformIdentifier, - /// The connection string of the node. - pub connection_string: String, + /// The ID of the node that this case is being executed on. + pub id: usize, + /// The platform of the node. + pub platform_identifier: PlatformIdentifier, + /// The connection string of the node. + pub connection_string: String, } /// Execution information tied to the platform. #[derive(Clone, Debug, Default, Serialize)] pub struct ExecutionInformation { - /// Information related to the node assigned to this test case. - #[serde(skip_serializing_if = "Option::is_none")] - pub node: Option, - /// Information on the pre-link compiled contracts. - #[serde(skip_serializing_if = "Option::is_none")] - pub pre_link_compilation_status: Option, - /// Information on the post-link compiled contracts. - #[serde(skip_serializing_if = "Option::is_none")] - pub post_link_compilation_status: Option, - /// Information on the deployed libraries. - #[serde(skip_serializing_if = "Option::is_none")] - pub deployed_libraries: Option>, - /// Information on the deployed contracts. - #[serde(skip_serializing_if = "Option::is_none")] - pub deployed_contracts: Option>, + /// Information related to the node assigned to this test case. + #[serde(skip_serializing_if = "Option::is_none")] + pub node: Option, + /// Information on the pre-link compiled contracts. + #[serde(skip_serializing_if = "Option::is_none")] + pub pre_link_compilation_status: Option, + /// Information on the post-link compiled contracts. + #[serde(skip_serializing_if = "Option::is_none")] + pub post_link_compilation_status: Option, + /// Information on the deployed libraries. + #[serde(skip_serializing_if = "Option::is_none")] + pub deployed_libraries: Option>, + /// Information on the deployed contracts. 
+ #[serde(skip_serializing_if = "Option::is_none")] + pub deployed_contracts: Option>, } /// Information related to compilation #[derive(Clone, Debug, Serialize)] #[serde(tag = "status")] pub enum CompilationStatus { - /// The compilation was successful. - Success { - /// A flag with information on whether the compilation artifacts were cached or not. - is_cached: bool, - /// The version of the compiler used to compile the contracts. - compiler_version: Version, - /// The path of the compiler used to compile the contracts. - compiler_path: PathBuf, - /// The input provided to the compiler to compile the contracts. This is only included if - /// the appropriate flag is set in the CLI context and if the contracts were not cached and - /// the compiler was invoked. - #[serde(skip_serializing_if = "Option::is_none")] - compiler_input: Option, - /// The output of the compiler. This is only included if the appropriate flag is set in the - /// CLI contexts. - #[serde(skip_serializing_if = "Option::is_none")] - compiler_output: Option, - }, - /// The compilation failed. - Failure { - /// The failure reason. - reason: String, - /// The version of the compiler used to compile the contracts. - #[serde(skip_serializing_if = "Option::is_none")] - compiler_version: Option, - /// The path of the compiler used to compile the contracts. - #[serde(skip_serializing_if = "Option::is_none")] - compiler_path: Option, - /// The input provided to the compiler to compile the contracts. This is only included if - /// the appropriate flag is set in the CLI context and if the contracts were not cached and - /// the compiler was invoked. - #[serde(skip_serializing_if = "Option::is_none")] - compiler_input: Option, - }, + /// The compilation was successful. + Success { + /// A flag with information on whether the compilation artifacts were cached or not. + is_cached: bool, + /// The version of the compiler used to compile the contracts. 
+ compiler_version: Version, + /// The path of the compiler used to compile the contracts. + compiler_path: PathBuf, + /// The input provided to the compiler to compile the contracts. This is only included if + /// the appropriate flag is set in the CLI context and if the contracts were not cached and + /// the compiler was invoked. + #[serde(skip_serializing_if = "Option::is_none")] + compiler_input: Option, + /// The output of the compiler. This is only included if the appropriate flag is set in the + /// CLI contexts. + #[serde(skip_serializing_if = "Option::is_none")] + compiler_output: Option, + }, + /// The compilation failed. + Failure { + /// The failure reason. + reason: String, + /// The version of the compiler used to compile the contracts. + #[serde(skip_serializing_if = "Option::is_none")] + compiler_version: Option, + /// The path of the compiler used to compile the contracts. + #[serde(skip_serializing_if = "Option::is_none")] + compiler_path: Option, + /// The input provided to the compiler to compile the contracts. This is only included if + /// the appropriate flag is set in the CLI context and if the contracts were not cached and + /// the compiler was invoked. + #[serde(skip_serializing_if = "Option::is_none")] + compiler_input: Option, + }, } diff --git a/crates/report/src/common.rs b/crates/report/src/common.rs index 2c28bf1..b020d33 100644 --- a/crates/report/src/common.rs +++ b/crates/report/src/common.rs @@ -8,30 +8,30 @@ use revive_dt_format::{case::CaseIdx, steps::StepPath}; use serde::{Deserialize, Serialize}; define_wrapper_type!( - #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] - #[serde(transparent)] - pub struct MetadataFilePath(PathBuf); + #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)] + #[serde(transparent)] + pub struct MetadataFilePath(PathBuf); ); /// An absolute specifier for a test. 
#[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct TestSpecifier { - pub solc_mode: Mode, - pub metadata_file_path: PathBuf, - pub case_idx: CaseIdx, + pub solc_mode: Mode, + pub metadata_file_path: PathBuf, + pub case_idx: CaseIdx, } /// An absolute path for a test that also includes information about the node that it's assigned to /// and what platform it belongs to. #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct ExecutionSpecifier { - pub test_specifier: Arc, - pub node_id: usize, - pub platform_identifier: PlatformIdentifier, + pub test_specifier: Arc, + pub node_id: usize, + pub platform_identifier: PlatformIdentifier, } #[derive(Clone, Debug, PartialEq, Eq, Hash)] pub struct StepExecutionSpecifier { - pub execution_specifier: Arc, - pub step_idx: StepPath, + pub execution_specifier: Arc, + pub step_idx: StepPath, } diff --git a/crates/report/src/reporter_event.rs b/crates/report/src/reporter_event.rs index 0211e64..d5e97f4 100644 --- a/crates/report/src/reporter_event.rs +++ b/crates/report/src/reporter_event.rs @@ -9,14 +9,14 @@ use crate::{MetadataFilePath, TestCaseStatus}; #[derive(Clone, Debug)] pub enum ReporterEvent { - /// An event sent by the reporter once an entire metadata file and solc mode combination has - /// finished execution. - MetadataFileSolcModeCombinationExecutionCompleted { - /// The path of the metadata file. - metadata_file_path: MetadataFilePath, - /// The Solc mode that this metadata file was executed in. - mode: Mode, - /// The status of each one of the cases. - case_status: BTreeMap, - }, + /// An event sent by the reporter once an entire metadata file and solc mode combination has + /// finished execution. + MetadataFileSolcModeCombinationExecutionCompleted { + /// The path of the metadata file. + metadata_file_path: MetadataFilePath, + /// The Solc mode that this metadata file was executed in. + mode: Mode, + /// The status of each one of the cases. 
+ case_status: BTreeMap, + }, } diff --git a/crates/report/src/runner_event.rs b/crates/report/src/runner_event.rs index a022dde..07fc696 100644 --- a/crates/report/src/runner_event.rs +++ b/crates/report/src/runner_event.rs @@ -9,8 +9,8 @@ use indexmap::IndexMap; use revive_dt_common::types::PlatformIdentifier; use revive_dt_compiler::{CompilerInput, CompilerOutput}; use revive_dt_format::{ - corpus::Corpus, - metadata::{ContractInstance, Metadata}, + corpus::Corpus, + metadata::{ContractInstance, Metadata}, }; use semver::Version; use tokio::sync::{broadcast, oneshot}; @@ -474,160 +474,160 @@ macro_rules! define_event { } define_event! { - /// An event type that's sent by the test runners/drivers to the report aggregator. - pub(crate) enum RunnerEvent { - /// An event emitted by the reporter when it wishes to listen to events emitted by the - /// aggregator. - SubscribeToEvents { - /// The channel that the aggregator is to send the receive side of the channel on. - tx: oneshot::Sender> - }, - /// An event emitted by runners when they've discovered a corpus file. - CorpusFileDiscovery { - /// The contents of the corpus file. - corpus: Corpus - }, - /// An event emitted by runners when they've discovered a metadata file. - MetadataFileDiscovery { - /// The path of the metadata file discovered. - path: MetadataFilePath, - /// The content of the metadata file. - metadata: Metadata - }, - /// An event emitted by the runners when they discover a test case. - TestCaseDiscovery { - /// A specifier for the test that was discovered. - test_specifier: Arc, - }, - /// An event emitted by the runners when a test case is ignored. - TestIgnored { - /// A specifier for the test that's been ignored. - test_specifier: Arc, - /// A reason for the test to be ignored. - reason: String, - /// Additional fields that describe more information on why the test was ignored. - additional_fields: IndexMap - }, - /// An event emitted by the runners when a test case has succeeded. 
- TestSucceeded { - /// A specifier for the test that succeeded. - test_specifier: Arc, - /// The number of steps of the case that were executed by the driver. - steps_executed: usize, - }, - /// An event emitted by the runners when a test case has failed. - TestFailed { - /// A specifier for the test that succeeded. - test_specifier: Arc, - /// A reason for the failure of the test. - reason: String, - }, - /// An event emitted when the test case is assigned a platform node. - NodeAssigned { - /// A specifier for the test that the assignment is for. - test_specifier: Arc, - /// The ID of the node that this case is being executed on. - id: usize, - /// The identifier of the platform used. - platform_identifier: PlatformIdentifier, - /// The connection string of the node. - connection_string: String, - }, - /// An event emitted by the runners when the compilation of the contracts has succeeded - /// on the pre-link contracts. - PreLinkContractsCompilationSucceeded { - /// A specifier for the execution that's taking place. - execution_specifier: Arc, - /// The version of the compiler used to compile the contracts. - compiler_version: Version, - /// The path of the compiler used to compile the contracts. - compiler_path: PathBuf, - /// A flag of whether the contract bytecode and ABI were cached or if they were compiled - /// anew. - is_cached: bool, - /// The input provided to the compiler - this is optional and not provided if the - /// contracts were obtained from the cache. - compiler_input: Option, - /// The output of the compiler. - compiler_output: CompilerOutput - }, - /// An event emitted by the runners when the compilation of the contracts has succeeded - /// on the post-link contracts. - PostLinkContractsCompilationSucceeded { - /// A specifier for the execution that's taking place. - execution_specifier: Arc, - /// The version of the compiler used to compile the contracts. 
- compiler_version: Version, - /// The path of the compiler used to compile the contracts. - compiler_path: PathBuf, - /// A flag of whether the contract bytecode and ABI were cached or if they were compiled - /// anew. - is_cached: bool, - /// The input provided to the compiler - this is optional and not provided if the - /// contracts were obtained from the cache. - compiler_input: Option, - /// The output of the compiler. - compiler_output: CompilerOutput - }, - /// An event emitted by the runners when the compilation of the pre-link contract has - /// failed. - PreLinkContractsCompilationFailed { - /// A specifier for the execution that's taking place. - execution_specifier: Arc, - /// The version of the compiler used to compile the contracts. - compiler_version: Option, - /// The path of the compiler used to compile the contracts. - compiler_path: Option, - /// The input provided to the compiler - this is optional and not provided if the - /// contracts were obtained from the cache. - compiler_input: Option, - /// The failure reason. - reason: String, - }, - /// An event emitted by the runners when the compilation of the post-link contract has - /// failed. - PostLinkContractsCompilationFailed { - /// A specifier for the execution that's taking place. - execution_specifier: Arc, - /// The version of the compiler used to compile the contracts. - compiler_version: Option, - /// The path of the compiler used to compile the contracts. - compiler_path: Option, - /// The input provided to the compiler - this is optional and not provided if the - /// contracts were obtained from the cache. - compiler_input: Option, - /// The failure reason. - reason: String, - }, - /// An event emitted by the runners when a library has been deployed. - LibrariesDeployed { - /// A specifier for the execution that's taking place. - execution_specifier: Arc, - /// The addresses of the libraries that were deployed. 
- libraries: BTreeMap - }, - /// An event emitted by the runners when they've deployed a new contract. - ContractDeployed { - /// A specifier for the execution that's taking place. - execution_specifier: Arc, - /// The instance name of the contract. - contract_instance: ContractInstance, - /// The address of the contract. - address: Address - }, - /// Reports the completion of the run. - Completion {} - } + /// An event type that's sent by the test runners/drivers to the report aggregator. + pub(crate) enum RunnerEvent { + /// An event emitted by the reporter when it wishes to listen to events emitted by the + /// aggregator. + SubscribeToEvents { + /// The channel that the aggregator is to send the receive side of the channel on. + tx: oneshot::Sender> + }, + /// An event emitted by runners when they've discovered a corpus file. + CorpusFileDiscovery { + /// The contents of the corpus file. + corpus: Corpus + }, + /// An event emitted by runners when they've discovered a metadata file. + MetadataFileDiscovery { + /// The path of the metadata file discovered. + path: MetadataFilePath, + /// The content of the metadata file. + metadata: Metadata + }, + /// An event emitted by the runners when they discover a test case. + TestCaseDiscovery { + /// A specifier for the test that was discovered. + test_specifier: Arc, + }, + /// An event emitted by the runners when a test case is ignored. + TestIgnored { + /// A specifier for the test that's been ignored. + test_specifier: Arc, + /// A reason for the test to be ignored. + reason: String, + /// Additional fields that describe more information on why the test was ignored. + additional_fields: IndexMap + }, + /// An event emitted by the runners when a test case has succeeded. + TestSucceeded { + /// A specifier for the test that succeeded. + test_specifier: Arc, + /// The number of steps of the case that were executed by the driver. 
+ steps_executed: usize, + }, + /// An event emitted by the runners when a test case has failed. + TestFailed { + /// A specifier for the test that succeeded. + test_specifier: Arc, + /// A reason for the failure of the test. + reason: String, + }, + /// An event emitted when the test case is assigned a platform node. + NodeAssigned { + /// A specifier for the test that the assignment is for. + test_specifier: Arc, + /// The ID of the node that this case is being executed on. + id: usize, + /// The identifier of the platform used. + platform_identifier: PlatformIdentifier, + /// The connection string of the node. + connection_string: String, + }, + /// An event emitted by the runners when the compilation of the contracts has succeeded + /// on the pre-link contracts. + PreLinkContractsCompilationSucceeded { + /// A specifier for the execution that's taking place. + execution_specifier: Arc, + /// The version of the compiler used to compile the contracts. + compiler_version: Version, + /// The path of the compiler used to compile the contracts. + compiler_path: PathBuf, + /// A flag of whether the contract bytecode and ABI were cached or if they were compiled + /// anew. + is_cached: bool, + /// The input provided to the compiler - this is optional and not provided if the + /// contracts were obtained from the cache. + compiler_input: Option, + /// The output of the compiler. + compiler_output: CompilerOutput + }, + /// An event emitted by the runners when the compilation of the contracts has succeeded + /// on the post-link contracts. + PostLinkContractsCompilationSucceeded { + /// A specifier for the execution that's taking place. + execution_specifier: Arc, + /// The version of the compiler used to compile the contracts. + compiler_version: Version, + /// The path of the compiler used to compile the contracts. + compiler_path: PathBuf, + /// A flag of whether the contract bytecode and ABI were cached or if they were compiled + /// anew. 
+ is_cached: bool, + /// The input provided to the compiler - this is optional and not provided if the + /// contracts were obtained from the cache. + compiler_input: Option, + /// The output of the compiler. + compiler_output: CompilerOutput + }, + /// An event emitted by the runners when the compilation of the pre-link contract has + /// failed. + PreLinkContractsCompilationFailed { + /// A specifier for the execution that's taking place. + execution_specifier: Arc, + /// The version of the compiler used to compile the contracts. + compiler_version: Option, + /// The path of the compiler used to compile the contracts. + compiler_path: Option, + /// The input provided to the compiler - this is optional and not provided if the + /// contracts were obtained from the cache. + compiler_input: Option, + /// The failure reason. + reason: String, + }, + /// An event emitted by the runners when the compilation of the post-link contract has + /// failed. + PostLinkContractsCompilationFailed { + /// A specifier for the execution that's taking place. + execution_specifier: Arc, + /// The version of the compiler used to compile the contracts. + compiler_version: Option, + /// The path of the compiler used to compile the contracts. + compiler_path: Option, + /// The input provided to the compiler - this is optional and not provided if the + /// contracts were obtained from the cache. + compiler_input: Option, + /// The failure reason. + reason: String, + }, + /// An event emitted by the runners when a library has been deployed. + LibrariesDeployed { + /// A specifier for the execution that's taking place. + execution_specifier: Arc, + /// The addresses of the libraries that were deployed. + libraries: BTreeMap + }, + /// An event emitted by the runners when they've deployed a new contract. + ContractDeployed { + /// A specifier for the execution that's taking place. + execution_specifier: Arc, + /// The instance name of the contract. 
+ contract_instance: ContractInstance, + /// The address of the contract. + address: Address + }, + /// Reports the completion of the run. + Completion {} + } } /// An extension to the [`Reporter`] implemented by the macro. impl RunnerEventReporter { - pub async fn subscribe(&self) -> anyhow::Result> { - let (tx, rx) = oneshot::channel::>(); - self.report_subscribe_to_events_event(tx) - .context("Failed to send subscribe request to reporter task")?; - rx.await.map_err(Into::into) - } + pub async fn subscribe(&self) -> anyhow::Result> { + let (tx, rx) = oneshot::channel::>(); + self.report_subscribe_to_events_event(tx) + .context("Failed to send subscribe request to reporter task")?; + rx.await.map_err(Into::into) + } } pub type Reporter = RunnerEventReporter; diff --git a/crates/solc-binaries/src/cache.rs b/crates/solc-binaries/src/cache.rs index 46211a7..2fed792 100644 --- a/crates/solc-binaries/src/cache.rs +++ b/crates/solc-binaries/src/cache.rs @@ -1,12 +1,12 @@ //! Helper for caching the solc binaries. 
use std::{ - collections::HashSet, - fs::{File, create_dir_all}, - io::{BufWriter, Write}, - os::unix::fs::PermissionsExt, - path::{Path, PathBuf}, - sync::LazyLock, + collections::HashSet, + fs::{File, create_dir_all}, + io::{BufWriter, Write}, + os::unix::fs::PermissionsExt, + path::{Path, PathBuf}, + sync::LazyLock, }; use semver::Version; @@ -19,90 +19,71 @@ pub const SOLC_CACHE_DIRECTORY: &str = "solc"; pub(crate) static SOLC_CACHER: LazyLock>> = LazyLock::new(Default::default); pub(crate) async fn get_or_download( - working_directory: &Path, - downloader: &SolcDownloader, + working_directory: &Path, + downloader: &SolcDownloader, ) -> anyhow::Result<(Version, PathBuf)> { - let target_directory = working_directory - .join(SOLC_CACHE_DIRECTORY) - .join(downloader.version.to_string()); - let target_file = target_directory.join(downloader.target); + let target_directory = working_directory + .join(SOLC_CACHE_DIRECTORY) + .join(downloader.version.to_string()); + let target_file = target_directory.join(downloader.target); - let mut cache = SOLC_CACHER.lock().await; - if cache.contains(&target_file) { - tracing::debug!("using cached solc: {}", target_file.display()); - return Ok((downloader.version.clone(), target_file)); - } + let mut cache = SOLC_CACHER.lock().await; + if cache.contains(&target_file) { + tracing::debug!("using cached solc: {}", target_file.display()); + return Ok((downloader.version.clone(), target_file)); + } - create_dir_all(&target_directory).with_context(|| { - format!( - "Failed to create solc cache directory: {}", - target_directory.display() - ) - })?; - download_to_file(&target_file, downloader) - .await - .with_context(|| { - format!( - "Failed to write downloaded solc to {}", - target_file.display() - ) - })?; - cache.insert(target_file.clone()); + create_dir_all(&target_directory).with_context(|| { + format!("Failed to create solc cache directory: {}", target_directory.display()) + })?; + download_to_file(&target_file, downloader) + 
.await + .with_context(|| format!("Failed to write downloaded solc to {}", target_file.display()))?; + cache.insert(target_file.clone()); - Ok((downloader.version.clone(), target_file)) + Ok((downloader.version.clone(), target_file)) } async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::Result<()> { - let Ok(file) = File::create_new(path) else { - return Ok(()); - }; + let Ok(file) = File::create_new(path) else { + return Ok(()); + }; - #[cfg(unix)] - { - let mut permissions = file - .metadata() - .with_context(|| format!("Failed to read metadata for {}", path.display()))? - .permissions(); - permissions.set_mode(permissions.mode() | 0o111); - file.set_permissions(permissions).with_context(|| { - format!("Failed to set executable permissions on {}", path.display()) - })?; - } + #[cfg(unix)] + { + let mut permissions = file + .metadata() + .with_context(|| format!("Failed to read metadata for {}", path.display()))? + .permissions(); + permissions.set_mode(permissions.mode() | 0o111); + file.set_permissions(permissions).with_context(|| { + format!("Failed to set executable permissions on {}", path.display()) + })?; + } - let mut file = BufWriter::new(file); - file.write_all( - &downloader - .download() - .await - .context("Failed to download solc binary bytes")?, - ) - .with_context(|| format!("Failed to write solc binary to {}", path.display()))?; - file.flush() - .with_context(|| format!("Failed to flush file {}", path.display()))?; - drop(file); + let mut file = BufWriter::new(file); + file.write_all(&downloader.download().await.context("Failed to download solc binary bytes")?) 
+ .with_context(|| format!("Failed to write solc binary to {}", path.display()))?; + file.flush() + .with_context(|| format!("Failed to flush file {}", path.display()))?; + drop(file); - #[cfg(target_os = "macos")] - std::process::Command::new("xattr") - .arg("-d") - .arg("com.apple.quarantine") - .arg(path) - .stderr(std::process::Stdio::null()) - .stdout(std::process::Stdio::null()) - .stdout(std::process::Stdio::null()) - .spawn() - .with_context(|| { - format!( - "Failed to spawn xattr to remove quarantine attribute on {}", - path.display() - ) - })? - .wait() - .with_context(|| { - format!( - "Failed waiting for xattr operation to complete on {}", - path.display() - ) - })?; + #[cfg(target_os = "macos")] + std::process::Command::new("xattr") + .arg("-d") + .arg("com.apple.quarantine") + .arg(path) + .stderr(std::process::Stdio::null()) + .stdout(std::process::Stdio::null()) + .stdout(std::process::Stdio::null()) + .spawn() + .with_context(|| { + format!("Failed to spawn xattr to remove quarantine attribute on {}", path.display()) + })? + .wait() + .with_context(|| { + format!("Failed waiting for xattr operation to complete on {}", path.display()) + })?; - Ok(()) + Ok(()) } diff --git a/crates/solc-binaries/src/download.rs b/crates/solc-binaries/src/download.rs index 19f7aa1..9b7f3d7 100644 --- a/crates/solc-binaries/src/download.rs +++ b/crates/solc-binaries/src/download.rs @@ -1,8 +1,8 @@ //! This module downloads solc binaries. 
use std::{ - collections::HashMap, - sync::{LazyLock, Mutex}, + collections::HashMap, + sync::{LazyLock, Mutex}, }; use revive_dt_common::types::VersionOrRequirement; @@ -14,199 +14,158 @@ use crate::list::List; use anyhow::Context as _; pub static LIST_CACHE: LazyLock>> = - LazyLock::new(Default::default); + LazyLock::new(Default::default); impl List { - pub const LINUX_URL: &str = "https://binaries.soliditylang.org/linux-amd64/list.json"; - pub const WINDOWS_URL: &str = "https://binaries.soliditylang.org/windows-amd64/list.json"; - pub const MACOSX_URL: &str = "https://binaries.soliditylang.org/macosx-amd64/list.json"; - pub const WASM_URL: &str = "https://binaries.soliditylang.org/wasm/list.json"; + pub const LINUX_URL: &str = "https://binaries.soliditylang.org/linux-amd64/list.json"; + pub const WINDOWS_URL: &str = "https://binaries.soliditylang.org/windows-amd64/list.json"; + pub const MACOSX_URL: &str = "https://binaries.soliditylang.org/macosx-amd64/list.json"; + pub const WASM_URL: &str = "https://binaries.soliditylang.org/wasm/list.json"; - /// Try to downloads the list from the given URL. - /// - /// Caches the list retrieved from the `url` into [LIST_CACHE], - /// subsequent calls with the same `url` will return the cached list. - pub async fn download(url: &'static str) -> anyhow::Result { - if let Some(list) = LIST_CACHE.lock().unwrap().get(url) { - return Ok(list.clone()); - } + /// Try to downloads the list from the given URL. + /// + /// Caches the list retrieved from the `url` into [LIST_CACHE], + /// subsequent calls with the same `url` will return the cached list. + pub async fn download(url: &'static str) -> anyhow::Result { + if let Some(list) = LIST_CACHE.lock().unwrap().get(url) { + return Ok(list.clone()); + } - let body: List = reqwest::get(url) - .await - .with_context(|| format!("Failed to GET solc list from {url}"))? 
- .json() - .await - .with_context(|| format!("Failed to deserialize solc list JSON from {url}"))?; + let body: List = reqwest::get(url) + .await + .with_context(|| format!("Failed to GET solc list from {url}"))? + .json() + .await + .with_context(|| format!("Failed to deserialize solc list JSON from {url}"))?; - LIST_CACHE.lock().unwrap().insert(url, body.clone()); + LIST_CACHE.lock().unwrap().insert(url, body.clone()); - Ok(body) - } + Ok(body) + } } /// Download solc binaries from the official SolidityLang site #[derive(Clone, Debug)] pub struct SolcDownloader { - pub version: Version, - pub target: &'static str, - pub list: &'static str, + pub version: Version, + pub target: &'static str, + pub list: &'static str, } impl SolcDownloader { - pub const BASE_URL: &str = "https://binaries.soliditylang.org"; + pub const BASE_URL: &str = "https://binaries.soliditylang.org"; - pub const LINUX_NAME: &str = "linux-amd64"; - pub const MACOSX_NAME: &str = "macosx-amd64"; - pub const WINDOWS_NAME: &str = "windows-amd64"; - pub const WASM_NAME: &str = "wasm"; + pub const LINUX_NAME: &str = "linux-amd64"; + pub const MACOSX_NAME: &str = "macosx-amd64"; + pub const WINDOWS_NAME: &str = "windows-amd64"; + pub const WASM_NAME: &str = "wasm"; - async fn new( - version: impl Into, - target: &'static str, - list: &'static str, - ) -> anyhow::Result { - let version_or_requirement = version.into(); - match version_or_requirement { - VersionOrRequirement::Version(version) => Ok(Self { - version, - target, - list, - }), - VersionOrRequirement::Requirement(requirement) => { - let Some(version) = List::download(list) - .await - .with_context(|| format!("Failed to download solc builds list from {list}"))? 
- .builds - .into_iter() - .map(|build| build.version) - .filter(|version| requirement.matches(version)) - .max() - else { - anyhow::bail!("Failed to find a version that satisfies {requirement:?}"); - }; - Ok(Self { - version, - target, - list, - }) - } - } - } + async fn new( + version: impl Into, + target: &'static str, + list: &'static str, + ) -> anyhow::Result { + let version_or_requirement = version.into(); + match version_or_requirement { + VersionOrRequirement::Version(version) => Ok(Self { version, target, list }), + VersionOrRequirement::Requirement(requirement) => { + let Some(version) = List::download(list) + .await + .with_context(|| format!("Failed to download solc builds list from {list}"))? + .builds + .into_iter() + .map(|build| build.version) + .filter(|version| requirement.matches(version)) + .max() + else { + anyhow::bail!("Failed to find a version that satisfies {requirement:?}"); + }; + Ok(Self { version, target, list }) + }, + } + } - pub async fn linux(version: impl Into) -> anyhow::Result { - Self::new(version, Self::LINUX_NAME, List::LINUX_URL).await - } + pub async fn linux(version: impl Into) -> anyhow::Result { + Self::new(version, Self::LINUX_NAME, List::LINUX_URL).await + } - pub async fn macosx(version: impl Into) -> anyhow::Result { - Self::new(version, Self::MACOSX_NAME, List::MACOSX_URL).await - } + pub async fn macosx(version: impl Into) -> anyhow::Result { + Self::new(version, Self::MACOSX_NAME, List::MACOSX_URL).await + } - pub async fn windows(version: impl Into) -> anyhow::Result { - Self::new(version, Self::WINDOWS_NAME, List::WINDOWS_URL).await - } + pub async fn windows(version: impl Into) -> anyhow::Result { + Self::new(version, Self::WINDOWS_NAME, List::WINDOWS_URL).await + } - pub async fn wasm(version: impl Into) -> anyhow::Result { - Self::new(version, Self::WASM_NAME, List::WASM_URL).await - } + pub async fn wasm(version: impl Into) -> anyhow::Result { + Self::new(version, Self::WASM_NAME, List::WASM_URL).await + } - 
/// Download the solc binary. - /// - /// Errors out if the download fails or the digest of the downloaded file - /// mismatches the expected digest from the release [List]. - pub async fn download(&self) -> anyhow::Result> { - let builds = List::download(self.list) - .await - .with_context(|| format!("Failed to download solc builds list from {}", self.list))? - .builds; - let build = builds - .iter() - .find(|build| build.version == self.version) - .ok_or_else(|| anyhow::anyhow!("solc v{} not found builds", self.version)) - .with_context(|| { - format!( - "Requested solc version {} was not found in builds list fetched from {}", - self.version, self.list - ) - })?; + /// Download the solc binary. + /// + /// Errors out if the download fails or the digest of the downloaded file + /// mismatches the expected digest from the release [List]. + pub async fn download(&self) -> anyhow::Result> { + let builds = List::download(self.list) + .await + .with_context(|| format!("Failed to download solc builds list from {}", self.list))? + .builds; + let build = builds + .iter() + .find(|build| build.version == self.version) + .ok_or_else(|| anyhow::anyhow!("solc v{} not found builds", self.version)) + .with_context(|| { + format!( + "Requested solc version {} was not found in builds list fetched from {}", + self.version, self.list + ) + })?; - let path = build.path.clone(); - let expected_digest = build - .sha256 - .strip_prefix("0x") - .unwrap_or(&build.sha256) - .to_string(); - let url = format!("{}/{}/{}", Self::BASE_URL, self.target, path.display()); + let path = build.path.clone(); + let expected_digest = build.sha256.strip_prefix("0x").unwrap_or(&build.sha256).to_string(); + let url = format!("{}/{}/{}", Self::BASE_URL, self.target, path.display()); - let file = reqwest::get(&url) - .await - .with_context(|| format!("Failed to GET solc binary from {url}"))? - .bytes() - .await - .with_context(|| format!("Failed to read solc binary bytes from {url}"))? 
- .to_vec(); + let file = reqwest::get(&url) + .await + .with_context(|| format!("Failed to GET solc binary from {url}"))? + .bytes() + .await + .with_context(|| format!("Failed to read solc binary bytes from {url}"))? + .to_vec(); - if hex::encode(Sha256::digest(&file)) != expected_digest { - anyhow::bail!("sha256 mismatch for solc version {}", self.version); - } + if hex::encode(Sha256::digest(&file)) != expected_digest { + anyhow::bail!("sha256 mismatch for solc version {}", self.version); + } - Ok(file) - } + Ok(file) + } } #[cfg(test)] mod tests { - use crate::{download::SolcDownloader, list::List}; + use crate::{download::SolcDownloader, list::List}; - #[tokio::test] - async fn try_get_windows() { - let version = List::download(List::WINDOWS_URL) - .await - .unwrap() - .latest_release; - SolcDownloader::windows(version) - .await - .unwrap() - .download() - .await - .unwrap(); - } + #[tokio::test] + async fn try_get_windows() { + let version = List::download(List::WINDOWS_URL).await.unwrap().latest_release; + SolcDownloader::windows(version).await.unwrap().download().await.unwrap(); + } - #[tokio::test] - async fn try_get_macosx() { - let version = List::download(List::MACOSX_URL) - .await - .unwrap() - .latest_release; - SolcDownloader::macosx(version) - .await - .unwrap() - .download() - .await - .unwrap(); - } + #[tokio::test] + async fn try_get_macosx() { + let version = List::download(List::MACOSX_URL).await.unwrap().latest_release; + SolcDownloader::macosx(version).await.unwrap().download().await.unwrap(); + } - #[tokio::test] - async fn try_get_linux() { - let version = List::download(List::LINUX_URL) - .await - .unwrap() - .latest_release; - SolcDownloader::linux(version) - .await - .unwrap() - .download() - .await - .unwrap(); - } + #[tokio::test] + async fn try_get_linux() { + let version = List::download(List::LINUX_URL).await.unwrap().latest_release; + SolcDownloader::linux(version).await.unwrap().download().await.unwrap(); + } - #[tokio::test] - 
async fn try_get_wasm() { - let version = List::download(List::WASM_URL).await.unwrap().latest_release; - SolcDownloader::wasm(version) - .await - .unwrap() - .download() - .await - .unwrap(); - } + #[tokio::test] + async fn try_get_wasm() { + let version = List::download(List::WASM_URL).await.unwrap().latest_release; + SolcDownloader::wasm(version).await.unwrap().download().await.unwrap(); + } } diff --git a/crates/solc-binaries/src/lib.rs b/crates/solc-binaries/src/lib.rs index e97dd1a..81162c5 100644 --- a/crates/solc-binaries/src/lib.rs +++ b/crates/solc-binaries/src/lib.rs @@ -22,22 +22,22 @@ pub mod list; /// Subsequent calls for the same version will use a cached artifact /// and not download it again. pub async fn download_solc( - cache_directory: &Path, - version: impl Into, - wasm: bool, + cache_directory: &Path, + version: impl Into, + wasm: bool, ) -> anyhow::Result<(Version, PathBuf)> { - let downloader = if wasm { - SolcDownloader::wasm(version).await - } else if cfg!(target_os = "linux") { - SolcDownloader::linux(version).await - } else if cfg!(target_os = "macos") { - SolcDownloader::macosx(version).await - } else if cfg!(target_os = "windows") { - SolcDownloader::windows(version).await - } else { - unimplemented!() - } - .context("Failed to initialize the Solc Downloader")?; + let downloader = if wasm { + SolcDownloader::wasm(version).await + } else if cfg!(target_os = "linux") { + SolcDownloader::linux(version).await + } else if cfg!(target_os = "macos") { + SolcDownloader::macosx(version).await + } else if cfg!(target_os = "windows") { + SolcDownloader::windows(version).await + } else { + unimplemented!() + } + .context("Failed to initialize the Solc Downloader")?; - get_or_download(cache_directory, &downloader).await + get_or_download(cache_directory, &downloader).await } diff --git a/crates/solc-binaries/src/list.rs b/crates/solc-binaries/src/list.rs index 2287158..5bfc495 100644 --- a/crates/solc-binaries/src/list.rs +++ 
b/crates/solc-binaries/src/list.rs @@ -7,20 +7,20 @@ use serde::Deserialize; #[derive(Debug, Deserialize, Clone, Eq, PartialEq)] pub struct List { - pub builds: Vec, - pub releases: HashMap, - #[serde(rename = "latestRelease")] - pub latest_release: Version, + pub builds: Vec, + pub releases: HashMap, + #[serde(rename = "latestRelease")] + pub latest_release: Version, } #[derive(Debug, Deserialize, Clone, Eq, PartialEq)] pub struct Build { - pub path: PathBuf, - pub version: Version, - pub build: String, - #[serde(rename = "longVersion")] - pub long_version: String, - pub keccak256: String, - pub sha256: String, - pub urls: Vec, + pub path: PathBuf, + pub version: Version, + pub build: String, + #[serde(rename = "longVersion")] + pub long_version: String, + pub keccak256: String, + pub sha256: String, + pub urls: Vec, }