Compare commits

...

29 Commits

Author SHA1 Message Date
pgherveou 1659164310 save before flight 2025-10-13 13:05:07 +02:00
pgherveou 0a68800856 nit 2025-10-08 18:26:43 +02:00
pgherveou 8303d789cd use 10^6 for gas filler 2025-10-08 15:15:08 +02:00
pgherveou 40bf44fe58 fix 2025-10-08 14:50:50 +02:00
pgherveou ba8ad03290 fix 2025-10-08 14:06:03 +02:00
pgherveou 3dd99f3ac8 Merge branch 'pg/fmt' into pg/ml-runner 2025-10-08 11:42:37 +00:00
pgherveou 6618463c68 fix 2025-10-08 11:40:08 +00:00
pgherveou dffb80ac0a fixes 2025-10-08 11:18:31 +02:00
pgherveou 43a1114337 custom rpc port 2025-10-08 11:10:46 +02:00
pgherveou 3a07ea042b fix 2025-10-08 10:45:49 +02:00
pgherveou 9e2aa972db fix 2025-10-08 10:33:59 +02:00
pgherveou 86f2173e8b nit 2025-10-08 10:14:22 +02:00
pgherveou 6e658aec49 Merge branch 'pg/fmt' into pg/ml-runner 2025-10-08 10:04:38 +02:00
pgherveou 1aba74ec3e fix 2025-10-08 10:03:00 +02:00
pgherveou 180bd64bc5 Merge branch 'pg/fmt' into pg/ml-runner 2025-10-08 10:01:36 +02:00
pgherveou 967cbac349 fix 2025-10-08 10:00:32 +02:00
pgherveou a8d84c8360 fix 2025-10-08 09:59:53 +02:00
pgherveou c83a755416 Merge branch 'main' into pg/fmt 2025-10-08 09:59:42 +02:00
pgherveou 0711216539 add fmt check 2025-10-08 09:57:28 +02:00
pgherveou b40c17c0af fixes 2025-10-08 09:52:13 +02:00
pgherveou 8ae994f9de fixes 2025-10-08 09:43:36 +02:00
pgherveou 3f3cbfa934 Merge branch 'pg/fmt' into pg/ml-runner 2025-10-08 09:28:45 +02:00
pgherveou c676114fe1 apply fmt 2025-10-08 09:27:11 +02:00
pgherveou 92885351ed use polkadot-sdk rustfmt 2025-10-08 09:26:24 +02:00
pgherveou e16f8ebf59 Merge branch 'pg/fmt' into pg/ml-runner 2025-10-08 09:19:21 +02:00
pgherveou d482808eb2 add rustfmt.toml 2025-10-08 07:18:17 +00:00
pgherveou 1f84ce6f61 fix lint 2025-10-08 06:28:57 +00:00
pgherveou 765569a8b6 fix 2025-10-08 08:22:26 +02:00
pgherveou 6e64f678ee ml-runner init 2025-10-07 16:10:43 +00:00
60 changed files with 12511 additions and 12206 deletions
-1
View File
@@ -13,4 +13,3 @@ resolc-compiler-tests
workdir workdir
!/schema.json !/schema.json
!/dev-genesis.json
+25
View File
@@ -0,0 +1,25 @@
# Basic
edition = "2024"
hard_tabs = true
max_width = 100
use_small_heuristics = "Max"
# Imports
imports_granularity = "Crate"
reorder_imports = true
# Consistency
newline_style = "Unix"
# Misc
chain_width = 80
spaces_around_ranges = false
binop_separator = "Back"
reorder_impl_items = false
match_arm_leading_pipes = "Preserve"
match_arm_blocks = false
match_block_trailing_comma = true
trailing_comma = "Vertical"
trailing_semicolon = false
use_field_init_shorthand = true
# Format comments
comment_width = 100
wrap_comments = true
Generated
+21
View File
@@ -4526,6 +4526,27 @@ dependencies = [
"windows-sys 0.59.0", "windows-sys 0.59.0",
] ]
[[package]]
name = "ml-test-runner"
version = "0.1.0"
dependencies = [
"alloy",
"anyhow",
"clap",
"revive-dt-common",
"revive-dt-compiler",
"revive-dt-config",
"revive-dt-core",
"revive-dt-format",
"revive-dt-node",
"revive-dt-node-interaction",
"revive-dt-report",
"temp-dir",
"tokio",
"tracing",
"tracing-subscriber",
]
[[package]] [[package]]
name = "moka" name = "moka"
version = "0.12.10" version = "0.12.10"
+1 -1
View File
@@ -1,7 +1,7 @@
.PHONY: format clippy test machete .PHONY: format clippy test machete
format: format:
cargo fmt --all -- --check cargo +nightly fmt --all -- --check
clippy: clippy:
cargo clippy --all-features --workspace -- --deny warnings cargo clippy --all-features --workspace -- --deny warnings
+8 -9
View File
@@ -1,9 +1,11 @@
//! This module implements a cached file system allowing for results to be stored in-memory rather //! This module implements a cached file system allowing for results to be stored in-memory rather
//! than being queried from the file system again. //! than being queried from the file system again.
use std::fs; use std::{
use std::io::{Error, Result}; fs,
use std::path::{Path, PathBuf}; io::{Error, Result},
path::{Path, PathBuf},
};
use moka::sync::Cache; use moka::sync::Cache;
use once_cell::sync::Lazy; use once_cell::sync::Lazy;
@@ -18,17 +20,14 @@ pub fn read(path: impl AsRef<Path>) -> Result<Vec<u8>> {
let content = fs::read(path.as_path())?; let content = fs::read(path.as_path())?;
READ_CACHE.insert(path, content.clone()); READ_CACHE.insert(path, content.clone());
Ok(content) Ok(content)
} },
} }
} }
pub fn read_to_string(path: impl AsRef<Path>) -> Result<String> { pub fn read_to_string(path: impl AsRef<Path>) -> Result<String> {
let content = read(path)?; let content = read(path)?;
String::from_utf8(content).map_err(|_| { String::from_utf8(content).map_err(|_| {
Error::new( Error::new(std::io::ErrorKind::InvalidData, "The contents of the file are not valid UTF8")
std::io::ErrorKind::InvalidData,
"The contents of the file are not valid UTF8",
)
}) })
} }
@@ -44,6 +43,6 @@ pub fn read_dir(path: impl AsRef<Path>) -> Result<Box<dyn Iterator<Item = Result
.collect(); .collect();
READ_DIR_CACHE.insert(path.clone(), entries); READ_DIR_CACHE.insert(path.clone(), entries);
Ok(read_dir(path).unwrap()) Ok(read_dir(path).unwrap())
} },
} }
} }
+1 -4
View File
@@ -12,10 +12,7 @@ pub fn clear_directory(path: impl AsRef<Path>) -> Result<()> {
.with_context(|| format!("Failed to read directory: {}", path.as_ref().display()))? .with_context(|| format!("Failed to read directory: {}", path.as_ref().display()))?
{ {
let entry = entry.with_context(|| { let entry = entry.with_context(|| {
format!( format!("Failed to read an entry in directory: {}", path.as_ref().display())
"Failed to read an entry in directory: {}",
path.as_ref().display()
)
})?; })?;
let entry_path = entry.path(); let entry_path = entry.path();
+6 -11
View File
@@ -1,5 +1,4 @@
use std::ops::ControlFlow; use std::{ops::ControlFlow, time::Duration};
use std::time::Duration;
use anyhow::{Context as _, Result, anyhow}; use anyhow::{Context as _, Result, anyhow};
@@ -38,17 +37,13 @@ where
)); ));
} }
match future() match future().await.context("Polled future returned an error during polling loop")? {
.await
.context("Polled future returned an error during polling loop")?
{
ControlFlow::Continue(()) => { ControlFlow::Continue(()) => {
let next_wait_duration = match polling_wait_behavior { let next_wait_duration = match polling_wait_behavior {
PollingWaitBehavior::Constant(duration) => duration, PollingWaitBehavior::Constant(duration) => duration,
PollingWaitBehavior::ExponentialBackoff => { PollingWaitBehavior::ExponentialBackoff =>
Duration::from_secs(2u64.pow(retries)) Duration::from_secs(2u64.pow(retries))
.min(EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION) .min(EXPONENTIAL_BACKOFF_MAX_WAIT_DURATION),
}
}; };
let next_wait_duration = let next_wait_duration =
next_wait_duration.min(max_allowed_wait_duration - total_wait_duration); next_wait_duration.min(max_allowed_wait_duration - total_wait_duration);
@@ -56,10 +51,10 @@ where
retries += 1; retries += 1;
tokio::time::sleep(next_wait_duration).await; tokio::time::sleep(next_wait_duration).await;
} },
ControlFlow::Break(output) => { ControlFlow::Break(output) => {
break Ok(output); break Ok(output);
} },
} }
} }
} }
@@ -75,13 +75,12 @@ impl Iterator for FilesWithExtensionIterator {
for entry_path in iterator.flatten() { for entry_path in iterator.flatten() {
if entry_path.is_dir() { if entry_path.is_dir() {
self.directories_to_search.push(entry_path) self.directories_to_search.push(entry_path)
} else if entry_path.is_file() } else if entry_path.is_file() &&
&& entry_path.extension().is_some_and(|ext| { entry_path.extension().is_some_and(|ext| {
self.allowed_extensions self.allowed_extensions
.iter() .iter()
.any(|allowed| ext.eq_ignore_ascii_case(allowed.as_ref())) .any(|allowed| ext.eq_ignore_ascii_case(allowed.as_ref()))
}) }) {
{
self.files_matching_allowed_extensions.push(entry_path) self.files_matching_allowed_extensions.push(entry_path)
} }
} }
@@ -135,6 +135,6 @@ macro_rules! define_wrapper_type {
}; };
} }
/// Technically not needed but this allows for the macro to be found in the `macros` module of the /// Technically not needed but this allows for the macro to be found in the `macros` module of
/// crate in addition to being found in the root of the crate. /// the crate in addition to being found in the root of the crate.
pub use {define_wrapper_type, impl_for_wrapper}; pub use {define_wrapper_type, impl_for_wrapper};
+2 -6
View File
@@ -1,9 +1,7 @@
use crate::types::VersionOrRequirement; use crate::types::VersionOrRequirement;
use semver::Version; use semver::Version;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::fmt::Display; use std::{fmt::Display, str::FromStr, sync::LazyLock};
use std::str::FromStr;
use std::sync::LazyLock;
/// This represents a mode that a given test should be run with, if possible. /// This represents a mode that a given test should be run with, if possible.
/// ///
@@ -78,9 +76,7 @@ impl FromStr for ModePipeline {
// Don't go via Yul IR // Don't go via Yul IR
"E" => Ok(ModePipeline::ViaEVMAssembly), "E" => Ok(ModePipeline::ViaEVMAssembly),
// Anything else that we see isn't a mode at all // Anything else that we see isn't a mode at all
_ => Err(anyhow::anyhow!( _ => Err(anyhow::anyhow!("Unsupported pipeline '{s}': expected 'Y' or 'E'")),
"Unsupported pipeline '{s}': expected 'Y' or 'E'"
)),
} }
} }
} }
@@ -1,5 +1,4 @@
use alloy::primitives::U256; use alloy::{primitives::U256, signers::local::PrivateKeySigner};
use alloy::signers::local::PrivateKeySigner;
use anyhow::{Context, Result, bail}; use anyhow::{Context, Result, bail};
/// This is a sequential private key allocator. When instantiated, it allocates private keys in /// This is a sequential private key allocator. When instantiated, it allocates private keys in
@@ -16,10 +15,7 @@ pub struct PrivateKeyAllocator {
impl PrivateKeyAllocator { impl PrivateKeyAllocator {
/// Creates a new instance of the private key allocator. /// Creates a new instance of the private key allocator.
pub fn new(highest_private_key_inclusive: U256) -> Self { pub fn new(highest_private_key_inclusive: U256) -> Self {
Self { Self { next_private_key: U256::ONE, highest_private_key_inclusive }
next_private_key: U256::ONE,
highest_private_key_inclusive,
}
} }
/// Allocates a new private key and errors out if the maximum private key has been reached. /// Allocates a new private key and errors out if the maximum private key has been reached.
+1 -4
View File
@@ -7,10 +7,7 @@ pub struct RoundRobinPool<T> {
impl<T> RoundRobinPool<T> { impl<T> RoundRobinPool<T> {
pub fn new(items: Vec<T>) -> Self { pub fn new(items: Vec<T>) -> Self {
Self { Self { next_index: Default::default(), items }
next_index: Default::default(),
items,
}
} }
pub fn round_robin(&self) -> &T { pub fn round_robin(&self) -> &T {
+1 -2
View File
@@ -10,8 +10,7 @@ use std::{
pin::Pin, pin::Pin,
}; };
use alloy::json_abi::JsonAbi; use alloy::{json_abi::JsonAbi, primitives::Address};
use alloy::primitives::Address;
use anyhow::{Context as _, Result}; use anyhow::{Context as _, Result};
use semver::Version; use semver::Version;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
+14 -22
View File
@@ -60,10 +60,7 @@ impl Resolc {
Ok(COMPILERS_CACHE Ok(COMPILERS_CACHE
.entry(solc.clone()) .entry(solc.clone())
.or_insert_with(|| { .or_insert_with(|| {
Self(Arc::new(ResolcInner { Self(Arc::new(ResolcInner { solc, resolc_path: resolc_configuration.path.clone() }))
solc,
resolc_path: resolc_configuration.path.clone(),
}))
}) })
.clone()) .clone())
} }
@@ -141,9 +138,7 @@ impl SolidityCompiler for Resolc {
output_selection: Some(SolcStandardJsonInputSettingsSelection::new_required()), output_selection: Some(SolcStandardJsonInputSettingsSelection::new_required()),
via_ir: Some(true), via_ir: Some(true),
optimizer: SolcStandardJsonInputSettingsOptimizer::new( optimizer: SolcStandardJsonInputSettingsOptimizer::new(
optimization optimization.unwrap_or(ModeOptimizerSetting::M0).optimizations_enabled(),
.unwrap_or(ModeOptimizerSetting::M0)
.optimizations_enabled(),
None, None,
&Version::new(0, 0, 0), &Version::new(0, 0, 0),
false, false,
@@ -259,9 +254,8 @@ impl SolidityCompiler for Resolc {
.as_ref() .as_ref()
.context("No metadata found for the contract")?; .context("No metadata found for the contract")?;
let solc_metadata_str = match metadata { let solc_metadata_str = match metadata {
serde_json::Value::String(solc_metadata_str) => { serde_json::Value::String(solc_metadata_str) =>
solc_metadata_str.as_str() solc_metadata_str.as_str(),
}
serde_json::Value::Object(metadata_object) => { serde_json::Value::Object(metadata_object) => {
let solc_metadata_value = metadata_object let solc_metadata_value = metadata_object
.get("solc_metadata") .get("solc_metadata")
@@ -269,18 +263,16 @@ impl SolidityCompiler for Resolc {
solc_metadata_value solc_metadata_value
.as_str() .as_str()
.context("The 'solc_metadata' field is not a string")? .context("The 'solc_metadata' field is not a string")?
} },
serde_json::Value::Null serde_json::Value::Null |
| serde_json::Value::Bool(_) serde_json::Value::Bool(_) |
| serde_json::Value::Number(_) serde_json::Value::Number(_) |
| serde_json::Value::Array(_) => { serde_json::Value::Array(_) => {
anyhow::bail!("Unsupported type of metadata {metadata:?}") anyhow::bail!("Unsupported type of metadata {metadata:?}")
} },
}; };
let solc_metadata = serde_json::from_str::<serde_json::Value>( let solc_metadata =
solc_metadata_str, serde_json::from_str::<serde_json::Value>(solc_metadata_str).context(
)
.context(
"Failed to deserialize the solc_metadata as a serde_json generic value", "Failed to deserialize the solc_metadata as a serde_json generic value",
)?; )?;
let output_value = solc_metadata let output_value = solc_metadata
@@ -305,7 +297,7 @@ impl SolidityCompiler for Resolc {
optimize_setting: ModeOptimizerSetting, optimize_setting: ModeOptimizerSetting,
pipeline: ModePipeline, pipeline: ModePipeline,
) -> bool { ) -> bool {
pipeline == ModePipeline::ViaYulIR pipeline == ModePipeline::ViaYulIR &&
&& SolidityCompiler::supports_mode(&self.0.solc, optimize_setting, pipeline) SolidityCompiler::supports_mode(&self.0.solc, optimize_setting, pipeline)
} }
} }
+9 -17
View File
@@ -21,8 +21,7 @@ use foundry_compilers_artifacts::{
output_selection::{ output_selection::{
BytecodeOutputSelection, ContractOutputSelection, EvmOutputSelection, OutputSelection, BytecodeOutputSelection, ContractOutputSelection, EvmOutputSelection, OutputSelection,
}, },
solc::CompilerOutput as SolcOutput, solc::{CompilerOutput as SolcOutput, *},
solc::*,
}; };
use semver::Version; use semver::Version;
use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand}; use tokio::{io::AsyncWriteExt, process::Command as AsyncCommand};
@@ -57,9 +56,7 @@ impl Solc {
// resolution for us. Therefore, even if the download didn't proceed, this function will // resolution for us. Therefore, even if the download didn't proceed, this function will
// resolve the version requirement into a canonical version of the compiler. It's then up // resolve the version requirement into a canonical version of the compiler. It's then up
// to us to either use the provided path or not. // to us to either use the provided path or not.
let version = version let version = version.into().unwrap_or_else(|| solc_configuration.version.clone().into());
.into()
.unwrap_or_else(|| solc_configuration.version.clone().into());
let (version, path) = let (version, path) =
download_solc(working_directory_configuration.as_path(), version, false) download_solc(working_directory_configuration.as_path(), version, false)
.await .await
@@ -73,10 +70,7 @@ impl Solc {
solc_version = %version, solc_version = %version,
"Created a new solc compiler object" "Created a new solc compiler object"
); );
Self(Arc::new(SolcInner { Self(Arc::new(SolcInner { solc_path: path, solc_version: version }))
solc_path: path,
solc_version: version,
}))
}) })
.clone()) .clone())
} }
@@ -253,10 +247,7 @@ impl SolidityCompiler for Solc {
let map = compiler_output let map = compiler_output
.contracts .contracts
.entry(contract_path.canonicalize().with_context(|| { .entry(contract_path.canonicalize().with_context(|| {
format!( format!("Failed to canonicalize contract path {}", contract_path.display())
"Failed to canonicalize contract path {}",
contract_path.display()
)
})?) })?)
.or_default(); .or_default();
for (contract_name, contract_info) in contracts.into_iter() { for (contract_name, contract_info) in contracts.into_iter() {
@@ -284,10 +275,11 @@ impl SolidityCompiler for Solc {
_optimize_setting: ModeOptimizerSetting, _optimize_setting: ModeOptimizerSetting,
pipeline: ModePipeline, pipeline: ModePipeline,
) -> bool { ) -> bool {
// solc 0.8.13 and above supports --via-ir, and less than that does not. Thus, we support mode E // solc 0.8.13 and above supports --via-ir, and less than that does not. Thus, we support
// (ie no Yul IR) in either case, but only support Y (via Yul IR) if the compiler is new enough. // mode E (ie no Yul IR) in either case, but only support Y (via Yul IR) if the compiler
pipeline == ModePipeline::ViaEVMAssembly // is new enough.
|| (pipeline == ModePipeline::ViaYulIR && self.compiler_supports_yul()) pipeline == ModePipeline::ViaEVMAssembly ||
(pipeline == ModePipeline::ViaYulIR && self.compiler_supports_yul())
} }
} }
+21 -34
View File
@@ -154,7 +154,7 @@ impl AsRef<GenesisConfiguration> for Context {
Self::Benchmark(..) => { Self::Benchmark(..) => {
static GENESIS: LazyLock<GenesisConfiguration> = LazyLock::new(Default::default); static GENESIS: LazyLock<GenesisConfiguration> = LazyLock::new(Default::default);
&GENESIS &GENESIS
} },
Self::ExportJsonSchema => unreachable!(), Self::ExportJsonSchema => unreachable!(),
} }
} }
@@ -202,11 +202,11 @@ impl AsRef<ReportConfiguration> for Context {
#[derive(Clone, Debug, Parser, Serialize)] #[derive(Clone, Debug, Parser, Serialize)]
pub struct TestExecutionContext { pub struct TestExecutionContext {
/// The working directory that the program will use for all of the temporary artifacts needed at /// The working directory that the program will use for all of the temporary artifacts needed
/// runtime. /// at runtime.
/// ///
/// If not specified, then a temporary directory will be created and used by the program for all /// If not specified, then a temporary directory will be created and used by the program for
/// temporary artifacts. /// all temporary artifacts.
#[clap( #[clap(
short, short,
long, long,
@@ -282,11 +282,11 @@ pub struct TestExecutionContext {
#[derive(Clone, Debug, Parser, Serialize)] #[derive(Clone, Debug, Parser, Serialize)]
pub struct BenchmarkingContext { pub struct BenchmarkingContext {
/// The working directory that the program will use for all of the temporary artifacts needed at /// The working directory that the program will use for all of the temporary artifacts needed
/// runtime. /// at runtime.
/// ///
/// If not specified, then a temporary directory will be created and used by the program for all /// If not specified, then a temporary directory will be created and used by the program for
/// temporary artifacts. /// all temporary artifacts.
#[clap( #[clap(
short, short,
long, long,
@@ -580,8 +580,8 @@ pub struct ResolcConfiguration {
pub struct PolkadotParachainConfiguration { pub struct PolkadotParachainConfiguration {
/// Specifies the path of the polkadot-parachain node to be used by the tool. /// Specifies the path of the polkadot-parachain node to be used by the tool.
/// ///
/// If this is not specified, then the tool assumes that it should use the polkadot-parachain binary /// If this is not specified, then the tool assumes that it should use the polkadot-parachain
/// that's provided in the user's $PATH. /// binary that's provided in the user's $PATH.
#[clap( #[clap(
id = "polkadot-parachain.path", id = "polkadot-parachain.path",
long = "polkadot-parachain.path", long = "polkadot-parachain.path",
@@ -624,13 +624,9 @@ pub struct GethConfiguration {
pub struct KurtosisConfiguration { pub struct KurtosisConfiguration {
/// Specifies the path of the kurtosis node to be used by the tool. /// Specifies the path of the kurtosis node to be used by the tool.
/// ///
/// If this is not specified, then the tool assumes that it should use the kurtosis binary that's /// If this is not specified, then the tool assumes that it should use the kurtosis binary
/// provided in the user's $PATH. /// that's provided in the user's $PATH.
#[clap( #[clap(id = "kurtosis.path", long = "kurtosis.path", default_value = "kurtosis")]
id = "kurtosis.path",
long = "kurtosis.path",
default_value = "kurtosis"
)]
pub path: PathBuf, pub path: PathBuf,
} }
@@ -641,11 +637,7 @@ pub struct KitchensinkConfiguration {
/// ///
/// If this is not specified, then the tool assumes that it should use the kitchensink binary /// If this is not specified, then the tool assumes that it should use the kitchensink binary
/// that's provided in the user's $PATH. /// that's provided in the user's $PATH.
#[clap( #[clap(id = "kitchensink.path", long = "kitchensink.path", default_value = "substrate-node")]
id = "kitchensink.path",
long = "kitchensink.path",
default_value = "substrate-node"
)]
pub path: PathBuf, pub path: PathBuf,
/// The amount of time to wait upon startup before considering that the node timed out. /// The amount of time to wait upon startup before considering that the node timed out.
@@ -663,8 +655,8 @@ pub struct KitchensinkConfiguration {
pub struct ReviveDevNodeConfiguration { pub struct ReviveDevNodeConfiguration {
/// Specifies the path of the revive dev node to be used by the tool. /// Specifies the path of the revive dev node to be used by the tool.
/// ///
/// If this is not specified, then the tool assumes that it should use the revive dev node binary /// If this is not specified, then the tool assumes that it should use the revive dev node
/// that's provided in the user's $PATH. /// binary that's provided in the user's $PATH.
#[clap( #[clap(
id = "revive-dev-node.path", id = "revive-dev-node.path",
long = "revive-dev-node.path", long = "revive-dev-node.path",
@@ -731,11 +723,11 @@ impl GenesisConfiguration {
Some(genesis_path) => { Some(genesis_path) => {
let genesis_content = read_to_string(genesis_path)?; let genesis_content = read_to_string(genesis_path)?;
serde_json::from_str(genesis_content.as_str())? serde_json::from_str(genesis_content.as_str())?
} },
None => DEFAULT_GENESIS.clone(), None => DEFAULT_GENESIS.clone(),
}; };
Ok(self.genesis.get_or_init(|| genesis)) Ok(self.genesis.get_or_init(|| genesis))
} },
} }
} }
} }
@@ -823,10 +815,7 @@ impl ConcurrencyConfiguration {
pub fn concurrency_limit(&self) -> Option<usize> { pub fn concurrency_limit(&self) -> Option<usize> {
match self.ignore_concurrency_limit { match self.ignore_concurrency_limit {
true => None, true => None,
false => Some( false => Some(self.number_concurrent_tasks.unwrap_or(20 * self.number_of_nodes)),
self.number_concurrent_tasks
.unwrap_or(20 * self.number_of_nodes),
),
} }
} }
} }
@@ -917,9 +906,7 @@ impl Serialize for WorkingDirectoryConfiguration {
} }
fn parse_duration(s: &str) -> anyhow::Result<Duration> { fn parse_duration(s: &str) -> anyhow::Result<Duration> {
u64::from_str(s) u64::from_str(s).map(Duration::from_millis).map_err(Into::into)
.map(Duration::from_millis)
.map_err(Into::into)
} }
/// The Solidity compatible node implementation. /// The Solidity compatible node implementation.
@@ -59,8 +59,8 @@ pub struct Driver<'a, I> {
/// The definition of the test that the driver is instructed to execute. /// The definition of the test that the driver is instructed to execute.
test_definition: &'a TestDefinition<'a>, test_definition: &'a TestDefinition<'a>,
/// The private key allocator used by this driver and other drivers when account allocations are /// The private key allocator used by this driver and other drivers when account allocations
/// needed. /// are needed.
private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>, private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
/// The execution state associated with the platform. /// The execution state associated with the platform.
@@ -206,9 +206,7 @@ where
"Deployed library" "Deployed library"
); );
let library_address = receipt let library_address = receipt.contract_address.expect("Failed to deploy the library");
.contract_address
.expect("Failed to deploy the library");
deployed_libraries.get_or_insert_default().insert( deployed_libraries.get_or_insert_default().insert(
library_instance.clone(), library_instance.clone(),
@@ -236,10 +234,8 @@ where
}) })
.context("Failed to compile the post-link contracts")?; .context("Failed to compile the post-link contracts")?;
self.execution_state = ExecutionState::new( self.execution_state =
compiler_output.contracts, ExecutionState::new(compiler_output.contracts, deployed_libraries.unwrap_or_default());
deployed_libraries.unwrap_or_default(),
);
Ok(()) Ok(())
} }
@@ -325,11 +321,7 @@ where
) -> Result<HashMap<ContractInstance, TransactionReceipt>> { ) -> Result<HashMap<ContractInstance, TransactionReceipt>> {
let mut instances_we_must_deploy = IndexMap::<ContractInstance, bool>::new(); let mut instances_we_must_deploy = IndexMap::<ContractInstance, bool>::new();
for instance in step.find_all_contract_instances().into_iter() { for instance in step.find_all_contract_instances().into_iter() {
if !self if !self.execution_state.deployed_contracts.contains_key(&instance) {
.execution_state
.deployed_contracts
.contains_key(&instance)
{
instances_we_must_deploy.entry(instance).or_insert(false); instances_we_must_deploy.entry(instance).or_insert(false);
} }
} }
@@ -341,15 +333,11 @@ where
let mut receipts = HashMap::new(); let mut receipts = HashMap::new();
for (instance, deploy_with_constructor_arguments) in instances_we_must_deploy.into_iter() { for (instance, deploy_with_constructor_arguments) in instances_we_must_deploy.into_iter() {
let calldata = deploy_with_constructor_arguments.then_some(&step.calldata); let calldata = deploy_with_constructor_arguments.then_some(&step.calldata);
let value = deploy_with_constructor_arguments let value = deploy_with_constructor_arguments.then_some(step.value).flatten();
.then_some(step.value)
.flatten();
let caller = { let caller = {
let context = self.default_resolution_context(); let context = self.default_resolution_context();
step.caller step.caller.resolve_address(self.resolver.as_ref(), context).await?
.resolve_address(self.resolver.as_ref(), context)
.await?
}; };
if let (_, _, Some(receipt)) = self if let (_, _, Some(receipt)) = self
.get_or_deploy_contract_instance(&instance, caller, calldata, value) .get_or_deploy_contract_instance(&instance, caller, calldata, value)
@@ -379,7 +367,7 @@ where
.as_transaction(self.resolver.as_ref(), self.default_resolution_context()) .as_transaction(self.resolver.as_ref(), self.default_resolution_context())
.await?; .await?;
self.execute_transaction(tx).await self.execute_transaction(tx).await
} },
} }
} }
@@ -424,18 +412,13 @@ where
}; };
// Handling the return data variable assignments. // Handling the return data variable assignments.
for (variable_name, output_word) in assignments.return_data.iter().zip( for (variable_name, output_word) in assignments
tracing_result .return_data
.output .iter()
.as_ref() .zip(tracing_result.output.as_ref().unwrap_or_default().to_vec().chunks(32))
.unwrap_or_default() {
.to_vec()
.chunks(32),
) {
let value = U256::from_be_slice(output_word); let value = U256::from_be_slice(output_word);
self.execution_state self.execution_state.variables.insert(variable_name.clone(), value);
.variables
.insert(variable_name.clone(), value);
tracing::info!( tracing::info!(
variable_name, variable_name,
variable_value = hex::encode(value.to_be_bytes::<32>()), variable_value = hex::encode(value.to_be_bytes::<32>()),
@@ -503,9 +486,7 @@ where
// receipt and how this would impact the architecture and the possibility of us not waiting // receipt and how this would impact the architecture and the possibility of us not waiting
// for receipts in the future. // for receipts in the future.
self.watcher_tx self.watcher_tx
.send(WatcherEvent::RepetitionStartEvent { .send(WatcherEvent::RepetitionStartEvent { ignore_block_before: 0 })
ignore_block_before: 0,
})
.context("Failed to send message on the watcher's tx")?; .context("Failed to send message on the watcher's tx")?;
let res = futures::future::try_join_all(tasks) let res = futures::future::try_join_all(tasks)
@@ -533,9 +514,7 @@ where
let account = private_key.address(); let account = private_key.address();
let variable = U256::from_be_slice(account.0.as_slice()); let variable = U256::from_be_slice(account.0.as_slice());
self.execution_state self.execution_state.variables.insert(variable_name.to_string(), variable);
.variables
.insert(variable_name.to_string(), variable);
Ok(1) Ok(1)
} }
@@ -560,10 +539,8 @@ where
calldata: Option<&Calldata>, calldata: Option<&Calldata>,
value: Option<EtherValue>, value: Option<EtherValue>,
) -> Result<(Address, JsonAbi, Option<TransactionReceipt>)> { ) -> Result<(Address, JsonAbi, Option<TransactionReceipt>)> {
if let Some((_, address, abi)) = self if let Some((_, address, abi)) =
.execution_state self.execution_state.deployed_contracts.get(contract_instance)
.deployed_contracts
.get(contract_instance)
{ {
info!( info!(
@@ -603,19 +580,10 @@ where
calldata: Option<&Calldata>, calldata: Option<&Calldata>,
value: Option<EtherValue>, value: Option<EtherValue>,
) -> Result<(Address, JsonAbi, TransactionReceipt)> { ) -> Result<(Address, JsonAbi, TransactionReceipt)> {
let Some(ContractPathAndIdent { let Some(ContractPathAndIdent { contract_source_path, contract_ident }) =
contract_source_path, self.test_definition.metadata.contract_sources()?.remove(contract_instance)
contract_ident,
}) = self
.test_definition
.metadata
.contract_sources()?
.remove(contract_instance)
else { else {
anyhow::bail!( anyhow::bail!("Contract source not found for instance {:?}", contract_instance)
"Contract source not found for instance {:?}",
contract_instance
)
}; };
let Some((code, abi)) = self let Some((code, abi)) = self
@@ -625,10 +593,7 @@ where
.and_then(|source_file_contracts| source_file_contracts.get(contract_ident.as_ref())) .and_then(|source_file_contracts| source_file_contracts.get(contract_ident.as_ref()))
.cloned() .cloned()
else { else {
anyhow::bail!( anyhow::bail!("Failed to find information for contract {:?}", contract_instance)
"Failed to find information for contract {:?}",
contract_instance
)
}; };
let mut code = match alloy::hex::decode(&code) { let mut code = match alloy::hex::decode(&code) {
@@ -641,7 +606,7 @@ where
"Failed to hex-decode byte code - This could possibly mean that the bytecode requires linking" "Failed to hex-decode byte code - This could possibly mean that the bytecode requires linking"
); );
anyhow::bail!("Failed to hex-decode the byte code {}", error) anyhow::bail!("Failed to hex-decode the byte code {}", error)
} },
}; };
if let Some(calldata) = calldata { if let Some(calldata) = calldata {
@@ -665,7 +630,7 @@ where
Err(error) => { Err(error) => {
tracing::error!(?error, "Contract deployment transaction failed."); tracing::error!(?error, "Contract deployment transaction failed.");
return Err(error); return Err(error);
} },
}; };
let Some(address) = receipt.contract_address else { let Some(address) = receipt.contract_address else {
@@ -680,10 +645,9 @@ where
.reporter .reporter
.report_contract_deployed_event(contract_instance.clone(), address)?; .report_contract_deployed_event(contract_instance.clone(), address)?;
self.execution_state.deployed_contracts.insert( self.execution_state
contract_instance.clone(), .deployed_contracts
(contract_ident, address, abi.clone()), .insert(contract_instance.clone(), (contract_ident, address, abi.clone()));
);
Ok((address, abi, receipt)) Ok((address, abi, receipt))
} }
@@ -696,9 +660,7 @@ where
match step_address { match step_address {
StepAddress::Address(address) => Ok(*address), StepAddress::Address(address) => Ok(*address),
StepAddress::ResolvableAddress(resolvable) => { StepAddress::ResolvableAddress(resolvable) => {
let Some(instance) = resolvable let Some(instance) = resolvable.strip_suffix(".address").map(ContractInstance::new)
.strip_suffix(".address")
.map(ContractInstance::new)
else { else {
bail!("Not an address variable"); bail!("Not an address variable");
}; };
@@ -711,7 +673,7 @@ where
) )
.await .await
.map(|v| v.0) .map(|v| v.0)
} },
} }
} }
// endregion:Contract Deployment // endregion:Contract Deployment
@@ -757,7 +719,7 @@ where
Ok(receipt) => { Ok(receipt) => {
info!("Polling succeeded, receipt found"); info!("Polling succeeded, receipt found");
Ok(ControlFlow::Break(receipt)) Ok(ControlFlow::Break(receipt))
} },
Err(_) => Ok(ControlFlow::Continue(())), Err(_) => Ok(ControlFlow::Continue(())),
} }
} }
@@ -96,13 +96,8 @@ pub async fn handle_differential_benchmarks(
// Creating the objects that will be shared between the various runs. The cached compiler is the // Creating the objects that will be shared between the various runs. The cached compiler is the
// only one at the current moment of time that's safe to share between runs. // only one at the current moment of time that's safe to share between runs.
let cached_compiler = CachedCompiler::new( let cached_compiler = CachedCompiler::new(
context context.working_directory.as_path().join("compilation_cache"),
.working_directory context.compilation_configuration.invalidate_compilation_cache,
.as_path()
.join("compilation_cache"),
context
.compilation_configuration
.invalidate_compilation_cache,
) )
.await .await
.map(Arc::new) .map(Arc::new)
@@ -161,9 +156,7 @@ pub async fn handle_differential_benchmarks(
watcher.run(), watcher.run(),
driver.execute_all().inspect(|_| { driver.execute_all().inspect(|_| {
info!("All transactions submitted - driver completed execution"); info!("All transactions submitted - driver completed execution");
watcher_tx watcher_tx.send(WatcherEvent::AllTransactionsSubmitted).unwrap()
.send(WatcherEvent::AllTransactionsSubmitted)
.unwrap()
}), }),
) )
.await .await
@@ -10,14 +10,15 @@ use revive_dt_format::metadata::{ContractIdent, ContractInstance};
#[derive(Clone)] #[derive(Clone)]
/// The state associated with the test execution of one of the workloads. /// The state associated with the test execution of one of the workloads.
pub struct ExecutionState { pub struct ExecutionState {
/// The compiled contracts, these contracts have been compiled and have had the libraries linked /// The compiled contracts, these contracts have been compiled and have had the libraries
/// against them and therefore they're ready to be deployed on-demand. /// linked against them and therefore they're ready to be deployed on-demand.
pub compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>, pub compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
/// A map of all of the deployed contracts and information about them. /// A map of all of the deployed contracts and information about them.
pub deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>, pub deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
/// This map stores the variables used for each one of the cases contained in the metadata file. /// This map stores the variables used for each one of the cases contained in the metadata
/// file.
pub variables: HashMap<String, U256>, pub variables: HashMap<String, U256>,
} }
@@ -26,11 +27,7 @@ impl ExecutionState {
compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>, compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>, deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
) -> Self { ) -> Self {
Self { Self { compiled_contracts, deployed_contracts, variables: Default::default() }
compiled_contracts,
deployed_contracts,
variables: Default::default(),
}
} }
pub fn empty() -> Self { pub fn empty() -> Self {
@@ -33,14 +33,7 @@ impl Watcher {
blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>, blocks_stream: Pin<Box<dyn Stream<Item = MinedBlockInformation>>>,
) -> (Self, UnboundedSender<WatcherEvent>) { ) -> (Self, UnboundedSender<WatcherEvent>) {
let (tx, rx) = unbounded_channel::<WatcherEvent>(); let (tx, rx) = unbounded_channel::<WatcherEvent>();
( (Self { platform_identifier, rx, blocks_stream }, tx)
Self {
platform_identifier,
rx,
blocks_stream,
},
tx,
)
} }
#[instrument(level = "info", skip_all)] #[instrument(level = "info", skip_all)]
@@ -49,9 +42,8 @@ impl Watcher {
// the watcher of the last block number that it should ignore and what the block number is // the watcher of the last block number that it should ignore and what the block number is
// for the first important block that it should look for. // for the first important block that it should look for.
let ignore_block_before = loop { let ignore_block_before = loop {
let Some(WatcherEvent::RepetitionStartEvent { let Some(WatcherEvent::RepetitionStartEvent { ignore_block_before }) =
ignore_block_before, self.rx.recv().await
}) = self.rx.recv().await
else { else {
continue; continue;
}; };
@@ -80,19 +72,16 @@ impl Watcher {
// Subsequent repetition starts are ignored since certain workloads can // Subsequent repetition starts are ignored since certain workloads can
// contain nested repetitions and therefore there's no use in doing any // contain nested repetitions and therefore there's no use in doing any
// action if the repetitions are nested. // action if the repetitions are nested.
WatcherEvent::RepetitionStartEvent { .. } => {} WatcherEvent::RepetitionStartEvent { .. } => {},
WatcherEvent::SubmittedTransaction { transaction_hash } => { WatcherEvent::SubmittedTransaction { transaction_hash } => {
watch_for_transaction_hashes watch_for_transaction_hashes.write().await.insert(transaction_hash);
.write() },
.await
.insert(transaction_hash);
}
WatcherEvent::AllTransactionsSubmitted => { WatcherEvent::AllTransactionsSubmitted => {
*all_transactions_submitted.write().await = true; *all_transactions_submitted.write().await = true;
self.rx.close(); self.rx.close();
info!("Watcher's Events Watching Task Finished"); info!("Watcher's Events Watching Task Finished");
break; break;
} },
} }
} }
} }
@@ -111,8 +100,8 @@ impl Watcher {
continue; continue;
} }
if *all_transactions_submitted.read().await if *all_transactions_submitted.read().await &&
&& watch_for_transaction_hashes.read().await.is_empty() watch_for_transaction_hashes.read().await.is_empty()
{ {
break; break;
} }
@@ -151,15 +140,8 @@ impl Watcher {
use std::io::Write; use std::io::Write;
let mut stderr = std::io::stderr().lock(); let mut stderr = std::io::stderr().lock();
writeln!( writeln!(stderr, "Watcher information for {}", self.platform_identifier)?;
stderr, writeln!(stderr, "block_number,block_timestamp,mined_gas,block_gas_limit,tx_count")?;
"Watcher information for {}",
self.platform_identifier
)?;
writeln!(
stderr,
"block_number,block_timestamp,mined_gas,block_gas_limit,tx_count"
)?;
for block in mined_blocks_information { for block in mined_blocks_information {
writeln!( writeln!(
stderr, stderr,
+73 -99
View File
@@ -112,9 +112,7 @@ impl<'a> Driver<'a, StepsIterator> {
pub async fn execute_all(mut self) -> Result<usize> { pub async fn execute_all(mut self) -> Result<usize> {
let platform_drivers = std::mem::take(&mut self.platform_drivers); let platform_drivers = std::mem::take(&mut self.platform_drivers);
let results = futures::future::try_join_all( let results = futures::future::try_join_all(
platform_drivers platform_drivers.into_values().map(|driver| driver.execute_all()),
.into_values()
.map(|driver| driver.execute_all()),
) )
.await .await
.context("Failed to execute all of the steps on the driver")?; .context("Failed to execute all of the steps on the driver")?;
@@ -131,8 +129,8 @@ pub struct PlatformDriver<'a, I> {
/// The definition of the test that the driver is instructed to execute. /// The definition of the test that the driver is instructed to execute.
test_definition: &'a TestDefinition<'a>, test_definition: &'a TestDefinition<'a>,
/// The private key allocator used by this driver and other drivers when account allocations are /// The private key allocator used by this driver and other drivers when account allocations
/// needed. /// are needed.
private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>, private_key_allocator: Arc<Mutex<PrivateKeyAllocator>>,
/// The execution state associated with the platform. /// The execution state associated with the platform.
@@ -252,11 +250,8 @@ where
TransactionRequest::default().from(deployer_address), TransactionRequest::default().from(deployer_address),
code, code,
); );
let receipt = platform_information let receipt =
.node platform_information.node.execute_transaction(tx).await.inspect_err(|err| {
.execute_transaction(tx)
.await
.inspect_err(|err| {
error!( error!(
?err, ?err,
%library_instance, %library_instance,
@@ -265,9 +260,7 @@ where
) )
})?; })?;
let library_address = receipt let library_address = receipt.contract_address.expect("Failed to deploy the library");
.contract_address
.expect("Failed to deploy the library");
deployed_libraries.get_or_insert_default().insert( deployed_libraries.get_or_insert_default().insert(
library_instance.clone(), library_instance.clone(),
@@ -295,10 +288,7 @@ where
}) })
.context("Failed to compile the post-link contracts")?; .context("Failed to compile the post-link contracts")?;
Ok(ExecutionState::new( Ok(ExecutionState::new(compiler_output.contracts, deployed_libraries.unwrap_or_default()))
compiler_output.contracts,
deployed_libraries.unwrap_or_default(),
))
} }
// endregion:Constructors & Initialization // endregion:Constructors & Initialization
@@ -364,14 +354,31 @@ where
_: &StepPath, _: &StepPath,
step: &FunctionCallStep, step: &FunctionCallStep,
) -> Result<usize> { ) -> Result<usize> {
// Check if this step expects an exception
let expects_exception = step.expected.as_ref().map_or(false, |expected| match expected {
Expected::Expected(exp) => exp.exception,
Expected::ExpectedMany(exps) => exps.iter().any(|exp| exp.exception),
Expected::Calldata(_) => false,
});
let deployment_receipts = self let deployment_receipts = self
.handle_function_call_contract_deployment(step) .handle_function_call_contract_deployment(step)
.await .await
.context("Failed to deploy contracts for the function call step")?; .context("Failed to deploy contracts for the function call step")?;
let execution_receipt = self
.handle_function_call_execution(step, deployment_receipts) let execution_receipt =
.await match self.handle_function_call_execution(step, deployment_receipts).await {
.context("Failed to handle the function call execution")?; Ok(receipt) => Some(receipt),
Err(err) => {
if !expects_exception {
return Err(err).context("Failed to handle the function call execution");
}
tracing::info!("Transaction failed as expected");
None
},
};
if let Some(execution_receipt) = execution_receipt {
let tracing_result = self let tracing_result = self
.handle_function_call_call_frame_tracing(execution_receipt.transaction_hash) .handle_function_call_call_frame_tracing(execution_receipt.transaction_hash)
.await .await
@@ -382,6 +389,8 @@ where
self.handle_function_call_assertions(step, &execution_receipt, &tracing_result) self.handle_function_call_assertions(step, &execution_receipt, &tracing_result)
.await .await
.context("Failed to handle function call assertions")?; .context("Failed to handle function call assertions")?;
}
Ok(1) Ok(1)
} }
@@ -392,11 +401,7 @@ where
) -> Result<HashMap<ContractInstance, TransactionReceipt>> { ) -> Result<HashMap<ContractInstance, TransactionReceipt>> {
let mut instances_we_must_deploy = IndexMap::<ContractInstance, bool>::new(); let mut instances_we_must_deploy = IndexMap::<ContractInstance, bool>::new();
for instance in step.find_all_contract_instances().into_iter() { for instance in step.find_all_contract_instances().into_iter() {
if !self if !self.execution_state.deployed_contracts.contains_key(&instance) {
.execution_state
.deployed_contracts
.contains_key(&instance)
{
instances_we_must_deploy.entry(instance).or_insert(false); instances_we_must_deploy.entry(instance).or_insert(false);
} }
} }
@@ -408,16 +413,13 @@ where
let mut receipts = HashMap::new(); let mut receipts = HashMap::new();
for (instance, deploy_with_constructor_arguments) in instances_we_must_deploy.into_iter() { for (instance, deploy_with_constructor_arguments) in instances_we_must_deploy.into_iter() {
let calldata = deploy_with_constructor_arguments.then_some(&step.calldata); let calldata = deploy_with_constructor_arguments.then_some(&step.calldata);
let value = deploy_with_constructor_arguments let value = deploy_with_constructor_arguments.then_some(step.value).flatten();
.then_some(step.value)
.flatten();
let caller = { let caller = {
let context = self.default_resolution_context(); let context = self.default_resolution_context();
let resolver = self.platform_information.node.resolver().await?; let resolver = self.platform_information.node.resolver().await?;
step.caller let resolved = step.caller.resolve_address(resolver.as_ref(), context).await?;
.resolve_address(resolver.as_ref(), context) self.platform_information.node.resolve_signer_or_default(resolved)
.await?
}; };
if let (_, _, Some(receipt)) = self if let (_, _, Some(receipt)) = self
.get_or_deploy_contract_instance(&instance, caller, calldata, value) .get_or_deploy_contract_instance(&instance, caller, calldata, value)
@@ -445,18 +447,23 @@ where
.context("Failed to find deployment receipt for constructor call"), .context("Failed to find deployment receipt for constructor call"),
Method::Fallback | Method::FunctionName(_) => { Method::Fallback | Method::FunctionName(_) => {
let resolver = self.platform_information.node.resolver().await?; let resolver = self.platform_information.node.resolver().await?;
let tx = match step let mut tx = match step
.as_transaction(resolver.as_ref(), self.default_resolution_context()) .as_transaction(resolver.as_ref(), self.default_resolution_context())
.await .await
{ {
Ok(tx) => tx, Ok(tx) => tx,
Err(err) => { Err(err) => {
return Err(err); return Err(err);
} },
}; };
self.platform_information.node.execute_transaction(tx).await // Resolve the signer to ensure we use an address that has keys
if let Some(from) = tx.from {
tx.from = Some(self.platform_information.node.resolve_signer_or_default(from));
} }
self.platform_information.node.execute_transaction(tx).await
},
} }
} }
@@ -503,18 +510,13 @@ where
}; };
// Handling the return data variable assignments. // Handling the return data variable assignments.
for (variable_name, output_word) in assignments.return_data.iter().zip( for (variable_name, output_word) in assignments
tracing_result .return_data
.output .iter()
.as_ref() .zip(tracing_result.output.as_ref().unwrap_or_default().to_vec().chunks(32))
.unwrap_or_default() {
.to_vec()
.chunks(32),
) {
let value = U256::from_be_slice(output_word); let value = U256::from_be_slice(output_word);
self.execution_state self.execution_state.variables.insert(variable_name.clone(), value);
.variables
.insert(variable_name.clone(), value);
tracing::info!( tracing::info!(
variable_name, variable_name,
variable_value = hex::encode(value.to_be_bytes::<32>()), variable_value = hex::encode(value.to_be_bytes::<32>()),
@@ -534,18 +536,12 @@ where
) -> Result<()> { ) -> Result<()> {
// Resolving the `step.expected` into a series of expectations that we can then assert on. // Resolving the `step.expected` into a series of expectations that we can then assert on.
let mut expectations = match step { let mut expectations = match step {
FunctionCallStep { FunctionCallStep { expected: Some(Expected::Calldata(calldata)), .. } =>
expected: Some(Expected::Calldata(calldata)), vec![ExpectedOutput::new().with_calldata(calldata.clone())],
.. FunctionCallStep { expected: Some(Expected::Expected(expected)), .. } =>
} => vec![ExpectedOutput::new().with_calldata(calldata.clone())], vec![expected.clone()],
FunctionCallStep { FunctionCallStep { expected: Some(Expected::ExpectedMany(expected)), .. } =>
expected: Some(Expected::Expected(expected)), expected.clone(),
..
} => vec![expected.clone()],
FunctionCallStep {
expected: Some(Expected::ExpectedMany(expected)),
..
} => expected.clone(),
FunctionCallStep { expected: None, .. } => vec![ExpectedOutput::new().with_success()], FunctionCallStep { expected: None, .. } => vec![ExpectedOutput::new().with_success()],
}; };
@@ -664,11 +660,8 @@ where
} }
// Handling the topics assertion. // Handling the topics assertion.
for (expected, actual) in expected_event for (expected, actual) in
.topics expected_event.topics.as_slice().iter().zip(actual_event.topics())
.as_slice()
.iter()
.zip(actual_event.topics())
{ {
let expected = Calldata::new_compound([expected]); let expected = Calldata::new_compound([expected]);
if !expected if !expected
@@ -838,9 +831,7 @@ where
let account = private_key.address(); let account = private_key.address();
let variable = U256::from_be_slice(account.0.as_slice()); let variable = U256::from_be_slice(account.0.as_slice());
self.execution_state self.execution_state.variables.insert(variable_name.to_string(), variable);
.variables
.insert(variable_name.to_string(), variable);
Ok(1) Ok(1)
} }
@@ -863,10 +854,8 @@ where
calldata: Option<&Calldata>, calldata: Option<&Calldata>,
value: Option<EtherValue>, value: Option<EtherValue>,
) -> Result<(Address, JsonAbi, Option<TransactionReceipt>)> { ) -> Result<(Address, JsonAbi, Option<TransactionReceipt>)> {
if let Some((_, address, abi)) = self if let Some((_, address, abi)) =
.execution_state self.execution_state.deployed_contracts.get(contract_instance)
.deployed_contracts
.get(contract_instance)
{ {
info!( info!(
@@ -904,19 +893,10 @@ where
calldata: Option<&Calldata>, calldata: Option<&Calldata>,
value: Option<EtherValue>, value: Option<EtherValue>,
) -> Result<(Address, JsonAbi, TransactionReceipt)> { ) -> Result<(Address, JsonAbi, TransactionReceipt)> {
let Some(ContractPathAndIdent { let Some(ContractPathAndIdent { contract_source_path, contract_ident }) =
contract_source_path, self.test_definition.metadata.contract_sources()?.remove(contract_instance)
contract_ident,
}) = self
.test_definition
.metadata
.contract_sources()?
.remove(contract_instance)
else { else {
anyhow::bail!( anyhow::bail!("Contract source not found for instance {:?}", contract_instance)
"Contract source not found for instance {:?}",
contract_instance
)
}; };
let Some((code, abi)) = self let Some((code, abi)) = self
@@ -926,10 +906,7 @@ where
.and_then(|source_file_contracts| source_file_contracts.get(contract_ident.as_ref())) .and_then(|source_file_contracts| source_file_contracts.get(contract_ident.as_ref()))
.cloned() .cloned()
else { else {
anyhow::bail!( anyhow::bail!("Failed to find information for contract {:?}", contract_instance)
"Failed to find information for contract {:?}",
contract_instance
)
}; };
let mut code = match alloy::hex::decode(&code) { let mut code = match alloy::hex::decode(&code) {
@@ -942,18 +919,18 @@ where
"Failed to hex-decode byte code - This could possibly mean that the bytecode requires linking" "Failed to hex-decode byte code - This could possibly mean that the bytecode requires linking"
); );
anyhow::bail!("Failed to hex-decode the byte code {}", error) anyhow::bail!("Failed to hex-decode the byte code {}", error)
} },
}; };
if let Some(calldata) = calldata { if let Some(calldata) = calldata {
let resolver = self.platform_information.node.resolver().await?; let resolver = self.platform_information.node.resolver().await?;
let calldata = calldata let calldata =
.calldata(resolver.as_ref(), self.default_resolution_context()) calldata.calldata(resolver.as_ref(), self.default_resolution_context()).await?;
.await?;
code.extend(calldata); code.extend(calldata);
} }
let tx = { let tx = {
let deployer = self.platform_information.node.resolve_signer_or_default(deployer);
let tx = TransactionRequest::default().from(deployer); let tx = TransactionRequest::default().from(deployer);
let tx = match value { let tx = match value {
Some(ref value) => tx.value(value.into_inner()), Some(ref value) => tx.value(value.into_inner()),
@@ -967,7 +944,7 @@ where
Err(error) => { Err(error) => {
tracing::error!(?error, "Contract deployment transaction failed."); tracing::error!(?error, "Contract deployment transaction failed.");
return Err(error); return Err(error);
} },
}; };
let Some(address) = receipt.contract_address else { let Some(address) = receipt.contract_address else {
@@ -982,10 +959,9 @@ where
.reporter .reporter
.report_contract_deployed_event(contract_instance.clone(), address)?; .report_contract_deployed_event(contract_instance.clone(), address)?;
self.execution_state.deployed_contracts.insert( self.execution_state
contract_instance.clone(), .deployed_contracts
(contract_ident, address, abi.clone()), .insert(contract_instance.clone(), (contract_ident, address, abi.clone()));
);
Ok((address, abi, receipt)) Ok((address, abi, receipt))
} }
@@ -998,9 +974,7 @@ where
match step_address { match step_address {
StepAddress::Address(address) => Ok(*address), StepAddress::Address(address) => Ok(*address),
StepAddress::ResolvableAddress(resolvable) => { StepAddress::ResolvableAddress(resolvable) => {
let Some(instance) = resolvable let Some(instance) = resolvable.strip_suffix(".address").map(ContractInstance::new)
.strip_suffix(".address")
.map(ContractInstance::new)
else { else {
bail!("Not an address variable"); bail!("Not an address variable");
}; };
@@ -1013,7 +987,7 @@ where
) )
.await .await
.map(|v| v.0) .map(|v| v.0)
} },
} }
} }
// endregion:Contract Deployment // endregion:Contract Deployment
@@ -7,10 +7,10 @@ use std::{
time::{Duration, Instant}, time::{Duration, Instant},
}; };
use crate::Platform;
use anyhow::Context as _; use anyhow::Context as _;
use futures::{FutureExt, StreamExt}; use futures::{FutureExt, StreamExt};
use revive_dt_common::types::PrivateKeyAllocator; use revive_dt_common::types::PrivateKeyAllocator;
use revive_dt_core::Platform;
use tokio::sync::{Mutex, RwLock, Semaphore}; use tokio::sync::{Mutex, RwLock, Semaphore};
use tracing::{Instrument, error, info, info_span, instrument}; use tracing::{Instrument, error, info, info_span, instrument};
@@ -85,13 +85,8 @@ pub async fn handle_differential_tests(
// Creating everything else required for the driver to run. // Creating everything else required for the driver to run.
let cached_compiler = CachedCompiler::new( let cached_compiler = CachedCompiler::new(
context context.working_directory.as_path().join("compilation_cache"),
.working_directory context.compilation_configuration.invalidate_compilation_cache,
.as_path()
.join("compilation_cache"),
context
.compilation_configuration
.invalidate_compilation_cache,
) )
.await .await
.map(Arc::new) .map(Arc::new)
@@ -146,7 +141,7 @@ pub async fn handle_differential_tests(
drop(permit); drop(permit);
running_task_list.write().await.remove(&test_id); running_task_list.write().await.remove(&test_id);
return; return;
} },
}; };
info!("Created the driver for the test case"); info!("Created the driver for the test case");
@@ -161,7 +156,7 @@ pub async fn handle_differential_tests(
.report_test_failed_event(format!("{error:#}")) .report_test_failed_event(format!("{error:#}"))
.expect("Can't fail"); .expect("Can't fail");
error!("Test Case Failed"); error!("Test Case Failed");
} },
}; };
info!("Finished the execution of the test case"); info!("Finished the execution of the test case");
drop(permit); drop(permit);
@@ -172,20 +167,14 @@ pub async fn handle_differential_tests(
)) ))
.inspect(|_| { .inspect(|_| {
info!("Finished executing all test cases"); info!("Finished executing all test cases");
reporter_clone reporter_clone.report_completion_event().expect("Can't fail")
.report_completion_event()
.expect("Can't fail")
}); });
let cli_reporting_task = start_cli_reporting_task(reporter); let cli_reporting_task = start_cli_reporting_task(reporter);
tokio::task::spawn(async move { tokio::task::spawn(async move {
loop { loop {
let remaining_tasks = running_task_list.read().await; let remaining_tasks = running_task_list.read().await;
info!( info!(count = remaining_tasks.len(), ?remaining_tasks, "Remaining Tests");
count = remaining_tasks.len(),
?remaining_tasks,
"Remaining Tests"
);
tokio::time::sleep(Duration::from_secs(10)).await tokio::time::sleep(Duration::from_secs(10)).await
} }
}); });
@@ -234,7 +223,7 @@ async fn start_cli_reporting_task(reporter: Reporter) {
"{}{}Case Succeeded{} - Steps Executed: {}{}", "{}{}Case Succeeded{} - Steps Executed: {}{}",
GREEN, BOLD, BOLD_RESET, steps_executed, COLOR_RESET GREEN, BOLD, BOLD_RESET, steps_executed, COLOR_RESET
) )
} },
TestCaseStatus::Failed { reason } => { TestCaseStatus::Failed { reason } => {
number_of_failures += 1; number_of_failures += 1;
writeln!( writeln!(
@@ -246,7 +235,7 @@ async fn start_cli_reporting_task(reporter: Reporter) {
reason.trim(), reason.trim(),
COLOR_RESET, COLOR_RESET,
) )
} },
TestCaseStatus::Ignored { reason, .. } => writeln!( TestCaseStatus::Ignored { reason, .. } => writeln!(
buf, buf,
"{}{}Case Ignored{} - Reason: {}{}", "{}{}Case Ignored{} - Reason: {}{}",
@@ -10,14 +10,15 @@ use revive_dt_format::metadata::{ContractIdent, ContractInstance};
#[derive(Clone)] #[derive(Clone)]
/// The state associated with the test execution of one of the tests. /// The state associated with the test execution of one of the tests.
pub struct ExecutionState { pub struct ExecutionState {
/// The compiled contracts, these contracts have been compiled and have had the libraries linked /// The compiled contracts, these contracts have been compiled and have had the libraries
/// against them and therefore they're ready to be deployed on-demand. /// linked against them and therefore they're ready to be deployed on-demand.
pub compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>, pub compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
/// A map of all of the deployed contracts and information about them. /// A map of all of the deployed contracts and information about them.
pub deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>, pub deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
/// This map stores the variables used for each one of the cases contained in the metadata file. /// This map stores the variables used for each one of the cases contained in the metadata
/// file.
pub variables: HashMap<String, U256>, pub variables: HashMap<String, U256>,
} }
@@ -26,10 +27,6 @@ impl ExecutionState {
compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>, compiled_contracts: HashMap<PathBuf, HashMap<String, (String, JsonAbi)>>,
deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>, deployed_contracts: HashMap<ContractInstance, (ContractIdent, Address, JsonAbi)>,
) -> Self { ) -> Self {
Self { Self { compiled_contracts, deployed_contracts, variables: Default::default() }
compiled_contracts,
deployed_contracts,
variables: Default::default(),
}
} }
} }
+18 -29
View File
@@ -8,10 +8,10 @@ use std::{
sync::{Arc, LazyLock}, sync::{Arc, LazyLock},
}; };
use crate::Platform;
use futures::FutureExt; use futures::FutureExt;
use revive_dt_common::{iterators::FilesWithExtensionIterator, types::CompilerIdentifier}; use revive_dt_common::{iterators::FilesWithExtensionIterator, types::CompilerIdentifier};
use revive_dt_compiler::{Compiler, CompilerOutput, Mode, SolidityCompiler}; use revive_dt_compiler::{Compiler, CompilerOutput, Mode, SolidityCompiler};
use revive_dt_core::Platform;
use revive_dt_format::metadata::{ContractIdent, ContractInstance, Metadata}; use revive_dt_format::metadata::{ContractIdent, ContractInstance, Metadata};
use alloy::{hex::ToHexExt, json_abi::JsonAbi, primitives::Address}; use alloy::{hex::ToHexExt, json_abi::JsonAbi, primitives::Address};
@@ -41,10 +41,7 @@ impl<'a> CachedCompiler<'a> {
.await .await
.context("Failed to invalidate compilation cache directory")?; .context("Failed to invalidate compilation cache directory")?;
} }
Ok(Self { Ok(Self { artifacts_cache: cache, cache_key_lock: Default::default() })
artifacts_cache: cache,
cache_key_lock: Default::default(),
})
} }
/// Compiles or gets the compilation artifacts from the cache. /// Compiles or gets the compilation artifacts from the cache.
@@ -112,7 +109,7 @@ impl<'a> CachedCompiler<'a> {
.await .await
.context("Compilation callback for deployed libraries failed")? .context("Compilation callback for deployed libraries failed")?
.compiler_output .compiler_output
} },
// If no deployed libraries are specified then we can follow the cached flow and attempt // If no deployed libraries are specified then we can follow the cached flow and attempt
// to lookup the compilation artifacts in the cache. // to lookup the compilation artifacts in the cache.
None => { None => {
@@ -126,7 +123,7 @@ impl<'a> CachedCompiler<'a> {
Some(value) => { Some(value) => {
drop(read_guard); drop(read_guard);
value value
} },
None => { None => {
drop(read_guard); drop(read_guard);
self.cache_key_lock self.cache_key_lock
@@ -135,7 +132,7 @@ impl<'a> CachedCompiler<'a> {
.entry(cache_key.clone()) .entry(cache_key.clone())
.or_default() .or_default()
.clone() .clone()
} },
}; };
let _guard = mutex.lock().await; let _guard = mutex.lock().await;
@@ -163,7 +160,7 @@ impl<'a> CachedCompiler<'a> {
.expect("Can't happen"); .expect("Can't happen");
} }
cache_value.compiler_output cache_value.compiler_output
} },
None => { None => {
let compiler_output = compilation_callback() let compiler_output = compilation_callback()
.await .await
@@ -172,18 +169,16 @@ impl<'a> CachedCompiler<'a> {
self.artifacts_cache self.artifacts_cache
.insert( .insert(
&cache_key, &cache_key,
&CacheValue { &CacheValue { compiler_output: compiler_output.clone() },
compiler_output: compiler_output.clone(),
},
) )
.await .await
.context( .context(
"Failed to write the cached value of the compilation artifacts", "Failed to write the cached value of the compilation artifacts",
)?; )?;
compiler_output compiler_output
},
} }
} },
}
}; };
Ok(compiled_contracts) Ok(compiled_contracts)
@@ -225,9 +220,7 @@ async fn compile_contracts(
.flat_map(|value| value.iter()) .flat_map(|value| value.iter())
.map(|(instance, (ident, address, abi))| (instance, ident, address, abi)) .map(|(instance, (ident, address, abi))| (instance, ident, address, abi))
.flat_map(|(_, ident, address, _)| { .flat_map(|(_, ident, address, _)| {
all_sources_in_dir all_sources_in_dir.iter().map(move |path| (ident, address, path))
.iter()
.map(move |path| (ident, address, path))
}) })
.fold(compiler, |compiler, (ident, address, path)| { .fold(compiler, |compiler, (ident, address, path)| {
compiler.with_library(path, ident.as_str(), *address) compiler.with_library(path, ident.as_str(), *address)
@@ -248,7 +241,7 @@ async fn compile_contracts(
output.clone(), output.clone(),
) )
.expect("Can't happen"); .expect("Can't happen");
} },
(Ok(output), false) => { (Ok(output), false) => {
reporter reporter
.report_pre_link_contracts_compilation_succeeded_event( .report_pre_link_contracts_compilation_succeeded_event(
@@ -259,7 +252,7 @@ async fn compile_contracts(
output.clone(), output.clone(),
) )
.expect("Can't happen"); .expect("Can't happen");
} },
(Err(err), true) => { (Err(err), true) => {
reporter reporter
.report_post_link_contracts_compilation_failed_event( .report_post_link_contracts_compilation_failed_event(
@@ -269,7 +262,7 @@ async fn compile_contracts(
format!("{err:#}"), format!("{err:#}"),
) )
.expect("Can't happen"); .expect("Can't happen");
} },
(Err(err), false) => { (Err(err), false) => {
reporter reporter
.report_pre_link_contracts_compilation_failed_event( .report_pre_link_contracts_compilation_failed_event(
@@ -279,7 +272,7 @@ async fn compile_contracts(
format!("{err:#}"), format!("{err:#}"),
) )
.expect("Can't happen"); .expect("Can't happen");
} },
} }
output output
@@ -291,9 +284,7 @@ struct ArtifactsCache {
impl ArtifactsCache { impl ArtifactsCache {
pub fn new(path: impl AsRef<Path>) -> Self { pub fn new(path: impl AsRef<Path>) -> Self {
Self { Self { path: path.as_ref().to_path_buf() }
path: path.as_ref().to_path_buf(),
}
} }
#[instrument(level = "debug", skip_all, err)] #[instrument(level = "debug", skip_all, err)]
@@ -319,9 +310,7 @@ impl ArtifactsCache {
pub async fn get(&self, key: &CacheKey<'_>) -> Option<CacheValue> { pub async fn get(&self, key: &CacheKey<'_>) -> Option<CacheValue> {
let key = bson::to_vec(key).ok()?; let key = bson::to_vec(key).ok()?;
let value = cacache::read(self.path.as_path(), key.encode_hex()) let value = cacache::read(self.path.as_path(), key.encode_hex()).await.ok()?;
.await
.ok()?;
let value = bson::from_slice::<CacheValue>(&value).ok()?; let value = bson::from_slice::<CacheValue>(&value).ok()?;
Some(value) Some(value)
} }
@@ -336,13 +325,13 @@ impl ArtifactsCache {
Some(value) => { Some(value) => {
debug!("Cache hit"); debug!("Cache hit");
Ok(value) Ok(value)
} },
None => { None => {
debug!("Cache miss"); debug!("Cache miss");
let value = callback().await?; let value = callback().await?;
self.insert(key, &value).await?; self.insert(key, &value).await?;
Ok(value) Ok(value)
} },
} }
} }
} }
+4 -9
View File
@@ -2,9 +2,9 @@
use std::sync::atomic::{AtomicUsize, Ordering}; use std::sync::atomic::{AtomicUsize, Ordering};
use crate::Platform;
use anyhow::Context as _; use anyhow::Context as _;
use revive_dt_config::*; use revive_dt_config::*;
use revive_dt_core::Platform;
use revive_dt_node_interaction::EthereumNode; use revive_dt_node_interaction::EthereumNode;
/// The node pool starts one or more [Node] which then can be accessed /// The node pool starts one or more [Node] which then can be accessed
@@ -37,18 +37,13 @@ impl NodePool {
); );
} }
let pre_transactions_tasks = nodes let pre_transactions_tasks =
.iter_mut() nodes.iter_mut().map(|node| node.pre_transactions()).collect::<Vec<_>>();
.map(|node| node.pre_transactions())
.collect::<Vec<_>>();
futures::future::try_join_all(pre_transactions_tasks) futures::future::try_join_all(pre_transactions_tasks)
.await .await
.context("Failed to run the pre-transactions task")?; .context("Failed to run the pre-transactions task")?;
Ok(Self { Ok(Self { nodes, next: Default::default() })
nodes,
next: Default::default(),
})
} }
/// Get a handle to the next node. /// Get a handle to the next node.
+15 -34
View File
@@ -1,28 +1,22 @@
use std::collections::BTreeMap; use std::{borrow::Cow, collections::BTreeMap, path::Path, sync::Arc};
use std::sync::Arc;
use std::{borrow::Cow, path::Path};
use futures::{Stream, StreamExt, stream}; use futures::{Stream, StreamExt, stream};
use indexmap::{IndexMap, indexmap}; use indexmap::{IndexMap, indexmap};
use revive_dt_common::iterators::EitherIter; use revive_dt_common::{iterators::EitherIter, types::PlatformIdentifier};
use revive_dt_common::types::PlatformIdentifier;
use revive_dt_config::Context; use revive_dt_config::Context;
use revive_dt_format::mode::ParsedMode; use revive_dt_format::mode::ParsedMode;
use serde_json::{Value, json}; use serde_json::{Value, json};
use revive_dt_compiler::Mode; use revive_dt_compiler::{Mode, SolidityCompiler};
use revive_dt_compiler::SolidityCompiler;
use revive_dt_format::{ use revive_dt_format::{
case::{Case, CaseIdx}, case::{Case, CaseIdx},
metadata::MetadataFile, metadata::MetadataFile,
}; };
use revive_dt_node_interaction::EthereumNode; use revive_dt_node_interaction::EthereumNode;
use revive_dt_report::{ExecutionSpecificReporter, Reporter}; use revive_dt_report::{ExecutionSpecificReporter, Reporter, TestSpecificReporter, TestSpecifier};
use revive_dt_report::{TestSpecificReporter, TestSpecifier};
use tracing::{debug, error, info}; use tracing::{debug, error, info};
use crate::Platform; use crate::{Platform, helpers::NodePool};
use crate::helpers::NodePool;
pub async fn create_test_definitions_stream<'a>( pub async fn create_test_definitions_stream<'a>(
// This is only required for creating the compiler objects and is not used anywhere else in the // This is only required for creating the compiler objects and is not used anywhere else in the
@@ -69,18 +63,15 @@ pub async fn create_test_definitions_stream<'a>(
) )
}) })
}) })
// Inform the reporter of each one of the test cases that were discovered which we expect to // Inform the reporter of each one of the test cases that were discovered which we
// run. // expect to run.
.inspect(|(_, _, _, _, reporter)| { .inspect(|(_, _, _, _, reporter)| {
reporter reporter.report_test_case_discovery_event().expect("Can't fail");
.report_test_case_discovery_event()
.expect("Can't fail");
}), }),
) )
// Creating the Test Definition objects from all of the various objects we have and creating // Creating the Test Definition objects from all of the various objects we have and creating
// their required dependencies (e.g., compiler). // their required dependencies (e.g., compiler).
.filter_map( .filter_map(move |(metadata_file, case_idx, case, mode, reporter)| async move {
move |(metadata_file, case_idx, case, mode, reporter)| async move {
let mut platforms = BTreeMap::new(); let mut platforms = BTreeMap::new();
for (platform, node_pool) in platforms_and_nodes.values() { for (platform, node_pool) in platforms_and_nodes.values() {
let node = node_pool.round_robbin(); let node = node_pool.round_robbin();
@@ -109,12 +100,7 @@ pub async fn create_test_definitions_stream<'a>(
platforms.insert( platforms.insert(
platform.platform_identifier(), platform.platform_identifier(),
TestPlatformInformation { TestPlatformInformation { platform: *platform, node, compiler, reporter },
platform: *platform,
node,
compiler,
reporter,
},
); );
} }
@@ -136,8 +122,7 @@ pub async fn create_test_definitions_stream<'a>(
/* Reporter */ /* Reporter */
reporter, reporter,
}) })
}, })
)
// Filter out the test cases which are incompatible or that can't run in the current setup. // Filter out the test cases which are incompatible or that can't run in the current setup.
.filter_map(move |test| async move { .filter_map(move |test| async move {
match test.check_compatibility() { match test.check_compatibility() {
@@ -162,7 +147,7 @@ pub async fn create_test_definitions_stream<'a>(
) )
.expect("Can't fail"); .expect("Can't fail");
None None
} },
} }
}) })
.inspect(|test| { .inspect(|test| {
@@ -236,9 +221,8 @@ impl<'a> TestDefinition<'a> {
for (_, platform_information) in self.platforms.iter() { for (_, platform_information) in self.platforms.iter() {
let is_allowed_for_platform = match self.metadata.targets.as_ref() { let is_allowed_for_platform = match self.metadata.targets.as_ref() {
None => true, None => true,
Some(required_vm_identifiers) => { Some(required_vm_identifiers) =>
required_vm_identifiers.contains(&platform_information.platform.vm_identifier()) required_vm_identifiers.contains(&platform_information.platform.vm_identifier()),
}
}; };
is_allowed &= is_allowed_for_platform; is_allowed &= is_allowed_for_platform;
error_map.insert( error_map.insert(
@@ -280,10 +264,7 @@ impl<'a> TestDefinition<'a> {
if is_allowed { if is_allowed {
Ok(()) Ok(())
} else { } else {
Err(( Err(("EVM version is incompatible for the platforms specified", error_map))
"EVM version is incompatible for the platforms specified",
error_map,
))
} }
} }
+48 -74
View File
@@ -3,6 +3,9 @@
//! This crate defines the testing configuration and //! This crate defines the testing configuration and
//! provides a helper utility to execute tests. //! provides a helper utility to execute tests.
pub mod differential_tests;
pub mod helpers;
use std::{ use std::{
pin::Pin, pin::Pin,
thread::{self, JoinHandle}, thread::{self, JoinHandle},
@@ -14,13 +17,17 @@ use revive_dt_common::types::*;
use revive_dt_compiler::{SolidityCompiler, revive_resolc::Resolc, solc::Solc}; use revive_dt_compiler::{SolidityCompiler, revive_resolc::Resolc, solc::Solc};
use revive_dt_config::*; use revive_dt_config::*;
use revive_dt_node::{ use revive_dt_node::{
Node, node_implementations::geth::GethNode, Node,
node_implementations::lighthouse_geth::LighthouseGethNode, node_implementations::{
node_implementations::substrate::SubstrateNode, node_implementations::zombienet::ZombieNode, geth::GethNode, lighthouse_geth::LighthouseGethNode, substrate::SubstrateNode,
zombienet::ZombieNode,
},
}; };
use revive_dt_node_interaction::EthereumNode; use revive_dt_node_interaction::EthereumNode;
use tracing::info; use tracing::info;
pub use helpers::CachedCompiler;
/// A trait that describes the interface for the platforms that are supported by the tool. /// A trait that describes the interface for the platforms that are supported by the tool.
#[allow(clippy::type_complexity)] #[allow(clippy::type_complexity)]
pub trait Platform { pub trait Platform {
@@ -30,11 +37,7 @@ pub trait Platform {
/// Returns a full identifier for the platform. /// Returns a full identifier for the platform.
fn full_identifier(&self) -> (NodeIdentifier, VmIdentifier, CompilerIdentifier) { fn full_identifier(&self) -> (NodeIdentifier, VmIdentifier, CompilerIdentifier) {
( (self.node_identifier(), self.vm_identifier(), self.compiler_identifier())
self.node_identifier(),
self.vm_identifier(),
self.compiler_identifier(),
)
} }
/// Returns the identifier of the node used. /// Returns the identifier of the node used.
@@ -176,9 +179,7 @@ impl Platform for KitchensinkPolkavmResolcPlatform {
context: Context, context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> { ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context); let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context) let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context).path.clone();
.path
.clone();
let genesis = genesis_configuration.genesis()?.clone(); let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || { Ok(thread::spawn(move || {
let node = SubstrateNode::new( let node = SubstrateNode::new(
@@ -228,9 +229,7 @@ impl Platform for KitchensinkRevmSolcPlatform {
context: Context, context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> { ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context); let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context) let kitchensink_path = AsRef::<KitchensinkConfiguration>::as_ref(&context).path.clone();
.path
.clone();
let genesis = genesis_configuration.genesis()?.clone(); let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || { Ok(thread::spawn(move || {
let node = SubstrateNode::new( let node = SubstrateNode::new(
@@ -280,9 +279,8 @@ impl Platform for ReviveDevNodePolkavmResolcPlatform {
context: Context, context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> { ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context); let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context) let revive_dev_node_path =
.path AsRef::<ReviveDevNodeConfiguration>::as_ref(&context).path.clone();
.clone();
let genesis = genesis_configuration.genesis()?.clone(); let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || { Ok(thread::spawn(move || {
let node = SubstrateNode::new( let node = SubstrateNode::new(
@@ -332,9 +330,8 @@ impl Platform for ReviveDevNodeRevmSolcPlatform {
context: Context, context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> { ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context); let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let revive_dev_node_path = AsRef::<ReviveDevNodeConfiguration>::as_ref(&context) let revive_dev_node_path =
.path AsRef::<ReviveDevNodeConfiguration>::as_ref(&context).path.clone();
.clone();
let genesis = genesis_configuration.genesis()?.clone(); let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || { Ok(thread::spawn(move || {
let node = SubstrateNode::new( let node = SubstrateNode::new(
@@ -384,9 +381,8 @@ impl Platform for ZombienetPolkavmResolcPlatform {
context: Context, context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> { ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context); let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context) let polkadot_parachain_path =
.path AsRef::<PolkadotParachainConfiguration>::as_ref(&context).path.clone();
.clone();
let genesis = genesis_configuration.genesis()?.clone(); let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || { Ok(thread::spawn(move || {
let node = ZombieNode::new(polkadot_parachain_path, context); let node = ZombieNode::new(polkadot_parachain_path, context);
@@ -432,9 +428,8 @@ impl Platform for ZombienetRevmSolcPlatform {
context: Context, context: Context,
) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> { ) -> anyhow::Result<JoinHandle<anyhow::Result<Box<dyn EthereumNode + Send + Sync>>>> {
let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context); let genesis_configuration = AsRef::<GenesisConfiguration>::as_ref(&context);
let polkadot_parachain_path = AsRef::<PolkadotParachainConfiguration>::as_ref(&context) let polkadot_parachain_path =
.path AsRef::<PolkadotParachainConfiguration>::as_ref(&context).path.clone();
.clone();
let genesis = genesis_configuration.genesis()?.clone(); let genesis = genesis_configuration.genesis()?.clone();
Ok(thread::spawn(move || { Ok(thread::spawn(move || {
let node = ZombieNode::new(polkadot_parachain_path, context); let node = ZombieNode::new(polkadot_parachain_path, context);
@@ -459,24 +454,18 @@ impl From<PlatformIdentifier> for Box<dyn Platform> {
fn from(value: PlatformIdentifier) -> Self { fn from(value: PlatformIdentifier) -> Self {
match value { match value {
PlatformIdentifier::GethEvmSolc => Box::new(GethEvmSolcPlatform) as Box<_>, PlatformIdentifier::GethEvmSolc => Box::new(GethEvmSolcPlatform) as Box<_>,
PlatformIdentifier::LighthouseGethEvmSolc => { PlatformIdentifier::LighthouseGethEvmSolc =>
Box::new(LighthouseGethEvmSolcPlatform) as Box<_> Box::new(LighthouseGethEvmSolcPlatform) as Box<_>,
} PlatformIdentifier::KitchensinkPolkavmResolc =>
PlatformIdentifier::KitchensinkPolkavmResolc => { Box::new(KitchensinkPolkavmResolcPlatform) as Box<_>,
Box::new(KitchensinkPolkavmResolcPlatform) as Box<_> PlatformIdentifier::KitchensinkRevmSolc =>
} Box::new(KitchensinkRevmSolcPlatform) as Box<_>,
PlatformIdentifier::KitchensinkRevmSolc => { PlatformIdentifier::ReviveDevNodePolkavmResolc =>
Box::new(KitchensinkRevmSolcPlatform) as Box<_> Box::new(ReviveDevNodePolkavmResolcPlatform) as Box<_>,
} PlatformIdentifier::ReviveDevNodeRevmSolc =>
PlatformIdentifier::ReviveDevNodePolkavmResolc => { Box::new(ReviveDevNodeRevmSolcPlatform) as Box<_>,
Box::new(ReviveDevNodePolkavmResolcPlatform) as Box<_> PlatformIdentifier::ZombienetPolkavmResolc =>
} Box::new(ZombienetPolkavmResolcPlatform) as Box<_>,
PlatformIdentifier::ReviveDevNodeRevmSolc => {
Box::new(ReviveDevNodeRevmSolcPlatform) as Box<_>
}
PlatformIdentifier::ZombienetPolkavmResolc => {
Box::new(ZombienetPolkavmResolcPlatform) as Box<_>
}
PlatformIdentifier::ZombienetRevmSolc => Box::new(ZombienetRevmSolcPlatform) as Box<_>, PlatformIdentifier::ZombienetRevmSolc => Box::new(ZombienetRevmSolcPlatform) as Box<_>,
} }
} }
@@ -486,24 +475,18 @@ impl From<PlatformIdentifier> for &dyn Platform {
fn from(value: PlatformIdentifier) -> Self { fn from(value: PlatformIdentifier) -> Self {
match value { match value {
PlatformIdentifier::GethEvmSolc => &GethEvmSolcPlatform as &dyn Platform, PlatformIdentifier::GethEvmSolc => &GethEvmSolcPlatform as &dyn Platform,
PlatformIdentifier::LighthouseGethEvmSolc => { PlatformIdentifier::LighthouseGethEvmSolc =>
&LighthouseGethEvmSolcPlatform as &dyn Platform &LighthouseGethEvmSolcPlatform as &dyn Platform,
} PlatformIdentifier::KitchensinkPolkavmResolc =>
PlatformIdentifier::KitchensinkPolkavmResolc => { &KitchensinkPolkavmResolcPlatform as &dyn Platform,
&KitchensinkPolkavmResolcPlatform as &dyn Platform PlatformIdentifier::KitchensinkRevmSolc =>
} &KitchensinkRevmSolcPlatform as &dyn Platform,
PlatformIdentifier::KitchensinkRevmSolc => { PlatformIdentifier::ReviveDevNodePolkavmResolc =>
&KitchensinkRevmSolcPlatform as &dyn Platform &ReviveDevNodePolkavmResolcPlatform as &dyn Platform,
} PlatformIdentifier::ReviveDevNodeRevmSolc =>
PlatformIdentifier::ReviveDevNodePolkavmResolc => { &ReviveDevNodeRevmSolcPlatform as &dyn Platform,
&ReviveDevNodePolkavmResolcPlatform as &dyn Platform PlatformIdentifier::ZombienetPolkavmResolc =>
} &ZombienetPolkavmResolcPlatform as &dyn Platform,
PlatformIdentifier::ReviveDevNodeRevmSolc => {
&ReviveDevNodeRevmSolcPlatform as &dyn Platform
}
PlatformIdentifier::ZombienetPolkavmResolc => {
&ZombienetPolkavmResolcPlatform as &dyn Platform
}
PlatformIdentifier::ZombienetRevmSolc => &ZombienetRevmSolcPlatform as &dyn Platform, PlatformIdentifier::ZombienetRevmSolc => &ZombienetRevmSolcPlatform as &dyn Platform,
} }
} }
@@ -513,17 +496,8 @@ fn spawn_node<T: Node + EthereumNode + Send + Sync>(
mut node: T, mut node: T,
genesis: Genesis, genesis: Genesis,
) -> anyhow::Result<T> { ) -> anyhow::Result<T> {
info!( info!(id = node.id(), connection_string = node.connection_string(), "Spawning node");
id = node.id(), node.spawn(genesis).context("Failed to spawn node process")?;
connection_string = node.connection_string(), info!(id = node.id(), connection_string = node.connection_string(), "Spawned node");
"Spawning node"
);
node.spawn(genesis)
.context("Failed to spawn node process")?;
info!(
id = node.id(),
connection_string = node.connection_string(),
"Spawned node"
);
Ok(node) Ok(node)
} }
+1 -1
View File
@@ -76,6 +76,6 @@ fn main() -> anyhow::Result<()> {
let schema = schema_for!(Metadata); let schema = schema_for!(Metadata);
println!("{}", serde_json::to_string_pretty(&schema).unwrap()); println!("{}", serde_json::to_string_pretty(&schema).unwrap());
Ok(()) Ok(())
} },
} }
} }
+4 -10
View File
@@ -45,8 +45,8 @@ pub struct Case {
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub expected: Option<Expected>, pub expected: Option<Expected>,
/// An optional boolean which defines if the case as a whole should be ignored. If null then the /// An optional boolean which defines if the case as a whole should be ignored. If null then
/// case will not be ignored. /// the case will not be ignored.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub ignore: Option<bool>, pub ignore: Option<bool>,
} }
@@ -54,11 +54,7 @@ pub struct Case {
impl Case { impl Case {
pub fn steps_iterator(&self) -> impl Iterator<Item = Step> { pub fn steps_iterator(&self) -> impl Iterator<Item = Step> {
let steps_len = self.steps.len(); let steps_len = self.steps.len();
self.steps self.steps.clone().into_iter().enumerate().map(move |(idx, mut step)| {
.clone()
.into_iter()
.enumerate()
.map(move |(idx, mut step)| {
let Step::FunctionCall(ref mut input) = step else { let Step::FunctionCall(ref mut input) = step else {
return step; return step;
}; };
@@ -84,9 +80,7 @@ impl Case {
&self, &self,
default_repeat_count: usize, default_repeat_count: usize,
) -> Box<dyn Iterator<Item = Step> + '_> { ) -> Box<dyn Iterator<Item = Step> + '_> {
let contains_repeat = self let contains_repeat = self.steps_iterator().any(|step| matches!(&step, Step::Repeat(..)));
.steps_iterator()
.any(|step| matches!(&step, Step::Repeat(..)));
if contains_repeat { if contains_repeat {
Box::new(self.steps_iterator()) as Box<_> Box::new(self.steps_iterator()) as Box<_>
} else { } else {
+9 -17
View File
@@ -86,11 +86,7 @@ impl Corpus {
.collect::<Vec<_>>(); .collect::<Vec<_>>();
tests.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path)); tests.sort_by(|a, b| a.metadata_file_path.cmp(&b.metadata_file_path));
tests.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path); tests.dedup_by(|a, b| a.metadata_file_path == b.metadata_file_path);
info!( info!(len = tests.len(), corpus_name = self.name(), "Found tests in Corpus");
len = tests.len(),
corpus_name = self.name(),
"Found tests in Corpus"
);
tests tests
} }
@@ -102,23 +98,19 @@ impl Corpus {
pub fn paths_iter(&self) -> impl Iterator<Item = &Path> { pub fn paths_iter(&self) -> impl Iterator<Item = &Path> {
match self { match self {
Corpus::SinglePath { path, .. } => { Corpus::SinglePath { path, .. } =>
Box::new(std::iter::once(path.as_path())) as Box<dyn Iterator<Item = _>> Box::new(std::iter::once(path.as_path())) as Box<dyn Iterator<Item = _>>,
} Corpus::MultiplePaths { paths, .. } =>
Corpus::MultiplePaths { paths, .. } => { Box::new(paths.iter().map(|path| path.as_path())) as Box<dyn Iterator<Item = _>>,
Box::new(paths.iter().map(|path| path.as_path())) as Box<dyn Iterator<Item = _>>
}
} }
} }
pub fn paths_iter_mut(&mut self) -> impl Iterator<Item = &mut PathBuf> { pub fn paths_iter_mut(&mut self) -> impl Iterator<Item = &mut PathBuf> {
match self { match self {
Corpus::SinglePath { path, .. } => { Corpus::SinglePath { path, .. } =>
Box::new(std::iter::once(path)) as Box<dyn Iterator<Item = _>> Box::new(std::iter::once(path)) as Box<dyn Iterator<Item = _>>,
} Corpus::MultiplePaths { paths, .. } =>
Corpus::MultiplePaths { paths, .. } => { Box::new(paths.iter_mut()) as Box<dyn Iterator<Item = _>>,
Box::new(paths.iter_mut()) as Box<dyn Iterator<Item = _>>
}
} }
} }
+32 -76
View File
@@ -31,8 +31,8 @@ pub struct MetadataFile {
/// The path of the metadata file. This will either be a JSON or solidity file. /// The path of the metadata file. This will either be a JSON or solidity file.
pub metadata_file_path: PathBuf, pub metadata_file_path: PathBuf,
/// This is the path contained within the corpus file. This could either be the path of some dir /// This is the path contained within the corpus file. This could either be the path of some
/// or could be the actual metadata file path. /// dir or could be the actual metadata file path.
pub corpus_file_path: PathBuf, pub corpus_file_path: PathBuf,
/// The metadata contained within the file. /// The metadata contained within the file.
@@ -44,9 +44,7 @@ impl MetadataFile {
if self.corpus_file_path.is_file() { if self.corpus_file_path.is_file() {
&self.corpus_file_path &self.corpus_file_path
} else { } else {
self.metadata_file_path self.metadata_file_path.strip_prefix(&self.corpus_file_path).unwrap()
.strip_prefix(&self.corpus_file_path)
.unwrap()
} }
} }
} }
@@ -69,13 +67,13 @@ impl Deref for MetadataFile {
/// of steps and assertions that should be performed as part of the test case. /// of steps and assertions that should be performed as part of the test case.
#[derive(Debug, Default, Serialize, Deserialize, JsonSchema, Clone, Eq, PartialEq)] #[derive(Debug, Default, Serialize, Deserialize, JsonSchema, Clone, Eq, PartialEq)]
pub struct Metadata { pub struct Metadata {
/// This is an optional comment on the metadata file which has no impact on the execution in any /// This is an optional comment on the metadata file which has no impact on the execution in
/// way. /// any way.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub comment: Option<String>, pub comment: Option<String>,
/// An optional boolean which defines if the metadata file as a whole should be ignored. If null /// An optional boolean which defines if the metadata file as a whole should be ignored. If
/// then the metadata file will not be ignored. /// null then the metadata file will not be ignored.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub ignore: Option<bool>, pub ignore: Option<bool>,
@@ -94,8 +92,8 @@ pub struct Metadata {
/// This is a map where the key is the name of the contract instance and the value is the /// This is a map where the key is the name of the contract instance and the value is the
/// contract's path and ident in the file. /// contract's path and ident in the file.
/// ///
/// If any contract is to be used by the test then it must be included in here first so that the /// If any contract is to be used by the test then it must be included in here first so that
/// framework is aware of its path, compiles it, and prepares it. /// the framework is aware of its path, compiles it, and prepares it.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub contracts: Option<BTreeMap<ContractInstance, ContractPathAndIdent>>, pub contracts: Option<BTreeMap<ContractInstance, ContractPathAndIdent>>,
@@ -123,8 +121,9 @@ pub struct Metadata {
pub required_evm_version: Option<EvmVersionRequirement>, pub required_evm_version: Option<EvmVersionRequirement>,
/// A set of compilation directives that will be passed to the compiler whenever the contracts /// A set of compilation directives that will be passed to the compiler whenever the contracts
/// for the test are being compiled. Note that this differs from the [`Mode`]s in that a [`Mode`] /// for the test are being compiled. Note that this differs from the [`Mode`]s in that a
/// is just a filter for when a test can run whereas this is an instruction to the compiler. /// [`Mode`] is just a filter for when a test can run whereas this is an instruction to the
/// compiler.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub compiler_directives: Option<CompilationDirectives>, pub compiler_directives: Option<CompilationDirectives>,
} }
@@ -158,19 +157,10 @@ impl Metadata {
return Ok(sources); return Ok(sources);
}; };
for ( for (alias, ContractPathAndIdent { contract_source_path, contract_ident }) in contracts {
alias,
ContractPathAndIdent {
contract_source_path,
contract_ident,
},
) in contracts
{
let alias = alias.clone(); let alias = alias.clone();
let absolute_path = directory let absolute_path =
.join(contract_source_path) directory.join(contract_source_path).canonicalize().map_err(|error| {
.canonicalize()
.map_err(|error| {
anyhow::anyhow!( anyhow::anyhow!(
"Failed to canonicalize contract source path '{}': {error}", "Failed to canonicalize contract source path '{}': {error}",
directory.join(contract_source_path).display() directory.join(contract_source_path).display()
@@ -180,10 +170,7 @@ impl Metadata {
sources.insert( sources.insert(
alias, alias,
ContractPathAndIdent { ContractPathAndIdent { contract_source_path: absolute_path, contract_ident },
contract_source_path: absolute_path,
contract_ident,
},
); );
} }
@@ -221,11 +208,11 @@ impl Metadata {
Ok(mut metadata) => { Ok(mut metadata) => {
metadata.file_path = Some(path.to_path_buf()); metadata.file_path = Some(path.to_path_buf());
Some(metadata) Some(metadata)
} },
Err(err) => { Err(err) => {
error!(path = %path.display(), %err, "Deserialization of metadata failed"); error!(path = %path.display(), %err, "Deserialization of metadata failed");
None None
} },
} }
} }
@@ -258,11 +245,11 @@ impl Metadata {
.into(), .into(),
); );
Some(metadata) Some(metadata)
} },
Err(err) => { Err(err) => {
error!(path = %path.display(), %err, "Failed to deserialize metadata"); error!(path = %path.display(), %err, "Failed to deserialize metadata");
None None
} },
} }
} }
@@ -326,7 +313,8 @@ define_wrapper_type!(
)] )]
#[serde(try_from = "String", into = "String")] #[serde(try_from = "String", into = "String")]
pub struct ContractPathAndIdent { pub struct ContractPathAndIdent {
/// The path of the contract source code relative to the directory containing the metadata file. /// The path of the contract source code relative to the directory containing the metadata
/// file.
pub contract_source_path: PathBuf, pub contract_source_path: PathBuf,
/// The identifier of the contract. /// The identifier of the contract.
@@ -335,12 +323,7 @@ pub struct ContractPathAndIdent {
impl Display for ContractPathAndIdent { impl Display for ContractPathAndIdent {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!( write!(f, "{}:{}", self.contract_source_path.display(), self.contract_ident.as_ref())
f,
"{}:{}",
self.contract_source_path.display(),
self.contract_ident.as_ref()
)
} }
} }
@@ -360,7 +343,7 @@ impl FromStr for ContractPathAndIdent {
Some(ref mut path) => { Some(ref mut path) => {
path.push(':'); path.push(':');
path.push_str(next_item); path.push_str(next_item);
} },
None => path = Some(next_item.to_owned()), None => path = Some(next_item.to_owned()),
} }
} else { } else {
@@ -380,7 +363,7 @@ impl FromStr for ContractPathAndIdent {
contract_source_path: PathBuf::from(path), contract_source_path: PathBuf::from(path),
contract_ident: ContractIdent::new(identifier), contract_ident: ContractIdent::new(identifier),
}) })
} },
(None, None) => anyhow::bail!("Failed to find the path and identifier"), (None, None) => anyhow::bail!("Failed to find the path and identifier"),
} }
} }
@@ -418,43 +401,23 @@ pub struct EvmVersionRequirement {
impl EvmVersionRequirement { impl EvmVersionRequirement {
pub fn new_greater_than_or_equals(version: EVMVersion) -> Self { pub fn new_greater_than_or_equals(version: EVMVersion) -> Self {
Self { Self { ordering: Ordering::Greater, or_equal: true, evm_version: version }
ordering: Ordering::Greater,
or_equal: true,
evm_version: version,
}
} }
pub fn new_greater_than(version: EVMVersion) -> Self { pub fn new_greater_than(version: EVMVersion) -> Self {
Self { Self { ordering: Ordering::Greater, or_equal: false, evm_version: version }
ordering: Ordering::Greater,
or_equal: false,
evm_version: version,
}
} }
pub fn new_equals(version: EVMVersion) -> Self { pub fn new_equals(version: EVMVersion) -> Self {
Self { Self { ordering: Ordering::Equal, or_equal: false, evm_version: version }
ordering: Ordering::Equal,
or_equal: false,
evm_version: version,
}
} }
pub fn new_less_than(version: EVMVersion) -> Self { pub fn new_less_than(version: EVMVersion) -> Self {
Self { Self { ordering: Ordering::Less, or_equal: false, evm_version: version }
ordering: Ordering::Less,
or_equal: false,
evm_version: version,
}
} }
pub fn new_less_than_or_equals(version: EVMVersion) -> Self { pub fn new_less_than_or_equals(version: EVMVersion) -> Self {
Self { Self { ordering: Ordering::Less, or_equal: true, evm_version: version }
ordering: Ordering::Less,
or_equal: true,
evm_version: version,
}
} }
pub fn matches(&self, other: &EVMVersion) -> bool { pub fn matches(&self, other: &EVMVersion) -> bool {
@@ -465,11 +428,7 @@ impl EvmVersionRequirement {
impl Display for EvmVersionRequirement { impl Display for EvmVersionRequirement {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let Self { let Self { ordering, or_equal, evm_version } = self;
ordering,
or_equal,
evm_version,
} = self;
match ordering { match ordering {
Ordering::Less => write!(f, "<")?, Ordering::Less => write!(f, "<")?,
Ordering::Equal => write!(f, "=")?, Ordering::Equal => write!(f, "=")?,
@@ -596,10 +555,7 @@ mod test {
// Assert // Assert
let identifier = identifier.expect("Failed to parse"); let identifier = identifier.expect("Failed to parse");
assert_eq!( assert_eq!(identifier.contract_source_path.display().to_string(), "ERC20/ERC20.sol");
identifier.contract_source_path.display().to_string(),
"ERC20/ERC20.sol"
);
assert_eq!(identifier.contract_ident, "ERC20".to_owned().into()); assert_eq!(identifier.contract_ident, "ERC20".to_owned().into());
// Act // Act
+11 -26
View File
@@ -1,13 +1,12 @@
use anyhow::Context as _; use anyhow::Context as _;
use regex::Regex; use regex::Regex;
use revive_dt_common::iterators::EitherIter; use revive_dt_common::{
use revive_dt_common::types::{Mode, ModeOptimizerSetting, ModePipeline}; iterators::EitherIter,
types::{Mode, ModeOptimizerSetting, ModePipeline},
};
use schemars::JsonSchema; use schemars::JsonSchema;
use serde::{Deserialize, Serialize}; use serde::{Deserialize, Serialize};
use std::collections::HashSet; use std::{collections::HashSet, fmt::Display, str::FromStr, sync::LazyLock};
use std::fmt::Display;
use std::str::FromStr;
use std::sync::LazyLock;
/// This represents a mode that has been parsed from test metadata. /// This represents a mode that has been parsed from test metadata.
/// ///
@@ -78,12 +77,7 @@ impl FromStr for ParsedMode {
None => None, None => None,
}; };
Ok(ParsedMode { Ok(ParsedMode { pipeline, optimize_flag, optimize_setting, version })
pipeline,
optimize_flag,
optimize_setting,
version,
})
} }
} }
@@ -139,13 +133,9 @@ impl ParsedMode {
|p| EitherIter::B(std::iter::once(*p)), |p| EitherIter::B(std::iter::once(*p)),
); );
let optimize_flag_setting = self.optimize_flag.map(|flag| { let optimize_flag_setting = self
if flag { .optimize_flag
ModeOptimizerSetting::M3 .map(|flag| if flag { ModeOptimizerSetting::M3 } else { ModeOptimizerSetting::M0 });
} else {
ModeOptimizerSetting::M0
}
});
let optimize_flag_iter = match optimize_flag_setting { let optimize_flag_iter = match optimize_flag_setting {
Some(setting) => EitherIter::A(std::iter::once(setting)), Some(setting) => EitherIter::A(std::iter::once(setting)),
@@ -158,9 +148,7 @@ impl ParsedMode {
); );
pipeline_iter.flat_map(move |pipeline| { pipeline_iter.flat_map(move |pipeline| {
optimize_settings_iter optimize_settings_iter.clone().map(move |optimize_setting| Mode {
.clone()
.map(move |optimize_setting| Mode {
pipeline, pipeline,
optimize_setting, optimize_setting,
version: self.version.clone(), version: self.version.clone(),
@@ -236,10 +224,7 @@ mod tests {
("Y+", vec!["Y M3"]), ("Y+", vec!["Y M3"]),
("Y-", vec!["Y M0"]), ("Y-", vec!["Y M0"]),
("Y <=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8"]), ("Y <=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8"]),
( ("<=0.8", vec!["Y M0 <=0.8", "Y M3 <=0.8", "E M0 <=0.8", "E M3 <=0.8"]),
"<=0.8",
vec!["Y M0 <=0.8", "Y M3 <=0.8", "E M0 <=0.8", "E M3 <=0.8"],
),
]; ];
for (actual, expected) in strings { for (actual, expected) in strings {
+57 -126
View File
@@ -1,11 +1,10 @@
use std::{collections::HashMap, fmt::Display, str::FromStr}; use std::{collections::HashMap, fmt::Display, str::FromStr};
use alloy::primitives::{FixedBytes, utils::parse_units};
use alloy::{ use alloy::{
eips::BlockNumberOrTag, eips::BlockNumberOrTag,
json_abi::Function, json_abi::Function,
network::TransactionBuilder, network::TransactionBuilder,
primitives::{Address, Bytes, U256}, primitives::{Address, Bytes, FixedBytes, U256, utils::parse_units},
rpc::types::TransactionRequest, rpc::types::TransactionRequest,
}; };
use anyhow::Context as _; use anyhow::Context as _;
@@ -17,8 +16,10 @@ use serde::{Deserialize, Serialize};
use revive_dt_common::macros::define_wrapper_type; use revive_dt_common::macros::define_wrapper_type;
use tracing::{Instrument, info_span, instrument}; use tracing::{Instrument, info_span, instrument};
use crate::traits::ResolverApi; use crate::{
use crate::{metadata::ContractInstance, traits::ResolutionContext}; metadata::ContractInstance,
traits::{ResolutionContext, ResolverApi},
};
/// A test step. /// A test step.
/// ///
@@ -77,12 +78,7 @@ impl StepPath {
impl Display for StepPath { impl Display for StepPath {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.0 self.0.iter().map(|idx| idx.to_string()).collect::<Vec<_>>().join(".").fmt(f)
.iter()
.map(|idx| idx.to_string())
.collect::<Vec<_>>()
.join(".")
.fmt(f)
} }
} }
@@ -147,8 +143,8 @@ pub struct FunctionCallStep {
#[schemars(skip)] #[schemars(skip)]
pub storage: Option<HashMap<String, Calldata>>, pub storage: Option<HashMap<String, Calldata>>,
/// Variable assignment to perform in the framework allowing us to reference them again later on /// Variable assignment to perform in the framework allowing us to reference them again later
/// during the execution. /// on during the execution.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub variable_assignments: Option<VariableAssignments>, pub variable_assignments: Option<VariableAssignments>,
} }
@@ -455,9 +451,7 @@ impl StepAddress {
impl FunctionCallStep { impl FunctionCallStep {
pub const fn default_caller_address() -> Address { pub const fn default_caller_address() -> Address {
Address(FixedBytes(alloy::hex!( Address(FixedBytes(alloy::hex!("0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1")))
"0x90F8bf6A479f320ead074411a4B0e7944Ea8c9C1"
)))
} }
pub const fn default_caller() -> StepAddress { pub const fn default_caller() -> StepAddress {
@@ -482,20 +476,19 @@ impl FunctionCallStep {
.context("Failed to produce calldata for deployer/fallback method")?; .context("Failed to produce calldata for deployer/fallback method")?;
Ok(calldata.into()) Ok(calldata.into())
} },
Method::FunctionName(ref function_name) => { Method::FunctionName(ref function_name) => {
let Some(abi) = context.deployed_contract_abi(&self.instance) else { let Some(abi) = context.deployed_contract_abi(&self.instance) else {
anyhow::bail!("ABI for instance '{}' not found", self.instance.as_ref()); anyhow::bail!("ABI for instance '{}' not found", self.instance.as_ref());
}; };
// We follow the same logic that's implemented in the matter-labs-tester where they resolve // We follow the same logic that's implemented in the matter-labs-tester where they
// the function name into a function selector and they assume that he function doesn't have // resolve the function name into a function selector and they assume that he
// any existing overloads. // function doesn't have any existing overloads.
// Overloads are handled by providing the full function signature in the "function // Overloads are handled by providing the full function signature in the "function
// name". // name".
// https://github.com/matter-labs/era-compiler-tester/blob/1dfa7d07cba0734ca97e24704f12dd57f6990c2c/compiler_tester/src/test/case/input/mod.rs#L158-L190 // https://github.com/matter-labs/era-compiler-tester/blob/1dfa7d07cba0734ca97e24704f12dd57f6990c2c/compiler_tester/src/test/case/input/mod.rs#L158-L190
let selector = let selector = if function_name.contains('(') && function_name.contains(')') {
if function_name.contains('(') && function_name.contains(')') {
Function::parse(function_name) Function::parse(function_name)
.context( .context(
"Failed to parse the provided function name into a function signature", "Failed to parse the provided function name into a function signature",
@@ -511,19 +504,21 @@ impl FunctionCallStep {
&self.instance &self.instance
) )
}) })
.with_context(|| format!( .with_context(|| {
format!(
"Failed to resolve function selector for {:?} on instance {:?}", "Failed to resolve function selector for {:?} on instance {:?}",
function_name, &self.instance function_name, &self.instance
))? )
})?
.selector() .selector()
}; };
// Allocating a vector that we will be using for the calldata. The vector size will be: // Allocating a vector that we will be using for the calldata. The vector size will
// 4 bytes for the function selector. // be: 4 bytes for the function selector.
// function.inputs.len() * 32 bytes for the arguments (each argument is a U256). // function.inputs.len() * 32 bytes for the arguments (each argument is a U256).
// //
// We're using indices in the following code in order to avoid the need for us to allocate // We're using indices in the following code in order to avoid the need for us to
// a new buffer for each one of the resolved arguments. // allocate a new buffer for each one of the resolved arguments.
let mut calldata = Vec::<u8>::with_capacity(4 + self.calldata.size_requirement()); let mut calldata = Vec::<u8>::with_capacity(4 + self.calldata.size_requirement());
calldata.extend(selector.0); calldata.extend(selector.0);
self.calldata self.calldata
@@ -532,7 +527,7 @@ impl FunctionCallStep {
.context("Failed to append encoded argument to calldata buffer")?; .context("Failed to append encoded argument to calldata buffer")?;
Ok(calldata.into()) Ok(calldata.into())
} },
} }
} }
@@ -547,11 +542,9 @@ impl FunctionCallStep {
.await .await
.context("Failed to encode input bytes for transaction request")?; .context("Failed to encode input bytes for transaction request")?;
let caller = self.caller.resolve_address(resolver, context).await?; let caller = self.caller.resolve_address(resolver, context).await?;
let transaction_request = TransactionRequest::default().from(caller).value( let transaction_request = TransactionRequest::default()
self.value .from(caller)
.map(|value| value.into_inner()) .value(self.value.map(|value| value.into_inner()).unwrap_or_default());
.unwrap_or_default(),
);
match self.method { match self.method {
Method::Deployer => Ok(transaction_request.with_deploy_code(input_data)), Method::Deployer => Ok(transaction_request.with_deploy_code(input_data)),
_ => Ok(transaction_request _ => Ok(transaction_request
@@ -633,8 +626,7 @@ impl Calldata {
context: ResolutionContext<'_>, context: ResolutionContext<'_>,
) -> anyhow::Result<Vec<u8>> { ) -> anyhow::Result<Vec<u8>> {
let mut buffer = Vec::<u8>::with_capacity(self.size_requirement()); let mut buffer = Vec::<u8>::with_capacity(self.size_requirement());
self.calldata_into_slice(&mut buffer, resolver, context) self.calldata_into_slice(&mut buffer, resolver, context).await?;
.await?;
Ok(buffer) Ok(buffer)
} }
@@ -647,7 +639,7 @@ impl Calldata {
match self { match self {
Calldata::Single(bytes) => { Calldata::Single(bytes) => {
buffer.extend_from_slice(bytes); buffer.extend_from_slice(bytes);
} },
Calldata::Compound(items) => { Calldata::Compound(items) => {
let resolved = stream::iter(items.iter().enumerate()) let resolved = stream::iter(items.iter().enumerate())
.map(|(arg_idx, arg)| async move { .map(|(arg_idx, arg)| async move {
@@ -662,7 +654,7 @@ impl Calldata {
.context("Failed to resolve one or more calldata arguments")?; .context("Failed to resolve one or more calldata arguments")?;
buffer.extend(resolved.into_iter().flatten()); buffer.extend(resolved.into_iter().flatten());
} },
}; };
Ok(()) Ok(())
} }
@@ -711,7 +703,7 @@ impl Calldata {
.all(|v| async move { v.is_ok_and(|v| v) }) .all(|v| async move { v.is_ok_and(|v| v) })
.map(Ok) .map(Ok)
.await .await
} },
} }
} }
} }
@@ -725,10 +717,7 @@ impl CalldataItem {
) -> anyhow::Result<U256> { ) -> anyhow::Result<U256> {
let mut stack = Vec::<CalldataToken<U256>>::new(); let mut stack = Vec::<CalldataToken<U256>>::new();
for token in self for token in self.calldata_tokens().map(|token| token.resolve(resolver, context)) {
.calldata_tokens()
.map(|token| token.resolve(resolver, context))
{
let token = token.await?; let token = token.await?;
let new_token = match token { let new_token = match token {
CalldataToken::Item(_) => token, CalldataToken::Item(_) => token,
@@ -750,17 +739,15 @@ impl CalldataItem {
Operation::BitwiseAnd => Some(left_operand & right_operand), Operation::BitwiseAnd => Some(left_operand & right_operand),
Operation::BitwiseOr => Some(left_operand | right_operand), Operation::BitwiseOr => Some(left_operand | right_operand),
Operation::BitwiseXor => Some(left_operand ^ right_operand), Operation::BitwiseXor => Some(left_operand ^ right_operand),
Operation::ShiftLeft => { Operation::ShiftLeft =>
Some(left_operand << usize::try_from(right_operand)?) Some(left_operand << usize::try_from(right_operand)?),
} Operation::ShiftRight =>
Operation::ShiftRight => { Some(left_operand >> usize::try_from(right_operand)?),
Some(left_operand >> usize::try_from(right_operand)?)
}
} }
.context("Invalid calldata arithmetic operation - Invalid operation")?; .context("Invalid calldata arithmetic operation - Invalid operation")?;
CalldataToken::Item(result) CalldataToken::Item(result)
} },
}; };
stack.push(new_token) stack.push(new_token)
} }
@@ -769,9 +756,7 @@ impl CalldataItem {
// Empty stack means that we got an empty compound calldata which we resolve to zero. // Empty stack means that we got an empty compound calldata which we resolve to zero.
[] => Ok(U256::ZERO), [] => Ok(U256::ZERO),
[CalldataToken::Item(item)] => Ok(*item), [CalldataToken::Item(item)] => Ok(*item),
_ => Err(anyhow::anyhow!( _ => Err(anyhow::anyhow!("Invalid calldata arithmetic operation - Invalid stack")),
"Invalid calldata arithmetic operation - Invalid stack"
)),
} }
} }
@@ -915,16 +900,13 @@ impl<T: AsRef<str>> CalldataToken<T> {
.await .await
.map(U256::from) .map(U256::from)
} else if let Some(variable_name) = item.strip_prefix(Self::VARIABLE_PREFIX) { } else if let Some(variable_name) = item.strip_prefix(Self::VARIABLE_PREFIX) {
context context.variable(variable_name).context("Variable lookup failed").copied()
.variable(variable_name)
.context("Variable lookup failed")
.copied()
} else { } else {
U256::from_str_radix(item, 10) U256::from_str_radix(item, 10)
.map_err(|error| anyhow::anyhow!("Invalid decimal literal: {}", error)) .map_err(|error| anyhow::anyhow!("Invalid decimal literal: {}", error))
}; };
value.map(CalldataToken::Item) value.map(CalldataToken::Item)
} },
Self::Operation(operation) => Ok(CalldataToken::Operation(operation)), Self::Operation(operation) => Ok(CalldataToken::Operation(operation)),
} }
} }
@@ -959,9 +941,12 @@ impl<'de> Deserialize<'de> for EtherValue {
#[cfg(test)] #[cfg(test)]
mod tests { mod tests {
use alloy::primitives::{BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, address}; use alloy::{
use alloy::sol_types::SolValue; eips::BlockNumberOrTag,
use alloy::{eips::BlockNumberOrTag, json_abi::JsonAbi}; json_abi::JsonAbi,
primitives::{BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, address},
sol_types::SolValue,
};
use std::{collections::HashMap, pin::Pin}; use std::{collections::HashMap, pin::Pin};
use super::*; use super::*;
@@ -1045,13 +1030,7 @@ mod tests {
"#; "#;
let parsed_abi: JsonAbi = serde_json::from_str(raw_metadata).unwrap(); let parsed_abi: JsonAbi = serde_json::from_str(raw_metadata).unwrap();
let selector = parsed_abi let selector = parsed_abi.function("store").unwrap().first().unwrap().selector().0;
.function("store")
.unwrap()
.first()
.unwrap()
.selector()
.0;
let input = FunctionCallStep { let input = FunctionCallStep {
instance: ContractInstance::new("Contract"), instance: ContractInstance::new("Contract"),
@@ -1089,13 +1068,7 @@ mod tests {
]"#; ]"#;
let parsed_abi: JsonAbi = serde_json::from_str(raw_abi).unwrap(); let parsed_abi: JsonAbi = serde_json::from_str(raw_abi).unwrap();
let selector = parsed_abi let selector = parsed_abi.function("send").unwrap().first().unwrap().selector().0;
.function("send")
.unwrap()
.first()
.unwrap()
.selector()
.0;
let input: FunctionCallStep = FunctionCallStep { let input: FunctionCallStep = FunctionCallStep {
instance: "Contract".to_owned().into(), instance: "Contract".to_owned().into(),
@@ -1117,10 +1090,7 @@ mod tests {
type T = (alloy::primitives::Address,); type T = (alloy::primitives::Address,);
let decoded: T = T::abi_decode(&encoded.0[4..]).unwrap(); let decoded: T = T::abi_decode(&encoded.0[4..]).unwrap();
assert_eq!( assert_eq!(decoded.0, address!("0x1000000000000000000000000000000000000001"));
decoded.0,
address!("0x1000000000000000000000000000000000000001")
);
} }
#[tokio::test] #[tokio::test]
@@ -1136,13 +1106,7 @@ mod tests {
]"#; ]"#;
let parsed_abi: JsonAbi = serde_json::from_str(raw_abi).unwrap(); let parsed_abi: JsonAbi = serde_json::from_str(raw_abi).unwrap();
let selector = parsed_abi let selector = parsed_abi.function("send").unwrap().first().unwrap().selector().0;
.function("send")
.unwrap()
.first()
.unwrap()
.selector()
.0;
let input: FunctionCallStep = FunctionCallStep { let input: FunctionCallStep = FunctionCallStep {
instance: ContractInstance::new("Contract"), instance: ContractInstance::new("Contract"),
@@ -1164,10 +1128,7 @@ mod tests {
type T = (alloy::primitives::Address,); type T = (alloy::primitives::Address,);
let decoded: T = T::abi_decode(&encoded.0[4..]).unwrap(); let decoded: T = T::abi_decode(&encoded.0[4..]).unwrap();
assert_eq!( assert_eq!(decoded.0, address!("0x1000000000000000000000000000000000000001"));
decoded.0,
address!("0x1000000000000000000000000000000000000001")
);
} }
async fn resolve_calldata_item( async fn resolve_calldata_item(
@@ -1204,12 +1165,7 @@ mod tests {
let resolved = resolved.expect("Failed to resolve argument"); let resolved = resolved.expect("Failed to resolve argument");
assert_eq!( assert_eq!(
resolved, resolved,
U256::from( U256::from(MockResolver.block_gas_limit(Default::default()).await.unwrap())
MockResolver
.block_gas_limit(Default::default())
.await
.unwrap()
)
) )
} }
@@ -1226,11 +1182,7 @@ mod tests {
assert_eq!( assert_eq!(
resolved, resolved,
U256::from_be_slice( U256::from_be_slice(
MockResolver MockResolver.block_coinbase(Default::default()).await.unwrap().as_ref()
.block_coinbase(Default::default())
.await
.unwrap()
.as_ref()
) )
) )
} }
@@ -1245,13 +1197,7 @@ mod tests {
// Assert // Assert
let resolved = resolved.expect("Failed to resolve argument"); let resolved = resolved.expect("Failed to resolve argument");
assert_eq!( assert_eq!(resolved, MockResolver.block_difficulty(Default::default()).await.unwrap())
resolved,
MockResolver
.block_difficulty(Default::default())
.await
.unwrap()
)
} }
#[tokio::test] #[tokio::test]
@@ -1266,11 +1212,7 @@ mod tests {
let resolved = resolved.expect("Failed to resolve argument"); let resolved = resolved.expect("Failed to resolve argument");
assert_eq!( assert_eq!(
resolved, resolved,
MockResolver MockResolver.block_base_fee(Default::default()).await.map(U256::from).unwrap()
.block_base_fee(Default::default())
.await
.map(U256::from)
.unwrap()
) )
} }
@@ -1300,10 +1242,7 @@ mod tests {
// Assert // Assert
let resolved = resolved.expect("Failed to resolve argument"); let resolved = resolved.expect("Failed to resolve argument");
assert_eq!( assert_eq!(resolved, U256::from(MockResolver.last_block_number().await.unwrap()))
resolved,
U256::from(MockResolver.last_block_number().await.unwrap())
)
} }
#[tokio::test] #[tokio::test]
@@ -1318,12 +1257,7 @@ mod tests {
let resolved = resolved.expect("Failed to resolve argument"); let resolved = resolved.expect("Failed to resolve argument");
assert_eq!( assert_eq!(
resolved, resolved,
U256::from( U256::from(MockResolver.block_timestamp(Default::default()).await.unwrap())
MockResolver
.block_timestamp(Default::default())
.await
.unwrap()
)
) )
} }
@@ -1401,10 +1335,7 @@ mod tests {
// Assert // Assert
let resolved = resolved.expect("Failed to resolve argument"); let resolved = resolved.expect("Failed to resolve argument");
assert_eq!( assert_eq!(resolved, U256::from(MockResolver.last_block_number().await.unwrap() + 10));
resolved,
U256::from(MockResolver.last_block_number().await.unwrap() + 10)
);
} }
#[tokio::test] #[tokio::test]
+12 -13
View File
@@ -1,10 +1,10 @@
use std::collections::HashMap; use std::{collections::HashMap, pin::Pin};
use std::pin::Pin;
use alloy::eips::BlockNumberOrTag; use alloy::{
use alloy::json_abi::JsonAbi; eips::BlockNumberOrTag,
use alloy::primitives::TxHash; json_abi::JsonAbi,
use alloy::primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, U256}; primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, TxHash, U256},
};
use anyhow::Result; use anyhow::Result;
use crate::metadata::{ContractIdent, ContractInstance}; use crate::metadata::{ContractIdent, ContractInstance};
@@ -135,11 +135,11 @@ impl<'a> ResolutionContext<'a> {
match self.block_number { match self.block_number {
Some(block_number) => match number { Some(block_number) => match number {
BlockNumberOrTag::Latest => BlockNumberOrTag::Number(*block_number), BlockNumberOrTag::Latest => BlockNumberOrTag::Number(*block_number),
n @ (BlockNumberOrTag::Finalized n @ (BlockNumberOrTag::Finalized |
| BlockNumberOrTag::Safe BlockNumberOrTag::Safe |
| BlockNumberOrTag::Earliest BlockNumberOrTag::Earliest |
| BlockNumberOrTag::Pending BlockNumberOrTag::Pending |
| BlockNumberOrTag::Number(_)) => n, BlockNumberOrTag::Number(_)) => n,
}, },
None => number, None => number,
} }
@@ -162,8 +162,7 @@ impl<'a> ResolutionContext<'a> {
} }
pub fn variable(&self, name: impl AsRef<str>) -> Option<&U256> { pub fn variable(&self, name: impl AsRef<str>) -> Option<&U256> {
self.variables self.variables.and_then(|variables| variables.get(name.as_ref()))
.and_then(|variables| variables.get(name.as_ref()))
} }
pub fn tip_block_number(&self) -> Option<&'a BlockNumber> { pub fn tip_block_number(&self) -> Option<&'a BlockNumber> {
+34
View File
@@ -0,0 +1,34 @@
[package]
name = "ml-test-runner"
description = "ML-based test runner for executing differential tests file by file"
version.workspace = true
authors.workspace = true
license.workspace = true
edition.workspace = true
repository.workspace = true
rust-version.workspace = true
[[bin]]
name = "ml-test-runner"
path = "src/main.rs"
[dependencies]
revive-dt-common = { workspace = true }
revive-dt-compiler = { workspace = true }
revive-dt-config = { workspace = true }
revive-dt-core = { workspace = true }
revive-dt-format = { workspace = true }
revive-dt-node = { workspace = true }
revive-dt-node-interaction = { workspace = true }
revive-dt-report = { workspace = true }
alloy = { workspace = true }
anyhow = { workspace = true }
clap = { workspace = true }
tokio = { workspace = true }
temp-dir = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
[lints]
workspace = true
+74
View File
@@ -0,0 +1,74 @@
# ML Test Runner
A test runner for executing Revive differential tests file-by-file with cargo-test-style output.
This is similar to the `retester` binary but designed for ML-based test execution with a focus on:
- Running tests file-by-file (rather than in bulk)
- Caching passed tests to skip them in future runs
- Providing cargo-test-style output for easy integration with ML pipelines
- Single platform testing (rather than differential testing)
## Features
- **File-by-file execution**: Run tests on individual `.sol` files, corpus files (`.json`), or recursively walk directories
- **Cached results**: Skip tests that have already passed using `--cached-passed`
- **Fail fast**: Stop on first failure with `--bail`
- **Cargo-like output**: Familiar test output format with colored pass/fail indicators
- **Platform support**: Test against `geth` or `kitchensink` platforms
## Usage
```bash
# Run a single .sol file (compile-only mode, default)
./ml-test-runner path/to/test.sol --platform geth
# Run all tests in a corpus file
./ml-test-runner path/to/corpus.json --platform kitchensink
# Walk a directory recursively for .sol files
./ml-test-runner path/to/tests/ --platform geth
# Use cached results and bail on first failure
./ml-test-runner path/to/tests/ --cached-passed ./cache.txt --bail
# Start the platform and execute tests (full mode)
./ml-test-runner path/to/tests/ --platform geth --start-platform
# Enable verbose logging (info, debug, or trace level)
RUST_LOG=info ./ml-test-runner path/to/tests/
RUST_LOG=debug ./ml-test-runner path/to/tests/ --start-platform
RUST_LOG=trace ./ml-test-runner path/to/tests/ --start-platform
```
## Arguments
- `<PATH>` - Path to test file (`.sol`), corpus file (`.json`), or folder of `.sol` files
- `--cached-passed <FILE>` - File to track tests that have already passed
- `--bail` - Stop after the first file failure
- `--platform <PLATFORM>` - Platform to test against (`geth`, `kitchensink`, or `zombienet`, default: `geth`)
- `--start-platform` - Start the platform and execute tests (default: `false`, compile-only mode)
## Output Format
The runner produces cargo-test-style output:
```
test path/to/test1.sol ... ok
test path/to/test2.sol ... FAILED
test path/to/test3.sol ... cached
failures:
---- path/to/test2.sol ----
Error: ...
test result: FAILED. 1 passed; 1 failed; 1 cached; finished in 2.34s
```
## Building
```bash
cargo build --release -p ml-test-runner
```
The binary will be available at `target/release/ml-test-runner`.
+639
View File
@@ -0,0 +1,639 @@
use anyhow::Context;
use clap::Parser;
use revive_dt_common::{
iterators::FilesWithExtensionIterator,
types::{PlatformIdentifier, PrivateKeyAllocator},
};
use revive_dt_config::TestExecutionContext;
use revive_dt_core::{
CachedCompiler, Platform,
helpers::{TestDefinition, TestPlatformInformation},
};
use revive_dt_format::{
case::CaseIdx,
corpus::Corpus,
metadata::{Metadata, MetadataFile},
};
use std::{
borrow::Cow,
collections::{BTreeMap, HashSet},
fs::File,
io::{BufRead, BufReader, BufWriter, Write},
path::{Path, PathBuf},
sync::Arc,
time::{Duration, Instant},
};
use temp_dir::TempDir;
use tokio::sync::Mutex;
use tracing::info;
use tracing_subscriber::{EnvFilter, FmtSubscriber};
/// ML-based test runner for executing differential tests file by file
#[derive(Debug, Parser)]
#[command(name = "ml-test-runner")]
struct MlTestRunnerArgs {
/// Path to test file (.sol), corpus file (.json), or folder containing .sol files
#[arg(value_name = "PATH")]
path: PathBuf,
/// File to cache tests that have already passed
#[arg(long = "cached-passed")]
cached_passed: Option<PathBuf>,
/// File to store tests that have failed (defaults to .<platform>-failed)
#[arg(long = "cached-failed")]
cached_failed: Option<PathBuf>,
/// Stop after the first file failure
#[arg(long = "bail")]
bail: bool,
/// Platform to test against (e.g., geth-evm-solc, kitchensink-polkavm-resolc)
#[arg(long = "platform", default_value = "geth-evm-solc")]
platform: PlatformIdentifier,
/// Start the platform and wait for RPC readiness
#[arg(long = "start-platform", default_value = "false")]
start_platform: bool,
/// Private key to use for wallet initialization (hex string with or without 0x prefix)
#[arg(
long = "private-key",
default_value = "0x5fb92d6e98884f76de468fa3f6278f8807c48bebc13595d45af5bdc4da702133"
)]
private_key: String,
/// RPC port to connect to when using existing node
#[arg(long = "rpc-port", default_value = "8545")]
rpc_port: u16,
/// Show verbose output including cached tests and detailed error messages
#[arg(long = "verbose", short = 'v')]
verbose: bool,
}
fn main() -> anyhow::Result<()> {
let args = MlTestRunnerArgs::parse();
// Only set up tracing if RUST_LOG is explicitly set or --verbose is passed
if std::env::var("RUST_LOG").is_ok() || args.verbose {
let subscriber = FmtSubscriber::builder()
.with_env_filter(EnvFilter::from_default_env())
.with_writer(std::io::stderr)
.finish();
tracing::subscriber::set_global_default(subscriber)
.expect("Failed to set tracing subscriber");
}
info!("ML test runner starting");
info!("Platform: {:?}", args.platform);
info!("Start platform: {}", args.start_platform);
tokio::runtime::Builder::new_multi_thread()
.enable_all()
.build()
.expect("Failed building the Runtime")
.block_on(run(args))
}
/// Wait for HTTP server to be ready by attempting to connect to the specified port
async fn wait_for_http_server(port: u16) -> anyhow::Result<()> {
const MAX_RETRIES: u32 = 60;
const RETRY_DELAY: Duration = Duration::from_secs(1);
for attempt in 1..=MAX_RETRIES {
match tokio::net::TcpStream::connect(format!("127.0.0.1:{}", port)).await {
Ok(_) => {
info!("Successfully connected to HTTP server on port {} (attempt {})", port, attempt);
return Ok(());
},
Err(e) => {
if attempt == MAX_RETRIES {
anyhow::bail!(
"Failed to connect to HTTP server on port {} after {} attempts: {}",
port,
MAX_RETRIES,
e
);
}
if attempt % 10 == 0 {
info!(
"Still waiting for HTTP server on port {} (attempt {}/{})",
port, attempt, MAX_RETRIES
);
}
tokio::time::sleep(RETRY_DELAY).await;
},
}
}
unreachable!()
}
async fn run(args: MlTestRunnerArgs) -> anyhow::Result<()> {
let start_time = Instant::now();
info!("Discovering test files from: {}", args.path.display());
let test_files = discover_test_files(&args.path)?;
info!("Found {} test file(s)", test_files.len());
let cached_passed = if let Some(cache_file) = &args.cached_passed {
let cached = load_cached_passed(cache_file)?;
info!("Loaded {} cached passed test(s)", cached.len());
cached
} else {
HashSet::new()
};
let cached_passed = Arc::new(Mutex::new(cached_passed));
// Set up cached-failed file (defaults to .<platform>-failed)
let cached_failed_path = args
.cached_failed
.clone()
.unwrap_or_else(|| PathBuf::from(format!(".{:?}-failed", args.platform)));
let cached_failed = Arc::new(Mutex::new(HashSet::<String>::new()));
// Get the platform based on CLI args
let platform: &dyn Platform = match args.platform {
PlatformIdentifier::GethEvmSolc => &revive_dt_core::GethEvmSolcPlatform,
PlatformIdentifier::LighthouseGethEvmSolc => &revive_dt_core::LighthouseGethEvmSolcPlatform,
PlatformIdentifier::KitchensinkPolkavmResolc =>
&revive_dt_core::KitchensinkPolkavmResolcPlatform,
PlatformIdentifier::KitchensinkRevmSolc => &revive_dt_core::KitchensinkRevmSolcPlatform,
PlatformIdentifier::ReviveDevNodePolkavmResolc =>
&revive_dt_core::ReviveDevNodePolkavmResolcPlatform,
PlatformIdentifier::ReviveDevNodeRevmSolc => &revive_dt_core::ReviveDevNodeRevmSolcPlatform,
PlatformIdentifier::ZombienetPolkavmResolc =>
&revive_dt_core::ZombienetPolkavmResolcPlatform,
PlatformIdentifier::ZombienetRevmSolc => &revive_dt_core::ZombienetRevmSolcPlatform,
};
let test_context = TestExecutionContext::default();
let context = revive_dt_config::Context::Test(Box::new(test_context));
let node: &'static dyn revive_dt_node_interaction::EthereumNode = if args.start_platform {
info!("Starting blockchain node...");
let node_handle =
platform.new_node(context.clone()).context("Failed to spawn node thread")?;
info!("Waiting for node to start...");
let node = node_handle
.join()
.map_err(|e| anyhow::anyhow!("Node thread panicked: {:?}", e))?
.context("Failed to start node")?;
info!("Node started with ID: {}, connection: {}", node.id(), node.connection_string());
let node = Box::leak(node);
info!("Running pre-transactions...");
node.pre_transactions().await.context("Failed to run pre-transactions")?;
info!("Pre-transactions completed");
node
} else {
info!("Using existing node at port {}", args.rpc_port);
// Wait for the HTTP server to be ready
info!("Waiting for HTTP server to be ready on port {}...", args.rpc_port);
wait_for_http_server(args.rpc_port).await?;
info!("HTTP server is ready");
let existing_node: Box<dyn revive_dt_node_interaction::EthereumNode> = match args.platform {
PlatformIdentifier::GethEvmSolc | PlatformIdentifier::LighthouseGethEvmSolc =>
Box::new(
revive_dt_node::node_implementations::geth::GethNode::new_existing(
&args.private_key,
args.rpc_port,
)
.await?,
),
PlatformIdentifier::KitchensinkPolkavmResolc |
PlatformIdentifier::KitchensinkRevmSolc |
PlatformIdentifier::ReviveDevNodePolkavmResolc |
PlatformIdentifier::ReviveDevNodeRevmSolc |
PlatformIdentifier::ZombienetPolkavmResolc |
PlatformIdentifier::ZombienetRevmSolc => Box::new(
revive_dt_node::node_implementations::substrate::SubstrateNode::new_existing(
&args.private_key,
args.rpc_port,
)
.await?,
),
};
Box::leak(existing_node)
};
let mut passed_files = 0;
let mut failed_files = 0;
let mut skipped_files = 0;
let mut failures = Vec::new();
const GREEN: &str = "\x1B[32m";
const RED: &str = "\x1B[31m";
const YELLOW: &str = "\x1B[33m";
const COLOUR_RESET: &str = "\x1B[0m";
const BOLD: &str = "\x1B[1m";
const BOLD_RESET: &str = "\x1B[22m";
for test_file in test_files {
let file_display = test_file.display().to_string();
info!("\n\n == Executing test file: {file_display} == \n\n");
// Check if already passed
{
let cache = cached_passed.lock().await;
if cache.contains(&file_display) {
if args.verbose {
println!("test {file_display} ... {YELLOW}cached{COLOUR_RESET}");
}
skipped_files += 1;
continue;
}
}
info!("Loading metadata from: {}", test_file.display());
let metadata_file = match load_metadata_file(&test_file) {
Ok(mf) => {
info!("Loaded metadata with {} case(s)", mf.cases.len());
mf
},
Err(e) => {
// Skip files without metadata instead of treating them as failures
info!("Skipping {} (no metadata): {}", file_display, e);
skipped_files += 1;
continue;
},
};
// Execute test with 10 second timeout
let test_result = tokio::time::timeout(
Duration::from_secs(20),
execute_test_file(&metadata_file, platform, node, &context),
)
.await;
let result = match test_result {
Ok(Ok(_)) => Ok(()),
Ok(Err(e)) => Err(e),
Err(_) => Err(anyhow::anyhow!("Test timed out after 20 seconds")),
};
match result {
Ok(_) => {
println!("test {file_display} ... {GREEN}ok{COLOUR_RESET}");
passed_files += 1;
// Update cache
if let Some(cache_file) = &args.cached_passed {
let mut cache = cached_passed.lock().await;
cache.insert(file_display);
if let Err(e) = save_cached_passed(cache_file, &cache) {
info!("Failed to save cache: {}", e);
}
}
},
Err(e) => {
println!("test {file_display} ... {RED}FAILED{COLOUR_RESET}");
failed_files += 1;
let error_detail = if args.verbose { format!("{:?}", e) } else { format!("{}", e) };
failures.push((file_display.clone(), error_detail));
// Update cached-failed
{
let mut cache = cached_failed.lock().await;
cache.insert(file_display);
if let Err(e) = save_cached_failed(&cached_failed_path, &cache) {
info!("Failed to save cached-failed: {}", e);
}
}
if args.bail {
info!("Bailing after first failure");
break;
}
},
}
}
// Print summary
println!();
if !failures.is_empty() && args.verbose {
println!("{BOLD}failures:{BOLD_RESET}");
println!();
for (file, error) in &failures {
println!("---- {} ----", file);
println!("{}", error);
println!();
}
}
let elapsed = start_time.elapsed();
println!(
"test result: {}. {} passed; {} failed; {} cached; finished in {:.2}s",
if failed_files == 0 {
format!("{GREEN}ok{COLOUR_RESET}")
} else {
format!("{RED}FAILED{COLOUR_RESET}")
},
passed_files,
failed_files,
skipped_files,
elapsed.as_secs_f64()
);
if failed_files > 0 {
std::process::exit(1);
}
Ok(())
}
/// Discover test files from the given path.
///
/// Accepts a single `.sol` file, a `.json` corpus file, or a directory. Directories are
/// scanned in two passes: every `test.json` is collected first (expanded if it parses as
/// a corpus, used directly otherwise), and `.sol` files are then picked up only from
/// directories that do not contain a `test.json`.
fn discover_test_files(path: &Path) -> anyhow::Result<Vec<PathBuf>> {
    if !path.exists() {
        anyhow::bail!("Path does not exist: {}", path.display());
    }
    let mut files = Vec::new();
    if path.is_file() {
        let extension = path.extension().and_then(|s| s.to_str()).unwrap_or("");
        match extension {
            // A single Solidity test file.
            "sol" => files.push(path.to_path_buf()),
            // A corpus file: expand it into its metadata files.
            "json" => files.extend(
                Corpus::try_from_path(path)?
                    .enumerate_tests()
                    .into_iter()
                    .map(|metadata| metadata.metadata_file_path),
            ),
            _ => anyhow::bail!("Unsupported file extension: {}. Expected .sol or .json", extension),
        }
    } else if path.is_dir() {
        // Pass 1: every `test.json`, remembering which directories contain one.
        let mut test_json_dirs = HashSet::new();
        let json_files = FilesWithExtensionIterator::new(path)
            .with_allowed_extension("json")
            .with_use_cached_fs(true);
        for json_file in json_files {
            if json_file.file_name().and_then(|s| s.to_str()) != Some("test.json") {
                continue;
            }
            if let Some(parent) = json_file.parent() {
                test_json_dirs.insert(parent.to_path_buf());
            }
            // A `test.json` is either a corpus (expanded into its tests) or a plain
            // metadata file (used as-is).
            match Corpus::try_from_path(&json_file) {
                Ok(corpus) => files.extend(
                    corpus
                        .enumerate_tests()
                        .into_iter()
                        .map(|metadata| metadata.metadata_file_path),
                ),
                Err(_) => files.push(json_file),
            }
        }
        // Pass 2: standalone `.sol` files, skipping any directory already covered by a
        // `test.json`.
        let sol_files = FilesWithExtensionIterator::new(path)
            .with_allowed_extension("sol")
            .with_use_cached_fs(true);
        for sol_file in sol_files {
            let covered =
                sol_file.parent().map_or(false, |parent| test_json_dirs.contains(parent));
            if !covered {
                files.push(sol_file);
            }
        }
    } else {
        anyhow::bail!("Path is neither a file nor a directory: {}", path.display());
    }
    Ok(files)
}
/// Load metadata from a test file.
///
/// Fails with a descriptive error when the file cannot be parsed as metadata.
fn load_metadata_file(path: &Path) -> anyhow::Result<MetadataFile> {
    let content = Metadata::try_from_file(path)
        .ok_or_else(|| anyhow::anyhow!("Failed to load metadata from {}", path.display()))?;
    // NOTE(review): with no enclosing corpus, the metadata path is reused as the corpus
    // path — presumably downstream consumers only use it for display/grouping; confirm.
    let file_path = path.to_path_buf();
    Ok(MetadataFile {
        metadata_file_path: file_path.clone(),
        corpus_file_path: file_path,
        content,
    })
}
/// Execute all test cases in a metadata file.
///
/// Builds a test definition for every compatible case and then runs them sequentially,
/// failing fast on the first case that errors. Incompatible cases are silently skipped
/// (see [`build_test_definition`]).
async fn execute_test_file(
    metadata_file: &MetadataFile,
    platform: &dyn Platform,
    node: &'static dyn revive_dt_node_interaction::EthereumNode,
    context: &revive_dt_config::Context,
) -> anyhow::Result<()> {
    if metadata_file.cases.is_empty() {
        anyhow::bail!("No test cases found in file");
    }
    info!("Processing {} test case(s)", metadata_file.cases.len());
    // Compilation artifacts are cached in a per-invocation temp dir, removed on drop.
    let temp_dir = TempDir::new()?;
    info!("Created temporary directory: {}", temp_dir.path().display());
    info!("Initializing cached compiler");
    let cached_compiler = CachedCompiler::new(temp_dir.path().join("compilation_cache"), false)
        .await
        .map(Arc::new)
        .context("Failed to create cached compiler")?;
    let private_key_allocator =
        Arc::new(Mutex::new(PrivateKeyAllocator::new(alloy::primitives::U256::from(100))));
    let (reporter, report_task) =
        revive_dt_report::ReportAggregator::new(context.clone()).into_task();
    // NOTE(review): the JoinHandle is dropped, so the report task runs detached —
    // presumably it ends when the reporter side is dropped; confirm.
    tokio::spawn(report_task);
    info!("Building test definitions for {} case(s)", metadata_file.cases.len());
    let mut test_definitions = Vec::new();
    for (case_idx, case) in metadata_file.cases.iter().enumerate() {
        info!("Building test definition for case {}", case_idx);
        // Fix: pass `context` directly instead of `&context`, which created a needless
        // `&&Context` that only compiled via deref coercion.
        let test_def = build_test_definition(
            metadata_file,
            case,
            case_idx,
            platform,
            node,
            context,
            &reporter,
        )
        .await?;
        // `None` means the case is incompatible with the selected mode/platform.
        if let Some(test_def) = test_def {
            info!("Test definition for case {} created successfully", case_idx);
            test_definitions.push(test_def);
        }
    }
    info!("Executing {} test definition(s)", test_definitions.len());
    for (idx, test_definition) in test_definitions.iter().enumerate() {
        info!("─────────────────────────────────────────────────────────────────");
        info!(
            "Executing case {}/{}: case_idx={}, mode={}, steps={}",
            idx + 1,
            test_definitions.len(),
            test_definition.case_idx,
            test_definition.mode,
            test_definition.case.steps.len()
        );
        info!("Creating driver for case {}", test_definition.case_idx);
        let driver = revive_dt_core::differential_tests::Driver::new_root(
            test_definition,
            private_key_allocator.clone(),
            &cached_compiler,
        )
        .await
        .context("Failed to create driver")?;
        info!(
            "Running {} step(s) for case {}",
            test_definition.case.steps.len(),
            test_definition.case_idx
        );
        // `with_context` defers the `format!` allocation to the error path only.
        let steps_executed = driver
            .execute_all()
            .await
            .with_context(|| format!("Failed to execute case {}", test_definition.case_idx))?;
        info!(
            "✓ Case {} completed successfully, executed {} step(s)",
            test_definition.case_idx, steps_executed
        );
    }
    info!("─────────────────────────────────────────────────────────────────");
    info!("All {} test case(s) executed successfully", test_definitions.len());
    Ok(())
}
/// Build a test definition for a single test case.
///
/// Returns `Ok(None)` when the case is incompatible with the selected mode/platform and
/// should be skipped rather than executed.
async fn build_test_definition<'a>(
    metadata_file: &'a MetadataFile,
    case: &'a revive_dt_format::case::Case,
    case_idx: usize,
    platform: &'a dyn Platform,
    node: &'a dyn revive_dt_node_interaction::EthereumNode,
    context: &revive_dt_config::Context,
    reporter: &revive_dt_report::Reporter,
) -> anyhow::Result<Option<TestDefinition<'a>>> {
    // Mode precedence: case-level modes, then file-level modes, then the compiler
    // crate's first known mode as a fallback.
    let requested_mode = case
        .modes
        .as_ref()
        .or(metadata_file.modes.as_ref())
        .and_then(|modes| modes.first())
        .and_then(|parsed_mode| parsed_mode.to_modes().next());
    let mode = match requested_mode {
        Some(owned) => Cow::Owned(owned),
        None => Cow::Borrowed(revive_dt_compiler::Mode::all().next().unwrap()),
    };
    let compiler = platform
        .new_compiler(context.clone(), mode.version.clone().map(Into::into))
        .await
        .context("Failed to create compiler")?;
    let test_specifier = Arc::new(revive_dt_report::TestSpecifier {
        solc_mode: mode.as_ref().clone(),
        metadata_file_path: metadata_file.metadata_file_path.clone(),
        case_idx: CaseIdx::new(case_idx),
    });
    let test_reporter = reporter.test_specific_reporter(test_specifier);
    let execution_reporter =
        test_reporter.execution_specific_reporter(node.id(), platform.platform_identifier());
    let mut platforms = BTreeMap::new();
    platforms.insert(
        platform.platform_identifier(),
        TestPlatformInformation { platform, node, compiler, reporter: execution_reporter },
    );
    let test_definition = TestDefinition {
        metadata: metadata_file,
        metadata_file_path: &metadata_file.metadata_file_path,
        mode,
        case_idx: CaseIdx::new(case_idx),
        case,
        platforms,
        reporter: test_reporter,
    };
    // Incompatible cases are reported as skipped, not as failures.
    if let Err((reason, _)) = test_definition.check_compatibility() {
        info!("Skipping case {}: {}", case_idx, reason);
        return Ok(None);
    }
    Ok(Some(test_definition))
}
/// Load cached passed tests from file.
///
/// A missing file yields an empty set; blank lines are ignored and entries are trimmed.
fn load_cached_passed(path: &Path) -> anyhow::Result<HashSet<String>> {
    if !path.exists() {
        return Ok(HashSet::new());
    }
    let file = File::open(path).context("Failed to open cached-passed file")?;
    let cache = BufReader::new(file)
        .lines()
        // Short-circuit on the first I/O error, mirroring a plain `line?` loop.
        .collect::<Result<Vec<_>, _>>()?
        .into_iter()
        .filter_map(|line| {
            let trimmed = line.trim();
            (!trimmed.is_empty()).then(|| trimmed.to_string())
        })
        .collect();
    Ok(cache)
}
/// Save cached passed tests to file.
///
/// Entries are written one per line in sorted order so the file diffs cleanly.
fn save_cached_passed(path: &Path, cache: &HashSet<String>) -> anyhow::Result<()> {
    let mut entries: Vec<&String> = cache.iter().collect();
    entries.sort();
    let mut writer =
        BufWriter::new(File::create(path).context("Failed to create cached-passed file")?);
    for entry in entries {
        writeln!(writer, "{}", entry)?;
    }
    writer.flush()?;
    Ok(())
}
/// Save cached failed tests to file.
///
/// Entries are written one per line in sorted order so the file diffs cleanly.
fn save_cached_failed(path: &Path, cache: &HashSet<String>) -> anyhow::Result<()> {
    let mut entries: Vec<&String> = cache.iter().collect();
    entries.sort();
    let mut writer =
        BufWriter::new(File::create(path).context("Failed to create cached-failed file")?);
    for entry in entries {
        writeln!(writer, "{}", entry)?;
    }
    writer.flush()?;
    Ok(())
}
+12 -5
View File
@@ -1,11 +1,14 @@
//! This crate implements all node interactions. //! This crate implements all node interactions.
use std::pin::Pin; use std::{pin::Pin, sync::Arc};
use std::sync::Arc;
use alloy::primitives::{Address, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256}; use alloy::{
use alloy::rpc::types::trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace}; primitives::{Address, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256},
use alloy::rpc::types::{EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest}; rpc::types::{
EIP1186AccountProofResponse, TransactionReceipt, TransactionRequest,
trace::geth::{DiffMode, GethDebugTracingOptions, GethTrace},
},
};
use anyhow::Result; use anyhow::Result;
use futures::Stream; use futures::Stream;
@@ -74,6 +77,10 @@ pub trait EthereumNode {
+ '_, + '_,
>, >,
>; >;
/// Checks if the provided address is in the wallet. If it is, returns the address.
/// Otherwise, returns the default signer's address.
fn resolve_signer_or_default(&self, address: Address) -> Address;
} }
#[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)] #[derive(Clone, Debug, PartialEq, Eq, PartialOrd, Ord, Hash)]
+13 -32
View File
@@ -33,10 +33,7 @@ impl Process {
let log_file_prefix = log_file_prefix.into(); let log_file_prefix = log_file_prefix.into();
let (stdout_file_name, stderr_file_name) = match log_file_prefix { let (stdout_file_name, stderr_file_name) = match log_file_prefix {
Some(prefix) => ( Some(prefix) => (format!("{prefix}_stdout.log"), format!("{prefix}_stderr.log")),
format!("{prefix}_stdout.log"),
format!("{prefix}_stderr.log"),
),
None => ("stdout.log".to_string(), "stderr.log".to_string()), None => ("stdout.log".to_string(), "stderr.log".to_string()),
}; };
@@ -57,23 +54,19 @@ impl Process {
.context("Failed to open the stderr logs file")?; .context("Failed to open the stderr logs file")?;
let mut command = { let mut command = {
let stdout_logs_file = stdout_logs_file let stdout_logs_file =
.try_clone() stdout_logs_file.try_clone().context("Failed to clone the stdout logs file")?;
.context("Failed to clone the stdout logs file")?; let stderr_logs_file =
let stderr_logs_file = stderr_logs_file stderr_logs_file.try_clone().context("Failed to clone the stderr logs file")?;
.try_clone()
.context("Failed to clone the stderr logs file")?;
let mut command = Command::new(binary_path.as_ref()); let mut command = Command::new(binary_path.as_ref());
command_building_callback(&mut command, stdout_logs_file, stderr_logs_file); command_building_callback(&mut command, stdout_logs_file, stderr_logs_file);
command command
}; };
let mut child = command let mut child = command.spawn().context("Failed to spawn the built command")?;
.spawn()
.context("Failed to spawn the built command")?;
match process_readiness_wait_behavior { match process_readiness_wait_behavior {
ProcessReadinessWaitBehavior::NoStartupWait => {} ProcessReadinessWaitBehavior::NoStartupWait => {},
ProcessReadinessWaitBehavior::WaitDuration(duration) => std::thread::sleep(duration), ProcessReadinessWaitBehavior::WaitDuration(duration) => std::thread::sleep(duration),
ProcessReadinessWaitBehavior::TimeBoundedWaitFunction { ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
max_wait_duration, max_wait_duration,
@@ -126,35 +119,23 @@ impl Process {
) )
} }
} }
} },
ProcessReadinessWaitBehavior::WaitForCommandToExit => { ProcessReadinessWaitBehavior::WaitForCommandToExit => {
if !child if !child.wait().context("Failed waiting for process to finish")?.success() {
.wait()
.context("Failed waiting for process to finish")?
.success()
{
anyhow::bail!("Failed to spawn command"); anyhow::bail!("Failed to spawn command");
} }
} },
} }
Ok(Self { Ok(Self { child, stdout_logs_file, stderr_logs_file })
child,
stdout_logs_file,
stderr_logs_file,
})
} }
} }
impl Drop for Process { impl Drop for Process {
fn drop(&mut self) { fn drop(&mut self) {
self.child.kill().expect("Failed to kill the process"); self.child.kill().expect("Failed to kill the process");
self.stdout_logs_file self.stdout_logs_file.flush().expect("Failed to flush the stdout logs file");
.flush() self.stderr_logs_file.flush().expect("Failed to flush the stderr logs file");
.expect("Failed to flush the stdout logs file");
self.stderr_logs_file
.flush()
.expect("Failed to flush the stderr logs file");
} }
} }
+164 -83
View File
@@ -18,7 +18,9 @@ use alloy::{
eips::BlockNumberOrTag, eips::BlockNumberOrTag,
genesis::{Genesis, GenesisAccount}, genesis::{Genesis, GenesisAccount},
network::{Ethereum, EthereumWallet, NetworkWallet}, network::{Ethereum, EthereumWallet, NetworkWallet},
primitives::{Address, BlockHash, BlockNumber, BlockTimestamp, StorageKey, TxHash, U256}, primitives::{
Address, BlockHash, BlockNumber, BlockTimestamp, ChainId, StorageKey, TxHash, U256,
},
providers::{ providers::{
Provider, Provider,
ext::DebugApi, ext::DebugApi,
@@ -75,6 +77,7 @@ pub struct GethNode {
wallet: Arc<EthereumWallet>, wallet: Arc<EthereumWallet>,
nonce_manager: CachedNonceManager, nonce_manager: CachedNonceManager,
provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>, provider: OnceCell<ConcreteProvider<Ethereum, Arc<EthereumWallet>>>,
chain_id: ChainId,
} }
impl GethNode { impl GethNode {
@@ -91,8 +94,8 @@ impl GethNode {
const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress"; const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress";
const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet"; const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet";
const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(5 * 60); const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(10);
const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60); const TRACE_POLLING_DURATION: Duration = Duration::from_secs(10);
pub fn new( pub fn new(
context: impl AsRef<WorkingDirectoryConfiguration> context: impl AsRef<WorkingDirectoryConfiguration>
@@ -105,9 +108,7 @@ impl GethNode {
let wallet_configuration = AsRef::<WalletConfiguration>::as_ref(&context); let wallet_configuration = AsRef::<WalletConfiguration>::as_ref(&context);
let geth_configuration = AsRef::<GethConfiguration>::as_ref(&context); let geth_configuration = AsRef::<GethConfiguration>::as_ref(&context);
let geth_directory = working_directory_configuration let geth_directory = working_directory_configuration.as_path().join(Self::BASE_DIRECTORY);
.as_path()
.join(Self::BASE_DIRECTORY);
let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst); let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
let base_directory = geth_directory.join(id.to_string()); let base_directory = geth_directory.join(id.to_string());
@@ -125,9 +126,117 @@ impl GethNode {
wallet: wallet.clone(), wallet: wallet.clone(),
nonce_manager: Default::default(), nonce_manager: Default::default(),
provider: Default::default(), provider: Default::default(),
chain_id: CHAIN_ID,
} }
} }
pub async fn new_existing(private_key: &str, rpc_port: u16) -> anyhow::Result<Self> {
use alloy::{
primitives::FixedBytes,
providers::{Provider, ProviderBuilder},
signers::local::PrivateKeySigner,
};
let key_str = private_key.trim().strip_prefix("0x").unwrap_or(private_key.trim());
let key_bytes = alloy::hex::decode(key_str)
.map_err(|e| anyhow::anyhow!("Failed to decode private key hex: {}", e))?;
if key_bytes.len() != 32 {
anyhow::bail!(
"Private key must be 32 bytes (64 hex characters), got {}",
key_bytes.len()
);
}
let mut bytes = [0u8; 32];
bytes.copy_from_slice(&key_bytes);
let signer = PrivateKeySigner::from_bytes(&FixedBytes(bytes))
.map_err(|e| anyhow::anyhow!("Failed to create signer from private key: {}", e))?;
let address = signer.address();
let wallet = Arc::new(EthereumWallet::new(signer));
let connection_string = format!("http://localhost:{}", rpc_port);
let chain_id = ProviderBuilder::new()
.connect_http(connection_string.parse()?)
.get_chain_id()
.await
.context("Failed to query chain ID from RPC")?;
let node = Self {
connection_string: format!("http://localhost:{}", rpc_port),
base_directory: PathBuf::new(),
data_directory: PathBuf::new(),
logs_directory: PathBuf::new(),
geth: PathBuf::new(),
id: 0,
chain_id,
handle: None,
start_timeout: Duration::from_secs(0),
wallet,
nonce_manager: Default::default(),
provider: Default::default(),
};
// Check balance and fund if needed
node.ensure_funded(address).await?;
Ok(node)
}
/// Ensure that the given address has at least 1000 ETH, funding it from the node's managed
/// account if necessary.
async fn ensure_funded(&self, address: Address) -> anyhow::Result<()> {
use alloy::{
primitives::utils::{format_ether, parse_ether},
providers::{Provider, ProviderBuilder},
};
let provider = ProviderBuilder::new().connect_http(self.connection_string.parse()?);
let balance = provider.get_balance(address).await?;
let min_balance = parse_ether("1000")?;
if balance >= min_balance {
tracing::info!(
"Wallet {} already has sufficient balance: {} ETH",
address,
format_ether(balance)
);
return Ok(());
}
tracing::info!(
"Funding wallet {} (current: {} ETH, target: 1000 ETH)",
address,
format_ether(balance)
);
// Get the node's managed account
let accounts = provider.get_accounts().await?;
if accounts.is_empty() {
anyhow::bail!("No managed accounts available on the node to fund wallet");
}
let from_account = accounts[0];
let funding_amount = min_balance - balance;
let tx = TransactionRequest::default()
.from(from_account)
.to(address)
.value(funding_amount);
provider
.send_transaction(tx)
.await?
.get_receipt()
.await
.context("Failed to get receipt for funding transaction")?;
tracing::info!("Successfully funded wallet {}", address);
Ok(())
}
/// Create the node directory and call `geth init` to configure the genesis. /// Create the node directory and call `geth init` to configure the genesis.
#[instrument(level = "info", skip_all, fields(geth_node_id = self.id))] #[instrument(level = "info", skip_all, fields(geth_node_id = self.id))]
fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> { fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
@@ -222,15 +331,14 @@ impl GethNode {
ProcessReadinessWaitBehavior::TimeBoundedWaitFunction { ProcessReadinessWaitBehavior::TimeBoundedWaitFunction {
max_wait_duration: self.start_timeout, max_wait_duration: self.start_timeout,
check_function: Box::new(|_, stderr_line| match stderr_line { check_function: Box::new(|_, stderr_line| match stderr_line {
Some(line) => { Some(line) =>
if line.contains(Self::ERROR_MARKER) { if line.contains(Self::ERROR_MARKER) {
anyhow::bail!("Failed to start geth {line}"); anyhow::bail!("Failed to start geth {line}");
} else if line.contains(Self::READY_MARKER) { } else if line.contains(Self::READY_MARKER) {
Ok(true) Ok(true)
} else { } else {
Ok(false) Ok(false)
} },
}
None => Ok(false), None => Ok(false),
}), }),
}, },
@@ -243,7 +351,7 @@ impl GethNode {
self.shutdown() self.shutdown()
.context("Failed to gracefully shutdown after geth start error")?; .context("Failed to gracefully shutdown after geth start error")?;
return Err(err); return Err(err);
} },
} }
Ok(self) Ok(self)
@@ -255,7 +363,7 @@ impl GethNode {
construct_concurrency_limited_provider::<Ethereum, _>( construct_concurrency_limited_provider::<Ethereum, _>(
self.connection_string.as_str(), self.connection_string.as_str(),
FallbackGasFiller::default(), FallbackGasFiller::default(),
ChainIdFiller::new(Some(CHAIN_ID)), ChainIdFiller::new(Some(self.chain_id)),
NonceFiller::new(self.nonce_manager.clone()), NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(), self.wallet.clone(),
) )
@@ -349,23 +457,25 @@ impl EthereumNode for GethNode {
.context("Failed to submit transaction to geth node")?; .context("Failed to submit transaction to geth node")?;
let transaction_hash = *pending_transaction.tx_hash(); let transaction_hash = *pending_transaction.tx_hash();
// The following is a fix for the "transaction indexing is in progress" error that we used // The following is a fix for the "transaction indexing is in progress" error that we
// to get. You can find more information on this in the following GH issue in geth // used to get. You can find more information on this in the following GH issue in
// https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on, // geth https://github.com/ethereum/go-ethereum/issues/28877. To summarize what's going on,
// before we can get the receipt of the transaction it needs to have been indexed by the // before we can get the receipt of the transaction it needs to have been indexed by the
// node's indexer. Just because the transaction has been confirmed it doesn't mean that it // node's indexer. Just because the transaction has been confirmed it doesn't mean that
// has been indexed. When we call alloy's `get_receipt` it checks if the transaction was // it has been indexed. When we call alloy's `get_receipt` it checks if the
// confirmed. If it has been, then it will call `eth_getTransactionReceipt` method which // transaction was confirmed. If it has been, then it will call
// _might_ return the above error if the tx has not yet been indexed yet. So, we need to // `eth_getTransactionReceipt` method which _might_ return the above error if the tx
// implement a retry mechanism for the receipt to keep retrying to get it until it // has not yet been indexed yet. So, we need to implement a retry mechanism for the
// eventually works, but we only do that if the error we get back is the "transaction // receipt to keep retrying to get it until it eventually works, but we only do that
// indexing is in progress" error or if the receipt is None. // if the error we get back is the "transaction indexing is in progress" error or if
// the receipt is None.
// //
// Getting the transaction indexed and taking a receipt can take a long time especially when // Getting the transaction indexed and taking a receipt can take a long time especially
// a lot of transactions are being submitted to the node. Thus, while initially we only // when a lot of transactions are being submitted to the node. Thus, while initially
// allowed for 60 seconds of waiting with a 1 second delay in polling, we need to allow for // we only allowed for 60 seconds of waiting with a 1 second delay in polling, we
// a larger wait time. Therefore, in here we allow for 5 minutes of waiting with exponential // need to allow for a larger wait time. Therefore, in here we allow for 5 minutes of
// backoff each time we attempt to get the receipt and find that it's not available. // waiting with exponential backoff each time we attempt to get the receipt and find
// that it's not available.
poll( poll(
Self::RECEIPT_POLLING_DURATION, Self::RECEIPT_POLLING_DURATION,
PollingWaitBehavior::Constant(Duration::from_millis(200)), PollingWaitBehavior::Constant(Duration::from_millis(200)),
@@ -381,15 +491,12 @@ impl EthereumNode for GethNode {
true => Ok(ControlFlow::Continue(())), true => Ok(ControlFlow::Continue(())),
false => Err(error.into()), false => Err(error.into()),
} }
} },
} }
} }
}, },
) )
.instrument(tracing::info_span!( .instrument(tracing::info_span!("Awaiting transaction receipt", ?transaction_hash))
"Awaiting transaction receipt",
?transaction_hash
))
.await .await
}) })
} }
@@ -401,10 +508,8 @@ impl EthereumNode for GethNode {
trace_options: GethDebugTracingOptions, trace_options: GethDebugTracingOptions,
) -> Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + '_>> { ) -> Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + '_>> {
Box::pin(async move { Box::pin(async move {
let provider = self let provider =
.provider() self.provider().await.context("Failed to create provider for tracing")?;
.await
.context("Failed to create provider for tracing")?;
poll( poll(
Self::TRACE_POLLING_DURATION, Self::TRACE_POLLING_DURATION,
PollingWaitBehavior::Constant(Duration::from_millis(200)), PollingWaitBehavior::Constant(Duration::from_millis(200)),
@@ -412,10 +517,7 @@ impl EthereumNode for GethNode {
let provider = provider.clone(); let provider = provider.clone();
let trace_options = trace_options.clone(); let trace_options = trace_options.clone();
async move { async move {
match provider match provider.debug_trace_transaction(tx_hash, trace_options).await {
.debug_trace_transaction(tx_hash, trace_options)
.await
{
Ok(trace) => Ok(ControlFlow::Break(trace)), Ok(trace) => Ok(ControlFlow::Break(trace)),
Err(error) => { Err(error) => {
let error_string = error.to_string(); let error_string = error.to_string();
@@ -423,7 +525,7 @@ impl EthereumNode for GethNode {
true => Ok(ControlFlow::Continue(())), true => Ok(ControlFlow::Continue(())),
false => Err(error.into()), false => Err(error.into()),
} }
} },
} }
} }
}, },
@@ -542,6 +644,16 @@ impl EthereumNode for GethNode {
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>) as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
}) })
} }
fn resolve_signer_or_default(&self, address: Address) -> Address {
let signer_addresses: Vec<_> =
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet).collect();
if signer_addresses.contains(&address) {
address
} else {
self.wallet.default_signer().address()
}
}
} }
pub struct GethNodeResolver { pub struct GethNodeResolver {
@@ -628,10 +740,7 @@ impl ResolverApi for GethNodeResolver {
.context("Failed to get the geth block")? .context("Failed to get the geth block")?
.context("Failed to get the Geth block, perhaps there are no blocks?") .context("Failed to get the Geth block, perhaps there are no blocks?")
.and_then(|block| { .and_then(|block| {
block block.header.base_fee_per_gas.context("Failed to get the base fee per gas")
.header
.base_fee_per_gas
.context("Failed to get the base fee per gas")
}) })
}) })
} }
@@ -748,11 +857,7 @@ mod tests {
// Arrange // Arrange
let (context, node) = shared_state(); let (context, node) = shared_state();
let account_address = context let account_address = context.wallet_configuration.wallet().default_signer().address();
.wallet_configuration
.wallet()
.default_signer()
.address();
let transaction = TransactionRequest::default() let transaction = TransactionRequest::default()
.to(account_address) .to(account_address)
.value(U256::from(100_000_000_000_000u128)); .value(U256::from(100_000_000_000_000u128));
@@ -775,10 +880,7 @@ mod tests {
// Assert // Assert
let version = version.expect("Failed to get the version"); let version = version.expect("Failed to get the version");
assert!( assert!(version.starts_with("geth version"), "expected version string, got: '{version}'");
version.starts_with("geth version"),
"expected version string, got: '{version}'"
);
} }
#[tokio::test] #[tokio::test]
@@ -802,12 +904,8 @@ mod tests {
let node = shared_node(); let node = shared_node();
// Act // Act
let gas_limit = node let gas_limit =
.resolver() node.resolver().await.unwrap().block_gas_limit(BlockNumberOrTag::Latest).await;
.await
.unwrap()
.block_gas_limit(BlockNumberOrTag::Latest)
.await;
// Assert // Assert
let _ = gas_limit.expect("Failed to get the gas limit"); let _ = gas_limit.expect("Failed to get the gas limit");
@@ -820,12 +918,8 @@ mod tests {
let node = shared_node(); let node = shared_node();
// Act // Act
let coinbase = node let coinbase =
.resolver() node.resolver().await.unwrap().block_coinbase(BlockNumberOrTag::Latest).await;
.await
.unwrap()
.block_coinbase(BlockNumberOrTag::Latest)
.await;
// Assert // Assert
let _ = coinbase.expect("Failed to get the coinbase"); let _ = coinbase.expect("Failed to get the coinbase");
@@ -838,12 +932,8 @@ mod tests {
let node = shared_node(); let node = shared_node();
// Act // Act
let block_difficulty = node let block_difficulty =
.resolver() node.resolver().await.unwrap().block_difficulty(BlockNumberOrTag::Latest).await;
.await
.unwrap()
.block_difficulty(BlockNumberOrTag::Latest)
.await;
// Assert // Assert
let _ = block_difficulty.expect("Failed to get the block difficulty"); let _ = block_difficulty.expect("Failed to get the block difficulty");
@@ -856,12 +946,7 @@ mod tests {
let node = shared_node(); let node = shared_node();
// Act // Act
let block_hash = node let block_hash = node.resolver().await.unwrap().block_hash(BlockNumberOrTag::Latest).await;
.resolver()
.await
.unwrap()
.block_hash(BlockNumberOrTag::Latest)
.await;
// Assert // Assert
let _ = block_hash.expect("Failed to get the block hash"); let _ = block_hash.expect("Failed to get the block hash");
@@ -874,12 +959,8 @@ mod tests {
let node = shared_node(); let node = shared_node();
// Act // Act
let block_timestamp = node let block_timestamp =
.resolver() node.resolver().await.unwrap().block_timestamp(BlockNumberOrTag::Latest).await;
.await
.unwrap()
.block_timestamp(BlockNumberOrTag::Latest)
.await;
// Assert // Assert
let _ = block_timestamp.expect("Failed to get the block timestamp"); let _ = block_timestamp.expect("Failed to get the block timestamp");
@@ -116,7 +116,7 @@ impl LighthouseGethNode {
const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress"; const TRANSACTION_INDEXING_ERROR: &str = "transaction indexing is in progress";
const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet"; const TRANSACTION_TRACING_ERROR: &str = "historical state not available in path scheme yet";
const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(5 * 60); const RECEIPT_POLLING_DURATION: Duration = Duration::from_secs(30);
const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60); const TRACE_POLLING_DURATION: Duration = Duration::from_secs(60);
const VALIDATOR_MNEMONIC: &str = "giant issue aisle success illegal bike spike question tent bar rely arctic volcano long crawl hungry vocal artwork sniff fantasy very lucky have athlete"; const VALIDATOR_MNEMONIC: &str = "giant issue aisle success illegal bike spike question tent bar rely arctic volcano long crawl hungry vocal artwork sniff fantasy very lucky have athlete";
@@ -132,9 +132,7 @@ impl LighthouseGethNode {
let wallet_configuration = AsRef::<WalletConfiguration>::as_ref(&context); let wallet_configuration = AsRef::<WalletConfiguration>::as_ref(&context);
let kurtosis_configuration = AsRef::<KurtosisConfiguration>::as_ref(&context); let kurtosis_configuration = AsRef::<KurtosisConfiguration>::as_ref(&context);
let geth_directory = working_directory_configuration let geth_directory = working_directory_configuration.as_path().join(Self::BASE_DIRECTORY);
.as_path()
.join(Self::BASE_DIRECTORY);
let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst); let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
let base_directory = geth_directory.join(id.to_string()); let base_directory = geth_directory.join(id.to_string());
@@ -147,10 +145,7 @@ impl LighthouseGethNode {
http_connection_string: String::default(), http_connection_string: String::default(),
enclave_name: format!( enclave_name: format!(
"enclave-{}-{}", "enclave-{}-{}",
SystemTime::now() SystemTime::now().duration_since(UNIX_EPOCH).expect("Must not fail").as_nanos(),
.duration_since(UNIX_EPOCH)
.expect("Must not fail")
.as_nanos(),
id id
), ),
@@ -526,15 +521,12 @@ impl LighthouseGethNode {
true => Ok(ControlFlow::Continue(())), true => Ok(ControlFlow::Continue(())),
false => Err(error.into()), false => Err(error.into()),
} }
} },
} }
} }
}, },
) )
.instrument(tracing::info_span!( .instrument(tracing::info_span!("Awaiting transaction receipt", ?transaction_hash))
"Awaiting transaction receipt",
?transaction_hash
))
.await .await
}) })
} }
@@ -623,9 +615,7 @@ impl EthereumNode for LighthouseGethNode {
) -> Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + '_>> { ) -> Pin<Box<dyn Future<Output = anyhow::Result<GethTrace>> + '_>> {
Box::pin(async move { Box::pin(async move {
let provider = Arc::new( let provider = Arc::new(
self.http_provider() self.http_provider().await.context("Failed to create provider for tracing")?,
.await
.context("Failed to create provider for tracing")?,
); );
poll( poll(
Self::TRACE_POLLING_DURATION, Self::TRACE_POLLING_DURATION,
@@ -634,10 +624,7 @@ impl EthereumNode for LighthouseGethNode {
let provider = provider.clone(); let provider = provider.clone();
let trace_options = trace_options.clone(); let trace_options = trace_options.clone();
async move { async move {
match provider match provider.debug_trace_transaction(tx_hash, trace_options).await {
.debug_trace_transaction(tx_hash, trace_options)
.await
{
Ok(trace) => Ok(ControlFlow::Break(trace)), Ok(trace) => Ok(ControlFlow::Break(trace)),
Err(error) => { Err(error) => {
let error_string = error.to_string(); let error_string = error.to_string();
@@ -645,7 +632,7 @@ impl EthereumNode for LighthouseGethNode {
true => Ok(ControlFlow::Continue(())), true => Ok(ControlFlow::Continue(())),
false => Err(error.into()), false => Err(error.into()),
} }
} },
} }
} }
}, },
@@ -761,6 +748,16 @@ impl EthereumNode for LighthouseGethNode {
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>) as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
}) })
} }
fn resolve_signer_or_default(&self, address: Address) -> Address {
let signer_addresses: Vec<_> =
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet).collect();
if signer_addresses.contains(&address) {
address
} else {
self.wallet.default_signer().address()
}
}
} }
pub struct LighthouseGethNodeResolver<F: TxFiller<Ethereum>, P: Provider<Ethereum>> { pub struct LighthouseGethNodeResolver<F: TxFiller<Ethereum>, P: Provider<Ethereum>> {
@@ -849,10 +846,7 @@ impl<F: TxFiller<Ethereum>, P: Provider<Ethereum>> ResolverApi
.context("Failed to get the geth block")? .context("Failed to get the geth block")?
.context("Failed to get the Geth block, perhaps there are no blocks?") .context("Failed to get the Geth block, perhaps there are no blocks?")
.and_then(|block| { .and_then(|block| {
block block.header.base_fee_per_gas.context("Failed to get the base fee per gas")
.header
.base_fee_per_gas
.context("Failed to get the base fee per gas")
}) })
}) })
} }
@@ -906,11 +900,7 @@ impl Node for LighthouseGethNode {
.spawn() .spawn()
.expect("Failed to spawn the enclave kill command"); .expect("Failed to spawn the enclave kill command");
if !child if !child.wait().expect("Failed to wait for the enclave kill command").success() {
.wait()
.expect("Failed to wait for the enclave kill command")
.success()
{
let stdout = { let stdout = {
let mut stdout = String::default(); let mut stdout = String::default();
child child
@@ -1136,11 +1126,7 @@ mod tests {
let (context, node) = new_node(); let (context, node) = new_node();
node.fund_all_accounts().await.expect("Failed"); node.fund_all_accounts().await.expect("Failed");
let account_address = context let account_address = context.wallet_configuration.wallet().default_signer().address();
.wallet_configuration
.wallet()
.default_signer()
.address();
let transaction = TransactionRequest::default() let transaction = TransactionRequest::default()
.to(account_address) .to(account_address)
.value(U256::from(100_000_000_000_000u128)); .value(U256::from(100_000_000_000_000u128));
@@ -1163,10 +1149,7 @@ mod tests {
// Assert // Assert
let version = version.expect("Failed to get the version"); let version = version.expect("Failed to get the version");
assert!( assert!(version.starts_with("CLI Version"), "expected version string, got: '{version}'");
version.starts_with("CLI Version"),
"expected version string, got: '{version}'"
);
} }
#[tokio::test] #[tokio::test]
@@ -1190,12 +1173,8 @@ mod tests {
let (_context, node) = new_node(); let (_context, node) = new_node();
// Act // Act
let gas_limit = node let gas_limit =
.resolver() node.resolver().await.unwrap().block_gas_limit(BlockNumberOrTag::Latest).await;
.await
.unwrap()
.block_gas_limit(BlockNumberOrTag::Latest)
.await;
// Assert // Assert
let _ = gas_limit.expect("Failed to get the gas limit"); let _ = gas_limit.expect("Failed to get the gas limit");
@@ -1208,12 +1187,8 @@ mod tests {
let (_context, node) = new_node(); let (_context, node) = new_node();
// Act // Act
let coinbase = node let coinbase =
.resolver() node.resolver().await.unwrap().block_coinbase(BlockNumberOrTag::Latest).await;
.await
.unwrap()
.block_coinbase(BlockNumberOrTag::Latest)
.await;
// Assert // Assert
let _ = coinbase.expect("Failed to get the coinbase"); let _ = coinbase.expect("Failed to get the coinbase");
@@ -1226,12 +1201,8 @@ mod tests {
let (_context, node) = new_node(); let (_context, node) = new_node();
// Act // Act
let block_difficulty = node let block_difficulty =
.resolver() node.resolver().await.unwrap().block_difficulty(BlockNumberOrTag::Latest).await;
.await
.unwrap()
.block_difficulty(BlockNumberOrTag::Latest)
.await;
// Assert // Assert
let _ = block_difficulty.expect("Failed to get the block difficulty"); let _ = block_difficulty.expect("Failed to get the block difficulty");
@@ -1244,12 +1215,7 @@ mod tests {
let (_context, node) = new_node(); let (_context, node) = new_node();
// Act // Act
let block_hash = node let block_hash = node.resolver().await.unwrap().block_hash(BlockNumberOrTag::Latest).await;
.resolver()
.await
.unwrap()
.block_hash(BlockNumberOrTag::Latest)
.await;
// Assert // Assert
let _ = block_hash.expect("Failed to get the block hash"); let _ = block_hash.expect("Failed to get the block hash");
@@ -1262,12 +1228,8 @@ mod tests {
let (_context, node) = new_node(); let (_context, node) = new_node();
// Act // Act
let block_timestamp = node let block_timestamp =
.resolver() node.resolver().await.unwrap().block_timestamp(BlockNumberOrTag::Latest).await;
.await
.unwrap()
.block_timestamp(BlockNumberOrTag::Latest)
.await;
// Assert // Assert
let _ = block_timestamp.expect("Failed to get the block timestamp"); let _ = block_timestamp.expect("Failed to get the block timestamp");
@@ -80,6 +80,7 @@ pub struct SubstrateNode {
wallet: Arc<EthereumWallet>, wallet: Arc<EthereumWallet>,
nonce_manager: CachedNonceManager, nonce_manager: CachedNonceManager,
provider: OnceCell<ConcreteProvider<ReviveNetwork, Arc<EthereumWallet>>>, provider: OnceCell<ConcreteProvider<ReviveNetwork, Arc<EthereumWallet>>>,
chain_id: alloy::primitives::ChainId,
} }
impl SubstrateNode { impl SubstrateNode {
@@ -108,9 +109,7 @@ impl SubstrateNode {
) -> Self { ) -> Self {
let working_directory_path = let working_directory_path =
AsRef::<WorkingDirectoryConfiguration>::as_ref(&context).as_path(); AsRef::<WorkingDirectoryConfiguration>::as_ref(&context).as_path();
let eth_rpc_path = AsRef::<EthRpcConfiguration>::as_ref(&context) let eth_rpc_path = AsRef::<EthRpcConfiguration>::as_ref(&context).path.as_path();
.path
.as_path();
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet(); let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
let substrate_directory = working_directory_path.join(Self::BASE_DIRECTORY); let substrate_directory = working_directory_path.join(Self::BASE_DIRECTORY);
@@ -131,9 +130,61 @@ impl SubstrateNode {
wallet: wallet.clone(), wallet: wallet.clone(),
nonce_manager: Default::default(), nonce_manager: Default::default(),
provider: Default::default(), provider: Default::default(),
chain_id: CHAIN_ID,
} }
} }
pub async fn new_existing(private_key: &str, rpc_port: u16) -> anyhow::Result<Self> {
use alloy::{
primitives::FixedBytes,
providers::{Provider, ProviderBuilder},
signers::local::PrivateKeySigner,
};
let key_str = private_key.trim().strip_prefix("0x").unwrap_or(private_key.trim());
let key_bytes = alloy::hex::decode(key_str)
.map_err(|e| anyhow::anyhow!("Failed to decode private key hex: {}", e))?;
if key_bytes.len() != 32 {
anyhow::bail!(
"Private key must be 32 bytes (64 hex characters), got {}",
key_bytes.len()
);
}
let mut bytes = [0u8; 32];
bytes.copy_from_slice(&key_bytes);
let signer = PrivateKeySigner::from_bytes(&FixedBytes(bytes))
.map_err(|e| anyhow::anyhow!("Failed to create signer from private key: {}", e))?;
let wallet = Arc::new(EthereumWallet::new(signer));
let rpc_url = format!("http://localhost:{}", rpc_port);
// Query the chain ID from the RPC
let chain_id = ProviderBuilder::new()
.connect_http(rpc_url.parse()?)
.get_chain_id()
.await
.context("Failed to query chain ID from RPC")?;
Ok(Self {
id: 0,
node_binary: PathBuf::new(),
eth_proxy_binary: PathBuf::new(),
export_chainspec_command: String::new(),
rpc_url,
base_directory: PathBuf::new(),
logs_directory: PathBuf::new(),
substrate_process: None,
eth_proxy_process: None,
wallet,
nonce_manager: Default::default(),
provider: Default::default(),
chain_id,
})
}
fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> { fn init(&mut self, mut genesis: Genesis) -> anyhow::Result<&mut Self> {
let _ = remove_dir_all(self.base_directory.as_path()); let _ = remove_dir_all(self.base_directory.as_path());
let _ = clear_directory(&self.base_directory); let _ = clear_directory(&self.base_directory);
@@ -264,7 +315,7 @@ impl SubstrateNode {
self.shutdown() self.shutdown()
.context("Failed to gracefully shutdown after substrate start error")?; .context("Failed to gracefully shutdown after substrate start error")?;
return Err(err); return Err(err);
} },
} }
let eth_proxy_process = Process::new( let eth_proxy_process = Process::new(
@@ -299,7 +350,7 @@ impl SubstrateNode {
self.shutdown() self.shutdown()
.context("Failed to gracefully shutdown after eth proxy start error")?; .context("Failed to gracefully shutdown after eth proxy start error")?;
return Err(err); return Err(err);
} },
} }
Ok(()) Ok(())
@@ -309,10 +360,7 @@ impl SubstrateNode {
&self, &self,
genesis: &Genesis, genesis: &Genesis,
) -> anyhow::Result<Vec<(String, u128)>> { ) -> anyhow::Result<Vec<(String, u128)>> {
genesis genesis.alloc.iter().try_fold(Vec::new(), |mut vec, (address, acc)| {
.alloc
.iter()
.try_fold(Vec::new(), |mut vec, (address, acc)| {
let substrate_address = Self::eth_to_substrate_address(address); let substrate_address = Self::eth_to_substrate_address(address);
let balance = acc.balance.try_into()?; let balance = acc.balance.try_into()?;
vec.push((substrate_address, balance)); vec.push((substrate_address, balance));
@@ -350,7 +398,7 @@ impl SubstrateNode {
construct_concurrency_limited_provider::<ReviveNetwork, _>( construct_concurrency_limited_provider::<ReviveNetwork, _>(
self.rpc_url.as_str(), self.rpc_url.as_str(),
FallbackGasFiller::new(u64::MAX, 5_000_000_000, 1_000_000_000), FallbackGasFiller::new(u64::MAX, 5_000_000_000, 1_000_000_000),
ChainIdFiller::new(Some(CHAIN_ID)), ChainIdFiller::new(Some(self.chain_id)),
NonceFiller::new(self.nonce_manager.clone()), NonceFiller::new(self.nonce_manager.clone()),
self.wallet.clone(), self.wallet.clone(),
) )
@@ -412,10 +460,7 @@ impl EthereumNode for SubstrateNode {
transaction: TransactionRequest, transaction: TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> { ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
Box::pin(async move { Box::pin(async move {
let provider = self let provider = self.provider().await.context("Failed to create the provider")?;
.provider()
.await
.context("Failed to create the provider")?;
execute_transaction(provider, transaction).await execute_transaction(provider, transaction).await
}) })
} }
@@ -541,6 +586,16 @@ impl EthereumNode for SubstrateNode {
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>) as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
}) })
} }
fn resolve_signer_or_default(&self, address: Address) -> Address {
let signer_addresses: Vec<_> =
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet).collect();
if signer_addresses.contains(&address) {
address
} else {
self.wallet.default_signer().address()
}
}
} }
pub struct SubstrateNodeResolver { pub struct SubstrateNodeResolver {
@@ -627,10 +682,7 @@ impl ResolverApi for SubstrateNodeResolver {
.context("Failed to get the substrate block")? .context("Failed to get the substrate block")?
.context("Failed to get the substrate block, perhaps the chain has no blocks?") .context("Failed to get the substrate block, perhaps the chain has no blocks?")
.and_then(|block| { .and_then(|block| {
block block.header.base_fee_per_gas.context("Failed to get the base fee per gas")
.header
.base_fee_per_gas
.context("Failed to get the base fee per gas")
}) })
}) })
} }
@@ -909,33 +961,27 @@ impl TransactionBuilder<ReviveNetwork> for <Ethereum as Network>::TransactionReq
); );
match result { match result {
Ok(unsigned_tx) => Ok(unsigned_tx), Ok(unsigned_tx) => Ok(unsigned_tx),
Err(UnbuiltTransactionError { request, error }) => { Err(UnbuiltTransactionError { request, error }) =>
Err(UnbuiltTransactionError::<ReviveNetwork> { Err(UnbuiltTransactionError::<ReviveNetwork> {
request, request,
error: match error { error: match error {
TransactionBuilderError::InvalidTransactionRequest(tx_type, items) => { TransactionBuilderError::InvalidTransactionRequest(tx_type, items) =>
TransactionBuilderError::InvalidTransactionRequest(tx_type, items) TransactionBuilderError::InvalidTransactionRequest(tx_type, items),
} TransactionBuilderError::UnsupportedSignatureType =>
TransactionBuilderError::UnsupportedSignatureType => { TransactionBuilderError::UnsupportedSignatureType,
TransactionBuilderError::UnsupportedSignatureType TransactionBuilderError::Signer(error) =>
} TransactionBuilderError::Signer(error),
TransactionBuilderError::Signer(error) => { TransactionBuilderError::Custom(error) =>
TransactionBuilderError::Signer(error) TransactionBuilderError::Custom(error),
}
TransactionBuilderError::Custom(error) => {
TransactionBuilderError::Custom(error)
}
}, },
}) }),
}
} }
} }
async fn build<W: alloy::network::NetworkWallet<ReviveNetwork>>( async fn build<W: alloy::network::NetworkWallet<ReviveNetwork>>(
self, self,
wallet: &W, wallet: &W,
) -> Result<<ReviveNetwork as Network>::TxEnvelope, TransactionBuilderError<ReviveNetwork>> ) -> Result<<ReviveNetwork as Network>::TxEnvelope, TransactionBuilderError<ReviveNetwork>> {
{
Ok(wallet.sign_request(self).await?) Ok(wallet.sign_request(self).await?)
} }
} }
@@ -1195,11 +1241,7 @@ mod tests {
let provider = node.provider().await.expect("Failed to create provider"); let provider = node.provider().await.expect("Failed to create provider");
let account_address = context let account_address = context.wallet_configuration.wallet().default_signer().address();
.wallet_configuration
.wallet()
.default_signer()
.address();
let transaction = TransactionRequest::default() let transaction = TransactionRequest::default()
.to(account_address) .to(account_address)
.value(U256::from(100_000_000_000_000u128)); .value(U256::from(100_000_000_000_000u128));
@@ -1244,9 +1286,8 @@ mod tests {
.expect("init failed"); .expect("init failed");
// Check that the patched chainspec file was generated // Check that the patched chainspec file was generated
let final_chainspec_path = dummy_node let final_chainspec_path =
.base_directory dummy_node.base_directory.join(SubstrateNode::CHAIN_SPEC_JSON_FILE);
.join(SubstrateNode::CHAIN_SPEC_JSON_FILE);
assert!(final_chainspec_path.exists(), "Chainspec file should exist"); assert!(final_chainspec_path.exists(), "Chainspec file should exist");
let contents = fs::read_to_string(&final_chainspec_path).expect("Failed to read chainspec"); let contents = fs::read_to_string(&final_chainspec_path).expect("Failed to read chainspec");
@@ -1352,10 +1393,7 @@ mod tests {
for (eth_addr, expected_ss58) in cases { for (eth_addr, expected_ss58) in cases {
let result = SubstrateNode::eth_to_substrate_address(&eth_addr.parse().unwrap()); let result = SubstrateNode::eth_to_substrate_address(&eth_addr.parse().unwrap());
assert_eq!( assert_eq!(result, expected_ss58, "Mismatch for Ethereum address {eth_addr}");
result, expected_ss58,
"Mismatch for Ethereum address {eth_addr}"
);
} }
} }
@@ -1406,12 +1444,8 @@ mod tests {
let node = shared_node(); let node = shared_node();
// Act // Act
let gas_limit = node let gas_limit =
.resolver() node.resolver().await.unwrap().block_gas_limit(BlockNumberOrTag::Latest).await;
.await
.unwrap()
.block_gas_limit(BlockNumberOrTag::Latest)
.await;
// Assert // Assert
let _ = gas_limit.expect("Failed to get the gas limit"); let _ = gas_limit.expect("Failed to get the gas limit");
@@ -1424,12 +1458,8 @@ mod tests {
let node = shared_node(); let node = shared_node();
// Act // Act
let coinbase = node let coinbase =
.resolver() node.resolver().await.unwrap().block_coinbase(BlockNumberOrTag::Latest).await;
.await
.unwrap()
.block_coinbase(BlockNumberOrTag::Latest)
.await;
// Assert // Assert
let _ = coinbase.expect("Failed to get the coinbase"); let _ = coinbase.expect("Failed to get the coinbase");
@@ -1442,12 +1472,8 @@ mod tests {
let node = shared_node(); let node = shared_node();
// Act // Act
let block_difficulty = node let block_difficulty =
.resolver() node.resolver().await.unwrap().block_difficulty(BlockNumberOrTag::Latest).await;
.await
.unwrap()
.block_difficulty(BlockNumberOrTag::Latest)
.await;
// Assert // Assert
let _ = block_difficulty.expect("Failed to get the block difficulty"); let _ = block_difficulty.expect("Failed to get the block difficulty");
@@ -1460,12 +1486,7 @@ mod tests {
let node = shared_node(); let node = shared_node();
// Act // Act
let block_hash = node let block_hash = node.resolver().await.unwrap().block_hash(BlockNumberOrTag::Latest).await;
.resolver()
.await
.unwrap()
.block_hash(BlockNumberOrTag::Latest)
.await;
// Assert // Assert
let _ = block_hash.expect("Failed to get the block hash"); let _ = block_hash.expect("Failed to get the block hash");
@@ -1478,12 +1499,8 @@ mod tests {
let node = shared_node(); let node = shared_node();
// Act // Act
let block_timestamp = node let block_timestamp =
.resolver() node.resolver().await.unwrap().block_timestamp(BlockNumberOrTag::Latest).await;
.await
.unwrap()
.block_timestamp(BlockNumberOrTag::Latest)
.await;
// Assert // Assert
let _ = block_timestamp.expect("Failed to get the block timestamp"); let _ = block_timestamp.expect("Failed to get the block timestamp");
@@ -3,25 +3,17 @@
//! ## Required Binaries //! ## Required Binaries
//! This module requires the following binaries to be compiled and available in your PATH: //! This module requires the following binaries to be compiled and available in your PATH:
//! //!
//! 1. **polkadot-parachain**: //! 1. **polkadot-parachain**: ```bash git clone https://github.com/paritytech/polkadot-sdk.git cd
//! ```bash //! polkadot-sdk cargo build --release --locked -p polkadot-parachain-bin --bin
//! git clone https://github.com/paritytech/polkadot-sdk.git //! polkadot-parachain ```
//! cd polkadot-sdk
//! cargo build --release --locked -p polkadot-parachain-bin --bin polkadot-parachain
//! ```
//! //!
//! 2. **eth-rpc** (Revive EVM RPC server): //! 2. **eth-rpc** (Revive EVM RPC server): ```bash git clone https://github.com/paritytech/polkadot-sdk.git
//! ```bash //! cd polkadot-sdk cargo build --locked --profile production -p pallet-revive-eth-rpc --bin
//! git clone https://github.com/paritytech/polkadot-sdk.git //! eth-rpc ```
//! cd polkadot-sdk
//! cargo build --locked --profile production -p pallet-revive-eth-rpc --bin eth-rpc
//! ```
//! //!
//! 3. **polkadot** (for the relay chain): //! 3. **polkadot** (for the relay chain): ```bash # In polkadot-sdk directory cargo build --locked
//! ```bash //! --profile testnet --features fast-runtime --bin polkadot --bin polkadot-prepare-worker --bin
//! # In polkadot-sdk directory //! polkadot-execute-worker ```
//! cargo build --locked --profile testnet --features fast-runtime --bin polkadot --bin polkadot-prepare-worker --bin polkadot-execute-worker
//! ```
//! //!
//! Make sure to add the build output directories to your PATH or provide //! Make sure to add the build output directories to your PATH or provide
//! the full paths in your configuration. //! the full paths in your configuration.
@@ -130,14 +122,10 @@ impl ZombieNode {
+ AsRef<EthRpcConfiguration> + AsRef<EthRpcConfiguration>
+ AsRef<WalletConfiguration>, + AsRef<WalletConfiguration>,
) -> Self { ) -> Self {
let eth_proxy_binary = AsRef::<EthRpcConfiguration>::as_ref(&context) let eth_proxy_binary = AsRef::<EthRpcConfiguration>::as_ref(&context).path.to_owned();
.path
.to_owned();
let working_directory_path = AsRef::<WorkingDirectoryConfiguration>::as_ref(&context); let working_directory_path = AsRef::<WorkingDirectoryConfiguration>::as_ref(&context);
let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst); let id = NODE_COUNT.fetch_add(1, Ordering::SeqCst);
let base_directory = working_directory_path let base_directory = working_directory_path.join(Self::BASE_DIRECTORY).join(id.to_string());
.join(Self::BASE_DIRECTORY)
.join(id.to_string());
let base_directory = base_directory.canonicalize().unwrap_or(base_directory); let base_directory = base_directory.canonicalize().unwrap_or(base_directory);
let logs_directory = base_directory.join(Self::LOGS_DIRECTORY); let logs_directory = base_directory.join(Self::LOGS_DIRECTORY);
let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet(); let wallet = AsRef::<WalletConfiguration>::as_ref(&context).wallet();
@@ -205,10 +193,8 @@ impl ZombieNode {
} }
fn spawn_process(&mut self) -> anyhow::Result<()> { fn spawn_process(&mut self) -> anyhow::Result<()> {
let network_config = self let network_config =
.network_config self.network_config.clone().context("Node not initialized, call init() first")?;
.clone()
.context("Node not initialized, call init() first")?;
let rt = tokio::runtime::Runtime::new().unwrap(); let rt = tokio::runtime::Runtime::new().unwrap();
let network = rt.block_on(async { let network = rt.block_on(async {
@@ -256,7 +242,7 @@ impl ZombieNode {
self.shutdown() self.shutdown()
.context("Failed to gracefully shutdown after eth proxy start error")?; .context("Failed to gracefully shutdown after eth proxy start error")?;
return Err(err); return Err(err);
} },
} }
tracing::debug!("eth-rpc is up"); tracing::debug!("eth-rpc is up");
@@ -280,10 +266,7 @@ impl ZombieNode {
let output = cmd.output().context("Failed to export the chain-spec")?; let output = cmd.output().context("Failed to export the chain-spec")?;
if !output.status.success() { if !output.status.success() {
anyhow::bail!( anyhow::bail!("Build chain-spec failed: {}", String::from_utf8_lossy(&output.stderr));
"Build chain-spec failed: {}",
String::from_utf8_lossy(&output.stderr)
);
} }
let content = String::from_utf8(output.stdout) let content = String::from_utf8(output.stdout)
@@ -344,10 +327,7 @@ impl ZombieNode {
&self, &self,
genesis: &Genesis, genesis: &Genesis,
) -> anyhow::Result<Vec<(String, u128)>> { ) -> anyhow::Result<Vec<(String, u128)>> {
genesis genesis.alloc.iter().try_fold(Vec::new(), |mut vec, (address, acc)| {
.alloc
.iter()
.try_fold(Vec::new(), |mut vec, (address, acc)| {
let polkadot_address = Self::eth_to_polkadot_address(address); let polkadot_address = Self::eth_to_polkadot_address(address);
let balance = acc.balance.try_into()?; let balance = acc.balance.try_into()?;
vec.push((polkadot_address, balance)); vec.push((polkadot_address, balance));
@@ -448,16 +428,20 @@ impl EthereumNode for ZombieNode {
transaction: alloy::rpc::types::TransactionRequest, transaction: alloy::rpc::types::TransactionRequest,
) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> { ) -> Pin<Box<dyn Future<Output = anyhow::Result<TransactionReceipt>> + '_>> {
Box::pin(async move { Box::pin(async move {
let receipt = self let pending = self
.provider() .provider()
.await .await
.context("Failed to create provider for transaction submission")? .context("Failed to create provider for transaction submission")?
.send_transaction(transaction) .send_transaction(transaction)
.await .await
.context("Failed to submit transaction to proxy")? .context("Failed to submit transaction to proxy")?;
.get_receipt()
let receipt =
tokio::time::timeout(std::time::Duration::from_secs(120), pending.get_receipt())
.await .await
.context("Timeout waiting for transaction receipt")?
.context("Failed to fetch transaction receipt from proxy")?; .context("Failed to fetch transaction receipt from proxy")?;
Ok(receipt) Ok(receipt)
}) })
} }
@@ -585,6 +569,16 @@ impl EthereumNode for ZombieNode {
as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>) as Pin<Box<dyn Stream<Item = MinedBlockInformation>>>)
}) })
} }
fn resolve_signer_or_default(&self, address: Address) -> Address {
let signer_addresses: Vec<_> =
<EthereumWallet as NetworkWallet<Ethereum>>::signer_addresses(&self.wallet).collect();
if signer_addresses.contains(&address) {
address
} else {
self.wallet.default_signer().address()
}
}
} }
pub struct ZombieNodeResolver<F: TxFiller<ReviveNetwork>, P: Provider<ReviveNetwork>> { pub struct ZombieNodeResolver<F: TxFiller<ReviveNetwork>, P: Provider<ReviveNetwork>> {
@@ -673,10 +667,7 @@ impl<F: TxFiller<ReviveNetwork>, P: Provider<ReviveNetwork>> ResolverApi
.context("Failed to get the zombie block")? .context("Failed to get the zombie block")?
.context("Failed to get the zombie block, perhaps the chain has no blocks?") .context("Failed to get the zombie block, perhaps the chain has no blocks?")
.and_then(|block| { .and_then(|block| {
block block.header.base_fee_per_gas.context("Failed to get the base fee per gas")
.header
.base_fee_per_gas
.context("Failed to get the base fee per gas")
}) })
}) })
} }
@@ -788,10 +779,8 @@ mod tests {
pub async fn new_node() -> (TestExecutionContext, ZombieNode) { pub async fn new_node() -> (TestExecutionContext, ZombieNode) {
let context = test_config(); let context = test_config();
let mut node = ZombieNode::new( let mut node =
context.polkadot_parachain_configuration.path.clone(), ZombieNode::new(context.polkadot_parachain_configuration.path.clone(), &context);
&context,
);
let genesis = context.genesis_configuration.genesis().unwrap().clone(); let genesis = context.genesis_configuration.genesis().unwrap().clone();
node.init(genesis).unwrap(); node.init(genesis).unwrap();
@@ -856,14 +845,11 @@ mod tests {
"#; "#;
let context = test_config(); let context = test_config();
let mut node = ZombieNode::new( let mut node =
context.polkadot_parachain_configuration.path.clone(), ZombieNode::new(context.polkadot_parachain_configuration.path.clone(), &context);
&context,
);
// Call `init()` // Call `init()`
node.init(serde_json::from_str(genesis_content).unwrap()) node.init(serde_json::from_str(genesis_content).unwrap()).expect("init failed");
.expect("init failed");
// Check that the patched chainspec file was generated // Check that the patched chainspec file was generated
let final_chainspec_path = node.base_directory.join(ZombieNode::CHAIN_SPEC_JSON_FILE); let final_chainspec_path = node.base_directory.join(ZombieNode::CHAIN_SPEC_JSON_FILE);
@@ -904,10 +890,7 @@ mod tests {
"#; "#;
let context = test_config(); let context = test_config();
let node = ZombieNode::new( let node = ZombieNode::new(context.polkadot_parachain_configuration.path.clone(), &context);
context.polkadot_parachain_configuration.path.clone(),
&context,
);
let result = node let result = node
.extract_balance_from_genesis_file(&serde_json::from_str(genesis_json).unwrap()) .extract_balance_from_genesis_file(&serde_json::from_str(genesis_json).unwrap())
@@ -969,10 +952,7 @@ mod tests {
for (eth_addr, expected_ss58) in cases { for (eth_addr, expected_ss58) in cases {
let result = ZombieNode::eth_to_polkadot_address(&eth_addr.parse().unwrap()); let result = ZombieNode::eth_to_polkadot_address(&eth_addr.parse().unwrap());
assert_eq!( assert_eq!(result, expected_ss58, "Mismatch for Ethereum address {eth_addr}");
result, expected_ss58,
"Mismatch for Ethereum address {eth_addr}"
);
} }
} }
@@ -980,10 +960,7 @@ mod tests {
fn eth_rpc_version_works() { fn eth_rpc_version_works() {
// Arrange // Arrange
let context = test_config(); let context = test_config();
let node = ZombieNode::new( let node = ZombieNode::new(context.polkadot_parachain_configuration.path.clone(), &context);
context.polkadot_parachain_configuration.path.clone(),
&context,
);
// Act // Act
let version = node.eth_rpc_version().unwrap(); let version = node.eth_rpc_version().unwrap();
@@ -999,10 +976,7 @@ mod tests {
fn version_works() { fn version_works() {
// Arrange // Arrange
let context = test_config(); let context = test_config();
let node = ZombieNode::new( let node = ZombieNode::new(context.polkadot_parachain_configuration.path.clone(), &context);
context.polkadot_parachain_configuration.path.clone(),
&context,
);
// Act // Act
let version = node.version().unwrap(); let version = node.version().unwrap();
@@ -1040,12 +1014,8 @@ mod tests {
let node = shared_node().await; let node = shared_node().await;
// Act // Act
let gas_limit = node let gas_limit =
.resolver() node.resolver().await.unwrap().block_gas_limit(BlockNumberOrTag::Latest).await;
.await
.unwrap()
.block_gas_limit(BlockNumberOrTag::Latest)
.await;
// Assert // Assert
let _ = gas_limit.expect("Failed to get the gas limit"); let _ = gas_limit.expect("Failed to get the gas limit");
@@ -1058,12 +1028,8 @@ mod tests {
let node = shared_node().await; let node = shared_node().await;
// Act // Act
let coinbase = node let coinbase =
.resolver() node.resolver().await.unwrap().block_coinbase(BlockNumberOrTag::Latest).await;
.await
.unwrap()
.block_coinbase(BlockNumberOrTag::Latest)
.await;
// Assert // Assert
let _ = coinbase.expect("Failed to get the coinbase"); let _ = coinbase.expect("Failed to get the coinbase");
@@ -1076,12 +1042,8 @@ mod tests {
let node = shared_node().await; let node = shared_node().await;
// Act // Act
let block_difficulty = node let block_difficulty =
.resolver() node.resolver().await.unwrap().block_difficulty(BlockNumberOrTag::Latest).await;
.await
.unwrap()
.block_difficulty(BlockNumberOrTag::Latest)
.await;
// Assert // Assert
let _ = block_difficulty.expect("Failed to get the block difficulty"); let _ = block_difficulty.expect("Failed to get the block difficulty");
@@ -1094,12 +1056,7 @@ mod tests {
let node = shared_node().await; let node = shared_node().await;
// Act // Act
let block_hash = node let block_hash = node.resolver().await.unwrap().block_hash(BlockNumberOrTag::Latest).await;
.resolver()
.await
.unwrap()
.block_hash(BlockNumberOrTag::Latest)
.await;
// Assert // Assert
let _ = block_hash.expect("Failed to get the block hash"); let _ = block_hash.expect("Failed to get the block hash");
@@ -1112,12 +1069,8 @@ mod tests {
let node = shared_node().await; let node = shared_node().await;
// Act // Act
let block_timestamp = node let block_timestamp =
.resolver() node.resolver().await.unwrap().block_timestamp(BlockNumberOrTag::Latest).await;
.await
.unwrap()
.block_timestamp(BlockNumberOrTag::Latest)
.await;
// Assert // Assert
let _ = block_timestamp.expect("Failed to get the block timestamp"); let _ = block_timestamp.expect("Failed to get the block timestamp");
@@ -11,9 +11,7 @@ pub struct ConcurrencyLimiterLayer {
impl ConcurrencyLimiterLayer { impl ConcurrencyLimiterLayer {
pub fn new(permit_count: usize) -> Self { pub fn new(permit_count: usize) -> Self {
Self { Self { semaphore: Arc::new(Semaphore::new(permit_count)) }
semaphore: Arc::new(Semaphore::new(permit_count)),
}
} }
} }
@@ -21,10 +19,7 @@ impl<S> Layer<S> for ConcurrencyLimiterLayer {
type Service = ConcurrencyLimiterService<S>; type Service = ConcurrencyLimiterService<S>;
fn layer(&self, inner: S) -> Self::Service { fn layer(&self, inner: S) -> Self::Service {
ConcurrencyLimiterService { ConcurrencyLimiterService { service: inner, semaphore: self.semaphore.clone() }
service: inner,
semaphore: self.semaphore.clone(),
}
} }
} }
@@ -55,10 +50,7 @@ where
let future = self.service.call(req); let future = self.service.call(req);
Box::pin(async move { Box::pin(async move {
let _permit = semaphore let _permit = semaphore.acquire().await.expect("Semaphore has been closed");
.acquire()
.await
.expect("Semaphore has been closed");
tracing::debug!( tracing::debug!(
available_permits = semaphore.available_permits(), available_permits = semaphore.available_permits(),
"Acquired Semaphore Permit" "Acquired Semaphore Permit"
@@ -21,18 +21,13 @@ impl FallbackGasFiller {
default_max_fee_per_gas: u128, default_max_fee_per_gas: u128,
default_priority_fee: u128, default_priority_fee: u128,
) -> Self { ) -> Self {
Self { Self { inner: GasFiller, default_gas_limit, default_max_fee_per_gas, default_priority_fee }
inner: GasFiller,
default_gas_limit,
default_max_fee_per_gas,
default_priority_fee,
}
} }
} }
impl Default for FallbackGasFiller { impl Default for FallbackGasFiller {
fn default() -> Self { fn default() -> Self {
FallbackGasFiller::new(25_000_000, 1_000_000_000, 1_000_000_000) FallbackGasFiller::new(10_000_000, 1_000_000_000, 1_000_000_000)
} }
} }
@@ -56,12 +51,9 @@ where
provider: &P, provider: &P,
tx: &<N as Network>::TransactionRequest, tx: &<N as Network>::TransactionRequest,
) -> TransportResult<Self::Fillable> { ) -> TransportResult<Self::Fillable> {
// Try to fetch GasFillers fillable (gas_price, base_fee, estimate_gas, …) // Try to fetch GasFiller's "fillable" (gas_price, base_fee, estimate_gas, …)
// If it errors (i.e. tx would revert under eth_estimateGas), swallow it. // Propagate errors so caller can handle them appropriately
match self.inner.prepare(provider, tx).await { self.inner.prepare(provider, tx).await.map(Some)
Ok(fill) => Ok(Some(fill)),
Err(_) => Ok(None),
}
} }
async fn fill( async fn fill(
+12 -16
View File
@@ -80,10 +80,8 @@ where
NonceFiller: TxFiller<N>, NonceFiller: TxFiller<N>,
WalletFiller<W>: TxFiller<N>, WalletFiller<W>: TxFiller<N>,
{ {
let sendable_transaction = provider let sendable_transaction =
.fill(transaction) provider.fill(transaction).await.context("Failed to fill transaction")?;
.await
.context("Failed to fill transaction")?;
let transaction_envelope = sendable_transaction let transaction_envelope = sendable_transaction
.try_into_envelope() .try_into_envelope()
@@ -100,29 +98,27 @@ where
} else { } else {
return Err(error).context(format!("Failed to submit transaction {tx_hash}")); return Err(error).context(format!("Failed to submit transaction {tx_hash}"));
} }
} },
}; };
debug!(%tx_hash, "Submitted Transaction"); debug!(%tx_hash, "Submitted Transaction");
pending_transaction.set_timeout(Some(Duration::from_secs(120))); pending_transaction.set_timeout(Some(Duration::from_secs(120)));
let tx_hash = pending_transaction.watch().await.context(format!( let tx_hash = pending_transaction
"Transaction inclusion watching timeout for {tx_hash}" .watch()
))?; .await
.context(format!("Transaction inclusion watching timeout for {tx_hash}"))?;
poll( debug!(%tx_hash, "Transaction included, polling for receipt");
Duration::from_secs(60),
PollingWaitBehavior::Constant(Duration::from_secs(3)), poll(Duration::from_secs(30), PollingWaitBehavior::Constant(Duration::from_secs(3)), || {
|| {
let provider = provider.clone(); let provider = provider.clone();
async move { async move {
match provider.get_transaction_receipt(tx_hash).await { match provider.get_transaction_receipt(tx_hash).await {
Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)), Ok(Some(receipt)) => Ok(ControlFlow::Break(receipt)),
_ => Ok(ControlFlow::Continue(())), _ => Ok(ControlFlow::Continue(())),
} }
} }
}, })
)
.await .await
.context(format!("Polling for receipt failed for {tx_hash}")) .context(format!("Polling for receipt timed out for {tx_hash}"))
} }
+39 -88
View File
@@ -66,50 +66,45 @@ impl ReportAggregator {
match event { match event {
RunnerEvent::SubscribeToEvents(event) => { RunnerEvent::SubscribeToEvents(event) => {
self.handle_subscribe_to_events_event(*event); self.handle_subscribe_to_events_event(*event);
} },
RunnerEvent::CorpusFileDiscovery(event) => { RunnerEvent::CorpusFileDiscovery(event) =>
self.handle_corpus_file_discovered_event(*event) self.handle_corpus_file_discovered_event(*event),
}
RunnerEvent::MetadataFileDiscovery(event) => { RunnerEvent::MetadataFileDiscovery(event) => {
self.handle_metadata_file_discovery_event(*event); self.handle_metadata_file_discovery_event(*event);
} },
RunnerEvent::TestCaseDiscovery(event) => { RunnerEvent::TestCaseDiscovery(event) => {
self.handle_test_case_discovery(*event); self.handle_test_case_discovery(*event);
} },
RunnerEvent::TestSucceeded(event) => { RunnerEvent::TestSucceeded(event) => {
self.handle_test_succeeded_event(*event); self.handle_test_succeeded_event(*event);
} },
RunnerEvent::TestFailed(event) => { RunnerEvent::TestFailed(event) => {
self.handle_test_failed_event(*event); self.handle_test_failed_event(*event);
} },
RunnerEvent::TestIgnored(event) => { RunnerEvent::TestIgnored(event) => {
self.handle_test_ignored_event(*event); self.handle_test_ignored_event(*event);
} },
RunnerEvent::NodeAssigned(event) => { RunnerEvent::NodeAssigned(event) => {
self.handle_node_assigned_event(*event); self.handle_node_assigned_event(*event);
} },
RunnerEvent::PreLinkContractsCompilationSucceeded(event) => { RunnerEvent::PreLinkContractsCompilationSucceeded(event) =>
self.handle_pre_link_contracts_compilation_succeeded_event(*event) self.handle_pre_link_contracts_compilation_succeeded_event(*event),
} RunnerEvent::PostLinkContractsCompilationSucceeded(event) =>
RunnerEvent::PostLinkContractsCompilationSucceeded(event) => { self.handle_post_link_contracts_compilation_succeeded_event(*event),
self.handle_post_link_contracts_compilation_succeeded_event(*event) RunnerEvent::PreLinkContractsCompilationFailed(event) =>
} self.handle_pre_link_contracts_compilation_failed_event(*event),
RunnerEvent::PreLinkContractsCompilationFailed(event) => { RunnerEvent::PostLinkContractsCompilationFailed(event) =>
self.handle_pre_link_contracts_compilation_failed_event(*event) self.handle_post_link_contracts_compilation_failed_event(*event),
}
RunnerEvent::PostLinkContractsCompilationFailed(event) => {
self.handle_post_link_contracts_compilation_failed_event(*event)
}
RunnerEvent::LibrariesDeployed(event) => { RunnerEvent::LibrariesDeployed(event) => {
self.handle_libraries_deployed_event(*event); self.handle_libraries_deployed_event(*event);
} },
RunnerEvent::ContractDeployed(event) => { RunnerEvent::ContractDeployed(event) => {
self.handle_contract_deployed_event(*event); self.handle_contract_deployed_event(*event);
} },
RunnerEvent::Completion(event) => { RunnerEvent::Completion(event) => {
self.handle_completion(*event); self.handle_completion(*event);
break; break;
} },
} }
} }
debug!("Report aggregation completed"); debug!("Report aggregation completed");
@@ -123,12 +118,8 @@ impl ReportAggregator {
file_name.push_str(".json"); file_name.push_str(".json");
file_name file_name
}; };
let file_path = self let file_path =
.report self.report.context.working_directory_configuration().as_path().join(file_name);
.context
.working_directory_configuration()
.as_path()
.join(file_name);
let file = OpenOptions::new() let file = OpenOptions::new()
.create(true) .create(true)
.write(true) .write(true)
@@ -136,10 +127,7 @@ impl ReportAggregator {
.read(false) .read(false)
.open(&file_path) .open(&file_path)
.with_context(|| { .with_context(|| {
format!( format!("Failed to open report file for writing: {}", file_path.display())
"Failed to open report file for writing: {}",
file_path.display()
)
})?; })?;
serde_json::to_writer_pretty(&file, &self.report).with_context(|| { serde_json::to_writer_pretty(&file, &self.report).with_context(|| {
format!("Failed to serialize report JSON to {}", file_path.display()) format!("Failed to serialize report JSON to {}", file_path.display())
@@ -180,9 +168,8 @@ impl ReportAggregator {
// Add information on the fact that the case was ignored to the report. // Add information on the fact that the case was ignored to the report.
let test_case_report = self.test_case_report(&event.test_specifier); let test_case_report = self.test_case_report(&event.test_specifier);
test_case_report.status = Some(TestCaseStatus::Succeeded { test_case_report.status =
steps_executed: event.steps_executed, Some(TestCaseStatus::Succeeded { steps_executed: event.steps_executed });
});
self.handle_post_test_case_status_update(&event.test_specifier); self.handle_post_test_case_status_update(&event.test_specifier);
} }
@@ -197,9 +184,7 @@ impl ReportAggregator {
// Add information on the fact that the case was ignored to the report. // Add information on the fact that the case was ignored to the report.
let test_case_report = self.test_case_report(&event.test_specifier); let test_case_report = self.test_case_report(&event.test_specifier);
test_case_report.status = Some(TestCaseStatus::Failed { test_case_report.status = Some(TestCaseStatus::Failed { reason: event.reason });
reason: event.reason,
});
self.handle_post_test_case_status_update(&event.test_specifier); self.handle_post_test_case_status_update(&event.test_specifier);
} }
@@ -241,10 +226,7 @@ impl ReportAggregator {
.or_default() .or_default()
.iter() .iter()
.map(|(case_idx, case_report)| { .map(|(case_idx, case_report)| {
( (*case_idx, case_report.status.clone().expect("Can't be uninitialized"))
*case_idx,
case_report.status.clone().expect("Can't be uninitialized"),
)
}) })
.collect::<BTreeMap<_, _>>(); .collect::<BTreeMap<_, _>>();
let event = ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted { let event = ReporterEvent::MetadataFileSolcModeCombinationExecutionCompleted {
@@ -276,29 +258,13 @@ impl ReportAggregator {
&mut self, &mut self,
event: PreLinkContractsCompilationSucceededEvent, event: PreLinkContractsCompilationSucceededEvent,
) { ) {
let include_input = self let include_input = self.report.context.report_configuration().include_compiler_input;
.report let include_output = self.report.context.report_configuration().include_compiler_output;
.context
.report_configuration()
.include_compiler_input;
let include_output = self
.report
.context
.report_configuration()
.include_compiler_output;
let execution_information = self.execution_information(&event.execution_specifier); let execution_information = self.execution_information(&event.execution_specifier);
let compiler_input = if include_input { let compiler_input = if include_input { event.compiler_input } else { None };
event.compiler_input let compiler_output = if include_output { Some(event.compiler_output) } else { None };
} else {
None
};
let compiler_output = if include_output {
Some(event.compiler_output)
} else {
None
};
execution_information.pre_link_compilation_status = Some(CompilationStatus::Success { execution_information.pre_link_compilation_status = Some(CompilationStatus::Success {
is_cached: event.is_cached, is_cached: event.is_cached,
@@ -313,29 +279,13 @@ impl ReportAggregator {
&mut self, &mut self,
event: PostLinkContractsCompilationSucceededEvent, event: PostLinkContractsCompilationSucceededEvent,
) { ) {
let include_input = self let include_input = self.report.context.report_configuration().include_compiler_input;
.report let include_output = self.report.context.report_configuration().include_compiler_output;
.context
.report_configuration()
.include_compiler_input;
let include_output = self
.report
.context
.report_configuration()
.include_compiler_output;
let execution_information = self.execution_information(&event.execution_specifier); let execution_information = self.execution_information(&event.execution_specifier);
let compiler_input = if include_input { let compiler_input = if include_input { event.compiler_input } else { None };
event.compiler_input let compiler_output = if include_output { Some(event.compiler_output) } else { None };
} else {
None
};
let compiler_output = if include_output {
Some(event.compiler_output)
} else {
None
};
execution_information.post_link_compilation_status = Some(CompilationStatus::Success { execution_information.post_link_compilation_status = Some(CompilationStatus::Success {
is_cached: event.is_cached, is_cached: event.is_cached,
@@ -375,8 +325,8 @@ impl ReportAggregator {
} }
fn handle_libraries_deployed_event(&mut self, event: LibrariesDeployedEvent) { fn handle_libraries_deployed_event(&mut self, event: LibrariesDeployedEvent) {
self.execution_information(&event.execution_specifier) self.execution_information(&event.execution_specifier).deployed_libraries =
.deployed_libraries = Some(event.libraries); Some(event.libraries);
} }
fn handle_contract_deployed_event(&mut self, event: ContractDeployedEvent) { fn handle_contract_deployed_event(&mut self, event: ContractDeployedEvent) {
@@ -442,7 +392,8 @@ impl Report {
#[derive(Clone, Debug, Serialize, Default)] #[derive(Clone, Debug, Serialize, Default)]
pub struct TestCaseReport { pub struct TestCaseReport {
/// Information on the status of the test case and whether it succeeded, failed, or was ignored. /// Information on the status of the test case and whether it succeeded, failed, or was
/// ignored.
#[serde(skip_serializing_if = "Option::is_none")] #[serde(skip_serializing_if = "Option::is_none")]
pub status: Option<TestCaseStatus>, pub status: Option<TestCaseStatus>,
/// Information related to the execution on one of the platforms. /// Information related to the execution on one of the platforms.
+4 -2
View File
@@ -8,8 +8,10 @@ use anyhow::Context as _;
use indexmap::IndexMap; use indexmap::IndexMap;
use revive_dt_common::types::PlatformIdentifier; use revive_dt_common::types::PlatformIdentifier;
use revive_dt_compiler::{CompilerInput, CompilerOutput}; use revive_dt_compiler::{CompilerInput, CompilerOutput};
use revive_dt_format::metadata::Metadata; use revive_dt_format::{
use revive_dt_format::{corpus::Corpus, metadata::ContractInstance}; corpus::Corpus,
metadata::{ContractInstance, Metadata},
};
use semver::Version; use semver::Version;
use tokio::sync::{broadcast, oneshot}; use tokio::sync::{broadcast, oneshot};
+5 -24
View File
@@ -34,19 +34,11 @@ pub(crate) async fn get_or_download(
} }
create_dir_all(&target_directory).with_context(|| { create_dir_all(&target_directory).with_context(|| {
format!( format!("Failed to create solc cache directory: {}", target_directory.display())
"Failed to create solc cache directory: {}",
target_directory.display()
)
})?; })?;
download_to_file(&target_file, downloader) download_to_file(&target_file, downloader)
.await .await
.with_context(|| { .with_context(|| format!("Failed to write downloaded solc to {}", target_file.display()))?;
format!(
"Failed to write downloaded solc to {}",
target_file.display()
)
})?;
cache.insert(target_file.clone()); cache.insert(target_file.clone());
Ok((downloader.version.clone(), target_file)) Ok((downloader.version.clone(), target_file))
@@ -70,12 +62,7 @@ async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::R
} }
let mut file = BufWriter::new(file); let mut file = BufWriter::new(file);
file.write_all( file.write_all(&downloader.download().await.context("Failed to download solc binary bytes")?)
&downloader
.download()
.await
.context("Failed to download solc binary bytes")?,
)
.with_context(|| format!("Failed to write solc binary to {}", path.display()))?; .with_context(|| format!("Failed to write solc binary to {}", path.display()))?;
file.flush() file.flush()
.with_context(|| format!("Failed to flush file {}", path.display()))?; .with_context(|| format!("Failed to flush file {}", path.display()))?;
@@ -91,17 +78,11 @@ async fn download_to_file(path: &Path, downloader: &SolcDownloader) -> anyhow::R
.stdout(std::process::Stdio::null()) .stdout(std::process::Stdio::null())
.spawn() .spawn()
.with_context(|| { .with_context(|| {
format!( format!("Failed to spawn xattr to remove quarantine attribute on {}", path.display())
"Failed to spawn xattr to remove quarantine attribute on {}",
path.display()
)
})? })?
.wait() .wait()
.with_context(|| { .with_context(|| {
format!( format!("Failed waiting for xattr operation to complete on {}", path.display())
"Failed waiting for xattr operation to complete on {}",
path.display()
)
})?; })?;
Ok(()) Ok(())
+11 -52
View File
@@ -67,11 +67,7 @@ impl SolcDownloader {
) -> anyhow::Result<Self> { ) -> anyhow::Result<Self> {
let version_or_requirement = version.into(); let version_or_requirement = version.into();
match version_or_requirement { match version_or_requirement {
VersionOrRequirement::Version(version) => Ok(Self { VersionOrRequirement::Version(version) => Ok(Self { version, target, list }),
version,
target,
list,
}),
VersionOrRequirement::Requirement(requirement) => { VersionOrRequirement::Requirement(requirement) => {
let Some(version) = List::download(list) let Some(version) = List::download(list)
.await .await
@@ -84,12 +80,8 @@ impl SolcDownloader {
else { else {
anyhow::bail!("Failed to find a version that satisfies {requirement:?}"); anyhow::bail!("Failed to find a version that satisfies {requirement:?}");
}; };
Ok(Self { Ok(Self { version, target, list })
version, },
target,
list,
})
}
} }
} }
@@ -130,11 +122,7 @@ impl SolcDownloader {
})?; })?;
let path = build.path.clone(); let path = build.path.clone();
let expected_digest = build let expected_digest = build.sha256.strip_prefix("0x").unwrap_or(&build.sha256).to_string();
.sha256
.strip_prefix("0x")
.unwrap_or(&build.sha256)
.to_string();
let url = format!("{}/{}/{}", Self::BASE_URL, self.target, path.display()); let url = format!("{}/{}/{}", Self::BASE_URL, self.target, path.display());
let file = reqwest::get(&url) let file = reqwest::get(&url)
@@ -159,54 +147,25 @@ mod tests {
#[tokio::test] #[tokio::test]
async fn try_get_windows() { async fn try_get_windows() {
let version = List::download(List::WINDOWS_URL) let version = List::download(List::WINDOWS_URL).await.unwrap().latest_release;
.await SolcDownloader::windows(version).await.unwrap().download().await.unwrap();
.unwrap()
.latest_release;
SolcDownloader::windows(version)
.await
.unwrap()
.download()
.await
.unwrap();
} }
#[tokio::test] #[tokio::test]
async fn try_get_macosx() { async fn try_get_macosx() {
let version = List::download(List::MACOSX_URL) let version = List::download(List::MACOSX_URL).await.unwrap().latest_release;
.await SolcDownloader::macosx(version).await.unwrap().download().await.unwrap();
.unwrap()
.latest_release;
SolcDownloader::macosx(version)
.await
.unwrap()
.download()
.await
.unwrap();
} }
#[tokio::test] #[tokio::test]
async fn try_get_linux() { async fn try_get_linux() {
let version = List::download(List::LINUX_URL) let version = List::download(List::LINUX_URL).await.unwrap().latest_release;
.await SolcDownloader::linux(version).await.unwrap().download().await.unwrap();
.unwrap()
.latest_release;
SolcDownloader::linux(version)
.await
.unwrap()
.download()
.await
.unwrap();
} }
#[tokio::test] #[tokio::test]
async fn try_get_wasm() { async fn try_get_wasm() {
let version = List::download(List::WASM_URL).await.unwrap().latest_release; let version = List::download(List::WASM_URL).await.unwrap().latest_release;
SolcDownloader::wasm(version) SolcDownloader::wasm(version).await.unwrap().download().await.unwrap();
.await
.unwrap()
.download()
.await
.unwrap();
} }
} }